repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
wwitzel3/awx | awx/main/tests/functional/test_rbac_role.py | 1 | 7047 | import pytest
from awx.main.access import (
RoleAccess,
UserAccess,
TeamAccess)
from awx.main.models import Role, Organization
@pytest.mark.django_db
def test_team_access_attach(rando, team, inventory):
# rando is admin of the team
team.admin_role.members.add(rando)
inventory.read_role.members.add(rando)
# team has read_role for the inventory
team.member_role.children.add(inventory.read_role)
access = TeamAccess(rando)
data = {'id': inventory.admin_role.pk}
assert not access.can_attach(team, inventory.admin_role, 'member_role.children', data, False)
@pytest.mark.django_db
def test_user_access_attach(rando, inventory):
inventory.read_role.members.add(rando)
access = UserAccess(rando)
data = {'id': inventory.admin_role.pk}
assert not access.can_attach(rando, inventory.admin_role, 'roles', data, False)
@pytest.mark.django_db
def test_role_access_attach(rando, inventory):
inventory.read_role.members.add(rando)
access = RoleAccess(rando)
assert not access.can_attach(inventory.admin_role, rando, 'members', None)
@pytest.mark.django_db
def test_visible_roles(admin_user, system_auditor, rando, organization, project):
'''
system admin & system auditor fixtures needed to create system roles
'''
organization.auditor_role.members.add(rando)
access = RoleAccess(rando)
assert rando not in organization.admin_role
assert access.can_read(organization.admin_role)
assert organization.admin_role in Role.visible_roles(rando)
assert rando not in project.admin_role
assert access.can_read(project.admin_role)
assert project.admin_role in Role.visible_roles(rando)
# Permissions when adding users to org member/admin
@pytest.mark.django_db
def test_org_user_role_attach(user, organization, inventory):
'''
Org admins must not be able to add arbitrary users to their
organization, because that would give them admin permission to that user
'''
admin = user('admin')
nonmember = user('nonmember')
inventory.admin_role.members.add(nonmember)
organization.admin_role.members.add(admin)
role_access = RoleAccess(admin)
assert not role_access.can_attach(organization.member_role, nonmember, 'members', None)
assert not role_access.can_attach(organization.admin_role, nonmember, 'members', None)
# Permissions when adding users/teams to org special-purpose roles
@pytest.mark.django_db
def test_user_org_object_roles(organization, org_admin, org_member):
'''
Unlike admin & member roles, the special-purpose organization roles do not
confer any permissions related to user management.
Normal rules about role delegation should apply: only admin access to the organization is needed.
'''
assert RoleAccess(org_admin).can_attach(
organization.notification_admin_role, org_member, 'members', None
)
assert not RoleAccess(org_member).can_attach(
organization.notification_admin_role, org_member, 'members', None
)
@pytest.mark.django_db
def test_team_org_object_roles(organization, team, org_admin, org_member):
'''
The special-purpose organization roles are not ancestors of any
team roles, and they can be delegated en masse through teams,
following the normal admin rules.
'''
assert RoleAccess(org_admin).can_attach(
organization.notification_admin_role, team, 'member_role.parents', {'id': 68}
)
# Obviously team admin isn't enough to assign organization roles to the team
team.admin_role.members.add(org_member)
assert not RoleAccess(org_member).can_attach(
organization.notification_admin_role, team, 'member_role.parents', {'id': 68}
)
# Cannot make a team member of an org
assert not RoleAccess(org_admin).can_attach(
organization.member_role, team, 'member_role.parents', {'id': 68}
)
# Singleton user editing restrictions
@pytest.mark.django_db
def test_org_superuser_role_attach(admin_user, org_admin, organization):
'''
Ideally, you would not add superusers to roles (particularly member_role),
but it has historically been possible.
This checks that the situation does not grant unexpected permissions.
'''
organization.member_role.members.add(admin_user)
role_access = RoleAccess(org_admin)
assert not role_access.can_attach(organization.member_role, admin_user, 'members', None)
assert not role_access.can_attach(organization.admin_role, admin_user, 'members', None)
user_access = UserAccess(org_admin)
assert not user_access.can_change(admin_user, {'last_name': 'Witzel'})
# Sanity check user editing permissions combined with new org roles
@pytest.mark.django_db
def test_org_object_role_not_sufficient(user, organization):
member = user('amember')
obj_admin = user('icontrolallworkflows')
organization.member_role.members.add(member)
organization.workflow_admin_role.members.add(obj_admin)
user_access = UserAccess(obj_admin)
assert not user_access.can_change(member, {'last_name': 'Witzel'})
# Org admin user editing permission ANY to ALL change
@pytest.mark.django_db
def test_need_all_orgs_to_admin_user(user):
'''
Old behavior - org admin to ANY organization that a user is a member of
grants permission to admin that user.
New behavior enforced here - org admin to ALL organizations that a
user is a member of grants permission to admin that user.
'''
org1 = Organization.objects.create(name='org1')
org2 = Organization.objects.create(name='org2')
org1_admin = user('org1-admin')
org1.admin_role.members.add(org1_admin)
org12_member = user('org12-member')
org1.member_role.members.add(org12_member)
org2.member_role.members.add(org12_member)
user_access = UserAccess(org1_admin)
assert not user_access.can_change(org12_member, {'last_name': 'Witzel'})
role_access = RoleAccess(org1_admin)
assert not role_access.can_attach(org1.admin_role, org12_member, 'members', None)
assert not role_access.can_attach(org1.member_role, org12_member, 'members', None)
org2.admin_role.members.add(org1_admin)
assert role_access.can_attach(org1.admin_role, org12_member, 'members', None)
assert role_access.can_attach(org1.member_role, org12_member, 'members', None)
# Orphaned user can be added to member role, only in special cases
@pytest.mark.django_db
def test_orphaned_user_allowed(org_admin, rando, organization):
'''
We still allow adoption of orphaned* users by assigning them to the
organization member role, but only in the situation where the
org admin already possesses indirect access to all of the user's roles.
*orphaned means the user is not a member of any organization
'''
role_access = RoleAccess(org_admin)
assert role_access.can_attach(organization.member_role, rando, 'members', None)
# Cannot edit the user directly without adding to org first
user_access = UserAccess(org_admin)
assert not user_access.can_change(rando, {'last_name': 'Witzel'})
| apache-2.0 | -5,083,817,037,180,761,000 | 37.298913 | 97 | 0.72357 | false |
Antergos/Cnchi | src/installation/post_fstab.py | 1 | 9489 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# post_fstab.py
#
# Copyright © 2013-2018 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Create /etc/fstab file """
import logging
import os
import re
import parted3.fs_module as fs
class PostFstab():
""" Setup /etc/fstab """
DEST_DIR = '/install'
def __init__(self, method, mount_devices, fs_devices, ssd, settings):
""" Init class properties """
self.method = method
self.mount_devices = mount_devices
self.fs_devices = fs_devices
self.ssd = ssd
self.zfs = settings.get('zfs')
self.use_lvm = settings.get('use_lvm')
self.use_luks = settings.get('use_luks')
self.luks_root_password = settings.get('luks_root_password')
self.root_uuid = None
def get_swap_fstab_line(self, uuid, partition_path):
""" Create swap line for fstab """
# If using a TRIM supported SSD,
# discard is a valid mount option for swap
if partition_path in self.ssd:
opts = "defaults,discard"
else:
opts = "defaults"
if self.zfs:
# We can't use UUID with zfs, so we will use device name
txt = "{0} swap swap {1} 0 0".format(partition_path, opts)
else:
txt = "UUID={0} swap swap {1} 0 0".format(uuid, opts)
return txt
@staticmethod
def add_vol_to_crypttab(vol_name, uuid, keyfile='none'):
""" Modify the crypttab file """
crypttab_path = os.path.join(PostFstab.DEST_DIR, 'etc/crypttab')
os.chmod(crypttab_path, 0o666)
with open(crypttab_path, 'a') as crypttab_file:
line = "{0} /dev/disk/by-uuid/{1} {2} luks\n"
line = line.format(vol_name, uuid, keyfile)
crypttab_file.write(line)
logging.debug("Added %s to crypttab", line)
os.chmod(crypttab_path, 0o600)
@staticmethod
def get_device_fstab_line(partition_path, mount_point, myfmt, opts='defaults', chk='0'):
""" Create fstab line """
txt = "{0} {1} {2} {3} 0 {4}"
txt = txt.format(partition_path, mount_point, myfmt, opts, chk)
logging.debug("Added %s to fstab", txt)
return txt
@staticmethod
def get_uuid_fstab_line(uuid, mount_point, myfmt, opts='defaults', chk='0'):
""" Create fstab line """
txt = "UUID={0} {1} {2} {3} 0 {4}"
txt = txt.format(uuid, mount_point, myfmt, opts, chk)
logging.debug("Added %s to fstab", txt)
return txt
@staticmethod
def get_mount_options(myfmt, is_ssd):
""" Adds mount options depending on filesystem """
opts = ""
if not is_ssd:
if "btrfs" in myfmt:
opts = "defaults,relatime,space_cache,autodefrag,inode_cache"
elif "f2fs" in myfmt:
opts = "defaults,noatime"
elif "ext3" in myfmt or "ext4" in myfmt:
opts = "defaults,relatime,data=ordered"
else:
opts = "defaults,relatime"
else:
# As of linux kernel version 3.7, the following
# filesystems support TRIM: ext4, btrfs, JFS, and XFS.
if myfmt in ["ext4", "jfs", "xfs"]:
opts = "defaults,noatime,discard"
elif myfmt == "btrfs":
opts = ("defaults,noatime,compress=lzo,ssd,discard,"
"space_cache,autodefrag,inode_cache")
else:
opts = "defaults,noatime"
return opts
def run(self):
""" Create /etc/fstab file """
all_lines = [
"# /etc/fstab: static file system information.",
"#",
"# Use 'blkid' to print the universally unique identifier for a",
"# device; this may be used with UUID= as a more robust way to name devices",
"# that works even if disks are added and removed. See fstab(5).",
"#",
"# <file system> <mount point> <type> <options> <dump> <pass>",
"#"]
# Use lsblk to be able to match LUKS UUID with mapper UUID
pknames = fs.get_pknames()
for mount_point in self.mount_devices:
partition_path = self.mount_devices[mount_point]
uuid = fs.get_uuid(partition_path)
if uuid == "":
logging.warning(
"Can't get %s partition UUID. It won't be added to fstab",
partition_path)
continue
if partition_path in self.fs_devices:
myfmt = self.fs_devices[partition_path]
else:
# It hasn't any filesystem defined, skip it.
continue
# Take care of swap partitions
if "swap" in myfmt:
txt = self.get_swap_fstab_line(uuid, partition_path)
all_lines.append(txt)
logging.debug("Added %s to fstab", txt)
continue
# Fix for home + luks, no lvm (from Automatic Install)
if ("/home" in mount_point and
self.method == "automatic" and
self.use_luks and not self.use_lvm and
'/dev/mapper' in partition_path):
keyfile = '/etc/luks-keys/home'
if self.luks_root_password:
# Use password and not a keyfile
keyfile = 'none'
vol_name = partition_path[len("/dev/mapper/"):]
self.add_vol_to_crypttab(vol_name, uuid, keyfile)
# Add cryptAntergosHome line to fstab
txt = self.get_device_fstab_line(partition_path, mount_point, myfmt)
all_lines.append(txt)
continue
# Add all LUKS partitions from Advanced Install (except root).
if (self.method == 'advanced' and
mount_point != '/' and
self.use_luks and '/dev/mapper' in partition_path):
# As the mapper with the filesystem will have a different UUID
# than the partition it is encrypted in, we have to take care
# of this here. Then we will be able to add it to crypttab
try:
vol_name = partition_path[len("/dev/mapper/"):]
luks_partition_path = "/dev/" + pknames[vol_name]
except KeyError:
logging.error(
"Can't find the PKNAME value of %s",
partition_path)
continue
luks_uuid = fs.get_uuid(luks_partition_path)
if luks_uuid:
self.add_vol_to_crypttab(vol_name, luks_uuid)
else:
logging.error(
"Can't add luks uuid to crypttab for %s partition",
luks_partition_path)
continue
# Finally, the fstab line to mount the unencrypted file system
# if a mount point has been specified by the user
if mount_point:
txt = self.get_device_fstab_line(partition_path, mount_point, myfmt)
all_lines.append(txt)
continue
# Avoid adding a partition to fstab when it has no mount point
# (swap has been checked above)
if mount_point == "":
continue
# fstab uses vfat to mount fat16 and fat32 partitions
if "fat" in myfmt:
myfmt = 'vfat'
# Create the mount point on the destination system if it doesn't exist yet
full_path = os.path.join(PostFstab.DEST_DIR, mount_point)
os.makedirs(full_path, mode=0o755, exist_ok=True)
# Is ssd ?
# Device list example: {'/dev/sdb': False, '/dev/sda': True}
txt = "Device list : {0}".format(self.ssd)
logging.debug(txt)
device = re.sub("[0-9]+$", "", partition_path)
is_ssd = self.ssd.get(device)
txt = "Device: {0}, SSD: {1}".format(device, is_ssd)
logging.debug(txt)
# Get mount options
opts = self.get_mount_options(myfmt, is_ssd)
chk = '0'
if mount_point == "/":
if myfmt not in ['btrfs', 'f2fs']:
chk = '1'
self.root_uuid = uuid
txt = self.get_uuid_fstab_line(uuid, mount_point, myfmt, opts, chk)
all_lines.append(txt)
full_text = '\n'.join(all_lines) + '\n'
fstab_path = os.path.join(PostFstab.DEST_DIR, 'etc/fstab')
with open(fstab_path, 'w') as fstab_file:
fstab_file.write(full_text)
| gpl-3.0 | 5,706,897,448,647,539,000 | 37.569106 | 92 | 0.541315 | false |
bjoernricks/python-quilt | quilt/cli/delete.py | 1 | 2083 | # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 - 2017 Björn Ricks <[email protected]>
#
# See LICENSE comming with the source of python-quilt for details.
from quilt.cli.meta import Command
from quilt.cli.parser import Argument, OptionArgument
from quilt.delete import Delete
class DeleteCommand(Command):
name = "delete"
help = "Remove the specified or topmost patch from the series file."
remove = OptionArgument("-r", action="store_true", dest="remove",
default=False,
help="Remove the deleted patch file from the "
"patches directory as well.")
backup = OptionArgument("--backup",
action="store_true", default=False, dest="backup",
help="Rename the patch file to patch~ rather than "
"deleting it. Ignored if not used with `-r'.")
next = OptionArgument("-n", action="store_true", dest="next",
help="Delete the next unapplied patch, "
"rather than the specified or topmost patch.")
patch = Argument(nargs="?")
def run(self, args):
delete = Delete(self.get_cwd(), self.get_pc_dir(),
self.get_patches_dir())
delete.deleted_patch.connect(self.deleted_patch)
delete.deleting_patch.connect(self.deleting_patch)
if args.next and args.patch:
self.exit_error("-n parameter doesn't take an argument")
if args.next:
delete.delete_next(args.remove, args.backup)
else:
delete.delete_patch(args.patch, args.remove, args.backup)
def deleted_patch(self, patch):
print("Removed patch %s" % patch.get_name())
def deleting_patch(self, patch, applied):
if applied:
print("Removing currently applied patch %s" % patch.get_name())
else:
print("Removing patch %s" % patch.get_name())
| mit | -4,401,374,766,035,893,000 | 38.283019 | 79 | 0.59414 | false |
kbknapp/Platypus-cad | platypi/ppmodules/Apps/Testing/Network.py | 1 | 2926 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Python 3.x
#
# ppctl_cadnetwork v0.1
# * Displays information about the network setup to the PiFaceCAD
# * Requires:
# * ifconfig (for subnet mask)
# * grep (for subnet mask)
# * awk (for subnet mask)
# * ip (for default gw)
#
# Changelog
# * v0.1
# * Initial Release
#
import pifacecad
import socket # For: IP, Hostname
import subprocess # For: Default GW, Subnet Mask
_ROCKER_RIGHT = 7
_ROCKER_LEFT = 6
_ROCKER_PUSH = 5
_curr_index = 0
_cad = None
_listener = None
_orig_listener = None
_orig_screen = ""
_screens = [["IP:", ""],
["Subnet Mask:", ""],
["Default GW:", ""],
["Hostname", ""],
["Quit?", ""]]
def _write_screen():
_cad.lcd.clear()
if _screens[_curr_index][1] == "":
_do_action(_curr_index)
_cad.lcd.write("%s\n%s" % (_screens[_curr_index][0], _screens[_curr_index][1]))
def _next(event=None):
global _curr_index
if _curr_index == len(_screens) - 1:
_curr_index = 0
else:
_curr_index += 1
_write_screen()
def _previous(event=None):
global _curr_index
if _curr_index == 0:
_curr_index = len(_screens) - 1
else:
_curr_index -= 1
_write_screen()
def _do_action(event=None):  # event/index argument from callers is ignored; the global _curr_index is used
if _curr_index == 0:
# Get IP
_screens[0][1] = socket.gethostbyname(socket.gethostname())
elif _curr_index == 1:
# Get Subnet Mask
_screens[1][1] = subprocess.check_output("ifconfig eth0 | grep netmask | awk '{print $4}'", shell=True).decode("utf-8")
elif _curr_index == 2:
# Get Default GW
_screens[2][1] = subprocess.check_output("ip route show | grep via | awk '{print $3}'", shell=True).decode("utf-8")
elif _curr_index == 3:
# Get hostname
_screens[3][1] = socket.gethostname()
else:
# Quit
_listener.deactivate()
_cad.lcd.clear()
if _orig_screen != "" and _orig_listener is not None:
_cad.lcd.write(_orig_screen)
_orig_listener.activate()
def _register_buttons():
global _listener
_listener = pifacecad.SwitchEventListener(chip=_cad)
# Add rocker->right (switch 7) to 'next'
_listener.register(_ROCKER_RIGHT, pifacecad.IODIR_FALLING_EDGE, _next)
# Add rocker->left (switch 6) to 'previous'
_listener.register(_ROCKER_LEFT, pifacecad.IODIR_FALLING_EDGE, _previous)
# Add rocker->down (push) (switch 8) to 'do action'
_listener.register(_ROCKER_PUSH, pifacecad.IODIR_FALLING_EDGE, _do_action)
_listener.activate()
def start_module(cad, listener, screen):
global _cad
global _orig_listener
global _orig_screen
_cad = cad
_orig_listener = listener
_orig_screen = screen
_cad.lcd.clear()
_cad.lcd.blink_off()
_cad.lcd.cursor_off()
if _screens[0][1] == "":
_do_action(0)
_cad.lcd.write("%s\n%s" % (_screens[0][0], _screens[0][1]))
_register_buttons()
if __name__ == "__main__":
# Called directly, must initialize CAD
_cad = pifacecad.PiFaceCAD()
_cad.lcd.blink_off()
_cad.lcd.cursor_off()
_cad.lcd.backlight_off()
if _screens[0][1] == "":
_do_action(0)
_cad.lcd.write("%s\n%s" % (_screens[0][0], _screens[0][1]))
_register_buttons() | gpl-2.0 | -4,029,454,239,713,564,000 | 22.796748 | 121 | 0.638414 | false |
tomassurin/codility | Lesson 01/01 - tapes.py | 1 | 1696 | # A non-empty zero-indexed array A consisting of N integers is given. Array A represents numbers on a tape.
# Any integer P, such that 0 < P < N, splits this tape into two non-empty parts: A[0], A[1], ..., A[P − 1] and A[P], A[P + 1], ..., A[N − 1].
# The difference between the two parts is the value of: |(A[0] + A[1] + ... + A[P − 1]) − (A[P] + A[P + 1] + ... + A[N − 1])|
# In other words, it is the absolute difference between the sum of the first part and the sum of the second part.
# For example, consider array A such that:
# A[0] = 3
# A[1] = 1
# A[2] = 2
# A[3] = 4
# A[4] = 3
# We can split this tape in four places:
# P = 1, difference = |3 − 10| = 7
# P = 2, difference = |4 − 9| = 5
# P = 3, difference = |6 − 7| = 1
# P = 4, difference = |10 − 3| = 7
# Write a function:
# def solution(A)
# that, given a non-empty zero-indexed array A of N integers, returns the minimal difference that can be achieved.
# For example, given:
# A[0] = 3
# A[1] = 1
# A[2] = 2
# A[3] = 4
# A[4] = 3
# the function should return 1, as explained above.
# Assume that:
# N is an integer within the range [2..100,000];
# each element of array A is an integer within the range [−1,000..1,000].
# Complexity:
# expected worst-case time complexity is O(N);
# expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
# Elements of input arrays can be modified.
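# Approach used below (single pass): keep a running sum of the left part;
# the right part is sum(A) minus that running sum, and the answer is the
# minimum absolute difference over all split points P.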
def solution(A):
differences = []
sumAll = sum(A)
partSum = 0
for i in xrange(0, len(A)):
partSum += A[i]
differences.append(abs(partSum - (sumAll - partSum)))
return min(differences) | unlicense | 710,252,307,385,933,200 | 29.490909 | 141 | 0.614558 | false |
sternoru/goscalecms | goscale/plugins/presentations/cms_plugins.py | 1 | 2031 | from goscale.cms_plugins import GoscaleCMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
import models
GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES = getattr(settings, 'GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES', (
('presentation.html', _('Presentation')),
)) + getattr(settings, 'GOSCALE_PRESENTATIONS_CUSTOM_PLUGIN_TEMPLATES', ())
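# Custom templates declared in the project's Django settings (GOSCALE_PRESENTATIONS_CUSTOM_PLUGIN_TEMPLATES)
# are appended to the default list above.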
class GooglePresentationPlugin(GoscaleCMSPluginBase):
"""
Google Presentation plugin for GoScale
"""
model = models.GooglePresentation
name = _("Google Presentation")
plugin_templates = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES
render_template = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES[0][0]
fieldsets = [
[_('Presentation options'), {
'fields': ['embed', 'width', 'height', 'ratio', 'embed_as_is', 'delay', 'autoplay', 'loop']
}]
]
plugin_pool.register_plugin(GooglePresentationPlugin)
class SlidesharePlugin(GoscaleCMSPluginBase):
"""
Slideshare Presentation plugin for GoScale
"""
model = models.Slideshare
name = _("Slideshare Presentation")
plugin_templates = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES
render_template = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES[0][0]
fieldsets = [
[_('Presentation options'), {
'fields': ['embed', 'width', 'height', 'ratio', 'embed_as_is', 'start', 'without_related_content']
}]
]
plugin_pool.register_plugin(SlidesharePlugin)
class SpeakerdeckPlugin(GoscaleCMSPluginBase):
"""
Speakerdeck Presentation plugin for GoScale
"""
model = models.Speakerdeck
name = _("Speakerdeck Presentation")
plugin_templates = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES
render_template = GOSCALE_PRESENTATIONS_PLUGIN_TEMPLATES[0][0]
fieldsets = [
[_('Presentation options'), {
'fields': ['embed', 'width', 'height', 'ratio', 'embed_as_is', 'start']
}]
]
plugin_pool.register_plugin(SpeakerdeckPlugin) | bsd-3-clause | 7,149,152,504,235,631,000 | 32.866667 | 110 | 0.687839 | false |
guohongze/adminset | branches/migrations/0001_initial.py | 1 | 4299 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 05:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('appconf', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Branch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True, verbose_name='\u5206\u652f\u673a\u6784')),
('address', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u529e\u516c\u5730\u5740')),
('telephone', models.CharField(blank=True, max_length=25, null=True, verbose_name='\u8054\u7cfb\u7535\u8bdd')),
('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5907\u6ce8')),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='appconf.AppOwner', verbose_name='\u8d1f\u8d23\u4eba')),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True, verbose_name='\u884c\u653f\u533a\u57df')),
('address', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u529e\u516c\u5730\u5740')),
('telephone', models.CharField(blank=True, max_length=25, null=True, verbose_name='\u8054\u7cfb\u7535\u8bdd')),
('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5907\u6ce8\u4fe1\u606f')),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='appconf.AppOwner', verbose_name='\u8d1f\u8d23\u4eba')),
],
),
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sn', models.CharField(max_length=255, verbose_name='\u8d44\u6e90\u7f16\u7801')),
('name', models.CharField(max_length=255, verbose_name='\u8d44\u6e90\u540d\u79f0')),
('spec', models.CharField(max_length=255, verbose_name='\u8d44\u6e90\u89c4\u683c')),
('budget', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u9884\u7b97\u91d1\u989d')),
('paid', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u91d1\u989d')),
('contract', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u7f16\u53f7')),
('contract_start', models.DateField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u5f00\u59cb')),
('contract_end', models.DateField(blank=True, null=True, verbose_name='\u5408\u540c\u7ed3\u675f')),
('supplier', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u4f9b\u5e94\u5546\u540d')),
('service_phone', models.CharField(blank=True, max_length=25, null=True, verbose_name='\u670d\u52a1\u7535\u8bdd')),
('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='\u5408\u540c\u8bf4\u660e')),
('branch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='branches.Branch', verbose_name='\u6240\u5c5e\u673a\u6784')),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='appconf.AppOwner', verbose_name='\u4f9b\u5e94\u5546\u8054\u7cfb\u4eba')),
],
),
migrations.AddField(
model_name='branch',
name='region',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='branches.Region', verbose_name='\u6240\u5c5e\u5927\u533a'),
),
]
| gpl-2.0 | -442,874,068,296,152,400 | 66.171875 | 190 | 0.626192 | false |
erickt/hue | desktop/libs/hadoop/src/hadoop/cluster.py | 1 | 7182 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from hadoop import conf
from hadoop.fs import webhdfs, LocalSubFileSystem
from hadoop.job_tracker import LiveJobTracker
from desktop.lib.paths import get_build_dir
LOG = logging.getLogger(__name__)
FS_CACHE = None
MR_CACHE = None
MR_NAME_CACHE = 'default'
def _make_filesystem(identifier):
choice = os.getenv("FB_FS")
if choice == "testing":
path = os.path.join(get_build_dir(), "fs")
if not os.path.isdir(path):
LOG.warning(("Could not find fs directory: %s. Perhaps you need to run manage.py filebrowser_test_setup?") % path)
return LocalSubFileSystem(path)
else:
cluster_conf = conf.HDFS_CLUSTERS[identifier]
return webhdfs.WebHdfs.from_config(cluster_conf)
def _make_mrcluster(identifier):
cluster_conf = conf.MR_CLUSTERS[identifier]
return LiveJobTracker.from_conf(cluster_conf)
def get_hdfs(identifier="default"):
global FS_CACHE
get_all_hdfs()
return FS_CACHE[identifier]
def get_defaultfs():
fs = get_hdfs()
if fs.logical_name:
return fs.logical_name
else:
return fs.fs_defaultfs
def get_all_hdfs():
global FS_CACHE
if FS_CACHE is not None:
return FS_CACHE
FS_CACHE = {}
for identifier in conf.HDFS_CLUSTERS.keys():
FS_CACHE[identifier] = _make_filesystem(identifier)
return FS_CACHE
def get_default_mrcluster():
"""
Get the default JT (not necessarily HA).
"""
global MR_CACHE
global MR_NAME_CACHE
try:
all_mrclusters()
return MR_CACHE.get(MR_NAME_CACHE)
except KeyError:
# Return an arbitrary cluster
candidates = all_mrclusters()
if candidates:
return candidates.values()[0]
return None
def get_default_yarncluster():
"""
Get the default RM (not necessarily HA).
"""
global MR_NAME_CACHE
try:
return conf.YARN_CLUSTERS[MR_NAME_CACHE]
except KeyError:
return get_yarn()
def get_next_ha_mrcluster():
"""
Return the next available JT instance and cache its name.
This method currently works for distinguishing between an active and a standby JT, as a standby JT does not respond.
A cleaner but more complicated way would be to do something like the MRHAAdmin tool and
org.apache.hadoop.ha.HAServiceStatus#getServiceStatus().
"""
global MR_NAME_CACHE
candidates = all_mrclusters()
has_ha = sum([conf.MR_CLUSTERS[name].SUBMIT_TO.get() for name in conf.MR_CLUSTERS.keys()]) >= 2
current_user = get_default_mrcluster().user
for name in conf.MR_CLUSTERS.keys():
config = conf.MR_CLUSTERS[name]
if config.SUBMIT_TO.get():
jt = candidates[name]
if has_ha:
try:
jt.setuser(current_user)
status = jt.cluster_status()
if status.stateAsString == 'RUNNING':
MR_NAME_CACHE = name
LOG.warn('Picking HA JobTracker: %s' % name)
return (config, jt)
else:
LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status))
except Exception, ex:
LOG.info('JobTracker %s is not available, skipping it: %s' % (name, ex))
else:
return (config, jt)
return None
def get_mrcluster(identifier="default"):
global MR_CACHE
all_mrclusters()
return MR_CACHE[identifier]
def all_mrclusters():
global MR_CACHE
if MR_CACHE is not None:
return MR_CACHE
MR_CACHE = {}
for identifier in conf.MR_CLUSTERS.keys():
MR_CACHE[identifier] = _make_mrcluster(identifier)
return MR_CACHE
def get_yarn():
global MR_NAME_CACHE
if MR_NAME_CACHE in conf.YARN_CLUSTERS and conf.YARN_CLUSTERS[MR_NAME_CACHE].SUBMIT_TO.get():
return conf.YARN_CLUSTERS[MR_NAME_CACHE]
for name in conf.YARN_CLUSTERS.keys():
yarn = conf.YARN_CLUSTERS[name]
if yarn.SUBMIT_TO.get():
return yarn
def get_next_ha_yarncluster():
"""
Return the next available YARN RM instance and cache its name.
"""
from hadoop.yarn.resource_manager_api import ResourceManagerApi
global MR_NAME_CACHE
has_ha = sum([conf.YARN_CLUSTERS[name].SUBMIT_TO.get() for name in conf.YARN_CLUSTERS.keys()]) >= 2
for name in conf.YARN_CLUSTERS.keys():
config = conf.YARN_CLUSTERS[name]
if config.SUBMIT_TO.get():
rm = ResourceManagerApi(config.RESOURCE_MANAGER_API_URL.get(), config.SECURITY_ENABLED.get(), config.SSL_CERT_CA_VERIFY.get())
if has_ha:
try:
cluster_info = rm.cluster()
if cluster_info['clusterInfo']['haState'] == 'ACTIVE':
MR_NAME_CACHE = name
LOG.warn('Picking RM HA: %s' % name)
from hadoop.yarn import resource_manager_api
resource_manager_api._api_cache = None # Reset cache
from hadoop.yarn import mapreduce_api
mapreduce_api._api_cache = None
return (config, rm)
else:
LOG.info('RM %s is not RUNNING, skipping it: %s' % (name, cluster_info))
except Exception, ex:
LOG.info('RM %s is not available, skipping it: %s' % (name, ex))
else:
return (config, rm)
return None
def get_cluster_for_job_submission():
"""
Check the 'submit_to' for each MR/Yarn cluster, and return the
config section of first one that enables submission.
Support MR1/MR2 HA.
"""
yarn = get_next_ha_yarncluster()
if yarn:
return yarn
mr = get_next_ha_mrcluster()
if mr is not None:
return mr
return None
def get_cluster_conf_for_job_submission():
cluster = get_cluster_for_job_submission()
if cluster:
config, rm = cluster
return config
else:
return None
def get_cluster_addr_for_job_submission():
"""
Check the 'submit_to' for each MR/Yarn cluster, and return the logical name or host:port of first one that enables submission.
"""
if is_yarn():
if get_yarn().LOGICAL_NAME.get():
return get_yarn().LOGICAL_NAME.get()
conf = get_cluster_conf_for_job_submission()
if conf is None:
return None
return "%s:%s" % (conf.HOST.get(), conf.PORT.get())
def is_yarn():
return get_yarn() is not None
def clear_caches():
"""
Clears cluster's internal caches. Returns
something that can be given back to restore_caches.
"""
global FS_CACHE, MR_CACHE
old = FS_CACHE, MR_CACHE
FS_CACHE, MR_CACHE = None, None
return old
def restore_caches(old):
"""
Restores caches from the result of a previous clear_caches call.
"""
global FS_CACHE, MR_CACHE
FS_CACHE, MR_CACHE = old
| apache-2.0 | 8,905,411,804,505,308,000 | 26 | 132 | 0.673629 | false |
bobobox/ansible | lib/ansible/cli/doc.py | 1 | 13761 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader, action_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
""" Vault command line class """
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_modules',
help='Show documentation for all modules')
super(DocCLI, self).parse()
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
# process all modules
if self.options.all_modules:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.args = sorted(set(self.module_list) - module_docs.BLACKLIST_MODULES)
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True)
if filename is None:
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.format_exc())
display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
# is there corresponding action plugin?
if module in action_loader:
doc['action'] = True
else:
doc['action'] = False
all_keys = []
for (k,v) in iteritems(doc['options']):
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
doc['metadata'] = metadata
if self.options.show_snippet:
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.format_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
if text:
self.pager(text)
return 0
def find_modules(self, path):
for module in os.listdir(path):
full_path = '/'.join([path, module])
if module.startswith('.'):
continue
elif os.path.isdir(full_path):
continue
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in C.IGNORE_FILES:
continue
elif module.startswith('_'):
if os.path.islink(full_path): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
module = module.lstrip('_') # remove underscore from deprecated modules
self.module_list.append(module)
def get_module_list_text(self):
columns = display.columns
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
pad = 31
subdent = " " * pad
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def get_man_text(self, doc):
opt_indent=" "
text = []
text.append("> %s (%s)\n" % (doc['module'].upper(), doc['filename']))
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
if isinstance(doc['description'], list):
desc = " ".join(doc['description'])
else:
desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" "))
# FUTURE: move deprecation to metadata-only
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n%s\n" % doc['deprecated'])
if doc['metadata'] and isinstance(doc['metadata'], dict):
text.append("Metadata:")
for k in doc['metadata']:
if isinstance(k, list):
text.append("\t%s: %s\n" % (k.capitalize(), ", ".join(doc['metadata'][k])))
else:
text.append("\t%s: %s\n" % (k.capitalize(), doc['metadata'][k]))
if 'action' in doc and doc['action']:
text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
for entry in opt['description']:
text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
else:
text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
choices = ''
if 'choices' in opt:
choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
default = ''
if 'default' in opt or not required:
default = "[Default: " + str(opt.get('default', '(null)')) + "]"
text.append(textwrap.fill(CLI.tty_ify(choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
text.append("Notes:")
for note in doc['notes']:
text.append(textwrap.fill(CLI.tty_ify(note), limit-6, initial_indent=" * ", subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], string_types):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], string_types):
maintainers.add(doc['maintainers'])
else:
maintainers.update(doc['maintainers'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
| gpl-3.0 | 4,870,792,144,390,071,000 | 38.205128 | 153 | 0.548943 | false |
TUDelftGeodesy/Doris | doris_stack/functions/baselines.py | 1 | 3319 | import os
import numpy as np
import warnings
from shutil import copyfile
from doris.doris_stack.main_code.resdata import ResData
import datetime
import subprocess
import matplotlib.pyplot as plt  # used for the baseline plot below
def baselines(dir_in,inputfile,start_date='2014-01-01',end_date='2018-01-01',doris=''):
# This function calculates the baselines and plots a baseline plot.
# Define doris path
if not doris:
doris = doris_path
if not os.path.exists(dir_in):
warnings.warn('The input directory does not exist!')
return
os.chdir(dir_in)
process_folder = os.path.join(dir_in, 'baseline_process')
if not os.path.exists(process_folder):
os.mkdir(process_folder)
os.chdir(process_folder)
try:
first = np.datetime64(start_date)
last = np.datetime64(end_date)
except:
warnings.warn('Input dates could not be converted, use "yyyy-mm-dd"')
return
# Search for folders and take only the first burst.
folders = next(os.walk(dir_in))[1]
folders = sorted(folders)
# Initialize... (Search for folders / resfiles / dates)
n = 0
res = []; date = []
for fold in folders:
# Select only the folders which has a name like yyyymmdd and are within
if len(fold) == 8:
# define date of folder
date_prod = np.datetime64((fold[:4] + '-' + fold[4:6] + '-' + fold[6:]))
if date_prod >= first and date_prod <= last:
# Select the first swath
date_fold = os.path.join(dir_in,fold)
swath_fold = os.path.join(date_fold,next(os.walk(date_fold))[1][0])
# Select the first burst
prod_files = next(os.walk(swath_fold))[2]
for file in prod_files:
if file.endswith('1.res'):
res.extend([os.path.join(swath_fold,file)])
date.extend([date_prod])
n = n + 1
break
# Now create a set of baselines
baselines = np.zeros([len(res),len(res)])
resfiles = dict()
# First create the ifgs.res files and store the data in a res data class.
master = res[0]
copyfile(master,os.path.join(process_folder,'master.res'))
for resultfile, dat in zip(res, date):
copyfile(resultfile,os.path.join(process_folder,'slave.res'))
subprocess.call([doris + ' ' + inputfile], shell=True)
dat = dat.astype(datetime.datetime).strftime('%Y-%m-%d')
resfiles[dat] = ResData(type='interferogram',filename='ifgs.res')
resfiles[dat].read()
os.remove(os.path.join(process_folder,'ifgs.res'))
# Then gather the baselines
for dat, j in zip(date, range(len(date))):
dat = dat.astype(datetime.datetime).strftime('%Y-%m-%d')
baselines[j,0] = resfiles[dat].processes['coarse_orbits']['Bperp'][1]
# Create figure of baselines.
days = (date[0] - date).astype(float)
plt.figure(111)
plt.plot(baselines[:,0], days, marker='o')
# Annotate
for dat, x, y in zip(date, baselines[:,0], days):
dat = dat.astype(datetime.datetime).strftime('%Y-%m-%d')
plt.annotate(
dat,
xy = (x, y), xytext = (0, 0),
textcoords = 'offset points', size = 8)
plt.savefig('baseline_plot.pdf')
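# Example call (paths and dates here are hypothetical):
# baselines('/data/stack_folder', 'input.baselines',
#           start_date='2015-01-01', end_date='2016-01-01', doris='/usr/local/bin/doris')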
| gpl-3.0 | -2,185,828,952,843,984,600 | 32.525253 | 87 | 0.587828 | false |
rolker/cesium-tools | srtm2qmesh.py | 1 | 7012 | #!/usr/bin/env python
import sys
import os
import math
import json
import scipy.io.netcdf
import quantized_mesh_tile.global_geodetic
import quantized_mesh_tile.terrain
# https://pypi.python.org/pypi/quantized-mesh-tile/
# pip install quantized-mesh-tile
class Grd:
def __init__(self,fname,tileSize):
self.ncfile = scipy.io.netcdf.netcdf_file(fname)
self.xcount = self.ncfile.dimensions['lon']
self.ycount = self.ncfile.dimensions['lat']
self.latVar = self.ncfile.variables['lat']
self.lonVar = self.ncfile.variables['lon']
self.zVar = self.ncfile.variables['z']
self.minx = self.lonVar[0]
self.miny = self.latVar[0]
self.maxx = self.lonVar[-1]
self.maxy = self.latVar[-1]
self.dx = (self.maxx-self.minx)/(self.xcount-1.0)
self.dy = (self.maxy-self.miny)/(self.ycount-1.0)
self.maxZoom = int(math.log(180/(self.dy*tileSize),2))
def getPointAtIndex(self,xi,yi):
if xi < 0 or yi < 0 or xi >= self.xcount or yi >= self.ycount:
return None
lat = self.latVar[int(yi)]
lon = self.lonVar[int(xi)]
d = self.zVar[int(yi),int(xi)]
return Point(lat,lon,d)
def interpolatePointAtIndex(self,xi,yi,interpolateX=False,interpolateY=False,verbose=False):
if (not interpolateX and not interpolateY):
return self.getPointAtIndex(xi,yi)
if xi < 0 or yi < 0 or xi >= self.xcount or yi >= self.ycount:
return None
xi0 = int(xi)
xi1 = min(xi0+1,self.xcount-1)
xp = xi-xi0
yi0 = int(yi)
yi1 = min(yi0+1,self.ycount-1)
yp = yi-yi0
lon0 = self.lonVar[xi0]
lon1 = self.lonVar[xi1]
lon = lon0*(1.0-xp)+lon1*xp
lat0 = self.latVar[yi0]
lat1 = self.latVar[yi1]
lat = lat0*(1.0-yp)+lat1*yp
d00 = self.zVar[yi0,xi0]
d01 = self.zVar[yi1,xi0]
d10 = self.zVar[yi0,xi1]
d11 = self.zVar[yi1,xi1]
d0 = d00*(1.0-yp)+d01*yp
d1 = d10*(1.0-yp)+d11*yp
d = d0*(1.0-xp)+d1*xp
if verbose:
print 'ds:',d00,d01,d10,d11,'d:',d,'xp:',xp,'yp:',yp,
return Point(lat,lon,d)
def __str__(self):
return 'size: '+str(self.xcount)+'x'+str(self.ycount)+' bounds: '+str(self.minx)+','+str(self.miny)+' - '+str(self.maxx)+','+str(self.maxy)+' dx,dy:'+str(self.dx)+','+str(self.dy)+' max zoom: '+str(self.maxZoom)
class Point:
def __init__(self,lat,lon,height=None):
self.lat = lat
self.lon = lon
self.height = height
def __str__(self):
return 'lat: '+str(self.lat)+', lon: '+str(self.lon)+', height: '+str(self.height)
def __repr__(self):
return '('+self.__str__()+')'
def asTriple(self):
h = self.height
if math.isnan(h):
h = 0.0
return (self.lon,self.lat,h)
def createTile(x,y,level,params,base,maps=None):
print geodetic.TileBounds(x,y,level)
fname = os.path.join(params['outputDirectory'],str(level)+'/'+str(x)+'/'+str(y)+'.terrain')
print '\t',fname
dn = os.path.dirname(fname)
if not os.path.isdir(dn):
os.makedirs(os.path.dirname(fname))
if os.path.isfile(fname):
os.remove(fname)
b = geodetic.TileBounds(x,y,level)
m = base
if level >= base.maxZoom:
m = maps[0]
xStep = ((b[2]-b[0])/params['tileSize'])/m.dx
yStep = ((b[3]-b[1])/params['tileSize'])/m.dy
print '\txStep:',xStep,'yStep:',yStep
xi = (b[0]-m.minx)/m.dx
yi = (b[1]-m.miny)/m.dy
print '\txi,yi:',xi,yi
print '\t',m.getPointAtIndex(xi,yi)
print '\tinterp\t',m.interpolatePointAtIndex(xi,yi,True,True,True)
sys.stdout.flush()
triangles = []
verticies = []
for j in range(params['tileSize']):
if j == 0:
yedge0 = True
else:
yedge0 = False
if j == (params['tileSize']-1):
yedge1 = True
else:
yedge1 = False
for i in range(params['tileSize']):
if i == 0:
xedge0 = True
else:
xedge0 = False
if i == (params['tileSize']-1):
xedge1 = True
else:
xedge1 = False
if i < (params['tileSize']) and j < (params['tileSize']):
t1 = m.interpolatePointAtIndex(xi+i*xStep,yi+j*yStep,xedge0,yedge0)
t2 = m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+j*yStep,xedge1,yedge0)
t3 = m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep,xedge1,yedge1)
if t1 is not None and t2 is not None and t3 is not None:
triangles.append((t1.asTriple(),t2.asTriple(),t3.asTriple()))
t1 = m.interpolatePointAtIndex(xi+i*xStep,yi+j*yStep,xedge0,yedge0)
t2 = m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep,xedge1,yedge1)
t3 = m.interpolatePointAtIndex(xi+i*xStep,yi+(j+1)*yStep,xedge0,yedge1)
if t1 is not None and t2 is not None and t3 is not None:
triangles.append((t1.asTriple(),t2.asTriple(),t3.asTriple()))
if i == (params['tileSize']-1) and j == (params['tileSize']-1):
print '\tget\t',m.getPointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep)
print '\tinterp\t',m.interpolatePointAtIndex(xi+(i+1)*xStep,yi+(j+1)*yStep,xedge1,yedge1,True)
if len(triangles):
tile = quantized_mesh_tile.encode(triangles,bounds=geodetic.TileBounds(x,y,level),hasLighting=True)
tile.toFile(fname)
if len(sys.argv) != 2:
print 'Usage: base2qmesh params.json'
sys.exit(1)
params = json.load(open(sys.argv[1]))
print params
geodetic = quantized_mesh_tile.global_geodetic.GlobalGeodetic(True)
base = Grd(params['basemap'],params['tileSize'])
print base
maxLevel = params['baseLevels']
maps = []
for m in params['maps']:
print m
maps.append(Grd(m,params['tileSize']))
maxLevel = max(maxLevel,maps[-1].maxZoom)
print maps[-1]
layer = {'tilesjon':'2.1.0',
'format':'quantized-mesh-1.0',
'scheme':'tms',
'minzoom':0,
'tiles':('{z}/{x}/{y}.terrain',),
'available':[]
}
for level in range(maxLevel):
layer['maxzoom']=level
factor = 2**level
print level,factor
sys.stdout.flush()
if level < params['baseLevels']:
for x in range(2*factor):
for y in range(factor):
createTile(x,y,level,params,base)
else:
x0,y0= geodetic.LonLatToTile(maps[0].minx,maps[0].miny,level)
x1,y1= geodetic.LonLatToTile(maps[0].maxx,maps[0].maxy,level)
print 'level:',level,'indecies:',x0,y0,'-',x1,y1
for x in range(x0,x1+1):
for y in range(y0,y1+1):
createTile(x,y,level,params,base,maps)
open(os.path.join(params['outputDirectory'],'layer.json'),'w').write(json.dumps(layer)) | mit | 2,825,510,702,830,970,000 | 34.065 | 219 | 0.566315 | false |
LaryLoose/laryloose.xbmc-addons | plugin.video.szenestreams/default.py | 1 | 7913 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib,urllib2,re,xbmcaddon,xbmcplugin,xbmcgui,xbmc,HTMLParser
from stream import *
htmlparser = HTMLParser.HTMLParser()
pluginhandle = int(sys.argv[1])
itemcnt = 0
baseurl = 'http://www.szene-streams.com'
settings = xbmcaddon.Addon(id='plugin.video.szene-streams')
maxitems = (int(settings.getSetting("items_per_page"))+1)*10
filterUnknownHoster = settings.getSetting("filterUnknownHoster") == 'true'
forceMovieViewMode = settings.getSetting("forceMovieViewMode") == 'true'
movieViewMode = str(settings.getSetting("movieViewMode"))
dbg = False
def CATEGORIES():
data = getUrl(baseurl)
cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I)
addDir('Letzte Updates', baseurl + '/publ/?page1', 1, '', True)
addDir('Suche...', baseurl + '/publ', 4, '', True)
addDir('Serien', baseurl + '/load', 0, '', True)
for (url, num, name) in cats:
if 'http:' not in url: url = baseurl + url
addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True)
xbmc.executebuiltin("Container.SetViewMode(400)")
def SERIES(url):
data = getUrl(url)
cats = re.findall('<a[^>]*?class="CatInf"[^>]*?href="(.*?)"[^>]*?>.*?<div class="CatNumInf">(.*?)</div>[^<]*?<div[^>]*?class="CatNameInf">(.*?)</div>', data, re.S|re.I)
addDir('Letzte Updates', baseurl + '/load/0-1', 1, '', True)
for (url, num, name) in cats:
if 'http:' not in url: url = baseurl + url
addDir(name + ' [COLOR=blue](' + num + ')[/COLOR]', url, 1, '', True)
xbmc.executebuiltin("Container.SetViewMode(400)")
def INDEX(url, search=None):
global itemcnt
if (dbg): print url
data = getUrl(url, search)
movies = re.findall('<div class="ImgWrapNews">[^<]*<a[^<]*<img[^>]*src="([^"]*.[jpg|png])"[^>]*alt="([^"]*)"[^>]*>.*?class="[^"]*entryLink[^"]*".*?href="([^"]*)"', data, re.S|re.I)
if movies:
for (m_image, m_title, m_url) in movies:
if 'http:' not in m_url: m_url = baseurl + m_url
addDir(clean(m_title), m_url, 2, m_image, True)
itemcnt = itemcnt + 1
nextPage = re.findall('<a class="swchItem"[^>]*onclick="spages\(\'(\d+)\'[^>]*?"[^>]*><span>»</span>', data, re.S)
if nextPage:
if '?page' in url:
nextPageUrl = re.sub('\?page[\d]+$', '?page' + nextPage[0], url)
elif re.search('[\d]+-[\d]+$', url):
nextPageUrl = re.sub('-[\d]+$', '-' + nextPage[0], url)
else:
nextPageUrl = url + "-" + nextPage[0]
if itemcnt >= maxitems:
addDir('Weiter >>', nextPageUrl, 1, '', True)
else:
INDEX(nextPageUrl)
if forceMovieViewMode: xbmc.executebuiltin("Container.SetViewMode(" + movieViewMode + ")")
def VIDEOLINKS(url, image):
data = getUrl(url)
streams = []
raw = re.findall('(<fieldset[^>]*>[^<]*<legend>.*?</fieldset>)', data, re.S)
if raw:
for each in raw:
if "Film Tipps" in each: continue
series = re.findall('<div class="spoiler"><font[^>]*><b[^>]*>(.+?)</b>(.*?)<input', each, re.S|re.I)
if not series: series = re.findall('<legend>(.+?)</legend>[^<]*<div class="spoiler">(.*?)<input', each, re.S|re.I)
if not series: series = re.findall('<legend>(.+?)</legend>.*?(<iframe.*?</iframe>|<a[^>]*href=".+"[^>]*>).*', each, re.S|re.I)
if series:
for ser in series:
for (s, n) in re.findall('<a[^>]*href="([^"]+)"[^>]*>([^<]*)<', each, re.S|re.I):
if dbg: print 'ser1'
if ser: n = clean(ser[1]) + ' ' + extractFilename(s)
n = clean(n) if n else extractFilename(s)
if n: streams += [(n, s)]
for s in re.findall('<iframe[^>]*src="([^"]*)"[^>]*>', each, re.S|re.I):
if dbg: print 'ser2'
if ser: n = clean(ser[1])
if not n: n = 'unknown'
if n: streams += [(n, s)]
elif re.match('.*?iframe.*?src.*', each, re.S|re.I):
if dbg: print 'nonser1'
streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?<iframe.*?src=["|\'](.*?)["|\']', each, re.S|re.I)
else:
if dbg: print 'nonser2'
streams += re.findall('<font[^>]*>.*?src=".*?/player/(.*?)\..{3}".*?</font>.*?target="_blank" href=["|\'](.*?)["|\']', each, re.S|re.I)
if streams:
for (filename, stream) in streams:
stream = cleanURL(stream)
if dbg: print "filename: " + str(filename) + ", stream: " + str(stream)
hoster = get_stream_link().get_hostername(stream)
if filterUnknownHoster and hoster == 'Not Supported': continue
entry = '[COLOR=blue](' + hoster + ')[/COLOR] ' + filename
addLink(entry, cleanURL(stream), 3, image)
def SEARCH(url):
keyboard = xbmc.Keyboard('', 'Suche')
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
search_string = keyboard.getText()
INDEX(url, search_string)
def clean(s):
try: s = htmlparser.unescape(s)
except: print "could not unescape string '%s'"%(s)
s = re.sub('<[^>]*>', '', s)
s = s.replace('_', ' ')
s = re.sub('[ ]+', ' ', s)
for hit in set(re.findall("&#\d+;", s)):
try: s = s.replace(hit, unichr(int(hit[2:-1])))
except ValueError: pass
return s.strip('\n').strip()
def cleanURL(s):
s = re.sub('<[^>]*>', '', s)
s = re.sub('[ ]+', ' ', s)
for hit in set(re.findall("&#\d+;", s)):
try: s = s.replace(hit, unichr(int(hit[2:-1])))
except ValueError: pass
return s.strip('\n').strip()
def extractFilename(path):
path = re.sub('^.*/', '',clean(path)).replace('.html', '').replace('_', ' ')
return re.sub('\.[a-zA-Z]{3}', '', path)
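# GETLINK: resolves the selected hoster link to a playable stream URL and hands it
# to Kodi via setResolvedUrl; resolver errors are shown as a notification.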
def GETLINK(url):
stream_url = get_stream_link().get_stream(url)
if stream_url:
if re.match('^Error: ', stream_url, re.S|re.I):
xbmc.executebuiltin("XBMC.Notification(Fehler!, " + re.sub('^Error: ','',stream_url) + ", 4000)")
else:
listitem = xbmcgui.ListItem(path=stream_url)
return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
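# getUrl: fetches a page with a browser-like User-Agent; when a search query is
# given, the request is sent as a POST carrying the site's search form fields.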
def getUrl(url, query=None):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
if (dbg): print query
if query:
values = { 'query' : query, 'a' : '2' }
response = urllib2.urlopen(req, urllib.urlencode(values))
else:
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def addLink(name, url, mode, image):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=image)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
def addDir(name, url, mode, image, is_folder=False):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&image="+urllib.quote_plus(image)
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=is_folder)
params = get_params()
url = mode = image = None
try: url = urllib.unquote_plus(params["url"])
except: pass
try: mode = int(params["mode"])
except: pass
try: image = urllib.unquote_plus(params["image"])
except: pass
if mode==None or url==None or len(url)<1: CATEGORIES()
elif mode==0: SERIES(url)
elif mode==1: INDEX(url)
elif mode==2: VIDEOLINKS(url, image)
elif mode==3: GETLINK(url)
elif mode==4: SEARCH(url)
xbmcplugin.endOfDirectory(int(sys.argv[1])) | gpl-2.0 | 3,533,005,184,932,227,000 | 39.172589 | 181 | 0.606091 | false |
ericzundel/pants | src/python/pants/engine/scheduler.py | 1 | 19768 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import threading
import time
from contextlib import contextmanager
from pants.base.specs import (AscendantAddresses, DescendantAddresses, SiblingAddresses,
SingleAddress)
from pants.build_graph.address import Address
from pants.engine.addressable import SubclassesOf
from pants.engine.fs import PathGlobs, create_fs_intrinsics, generate_fs_subjects
from pants.engine.isolated_process import create_snapshot_intrinsics, create_snapshot_singletons
from pants.engine.nodes import Return, Runnable, Throw
from pants.engine.rules import NodeBuilder, RulesetValidator
from pants.engine.selectors import (Select, SelectDependencies, SelectLiteral, SelectProjection,
SelectVariant, constraint_for)
from pants.engine.struct import HasProducts, Variants
from pants.engine.subsystem.native import (ExternContext, Function, TypeConstraint, TypeId,
extern_id_to_str, extern_key_for, extern_project,
extern_project_multi, extern_satisfied_by,
extern_store_list, extern_val_to_str)
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecutionRequest(datatype('ExecutionRequest', ['roots'])):
"""Holds the roots for an execution, which might have been requested by a user.
To create an ExecutionRequest, see `LocalScheduler.build_request` (which performs goal
translation) or `LocalScheduler.execution_request`.
:param roots: Roots for this request.
:type roots: list of tuples of subject and product.
"""
class LocalScheduler(object):
"""A scheduler that expands a product Graph by executing user defined tasks."""
def __init__(self,
goals,
tasks,
project_tree,
native,
graph_lock=None):
"""
:param goals: A dict from a goal name to a product type. A goal is just an alias for a
particular (possibly synthetic) product.
:param tasks: A set of (output, input selection clause, task function) triples which
is used to compute values in the product graph.
:param project_tree: An instance of ProjectTree for the current build root.
:param native: An instance of engine.subsystem.native.Native.
:param graph_lock: A re-entrant lock to use for guarding access to the internal product Graph
instance. Defaults to creating a new threading.RLock().
"""
self._products_by_goal = goals
self._project_tree = project_tree
self._native = native
self._product_graph_lock = graph_lock or threading.RLock()
self._run_count = 0
# Create a handle for the ExternContext (which must be kept alive as long as this object), and
# the native Scheduler.
self._context = ExternContext()
self._context_handle = native.new_handle(self._context)
# TODO: The only (?) case where we use inheritance rather than exact type unions.
has_products_constraint = TypeConstraint(self._to_id(SubclassesOf(HasProducts)))
scheduler = native.lib.scheduler_create(self._context_handle,
extern_key_for,
extern_id_to_str,
extern_val_to_str,
extern_satisfied_by,
extern_store_list,
extern_project,
extern_project_multi,
self._to_key('name'),
self._to_key('products'),
self._to_key('default'),
self._to_constraint(Address),
has_products_constraint,
self._to_constraint(Variants))
self._scheduler = native.gc(scheduler, native.lib.scheduler_destroy)
self._execution_request = None
# Validate and register all provided and intrinsic tasks.
select_product = lambda product: Select(product)
# TODO: This bounding of input Subject types allows for closed-world validation, but is not
# strictly necessary for execution. We might eventually be able to remove it by only executing
# validation below the execution roots (and thus not considering paths that aren't in use).
root_selector_fns = {
Address: select_product,
AscendantAddresses: select_product,
DescendantAddresses: select_product,
PathGlobs: select_product,
SiblingAddresses: select_product,
SingleAddress: select_product,
}
intrinsics = create_fs_intrinsics(project_tree) + create_snapshot_intrinsics(project_tree)
singletons = create_snapshot_singletons(project_tree)
node_builder = NodeBuilder.create(tasks, intrinsics, singletons)
RulesetValidator(node_builder, goals, root_selector_fns).validate()
self._register_tasks(node_builder.tasks)
self._register_intrinsics(node_builder.intrinsics)
self._register_singletons(node_builder.singletons)
def _to_value(self, obj):
return self._context.to_value(obj)
def _from_value(self, val):
return self._context.from_value(val)
def _to_id(self, typ):
return self._context.to_id(typ)
def _to_key(self, obj):
return self._context.to_key(obj)
def _from_id(self, cdata):
return self._context.from_id(cdata)
def _from_key(self, cdata):
return self._context.from_key(cdata)
def _to_constraint(self, type_or_constraint):
return TypeConstraint(self._to_id(constraint_for(type_or_constraint)))
def _register_singletons(self, singletons):
"""Register the given singletons dict.
Singleton tasks are those that are the default for a particular type(product). Like
intrinsics, singleton tasks create Runnables that are not cacheable.
"""
for product_type, rule in singletons.items():
self._native.lib.singleton_task_add(self._scheduler,
Function(self._to_id(rule.func)),
self._to_constraint(product_type))
def _register_intrinsics(self, intrinsics):
"""Register the given intrinsics dict.
Intrinsic tasks are those that are the default for a particular type(subject), type(product)
pair. By default, intrinsic tasks create Runnables that are not cacheable.
"""
for (subject_type, product_type), rule in intrinsics.items():
self._native.lib.intrinsic_task_add(self._scheduler,
Function(self._to_id(rule.func)),
TypeId(self._to_id(subject_type)),
self._to_constraint(subject_type),
self._to_constraint(product_type))
def _register_tasks(self, tasks):
"""Register the given tasks dict with the native scheduler."""
registered = set()
for output_type, rules in tasks.items():
output_constraint = self._to_constraint(output_type)
for rule in rules:
# TODO: The task map has heterogeneous keys, so we normalize them to type constraints
# and dedupe them before registering to the native engine:
# see: https://github.com/pantsbuild/pants/issues/4005
key = (output_constraint, rule)
if key in registered:
continue
registered.add(key)
_, input_selects, func = rule.as_triple()
self._native.lib.task_add(self._scheduler, Function(self._to_id(func)), output_constraint)
for selector in input_selects:
selector_type = type(selector)
product_constraint = self._to_constraint(selector.product)
if selector_type is Select:
self._native.lib.task_add_select(self._scheduler,
product_constraint)
elif selector_type is SelectVariant:
self._native.lib.task_add_select_variant(self._scheduler,
product_constraint,
self._context.utf8_buf(selector.variant_key))
elif selector_type is SelectLiteral:
# NB: Intentionally ignores subject parameter to provide a literal subject.
self._native.lib.task_add_select_literal(self._scheduler,
self._to_key(selector.subject),
product_constraint)
elif selector_type is SelectDependencies:
self._native.lib.task_add_select_dependencies(self._scheduler,
product_constraint,
self._to_constraint(selector.dep_product),
self._to_key(selector.field),
selector.transitive)
elif selector_type is SelectProjection:
if len(selector.fields) != 1:
raise ValueError("TODO: remove support for projecting multiple fields at once.")
field = selector.fields[0]
self._native.lib.task_add_select_projection(self._scheduler,
self._to_constraint(selector.product),
TypeId(self._to_id(selector.projected_subject)),
self._to_key(field),
self._to_constraint(selector.input_product))
else:
raise ValueError('Unrecognized Selector type: {}'.format(selector))
self._native.lib.task_end(self._scheduler)
def trace(self, roots):
"""Yields a stringified 'stacktrace' starting from the given failed root.
:param iterable roots: An iterable of the root nodes to begin the trace from.
"""
return "TODO: Restore trace (see: #4007)."
def visualize_graph_to_file(self, filename):
"""Visualize a graph walk by writing graphviz `dot` output to a file.
:param iterable roots: An iterable of the root nodes to begin the graph walk from.
:param str filename: The filename to output the graphviz output to.
"""
with self._product_graph_lock:
self._native.lib.graph_visualize(self._scheduler, bytes(filename))
def build_request(self, goals, subjects):
"""Translate the given goal names into product types, and return an ExecutionRequest.
:param goals: The list of goal names supplied on the command line.
:type goals: list of string
:param subjects: A list of Spec and/or PathGlobs objects.
:type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or
:class:`pants.engine.fs.PathGlobs` objects.
:returns: An ExecutionRequest for the given goals and subjects.
"""
return self.execution_request([self._products_by_goal[goal_name] for goal_name in goals],
subjects)
def execution_request(self, products, subjects):
"""Create and return an ExecutionRequest for the given products and subjects.
The resulting ExecutionRequest object will contain keys tied to this scheduler's product Graph, and
so it will not be directly usable with other scheduler instances without being re-created.
An ExecutionRequest for an Address represents exactly one product output, as does SingleAddress. But
we differentiate between them here in order to normalize the output for all Spec objects
as "list of product".
:param products: A list of product types to request for the roots.
:type products: list of types
:param subjects: A list of Spec and/or PathGlobs objects.
:type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or
:class:`pants.engine.fs.PathGlobs` objects.
:returns: An ExecutionRequest for the given products and subjects.
"""
return ExecutionRequest(tuple((s, Select(p)) for s in subjects for p in products))
def selection_request(self, requests):
"""Create and return an ExecutionRequest for the given (selector, subject) tuples.
This method allows users to specify their own selectors. It has the potential to replace
execution_request, which is a subset of this method, because it uses default selectors.
:param requests: A list of (selector, subject) tuples.
:return: An ExecutionRequest for the given selectors and subjects.
"""
#TODO: Think about how to deprecate the existing execution_request API.
return ExecutionRequest(tuple((subject, selector) for selector, subject in requests))
@contextmanager
def locked(self):
with self._product_graph_lock:
yield
def root_entries(self, execution_request):
"""Returns the roots for the given ExecutionRequest as a dict of tuples to State."""
with self._product_graph_lock:
if self._execution_request is not execution_request:
raise AssertionError(
"Multiple concurrent executions are not supported! {} vs {}".format(
self._execution_request, execution_request))
raw_roots = self._native.gc(self._native.lib.execution_roots(self._scheduler),
self._native.lib.nodes_destroy)
roots = {}
for root, raw_root in zip(execution_request.roots, self._native.unpack(raw_roots.nodes_ptr, raw_roots.nodes_len)):
if raw_root.union_tag is 0:
state = None
elif raw_root.union_tag is 1:
state = Return(self._from_value(raw_root.union_return))
elif raw_root.union_tag is 2:
state = Throw("Failed")
elif raw_root.union_tag is 3:
state = Throw("Nooped")
else:
raise ValueError('Unrecognized State type `{}` on: {}'.format(raw_root.union_tag, raw_root))
roots[root] = state
return roots
def invalidate_files(self, filenames):
"""Calls `Graph.invalidate_files()` against an internal product Graph instance."""
subjects = set(generate_fs_subjects(filenames))
subject_keys = list(self._to_key(subject) for subject in subjects)
with self._product_graph_lock:
invalidated = self._native.lib.graph_invalidate(self._scheduler,
subject_keys,
len(subject_keys))
logger.debug('invalidated %d nodes for subjects: %s', invalidated, subjects)
return invalidated
def node_count(self):
with self._product_graph_lock:
return self._native.lib.graph_len(self._scheduler)
def _execution_next(self, completed):
# Unzip into two arrays.
returns_ids, returns_states, throws_ids = [], [], []
for cid, c in completed:
if type(c) is Return:
returns_ids.append(cid)
returns_states.append(self._to_value(c.value))
elif type(c) is Throw:
throws_ids.append(cid)
else:
raise ValueError("Unexpected `Completed` state from Runnable execution: {}".format(c))
# Run, then collect the outputs from the Scheduler's RawExecution struct.
self._native.lib.execution_next(self._scheduler,
returns_ids,
returns_states,
len(returns_ids),
throws_ids,
len(throws_ids))
def decode_runnable(raw):
return (
raw.id,
Runnable(self._from_id(raw.func.id_),
tuple(self._from_value(arg)
for arg in self._native.unpack(raw.args_ptr, raw.args_len)),
bool(raw.cacheable))
)
runnables = [decode_runnable(r)
for r in self._native.unpack(self._scheduler.execution.runnables_ptr,
self._scheduler.execution.runnables_len)]
# Rezip from two arrays.
return runnables
def _execution_add_roots(self, execution_request):
if self._execution_request is not None:
self._native.lib.execution_reset(self._scheduler)
self._execution_request = execution_request
for subject, selector in execution_request.roots:
if type(selector) is Select:
self._native.lib.execution_add_root_select(self._scheduler,
self._to_key(subject),
self._to_constraint(selector.product))
elif type(selector) is SelectDependencies:
self._native.lib.execution_add_root_select_dependencies(self._scheduler,
self._to_key(subject),
self._to_constraint(selector.product),
self._to_constraint(selector.dep_product),
self._to_key(selector.field),
selector.transitive)
else:
raise ValueError('Unsupported root selector type: {}'.format(selector))
def schedule(self, execution_request):
"""Yields batches of Steps until the roots specified by the request have been completed.
This method should be called by exactly one scheduling thread, but the Step objects returned
by this method are intended to be executed in multiple threads, and then satisfied by the
scheduling thread.
"""
with self._product_graph_lock:
start_time = time.time()
# Reset execution, and add any roots from the request.
self._execution_add_roots(execution_request)
# Yield nodes that are Runnable, and then compute new ones.
completed = []
outstanding_runnable = set()
runnable_count, scheduling_iterations = 0, 0
while True:
# Call the scheduler to create Runnables for the Engine.
runnable = self._execution_next(completed)
outstanding_runnable.difference_update(i for i, _ in completed)
outstanding_runnable.update(i for i, _ in runnable)
if not runnable and not outstanding_runnable:
# Finished.
break
# The double yield here is intentional, and assumes consumption of this generator in
# a `for` loop with a `generator.send(completed)` call in the body of the loop.
completed = yield runnable
yield
runnable_count += len(runnable)
scheduling_iterations += 1
if self._native.visualize_to_dir is not None:
name = 'run.{}.dot'.format(self._run_count)
self._run_count += 1
self.visualize_graph_to_file(os.path.join(self._native.visualize_to_dir, name))
logger.debug(
'ran %s scheduling iterations and %s runnables in %f seconds. '
'there are %s total nodes.',
scheduling_iterations,
runnable_count,
time.time() - start_time,
self._native.lib.graph_len(self._scheduler)
)
| apache-2.0 | 4,387,969,145,928,732,700 | 46.864407 | 120 | 0.608964 | false |
15th/simpleat | simpleat/core/unittests/parse.py | 1 | 3139 | #!/usr/bin/env python
#coding=utf-8
# Filename: parse.py
'''
Parses the test data syntax.
@author: 15th
@date: 2017.4.12
'''
import re
#Declare a shared variable: *
#A shared variable is declared as: *field_name
#Example: if a step's response contains a field named uid and you want to register it as a shared variable, write *uid in that step's response.
#If the same response field is registered as a shared variable in different test cases/steps, the values do not overwrite each other; they become separate shared variables.
RE_SET_SHAREDVAR = re.compile(r'^\*[^*]+')
#Declare a group-unique variable: **
#A group-unique variable is declared as: **field_name
#Example: if a step's response contains a field named uid and you want to register it as a group-unique variable, write **uid in that step's response.
#If the same response field is registered as a group-unique variable in different test cases/steps, its value is overwritten in the order the assignments happen.
#A field may be declared as both a shared variable and a group-unique variable; overwriting only affects the group-unique variable and never changes the shared variable's value.
RE_SET_GSHAREDVAR = re.compile(r'^\*\*.+')
#Use a built-in variable: $
#Built-in variables are provided by simpleat; prefix the variable name with $ when using them.
#Example: {{$IID}}
RE_GET_RESERVEDVAR = re.compile(r'^\{\{\$.+\}\}$')
#Use a single-case variable
#Single-case variables are referenced as step_number.field_name.
#Example: a test case has 3 steps and step 1 returns a field someRt;
#it is referenced as {{1.someRt}}
#Step numbers start at 1.
RE_GET_INTERNALVAR = re.compile(r'^\{\{\d+\..+\}\}$')
#Use a shared variable: *
#Prefix the name with * and reference it as *case_id.step_number.field_name.
#Example: test case t3 has 3 steps and step 1 returns a field someRt that was registered as a shared variable;
#it is referenced as {{*t3.1.someRt}}
#Step numbers start at 1.
RE_GET_SHAREDVAR = re.compile(r'^\{\{\*[^*]+\}\}$')
#Use a group-unique variable: **
#Prefix the name with ** and reference it as **field_name.
#Example: step 1 of test case t3 returns a field someRt that was registered as a group-unique variable;
#it is referenced as {{**someRt}}
#Step numbers start at 1.
RE_GET_GSHAREDVAR = re.compile(r'^\{\{\*\*.+\}\}$')
#Expression: ~
#Fields of the request message / verification fields may be assigned with an expression.
#An expression marks the rest of the line as a statement in Python syntax and is written as ~content. Variables may be referenced inside an expression.
#Example: ~{{1.someRt}}+1
#means the value of the field someRt returned by step 1 of the current case, plus 1.
#The expression marker may only be used at the beginning of the line.
#If the expression is invalid, the result is empty.
RE_EXPRESSION = re.compile(r'^\~.+')
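# A minimal self-check sketch of how the patterns above are meant to be used.
# The sample tokens below are illustrative assumptions, not taken from real test data:
#   assert RE_SET_SHAREDVAR.match("*uid")             # declares a shared variable
#   assert RE_GET_INTERNALVAR.match("{{1.someRt}}")   # reads a single-case variable
#   assert RE_GET_SHAREDVAR.match("{{*t3.1.someRt}}") # reads a shared variable
#   assert RE_EXPRESSION.match("~{{1.someRt}}+1")     # an expression line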
#String literal: !
#If the value of a request field starts with !, the rest of the content is sent verbatim and no special syntax is applied.
#Example: if a request field note is given the value !{{1.someRt}}, the value actually sent for note is the string {{1.someRt}}, not the variable 1.someRt.
RE_LITERALVAR = re.compile(r'^\!.+')
| mit | -3,955,956,424,769,032,000 | 24.515625 | 81 | 0.700551 | false |
SuperFriendBFG/PyBreakout | Game/Scenes/GameOverScene.py | 1 | 1454 | import pygame
from Game.Scenes.Scene import Scene
from Game.Shared import *
from Game import Highscore
class GameOverScene(Scene):
def __init__(self, game):
super(GameOverScene, self).__init__(game)
self.__playerName = ""
self.__highscoreSprite = pygame.image.load(GameConstants.SPRITE_HIGHSCORE)
def render(self):
self.getGame().screen.blit(self.__highscoreSprite, (50, 50))
self.clearText()
self.addText("Your Name: ", 300, 200, size=30)
self.addText(self.__playerName, 420, 200, size=30)
super(GameOverScene, self).render()
def handleEvents(self, events):
super(GameOverScene, self).handleEvents(events)
for event in events:
if event.type == pygame.QUIT:
exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
game = self.getGame()
Highscore().add(self.__playerName, game.getScore())
game.reset()
game.changeScene(GameConstants.HIGHSCORE_SCENE)
elif event.key >= 65 and event.key <= 122:
self.__playerName += chr(event.key)
if event.key == pygame.K_F1:
self.getGame().reset()
self.getGame().changeScene(GameConstants.PLAYING_SCENE) | gpl-3.0 | 4,623,302,732,384,540,000 | 31.090909 | 83 | 0.541265 | false |
BertrandBordage/django-postgrefts | postgrefts/migrations/0001_initial.py | 1 | 2798 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import postgrefts.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Index',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language', models.CharField(max_length=5, db_index=True)),
('object_id', models.PositiveIntegerField()),
('url', models.CharField(max_length=300)),
('thumbnail_url', models.CharField(max_length=300, blank=True)),
('boost', models.FloatField(default=1.0)),
('title', models.CharField(max_length=100)),
('body', models.TextField(blank=True)),
('title_search', postgrefts.fields.VectorField(default='', serialize=False, editable=False)),
('body_search', postgrefts.fields.VectorField(default='', serialize=False, editable=False)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='index',
unique_together=set([('language', 'content_type', 'object_id')]),
),
migrations.RunSQL("""
CREATE EXTENSION IF NOT EXISTS unaccent;
CREATE EXTENSION IF NOT EXISTS btree_gin;
CREATE TEXT SEARCH CONFIGURATION fr ( COPY = french );
CREATE TEXT SEARCH DICTIONARY fr_stop (
TEMPLATE = simple,
StopWords = 'french', Accept = false
);
-- myspell-fr must be installed in order to get this dict working.
CREATE TEXT SEARCH DICTIONARY fr_ispell (
TEMPLATE = ispell,
DictFile = 'fr', AffFile = 'fr'
);
CREATE TEXT SEARCH DICTIONARY fr_stem (
TEMPLATE = snowball,
Language = 'french'
);
ALTER TEXT SEARCH CONFIGURATION fr
ALTER MAPPING FOR asciihword, asciiword WITH fr_stop, fr_ispell, simple;
ALTER TEXT SEARCH CONFIGURATION fr
ALTER MAPPING FOR hword, hword_asciipart, hword_part, word WITH fr_stop, fr_ispell, unaccent, simple;
CREATE INDEX content_type_id_title_search ON postgrefts_index USING gin(content_type_id, title_search);
CREATE INDEX title_search ON postgrefts_index USING gin(title_search);
CREATE INDEX body_search ON postgrefts_index USING gin(body_search);
"""),
]
| bsd-3-clause | -3,996,278,163,791,316,000 | 42.046154 | 117 | 0.572552 | false |
siyanew/Siarobo | bot.py | 1 | 12792 | import os
from os.path import dirname, realpath, join
import random
from queue import Queue
import aiohttp
import demjson
import re
import asyncio
import io
import telepot
import telepot.aio
from telepot.aio.loop import MessageLoop
from message import Message
WD = dirname(realpath(__file__))
plugins = []
public_plugins = []
config = {}
user_steps = {}
sender_queue = Queue()
def get_config():
global config
file = open(join(WD, "config.json"), "r")
config = demjson.decode(file.read())
file.close()
def save_config():
file = open(join(WD, "config.json"), "w")
file.write(demjson.encode(config))
file.close()
def load_plugins():
global plugins
global public_plugins
get_config()
plugins = []
public_plugins = []
for pluginName in config['plugins']:
plugin_dir = join(WD, "plugins", pluginName + ".py")
values = {}
with open(plugin_dir, encoding="utf-8") as f:
code = compile(f.read(), plugin_dir, 'exec')
exec(code, values)
f.close()
plugin = values['plugin']
if not plugin['sudo'] and 'usage' in plugin:
public_plugins.append(plugin)
plugins.append(plugin)
print("Loading plugin: {}".format(plugin['name']))
def sort_key(p):
return p["name"]
plugins.sort(key=sort_key)
public_plugins.sort(key=sort_key)
def check_sudo(chat_id):
if chat_id in config['sudo_members']:
return True
return False
def add_plugin(plugin_name):
config['plugins'].append(plugin_name)
save_config()
load_plugins()
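# For reference, a plugin module under plugins/ is expected to expose a module-level
# dict named `plugin`. A minimal sketch, with the fields inferred from how
# load_plugins() and handle_messages() use them (the values below are illustrative):
#
#   plugin = {
#       'name': 'echo',
#       'sudo': False,
#       'usage': ['/echo <text>'],
#       'patterns': ['^/echo (.*)$'],
#       'run': run,  # coroutine: (message, matches, chat_id, step) -> [Message, ...]
#   }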
def markdown_escape(text):
    text = text.replace("_", "\\_")
    text = text.replace("[", "\\[")
    text = text.replace("*", "\\*")
    text = text.replace("`", "\\`")
    return text
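# handle_messages: entry point for chat messages; it resumes multi-step plugins for
# users listed in user_steps, otherwise matches the text against every plugin's
# patterns and sends whatever Message objects the plugin returns.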
@asyncio.coroutine
def handle_messages(message):
content_type, chat_type, chat_id = telepot.glance(message)
from_id = message['from']['id']
if 'text' in message:
if "cancel" in message['text'].lower():
if from_id in user_steps:
del user_steps[from_id]
hide_keyboard = {'hide_keyboard': True, 'selective': True}
yield from sender(Message(chat_id).set_text("You Canceled the operation.", reply_to_message_id=message['message_id'], reply_markup=hide_keyboard))
return
if from_id in user_steps:
for plugin in plugins:
if plugin['name'] == user_steps[from_id]['name'] :
if plugin['sudo']:
if check_sudo(from_id):
return_values = yield from plugin['run'](message, [""], chat_id, user_steps[from_id]['step'])
for return_value in return_values:
if return_value:
yield from sender(return_value)
else:
yield from sender(Message(chat_id).set_text("Just Sudo Users Can Use This."))
else:
return_values = yield from plugin['run'](message, [""], chat_id, user_steps[from_id]['step'])
if return_values:
for return_value in return_values:
yield from sender(return_value)
break
return
if 'text' in message:
for plugin in plugins:
for pattern in plugin['patterns']:
if re.search(pattern, message['text'], re.IGNORECASE|re.MULTILINE):
matches = re.findall(pattern, message['text'], re.IGNORECASE)
if plugin['sudo']:
if check_sudo(message['from']['id']):
return_values = yield from plugin['run'](message, matches[0], chat_id, 0)
for return_value in return_values:
if return_value:
yield from sender(return_value)
else:
yield from sender(Message(chat_id).set_text("Just Sudo Users Can Use This."))
else:
return_values = yield from plugin['run'](message, matches[0], chat_id, 0)
if return_values:
for return_value in return_values:
yield from sender(return_value)
break
@asyncio.coroutine
def on_callback_query(message):
query_id, from_id, data = telepot.glance(message, flavor='callback_query')
for plugin in plugins:
if 'callback' in plugin:
for pattern in plugin['callback_patterns']:
if re.search(pattern, data, re.IGNORECASE|re.MULTILINE):
matches = re.findall(pattern, data, re.IGNORECASE)
return_value = yield from plugin['callback'](message, matches[0], message['message']['chat']['id'])
if return_value:
yield from sender(return_value)
break
@asyncio.coroutine
def on_inline_query(message):
query_id, from_id, query = telepot.glance(message, flavor='inline_query')
global plugins
@asyncio.coroutine
def get_inline():
for plugin in plugins:
if 'inline_query' in plugin:
for pattern in plugin['inline_patterns']:
if re.search(pattern, query, re.IGNORECASE|re.MULTILINE):
matches = re.findall(pattern, query, re.IGNORECASE)
return_values = yield from plugin['inline_query'](message, matches[0], from_id, 0)
if return_values:
return {'results': return_values, 'cache_time': 0}
break
return []
try:
answerer.answer(message, get_inline)
except:
pass
@asyncio.coroutine
def on_chosen_inline_result(message):
result_id, from_id, query_string = telepot.glance(message, flavor='chosen_inline_result')
for plugin in plugins:
if 'chosen_inline' in plugin:
for pattern in plugin['chosen_inline_pattern']:
if re.search(pattern, query_string, re.IGNORECASE | re.MULTILINE):
matches = re.findall(pattern, query_string, re.IGNORECASE)
return_values = yield from plugin['chosen_inline'](message, matches[0], from_id, result_id)
if return_values:
return return_values
break
@asyncio.coroutine
def forward_id(chat_id_forward, chat_id, msg_id):
yield from bot.forwardMessage(chat_id_forward, chat_id, msg_id)
@asyncio.coroutine
def sender(message):
try:
if message.content_type == "text":
r = yield from bot.sendMessage(message.chat_id,message.text, parse_mode=message.parse_mode, disable_web_page_preview=message.disable_web_page_preview, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
elif message.content_type == "video":
yield from bot.sendChatAction(message.chat_id, 'upload_video')
if os.path.isfile(message.video):
r = yield from bot.sendVideo(message.chat_id, open(message.video, 'rb'), duration=message.duration, width=message.width, height=message.height, caption=message.caption, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
os.remove(message.video)
else:
r = yield from bot.sendVideo(message.chat_id, message.video, duration=message.duration, width=message.width, height=message.height, caption=message.caption, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
elif message.content_type == "document":
yield from bot.sendChatAction(message.chat_id, 'upload_document')
if os.path.isfile(message.file):
r = yield from bot.sendDocument(message.chat_id, open(message.file, 'rb'), caption=message.caption, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
os.remove(message.file)
else:
r = yield from bot.sendDocument(message.chat_id, message.file, caption=message.caption, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
elif message.content_type == "photo":
yield from bot.sendChatAction(message.chat_id, 'upload_photo')
if os.path.isfile(message.photo):
r = yield from bot.sendPhoto(message.chat_id, open(message.photo, 'rb'), caption=message.caption, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
os.remove(message.photo)
else:
r = yield from bot.sendPhoto(message.chat_id, message.photo, caption=message.caption, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
elif message.content_type == "audio":
yield from bot.sendChatAction(message.chat_id, 'upload_audio')
if os.path.isfile(message.audio):
r = yield from bot.sendAudio(message.chat_id, open(message.audio, 'rb'), duration=message.duration, performer=message.performer, title=message.title, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
os.remove(message.audio)
else:
r = yield from bot.sendAudio(message.chat_id, message.audio, duration=message.duration, performer=message.performer, title=message.title, disable_notification=message.disable_notification, reply_to_message_id=message.reply_to_message_id, reply_markup=message.reply_markup)
elif message.content_type == "callback_query":
r = yield from bot.answerCallbackQuery(message.callback_query_id, text=message.text, show_alert=message.show_alert)
elif message.content_type == "edit_message":
r = yield from bot.editMessageText(message.msg_identifier, message.text, parse_mode=message.parse_mode, disable_web_page_preview=message.disable_web_page_preview, reply_markup=message.reply_markup)
return r
except:
pass
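# download: saves a Telegram file identified by file_id to the given local path.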
@asyncio.coroutine
def download(file_id, path):
yield from bot.download_file(file_id, path)
return path
async def downloader(url, path, params=None):
try:
d = path if isinstance(path, io.IOBase) else open(path, 'wb')
with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as r:
while 1:
chunk = await r.content.read()
if not chunk:
break
d.write(chunk)
d.flush()
return path
finally:
if not isinstance(path, io.IOBase) and 'd' in locals():
d.close()
async def get_stream(url, params=None):
connector = aiohttp.TCPConnector(verify_ssl=False)
with aiohttp.ClientSession(connector=connector) as session:
async with session.get(url, params=params) as resp:
return await resp
async def get(url, params=None, headers=None):
connector = aiohttp.TCPConnector(verify_ssl=False)
with aiohttp.ClientSession(connector=connector) as session:
async with session.get(url, params=params, headers=headers) as resp:
return await resp.text()
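# check_queue: background task that drains sender_queue roughly every 100 ms so
# plugins can enqueue messages without awaiting sender() themselves.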
async def check_queue():
while 1:
while not sender_queue.empty():
await sender(sender_queue.get())
await asyncio.sleep(0.1)
load_plugins()
bot = telepot.aio.Bot(config['token'])
answerer = telepot.aio.helper.Answerer(bot)
loop = asyncio.get_event_loop()
loop.create_task(MessageLoop(bot, {'chat': handle_messages,
'callback_query': on_callback_query,
'inline_query': on_inline_query,
'chosen_inline_result': on_chosen_inline_result,
'edited_chat': handle_messages}).run_forever())
loop.create_task(check_queue())
print('Bot Started ...')
loop.run_forever()
| mit | -843,876,762,284,825,000 | 43.262976 | 319 | 0.599046 | false |
carlosgbr/python0 | convierte_distancia.py | 1 | 1297 | """ Functions: convierte_distancia.py
By about.me/carlosgbr
Version 1
For Python 3.0 and later
Input:
Output:
Illustrates the use of functions in Python
"""
"""
PROBLEM STATEMENT: This function converts miles to kilometers (km).
Complete the function to return the result of the conversion.
Call the function to convert the trip distance from miles to kilometers.
Fill in the blank to print the result of the conversion.
Compute the round trip in kilometers by doubling the result and
fill in the blank to print that result.
"""
# 1) Complete the function to return the result of the conversion.
def convierte_distancia(millas):
    km = millas * 1.6  # approximately 1.6 km per mile
    return km
mi_viaje_en_millas = 55
# 2) Convert mi_viaje_en_millas to kilometers by calling the function above
mi_viaje_en_km = convierte_distancia(mi_viaje_en_millas)
# 3) Fill in the blank to print the result of the conversion
print("The distance in kilometers is " + str(mi_viaje_en_km))
# 4) Compute the round trip in kilometers by doubling the result,
# and fill in the blank to print that result
print("The round trip in kilometers is " + str(mi_viaje_en_km * 2))
| gpl-2.0 | 2,355,355,193,386,646,000 | 39 | 78 | 0.764844 | false |
zen4ever/django-dynatree | treewidget/apps/categories/models.py | 1 | 2254 | from django.db import models
from django_extensions.db.fields import AutoSlugField
import mptt
from urlparse import urljoin
class Category(models.Model):
parent = models.ForeignKey('self',
null=True,
blank=True,
related_name='children')
name = models.CharField(max_length=50)
slug = AutoSlugField(max_length=50,
overwrite=True,
populate_from='name')
url = models.TextField(editable=False)
class Meta:
verbose_name_plural = "categories"
unique_together = (("name", "slug", "parent"), )
ordering = ("tree_id", "lft")
def __unicode__(self):
return self.url
def save(self, force_insert=False, force_update=False, **kwargs):
super(Category, self).save(
force_insert=force_insert,
force_update=force_update,
**kwargs)
self.update_url()
def get_tree(self, *args):
"""
Return the tree structure for this element
"""
level_representation = "--"
if self.level == 0:
node = "| "
else:
node = "+ "
_tree_structure = node + level_representation * self.level
return _tree_structure
get_tree.short_description = 'tree'
def get_repr(self, *args):
"""
Return the branch representation for this element
"""
level_representation = "--"
if self.level == 0:
node = "| "
else:
node = "+ "
_tree_structure = node + level_representation * self.level + ' ' + self.name
return _tree_structure
get_repr.short_description = 'representation'
def tree_order(self):
return str(self.tree_id) + str(self.lft)
def update_url(self):
"""
Updates the url for this Category and all children Categories.
"""
url = urljoin(getattr(self.parent, 'url', '') + '/', self.slug)
if url != self.url:
self.url = url
self.save()
for child in self.get_children():
child.update_url()
mptt.register(Category, order_insertion_by=['name'])
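# Usage sketch (illustrative only; assumes the app is installed and migrated, the
# object names are made up):
#   root = Category.objects.create(name="Books")                 # url becomes "/books"
#   child = Category.objects.create(name="Sci-Fi", parent=root)  # update_url() runs on save,
#                                                                # so child.url ends up as "/books/sci-fi"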
| bsd-2-clause | 3,287,626,030,816,427,000 | 28.657895 | 84 | 0.537711 | false |
planbcoin/planbcoin | test/functional/wallet-hd.py | 1 | 4385 | #!/usr/bin/env python3
# Copyright (c) 2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import PlanbcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import os
import shutil
class WalletHDTest(PlanbcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
self.nodes[1] = self.start_node(1, self.options.tmpdir, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV= self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = self.start_node(1, self.options.tmpdir, self.extra_args[1])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
self.stop_node(1)
self.nodes[1] = self.start_node(1, self.options.tmpdir, self.extra_args[1] + ['-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
WalletHDTest().main ()
| mit | 1,909,362,870,807,386,000 | 40.367925 | 111 | 0.618016 | false |
QuantumTechDevStudio/RUDNEVGAUSS | rms/run_manager_approx2.py | 1 | 3432 | # -*- coding: utf-8 -*-
import os
from rms import Run
import time
import math
######################################################################################################################################
# A driver program for managing runs. Usage guide                                                                   #
# 1) Set the number of runs and store the path to the root directory in a variable                                  #
# 2) Set the task (for now this only affects the folder hierarchy)                                                  #
# 3) Set the version (likewise)                                                                                     #
# 4) Set the name of the computation script                                                                         #
# 5) If needed (ONLY if needed), create the base of the folder hierarchy: os.makedirs("./runs/" + task + "/" + version + "/") #
# 6) In a loop, create a Run object, pass it the dictionary of task parameters (work in progress) and call run.execute()      #
# 7) Return to the initial directory                                                                                #
# 8) You may add extra steps of your own, but it is better to do that in your own computation script                #
######################################################################################################################################
#n_parralel = 5
n_stat = 4
initial_path = os.getcwd()
task = "approx"
version = "main"
program = "approx_script2.py"
os.makedirs("./runs/" + task + "/" + version + "/")
k = 1
a = 0
b = math.pi*2
dim = 1
m = 10
n_sig = 1
n_sig_max = 50
eps = 1e-9
iters = int(1e5)
max_t = 200
while(n_sig <= n_sig_max):
for i in range(n_stat):
run = Run(task, version, program)
model_dict = {"a":a, "b":b, "m":m, "k":k, "n_sig":n_sig, "dim":dim, "iters":iters, "eps":eps, "max_t" : max_t}
run.feed_model_info(model_dict)
run.execute()
os.chdir(initial_path)
time.sleep(max_t+5)
n_sig += 1
dim = 2
n_sig = 1
eps = 1e-9
iters = int(1e5)
max_t = 400
while(n_sig <= n_sig_max):
for i in range(n_stat):
run = Run(task, version, program)
model_dict = {"a":a, "b":b, "m":m, "k":k, "n_sig":n_sig, "dim":dim, "iters":iters, "eps":eps, "max_t" : max_t}
run.feed_model_info(model_dict)
run.execute()
os.chdir(initial_path)
time.sleep(max_t+5)
n_sig += 1
dim = 3
n_sig = 1
eps = 1e-9
iters = int(1e5)
max_t = 660
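# Note: unlike the dim = 1 and dim = 2 blocks above, this dim = 3 block launches a
# single run per n_sig (no inner statistics loop over n_stat).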
while(n_sig <= n_sig_max):
run = Run(task, version, program)
model_dict = {"a":a, "b":b, "m":m, "k":k, "n_sig":n_sig, "dim":dim, "iters":iters, "eps":eps, "max_t" : max_t}
run.feed_model_info(model_dict)
run.execute()
os.chdir(initial_path)
time.sleep(max_t+5)
n_sig += 1
print(" ---done--- ")
| gpl-3.0 | -7,805,784,263,390,462,000 | 32.264368 | 134 | 0.442469 | false |
hongzhouye/frankenstein | scf/rhfatom.py | 1 | 6064 | """
Spin-restricted Hartree-Fock for atom
"""
import numpy as np
import scipy.linalg as slg
from frankenstein import molecule, scf
from frankenstein.tools.mol_utils import get_norb_l
from frankenstein.tools.scf_utils import get_fock, get_fock_ao_direct, \
get_scf_energy
from frankenstein.data.atom_data import get_atomic_number, get_nelec_by_l
class RHFATOM(scf.RHF):
"""Basic class for spin-restricted Hartree-Fock for atoms.
Note:
The idea is to fix the MO occupation to satisfy the aufbau principle. For degenerate shells (l > 0), the occupation is spherically averaged. The following example illustrates this using nitrogen atom.
>>> mfa = RHFATOM("N", "cc-pVDZ")
>>> print(mfa.mol.bas_l)
<<< [0, 0, 0, 1, 1, 2]
>>> print(mfa.mol.bas_pure)
<<< [False, False, False, False, False, True]
>>> print(mfa.idao_by_l)
<<< [[0, 1, 2], [3, 6], [9]]
>>> print(mfa.occ_vec)
<<< [1. 1. 0. 0.5 0.5 0.5 0. 0. 0. 0. 0. 0. 0. 0. ]
"""
def __init__(self, atomsymb, basis, **kwargs):
Z = get_atomic_number(atomsymb)
spin = 2 if Z % 2 else 1
atom = molecule.MOL("{:s} 0 0 0".format(atomsymb), basis, spin=spin, \
verbose=0)
scf.RHF.__init__(self, atom, orth_hV=False, max_iter=10, conv=5, \
guess="core", **kwargs)
self.norb_by_l = self.mol.get_norb_by_l()
self.idao_by_l = self.mol.get_idao_by_l()
self.occ_vec = self.get_occ_vec()
self.initialize()
def initialize(self):
# we need re-define how we compute Fock matrices and etc. since we are now working in raw AOs (i.e., non-orthogonal)
def __rdm1_builder_ez(mo_coeff):
id_occ = self.occ_vec > 0
Cocc = mo_coeff[id_occ]
return (Cocc * self.occ_vec[id_occ]) @ Cocc.T
# Inp: rdm1 in AO; Out: Fock in AO
if self.ao_direct:
def __fock_builder_ez(Dao):
m = self.mol
Iao = np.eye(m.nao)
return get_fock_ao_direct(m.h, m.Zs, m.xyzs, m.basis, Iao, Dao)
else:
def __fock_builder_ez(Dao):
m = self.mol
return get_fock(m.h, m.V, Dao)
def __e_scf_builder_ez(fock, rdm1):
return get_scf_energy(self.mol.h, fock, rdm1)
self.rdm1_builder_ez = __rdm1_builder_ez
self.fock_builder_ez = __fock_builder_ez
self.e_scf_builder_ez = __e_scf_builder_ez
def get_sphave_occ(self):
"""Get spherically averaged occupation
"""
nelec_by_l = get_nelec_by_l(self.mol.atoms[0])
max_l = len(self.norb_by_l)
ndocc = [0] * max_l
nfocc = [0.] * max_l
for l in range(max_l):
norb_l = self.norb_by_l[l]
ndocc[l] = nelec_by_l[l] // (2 * norb_l)
nfocc[l] = (nelec_by_l[l] - ndocc[l]*2*norb_l) / float(norb_l)
return ndocc, nfocc
def get_occ_vec(self):
ndocc, nfocc = self.get_sphave_occ()
occ_vec = np.zeros(self.mol.nmo)
for l,idao in enumerate(self.idao_by_l):
norb_l = self.norb_by_l[l]
for m in range(norb_l):
occ_vec[np.array(idao[:ndocc[l]], dtype=int)+m] = 1.
if len(idao) > ndocc[l]:
occ_vec[idao[ndocc[l]]+m] = nfocc[l] * 0.5
return occ_vec
def Roothaan_step(self):
"""Diagonalize the spherically averaged Fock matrix.
Note:
Since AOs with different l's are orthogonal, this "average and diagonalize" process is performed one l-group at a time, and the final MO coefficient matrix will be block diagonalized.
"""
mo_energy = np.zeros(self.nao)
mo_coeff = np.zeros([self.nao, self.nao])
max_l = len(self.idao_by_l)
for l in range(max_l):
idao = np.array(self.idao_by_l[l], dtype=int)
norb_l = self.norb_by_l[l]
# compute spherically averaged Fock matrix for shell with a.m. = l
fock_l = 0.
ovlp_l = 0.
for m in range(norb_l):
fock_l += self.fock[idao+m,:][:,idao+m]
ovlp_l += self.mol.S[idao+m,:][:,idao+m]
fock_l /= float(norb_l)
ovlp_l /= float(norb_l)
# diagonalize fl
eps_l, C_l = slg.eigh(fock_l, ovlp_l)
# construct mo_coeff and mo_energy
for m in range(norb_l):
mo_energy[idao+m] = eps_l
for i,i1 in enumerate(idao):
mo_coeff[idao+m,i1+m] = C_l[:,i]
self.mo_energy = mo_energy
self.mo_coeff = mo_coeff
def update(self):
if not self.mo_coeff is None:
self.rdm1 = (self.mo_coeff * self.occ_vec) @ self.mo_coeff.T
elif self.rdm1 is None:
raise RuntimeError("Both mo_coeff and rdm1 are None.")
self.fock = self.fock_builder_ez(self.rdm1)
self.e_scf = self.e_scf_builder_ez(self.fock, self.rdm1)
self.S2 = 0. if not self.unrestricted \
else get_uscf_S2(self.rdm1, self.noccs)
def get_diis_errmat(self):
if self.unrestricted:
raise ValueError("Atomic SCF only supports spin-restricted calculations!")
else:
X = self.fock @ self.rdm1 @ self.mol.S
X -= X.T
return X
if __name__ == "__main__":
from frankenstein.data.atom_data import get_atomic_name
from frankenstein.tools.io_utils import dumpMat
# for Z in range(1,10):
for Z in [7]:
atom = get_atomic_name(Z)
mfa = RHFATOM(atom, "cc-pVDZ")
mfa.verbose = 1
print(mfa.mol.bas_l)
print(mfa.mol.bas_pure)
print(mfa.idao_by_l)
print(mfa.occ_vec)
# mfa.kernel()
# print(np.trace([email protected]))
# print(mfa.e_scf, "\n")
# dumpMat(mfa.mo_energy)
# dumpMat(mfa.mo_coeff)
# dumpMat(mfa.occ_vec)
# dumpMat((mfa.mo_coeff*mfa.occ_vec)@mfa.mo_coeff.T*2.)
| bsd-3-clause | 1,381,208,671,575,960,600 | 34.670588 | 208 | 0.54535 | false |
9h37/pompadour-wiki | pompadour_wiki/pompadour_wiki/apps/wiki/models.py | 1 | 1637 | # -*- coding: utf-8 -*-
from django.core.files.base import ContentFile
from django.db.models.signals import post_delete
from django.db import models
from django.utils.translation import ugettext
from django.core.cache import cache
from gitstorage.StorageBackend import GitStorage
class Wiki(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50)
description = models.TextField()
gitdir = models.CharField(max_length=512)
def __unicode__(self):
return self.name
@property
def repo(self):
return GitStorage(self.gitdir)
def create_repo(self, user):
""" Create repository """
st = GitStorage.create_storage(self.gitdir)
content = ContentFile('Home')
st.save('Home.md', content)
st.commit(user, 'Initialize repository')
def invalidate_cache_on_delete(sender, **kwargs):
""" When a Wiki is deleted, clear all cache """
cache.clear()
# Create empty commit
wiki = kwargs.get('instance', None)
if not wiki:
raise AttributeError, 'instance is NoneType'
# current user ???
wiki.repo.commit(None, ugettext(u'Wiki deleted'))
post_delete.connect(invalidate_cache_on_delete, sender=Wiki)
class WikiNotifier(models.Model):
wiki = models.ForeignKey(Wiki)
email = models.EmailField(max_length=254)
def __unicode__(self):
return self.email
class Document(models.Model):
path = models.CharField(max_length=512)
wikipath = models.CharField(max_length=512)
is_image = models.BooleanField()
def __unicode__(self):
return self.path
| mit | -8,345,888,732,468,905,000 | 23.80303 | 60 | 0.677459 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/disks_operations.py | 1 | 34648 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DisksOperations(object):
"""DisksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2018-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-04-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(disk, 'Disk')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Disk', response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk
operation.
:type disk: ~azure.mgmt.compute.v2018_04_01.models.Disk
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Disk or
ClientRawResponse<Disk> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.Disk]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.Disk]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def _update_initial(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(disk, 'DiskUpdate')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Disk', response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk
operation.
:type disk: ~azure.mgmt.compute.v2018_04_01.models.DiskUpdate
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Disk or
ClientRawResponse<Disk> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.Disk]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.Disk]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def get(
self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Disk or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2018_04_01.models.Disk or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def _delete_initial(
self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, disk_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Disk
:rtype:
~azure.mgmt.compute.v2018_04_01.models.DiskPaged[~azure.mgmt.compute.v2018_04_01.models.Disk]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the disks under a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Disk
:rtype:
~azure.mgmt.compute.v2018_04_01.models.DiskPaged[~azure.mgmt.compute.v2018_04_01.models.Disk]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'}
def _grant_access_initial(
self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds)
# Construct URL
url = self.grant_access.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
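    # grant_access() below wraps the POST issued by _grant_access_initial()
    # in an LROPoller; the resulting AccessUri carries a time-limited SAS
    # link for reading the disk.  Hedged usage sketch (`compute_client` is an
    # assumed client instance and `access_sas` is assumed to be the AccessUri
    # field holding the link):
    #   poller = compute_client.disks.grant_access('my_rg', 'my_disk', 'Read', 3600)
    #   sas_url = poller.result().access_sas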
def grant_access(
self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, polling=True, **operation_config):
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param access: Possible values include: 'None', 'Read'
:type access: str or
~azure.mgmt.compute.v2018_04_01.models.AccessLevel
:param duration_in_seconds: Time duration in seconds until the SAS
access expires.
:type duration_in_seconds: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AccessUri or
ClientRawResponse<AccessUri> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.AccessUri]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.AccessUri]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
access=access,
duration_in_seconds=duration_in_seconds,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'}
def _revoke_access_initial(
self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.revoke_access.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def revoke_access(
self, resource_group_name, disk_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'}
| mit | 8,232,429,002,564,041,000 | 45.074468 | 165 | 0.641451 | false |
retooth/morse | morse/slots/search.py | 1 | 1306 | #!/usr/bin/python
# This file is part of Morse.
#
# Morse is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Morse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Morse. If not, see <http://www.gnu.org/licenses/>.
from . import app
from flask import request, jsonify
from ..models.core import User
from ..protocols import ajax_triggered
@app.route('/search/users.json', methods=['GET'])
@ajax_triggered
def get_users():
"""
    Gets a list of users whose usernames match
    the GET parameter ``pattern``.
:rtype: json
"""
pattern = request.args.get('pattern')
if pattern:
users = User.query.filter(User.username.ilike('%' + pattern + '%')).all()
else:
users = User.query.all()
userlist = []
for u in users:
userlist.append([u.id, u.username])
return jsonify(users = userlist)
| gpl-3.0 | 7,996,134,308,721,858,000 | 30.853659 | 81 | 0.676876 | false |
YueLinHo/Subversion | tools/dist/release.py | 1 | 52520 | #!/usr/bin/env python
# python: coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# About this script:
# This script is intended to simplify creating Subversion releases for
# any of the supported release lines of Subversion.
# It works well with our Apache infrastructure, and should make rolling,
# posting, and announcing releases dirt simple.
#
# This script may be run on a number of platforms, but it is intended to
# be run on people.apache.org. As such, it may have dependencies (such
# as Python version) which may not be common, but are guaranteed to be
# available on people.apache.org.
# It'd be kind of nice to use the Subversion python bindings in this script,
# but people.apache.org doesn't currently have them installed
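#
# A typical release-manager session looks roughly like this (a sketch only;
# the authoritative list of subcommands and their arguments is the argparse
# setup in main() below, and the revision argument to 'roll' is shown here
# as an assumption):
#   ./release.py --verbose build-env 1.9.7
#   ./release.py roll 1.9.7 <REVNUM>
# followed by the signing/posting/tagging subcommands defined in main().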
# Stuff we need
import os
import re
import sys
import glob
import fnmatch
import shutil
import urllib2
import hashlib
import tarfile
import logging
import datetime
import tempfile
import operator
import itertools
import subprocess
import argparse # standard in Python 2.7
# Find ezt, using Subversion's copy, if there isn't one on the system.
try:
import ezt
except ImportError:
ezt_path = os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0])))
ezt_path = os.path.join(ezt_path, 'build', 'generator')
sys.path.append(ezt_path)
import ezt
sys.path.remove(ezt_path)
# Our required / recommended release tool versions by release branch
tool_versions = {
'trunk' : {
'autoconf' : ['2.69',
'954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'],
'libtool' : ['2.4.6',
'e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3'],
'swig' : ['3.0.10',
'2939aae39dec06095462f1b95ce1c958ac80d07b926e48871046d17c0094f44c'],
},
'1.10' : {
'autoconf' : ['2.69',
'954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'],
'libtool' : ['2.4.6',
'e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3'],
'swig' : ['3.0.10',
'2939aae39dec06095462f1b95ce1c958ac80d07b926e48871046d17c0094f44c'],
},
'1.9' : {
'autoconf' : ['2.69',
'954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'],
'libtool' : ['2.4.6',
'e3bd4d5d3d025a36c21dd6af7ea818a2afcd4dfc1ea5a17b39d7854bcd0c06e3'],
'swig' : ['2.0.12',
'65e13f22a60cecd7279c59882ff8ebe1ffe34078e85c602821a541817a4317f7'],
},
'1.8' : {
'autoconf' : ['2.69',
'954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969'],
'libtool' : ['2.4.3',
'36b4881c1843d7585de9c66c4c3d9a067ed3a3f792bc670beba21f5a4960acdf'],
'swig' : ['2.0.9',
'586954000d297fafd7e91d1ad31089cc7e249f658889d11a44605d3662569539'],
},
}
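# Each tool_versions entry maps a release branch to the exact tool release we
# build against and the SHA-256 checksum that download_file() verifies after
# fetching the upstream tarball.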
# The version that is our current recommended release
# ### TODO: derive this from svn_version.h; see ../../build/getversion.py
recommended_release = '1.9'
# Some constants
repos = 'https://svn.apache.org/repos/asf/subversion'
secure_repos = 'https://svn.apache.org/repos/asf/subversion'
dist_repos = 'https://dist.apache.org/repos/dist'
dist_dev_url = dist_repos + '/dev/subversion'
dist_release_url = dist_repos + '/release/subversion'
KEYS = 'https://people.apache.org/keys/group/subversion.asc'
extns = ['zip', 'tar.gz', 'tar.bz2']
#----------------------------------------------------------------------
# Utility functions
class Version(object):
    regex = re.compile(r'(\d+)\.(\d+)\.(\d+)(?:-(?:(rc|alpha|beta)(\d+)))?')
def __init__(self, ver_str):
# Special case the 'trunk-nightly' version
if ver_str == 'trunk-nightly':
self.major = None
self.minor = None
self.patch = None
self.pre = 'nightly'
self.pre_num = None
self.base = 'nightly'
self.branch = 'trunk'
return
match = self.regex.search(ver_str)
if not match:
raise RuntimeError("Bad version string '%s'" % ver_str)
self.major = int(match.group(1))
self.minor = int(match.group(2))
self.patch = int(match.group(3))
if match.group(4):
self.pre = match.group(4)
self.pre_num = int(match.group(5))
else:
self.pre = None
self.pre_num = None
self.base = '%d.%d.%d' % (self.major, self.minor, self.patch)
self.branch = '%d.%d' % (self.major, self.minor)
def is_prerelease(self):
        return self.pre is not None
def is_recommended(self):
return self.branch == recommended_release
def get_download_anchor(self):
if self.is_prerelease():
return 'pre-releases'
else:
if self.is_recommended():
return 'recommended-release'
else:
return 'supported-releases'
def get_ver_tags(self, revnum):
# These get substituted into svn_version.h
ver_tag = ''
ver_numtag = ''
if self.pre == 'alpha':
ver_tag = '" (Alpha %d)"' % self.pre_num
ver_numtag = '"-alpha%d"' % self.pre_num
elif self.pre == 'beta':
ver_tag = '" (Beta %d)"' % args.version.pre_num
ver_numtag = '"-beta%d"' % self.pre_num
elif self.pre == 'rc':
ver_tag = '" (Release Candidate %d)"' % self.pre_num
ver_numtag = '"-rc%d"' % self.pre_num
elif self.pre == 'nightly':
ver_tag = '" (Nightly Build r%d)"' % revnum
ver_numtag = '"-nightly-r%d"' % revnum
else:
ver_tag = '" (r%d)"' % revnum
ver_numtag = '""'
return (ver_tag, ver_numtag)
def __serialize(self):
return (self.major, self.minor, self.patch, self.pre, self.pre_num)
def __eq__(self, that):
return self.__serialize() == that.__serialize()
def __ne__(self, that):
return self.__serialize() != that.__serialize()
def __hash__(self):
return hash(self.__serialize())
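    # Ordering: versions compare numerically on (major, minor, patch); a
    # pre-release sorts before its corresponding final release, and the
    # pre-release labels happen to order correctly as plain strings
    # ('alpha' < 'beta' < 'rc').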
def __lt__(self, that):
if self.major < that.major: return True
if self.major > that.major: return False
if self.minor < that.minor: return True
if self.minor > that.minor: return False
if self.patch < that.patch: return True
if self.patch > that.patch: return False
if not self.pre and not that.pre: return False
if not self.pre and that.pre: return False
if self.pre and not that.pre: return True
# We are both pre-releases
if self.pre != that.pre:
return self.pre < that.pre
else:
return self.pre_num < that.pre_num
def __str__(self):
"Return an SVN_VER_NUMBER-formatted string, or 'nightly'."
if self.pre:
if self.pre == 'nightly':
return 'nightly'
else:
extra = '-%s%d' % (self.pre, self.pre_num)
else:
extra = ''
return self.base + extra
def __repr__(self):
return "Version(%s)" % repr(str(self))
def get_prefix(base_dir):
return os.path.join(base_dir, 'prefix')
def get_tempdir(base_dir):
return os.path.join(base_dir, 'tempdir')
def get_workdir(base_dir):
return os.path.join(get_tempdir(base_dir), 'working')
# The name of this directory is also used to name the tarball and for
# the root of paths within the tarball, e.g. subversion-1.9.5 or
# subversion-nightly-r1800000
def get_exportdir(base_dir, version, revnum):
if version.pre != 'nightly':
return os.path.join(get_tempdir(base_dir), 'subversion-'+str(version))
return os.path.join(get_tempdir(base_dir),
'subversion-%s-r%d' % (version, revnum))
def get_deploydir(base_dir):
return os.path.join(base_dir, 'deploy')
def get_target(args):
"Return the location of the artifacts"
if args.target:
return args.target
else:
return get_deploydir(args.base_dir)
def get_tmpldir():
return os.path.join(os.path.abspath(sys.path[0]), 'templates')
def get_tmplfile(filename):
try:
return open(os.path.join(get_tmpldir(), filename))
except IOError:
# Hmm, we had a problem with the local version, let's try the repo
return urllib2.urlopen(repos + '/trunk/tools/dist/templates/' + filename)
def get_nullfile():
return open(os.path.devnull, 'w')
def run_script(verbose, script, hide_stderr=False):
stderr = None
if verbose:
stdout = None
else:
stdout = get_nullfile()
if hide_stderr:
stderr = get_nullfile()
for l in script.split('\n'):
subprocess.check_call(l.split(), stdout=stdout, stderr=stderr)
def download_file(url, target, checksum):
response = urllib2.urlopen(url)
target_file = open(target, 'w+')
target_file.write(response.read())
target_file.seek(0)
m = hashlib.sha256()
m.update(target_file.read())
target_file.close()
checksum2 = m.hexdigest()
if checksum != checksum2:
raise RuntimeError("Checksum mismatch for '%s': "\
"downloaded: '%s'; expected: '%s'" % \
(target, checksum, checksum2))
#----------------------------------------------------------------------
# ezt helpers
# In ezt, «[if-any foo]» is false when «data['foo']» is the empty string,
# hence, provide this constant for readability.
ezt_False = ""
# And this constant for symmetry.
ezt_True = True
# And this for convenience.
def ezt_bool(boolean_value):
return ezt_True if boolean_value else ezt_False
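# Illustrative use: write_news() below passes ezt_bool() values so that a
# template construct such as «[if-any is_recommended]...[end]» renders its
# body only for recommended releases (the template excerpt here is
# illustrative, not copied from the actual .ezt files).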
#----------------------------------------------------------------------
# Cleaning up the environment
def cleanup(args):
'Remove generated files and folders.'
logging.info('Cleaning')
shutil.rmtree(get_prefix(args.base_dir), True)
shutil.rmtree(get_tempdir(args.base_dir), True)
shutil.rmtree(get_deploydir(args.base_dir), True)
#----------------------------------------------------------------------
# Creating an environment to roll the release
class RollDep(object):
'The super class for each of the build dependencies.'
def __init__(self, base_dir, use_existing, verbose):
self._base_dir = base_dir
self._use_existing = use_existing
self._verbose = verbose
def _test_version(self, cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout, stderr) = proc.communicate()
rc = proc.wait()
if rc: return ''
return stdout.split('\n')
def build(self):
if not hasattr(self, '_extra_configure_flags'):
self._extra_configure_flags = ''
cwd = os.getcwd()
tempdir = get_tempdir(self._base_dir)
tarball = os.path.join(tempdir, self._filebase + '.tar.gz')
if os.path.exists(tarball):
if not self._use_existing:
                raise RuntimeError('%s tarball "%s" already exists'
                                   % (self.label, tarball))
logging.info('Using existing %s.tar.gz' % self._filebase)
else:
logging.info('Fetching %s' % self._filebase)
download_file(self._url, tarball, self._checksum)
# Extract tarball
tarfile.open(tarball).extractall(tempdir)
logging.info('Building ' + self.label)
os.chdir(os.path.join(tempdir, self._filebase))
run_script(self._verbose,
'''./configure --prefix=%s %s
make
make install''' % (get_prefix(self._base_dir),
self._extra_configure_flags))
os.chdir(cwd)
class AutoconfDep(RollDep):
def __init__(self, base_dir, use_existing, verbose, autoconf_ver, checksum):
RollDep.__init__(self, base_dir, use_existing, verbose)
self.label = 'autoconf'
self._filebase = 'autoconf-' + autoconf_ver
self._autoconf_ver = autoconf_ver
self._url = 'https://ftp.gnu.org/gnu/autoconf/%s.tar.gz' % self._filebase
self._checksum = checksum
def have_usable(self):
output = self._test_version(['autoconf', '-V'])
if not output: return False
version = output[0].split()[-1:][0]
return version == self._autoconf_ver
def use_system(self):
if not self._use_existing: return False
return self.have_usable()
class LibtoolDep(RollDep):
def __init__(self, base_dir, use_existing, verbose, libtool_ver, checksum):
RollDep.__init__(self, base_dir, use_existing, verbose)
self.label = 'libtool'
self._filebase = 'libtool-' + libtool_ver
self._libtool_ver = libtool_ver
self._url = 'https://ftp.gnu.org/gnu/libtool/%s.tar.gz' % self._filebase
self._checksum = checksum
def have_usable(self):
output = self._test_version(['libtool', '--version'])
if not output: return False
return self._libtool_ver in output[0]
def use_system(self):
# We unconditionally return False here, to avoid using a borked
# system libtool (I'm looking at you, Debian).
return False
def build(self):
RollDep.build(self)
# autogen.sh looks for glibtoolize before libtoolize
bin_dir = os.path.join(get_prefix(self._base_dir), "bin")
os.symlink("libtoolize", os.path.join(bin_dir, "glibtoolize"))
os.symlink("libtool", os.path.join(bin_dir, "glibtool"))
class SwigDep(RollDep):
def __init__(self, base_dir, use_existing, verbose, swig_ver, checksum,
sf_mirror):
RollDep.__init__(self, base_dir, use_existing, verbose)
self.label = 'swig'
self._filebase = 'swig-' + swig_ver
self._swig_ver = swig_ver
self._url = 'https://sourceforge.net/projects/swig/files/swig/%(swig)s/%(swig)s.tar.gz/download?use_mirror=%(sf_mirror)s' % \
{ 'swig' : self._filebase,
'sf_mirror' : sf_mirror }
self._checksum = checksum
self._extra_configure_flags = '--without-pcre'
def have_usable(self):
output = self._test_version(['swig', '-version'])
if not output: return False
version = output[1].split()[-1:][0]
return version == self._swig_ver
def use_system(self):
if not self._use_existing: return False
return self.have_usable()
def build_env(args):
'Download prerequisites for a release and prepare the environment.'
logging.info('Creating release environment')
try:
os.mkdir(get_prefix(args.base_dir))
os.mkdir(get_tempdir(args.base_dir))
except OSError:
if not args.use_existing:
raise
autoconf = AutoconfDep(args.base_dir, args.use_existing, args.verbose,
tool_versions[args.version.branch]['autoconf'][0],
tool_versions[args.version.branch]['autoconf'][1])
libtool = LibtoolDep(args.base_dir, args.use_existing, args.verbose,
tool_versions[args.version.branch]['libtool'][0],
tool_versions[args.version.branch]['libtool'][1])
swig = SwigDep(args.base_dir, args.use_existing, args.verbose,
tool_versions[args.version.branch]['swig'][0],
tool_versions[args.version.branch]['swig'][1],
args.sf_mirror)
# iterate over our rolling deps, and build them if needed
for dep in [autoconf, libtool, swig]:
if dep.use_system():
logging.info('Using system %s' % dep.label)
else:
dep.build()
#----------------------------------------------------------------------
# Create release artifacts
def compare_changes(repos, branch, revision):
mergeinfo_cmd = ['svn', 'mergeinfo', '--show-revs=eligible',
repos + '/trunk/CHANGES',
repos + '/' + branch + '/' + 'CHANGES']
stdout = subprocess.check_output(mergeinfo_cmd)
if stdout:
# Treat this as a warning since we are now putting entries for future
# minor releases in CHANGES on trunk.
logging.warning('CHANGES has unmerged revisions: %s' %
stdout.replace("\n", " "))
_current_year = str(datetime.datetime.now().year)
_copyright_re = re.compile(r'Copyright (?:\(C\) )?(?P<year>[0-9]+)'
r' The Apache Software Foundation',
re.MULTILINE)
def check_copyright_year(repos, branch, revision):
def check_file(branch_relpath):
file_url = (repos + '/' + branch + '/'
+ branch_relpath + '@' + str(revision))
cat_cmd = ['svn', 'cat', file_url]
stdout = subprocess.check_output(cat_cmd)
m = _copyright_re.search(stdout)
if m:
year = m.group('year')
else:
year = None
if year != _current_year:
logging.warning('Copyright year in ' + branch_relpath
+ ' is not the current year')
check_file('NOTICE')
check_file('subversion/libsvn_subr/version.c')
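# replace_lines() rewrites PATH in place: every (start, pattern, repl) action
# whose `start` prefix matches a line has its regex substitution applied to
# that line.  Illustrative call (the values are made up):
#   replace_lines('svn_version.h',
#                 [('#define SVN_VER_MINOR', '[0-9][0-9]*', '10')])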
def replace_lines(path, actions):
with open(path, 'r') as old_content:
lines = old_content.readlines()
with open(path, 'w') as new_content:
for line in lines:
for start, pattern, repl in actions:
if line.startswith(start):
line = re.sub(pattern, repl, line)
new_content.write(line)
def roll_tarballs(args):
'Create the release artifacts.'
if not args.branch:
args.branch = 'branches/%d.%d.x' % (args.version.major, args.version.minor)
branch = args.branch # shorthand
branch = branch.rstrip('/') # canonicalize for later comparisons
logging.info('Rolling release %s from branch %s@%d' % (args.version,
branch, args.revnum))
check_copyright_year(repos, args.branch, args.revnum)
# Ensure we've got the appropriate rolling dependencies available
autoconf = AutoconfDep(args.base_dir, False, args.verbose,
tool_versions[args.version.branch]['autoconf'][0],
tool_versions[args.version.branch]['autoconf'][1])
libtool = LibtoolDep(args.base_dir, False, args.verbose,
tool_versions[args.version.branch]['libtool'][0],
tool_versions[args.version.branch]['libtool'][1])
swig = SwigDep(args.base_dir, False, args.verbose,
tool_versions[args.version.branch]['swig'][0],
tool_versions[args.version.branch]['swig'][1], None)
for dep in [autoconf, libtool, swig]:
if not dep.have_usable():
raise RuntimeError('Cannot find usable %s' % dep.label)
if branch != 'trunk':
# Make sure CHANGES is sync'd.
compare_changes(repos, branch, args.revnum)
# Ensure the output directory doesn't already exist
if os.path.exists(get_deploydir(args.base_dir)):
raise RuntimeError('output directory \'%s\' already exists'
% get_deploydir(args.base_dir))
os.mkdir(get_deploydir(args.base_dir))
logging.info('Preparing working copy source')
shutil.rmtree(get_workdir(args.base_dir), True)
run_script(args.verbose, 'svn checkout %s %s'
% (repos + '/' + branch + '@' + str(args.revnum),
get_workdir(args.base_dir)))
# Exclude stuff we don't want in the tarball, it will not be present
# in the exported tree.
exclude = ['contrib', 'notes']
if branch != 'trunk':
exclude += ['STATUS']
if args.version.minor < 7:
exclude += ['packages', 'www']
cwd = os.getcwd()
os.chdir(get_workdir(args.base_dir))
run_script(args.verbose,
'svn update --set-depth exclude %s' % " ".join(exclude))
os.chdir(cwd)
if args.patches:
# Assume patches are independent and can be applied in any
# order, no need to sort.
majmin = '%d.%d' % (args.version.major, args.version.minor)
for name in os.listdir(args.patches):
if name.find(majmin) != -1 and name.endswith('patch'):
logging.info('Applying patch %s' % name)
run_script(args.verbose,
'''svn patch %s %s'''
% (os.path.join(args.patches, name),
get_workdir(args.base_dir)))
# Massage the new version number into svn_version.h.
ver_tag, ver_numtag = args.version.get_ver_tags(args.revnum)
replacements = [('#define SVN_VER_TAG',
'".*"', ver_tag),
('#define SVN_VER_NUMTAG',
'".*"', ver_numtag),
('#define SVN_VER_REVISION',
'[0-9][0-9]*', str(args.revnum))]
if args.version.pre != 'nightly':
# SVN_VER_PATCH might change for security releases, e.g., when
# releasing 1.9.7 from the magic revision of 1.9.6.
#
# ### Would SVN_VER_MAJOR / SVN_VER_MINOR ever change?
# ### Note that SVN_VER_MINOR is duplicated in some places, see
# ### <https://subversion.apache.org/docs/community-guide/releasing.html#release-branches>
replacements += [('#define SVN_VER_MAJOR',
'[0-9][0-9]*', str(args.version.major)),
('#define SVN_VER_MINOR',
'[0-9][0-9]*', str(args.version.minor)),
('#define SVN_VER_PATCH',
'[0-9][0-9]*', str(args.version.patch))]
replace_lines(os.path.join(get_workdir(args.base_dir),
'subversion', 'include', 'svn_version.h'),
replacements)
# Basename for export and tarballs, e.g. subversion-1.9.5 or
# subversion-nightly-r1800000
exportdir = get_exportdir(args.base_dir, args.version, args.revnum)
basename = os.path.basename(exportdir)
def export(windows):
shutil.rmtree(exportdir, True)
if windows:
eol_style = "--native-eol CRLF"
else:
eol_style = "--native-eol LF"
run_script(args.verbose, "svn export %s %s %s"
% (eol_style, get_workdir(args.base_dir), exportdir))
def transform_sql():
for root, dirs, files in os.walk(exportdir):
for fname in files:
if fname.endswith('.sql'):
run_script(args.verbose,
'python build/transform_sql.py %s/%s %s/%s'
% (root, fname, root, fname[:-4] + '.h'))
def clean_autom4te():
for root, dirs, files in os.walk(get_workdir(args.base_dir)):
for dname in dirs:
if dname.startswith('autom4te') and dname.endswith('.cache'):
shutil.rmtree(os.path.join(root, dname))
logging.info('Building Windows tarballs')
export(windows=True)
os.chdir(exportdir)
transform_sql()
# Can't use the po-update.sh in the Windows export since it has CRLF
# line endings and won't run, so use the one in the working copy.
run_script(args.verbose,
'%s/tools/po/po-update.sh pot' % get_workdir(args.base_dir))
os.chdir(cwd)
clean_autom4te() # dist.sh does it but pointless on Windows?
os.chdir(get_tempdir(args.base_dir))
run_script(args.verbose,
'zip -q -r %s %s' % (basename + '.zip', basename))
os.chdir(cwd)
logging.info('Building Unix tarballs')
export(windows=False)
os.chdir(exportdir)
transform_sql()
run_script(args.verbose,
'''tools/po/po-update.sh pot
./autogen.sh --release''',
hide_stderr=True) # SWIG is noisy
os.chdir(cwd)
clean_autom4te() # dist.sh does it but probably pointless
# Do not use tar, it's probably GNU tar which produces tar files
# that are not compliant with POSIX.1 when including filenames
# longer than 100 chars. Platforms without a tar that understands
# the GNU tar extension will not be able to extract the resulting
# tar file. Use pax to produce POSIX.1 tar files.
#
# Use the gzip -n flag - this prevents it from storing the
# original name of the .tar file, and far more importantly, the
# mtime of the .tar file, in the produced .tar.gz file. This is
    # important, because it makes the gzip encoding reproducible by
    # anyone else who has a similar version of gzip, and also uses
# "gzip -9n". This means that committers who want to GPG-sign both
# the .tar.gz and the .tar.bz2 can download the .tar.bz2 (which is
# smaller), and locally generate an exact duplicate of the
# official .tar.gz file. This metadata is data on the temporary
# uncompressed tarball itself, not any of its contents, so there
# will be no effect on end-users.
os.chdir(get_tempdir(args.base_dir))
run_script(args.verbose,
'''pax -x ustar -w -f %s %s
bzip2 -9fk %s
gzip -9nf %s'''
% (basename + '.tar', basename,
basename + '.tar',
basename + '.tar'))
os.chdir(cwd)
# Move the results to the deploy directory
logging.info('Moving artifacts and calculating checksums')
for e in extns:
filename = basename + '.' + e
filepath = os.path.join(get_tempdir(args.base_dir), filename)
shutil.move(filepath, get_deploydir(args.base_dir))
filepath = os.path.join(get_deploydir(args.base_dir), filename)
m = hashlib.sha1()
m.update(open(filepath, 'r').read())
open(filepath + '.sha1', 'w').write(m.hexdigest())
m = hashlib.sha512()
m.update(open(filepath, 'r').read())
open(filepath + '.sha512', 'w').write(m.hexdigest())
# Nightlies do not get tagged so do not need the header
if args.version.pre != 'nightly':
shutil.copy(os.path.join(get_workdir(args.base_dir),
'subversion', 'include', 'svn_version.h'),
os.path.join(get_deploydir(args.base_dir),
'svn_version.h.dist-%s' % str(args.version)))
# And we're done!
#----------------------------------------------------------------------
# Sign the candidate release artifacts
def sign_candidates(args):
'Sign candidate artifacts in the dist development directory.'
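    # gpg writes a detached, ASCII-armoured signature to stdout, which is
    # appended to the artifact's .asc file so that signatures from several
    # release managers can accumulate in the same file.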
def sign_file(filename):
asc_file = open(filename + '.asc', 'a')
logging.info("Signing %s" % filename)
proc = subprocess.check_call(['gpg', '-ba', '-o', '-', filename],
stdout=asc_file)
asc_file.close()
target = get_target(args)
for e in extns:
filename = os.path.join(target, 'subversion-%s.%s' % (args.version, e))
sign_file(filename)
if args.version.major >= 1 and args.version.minor <= 6:
filename = os.path.join(target,
'subversion-deps-%s.%s' % (args.version, e))
sign_file(filename)
#----------------------------------------------------------------------
# Post the candidate release artifacts
def post_candidates(args):
'Post candidate artifacts to the dist development directory.'
target = get_target(args)
logging.info('Importing tarballs to %s' % dist_dev_url)
ver = str(args.version)
svn_cmd = ['svn', 'import', '-m',
'Add Subversion %s candidate release artifacts' % ver,
'--auto-props', '--config-option',
'config:auto-props:*.asc=svn:eol-style=native;svn:mime-type=text/plain',
target, dist_dev_url]
if (args.username):
svn_cmd += ['--username', args.username]
subprocess.check_call(svn_cmd)
#----------------------------------------------------------------------
# Create tag
def create_tag(args):
'Create tag in the repository'
target = get_target(args)
logging.info('Creating tag for %s' % str(args.version))
if not args.branch:
args.branch = 'branches/%d.%d.x' % (args.version.major, args.version.minor)
branch = secure_repos + '/' + args.branch.rstrip('/')
tag = secure_repos + '/tags/' + str(args.version)
svnmucc_cmd = ['svnmucc', '-m',
'Tagging release ' + str(args.version)]
if (args.username):
svnmucc_cmd += ['--username', args.username]
svnmucc_cmd += ['cp', str(args.revnum), branch, tag]
svnmucc_cmd += ['put', os.path.join(target, 'svn_version.h.dist' + '-' +
str(args.version)),
tag + '/subversion/include/svn_version.h']
# don't redirect stdout/stderr since svnmucc might ask for a password
try:
subprocess.check_call(svnmucc_cmd)
except subprocess.CalledProcessError:
if args.version.is_prerelease():
logging.error("Do you need to pass --branch=trunk?")
raise
if not args.version.is_prerelease():
logging.info('Bumping revisions on the branch')
def replace_in_place(fd, startofline, flat, spare):
"""In file object FD, replace FLAT with SPARE in the first line
starting with STARTOFLINE."""
fd.seek(0, os.SEEK_SET)
lines = fd.readlines()
for i, line in enumerate(lines):
if line.startswith(startofline):
lines[i] = line.replace(flat, spare)
break
else:
raise RuntimeError('Definition of %r not found' % startofline)
fd.seek(0, os.SEEK_SET)
fd.writelines(lines)
fd.truncate() # for current callers, new value is never shorter.
new_version = Version('%d.%d.%d' %
(args.version.major, args.version.minor,
args.version.patch + 1))
def file_object_for(relpath):
fd = tempfile.NamedTemporaryFile()
url = branch + '/' + relpath
fd.url = url
subprocess.check_call(['svn', 'cat', '%s@%d' % (url, args.revnum)],
stdout=fd)
return fd
svn_version_h = file_object_for('subversion/include/svn_version.h')
replace_in_place(svn_version_h, '#define SVN_VER_PATCH ',
str(args.version.patch), str(new_version.patch))
STATUS = file_object_for('STATUS')
replace_in_place(STATUS, 'Status of ',
str(args.version), str(new_version))
svn_version_h.seek(0, os.SEEK_SET)
STATUS.seek(0, os.SEEK_SET)
subprocess.check_call(['svnmucc', '-r', str(args.revnum),
'-m', 'Post-release housekeeping: '
'bump the %s branch to %s.'
% (branch.split('/')[-1], str(new_version)),
'put', svn_version_h.name, svn_version_h.url,
'put', STATUS.name, STATUS.url,
])
del svn_version_h
del STATUS
#----------------------------------------------------------------------
# Clean dist
def clean_dist(args):
'Clean the distribution directory of all but the most recent artifacts.'
stdout = subprocess.check_output(['svn', 'list', dist_release_url])
def minor(version):
"""Return the minor release line of the parameter, which must be
a Version object."""
return (version.major, version.minor)
filenames = stdout.split('\n')
filenames = filter(lambda x: x.startswith('subversion-'), filenames)
versions = set(map(Version, filenames))
minor_lines = set(map(minor, versions))
to_keep = set()
# Keep 3 minor lines: 1.10.0-alpha3, 1.9.7, 1.8.19.
# TODO: When we release 1.A.0 GA we'll have to manually remove 1.(A-2).* artifacts.
for recent_line in sorted(minor_lines, reverse=True)[:3]:
to_keep.add(max(
x for x in versions
if minor(x) == recent_line
))
for i in sorted(to_keep):
logging.info("Saving release '%s'", i)
svnmucc_cmd = ['svnmucc', '-m', 'Remove old Subversion releases.\n' +
'They are still available at ' +
'https://archive.apache.org/dist/subversion/']
if (args.username):
svnmucc_cmd += ['--username', args.username]
for filename in filenames:
if Version(filename) not in to_keep:
logging.info("Removing %r", filename)
svnmucc_cmd += ['rm', dist_release_url + '/' + filename]
# don't redirect stdout/stderr since svnmucc might ask for a password
if 'rm' in svnmucc_cmd:
subprocess.check_call(svnmucc_cmd)
else:
logging.info("Nothing to remove")
#----------------------------------------------------------------------
# Move to dist
def move_to_dist(args):
'Move candidate artifacts to the distribution directory.'
stdout = subprocess.check_output(['svn', 'list', dist_dev_url])
filenames = []
for entry in stdout.split('\n'):
if fnmatch.fnmatch(entry, 'subversion-%s.*' % str(args.version)):
filenames.append(entry)
svnmucc_cmd = ['svnmucc', '-m',
'Publish Subversion-%s.' % str(args.version)]
if (args.username):
svnmucc_cmd += ['--username', args.username]
svnmucc_cmd += ['rm', dist_dev_url + '/' + 'svn_version.h.dist'
+ '-' + str(args.version)]
for filename in filenames:
svnmucc_cmd += ['mv', dist_dev_url + '/' + filename,
dist_release_url + '/' + filename]
# don't redirect stdout/stderr since svnmucc might ask for a password
logging.info('Moving release artifacts to %s' % dist_release_url)
subprocess.check_call(svnmucc_cmd)
#----------------------------------------------------------------------
# Write announcements
def write_news(args):
'Write text for the Subversion website.'
data = { 'date' : datetime.date.today().strftime('%Y%m%d'),
'date_pres' : datetime.date.today().strftime('%Y-%m-%d'),
'major-minor' : args.version.branch,
'version' : str(args.version),
'version_base' : args.version.base,
'anchor': args.version.get_download_anchor(),
'is_recommended': ezt_bool(args.version.is_recommended()),
}
if args.version.is_prerelease():
template_filename = 'rc-news.ezt'
else:
template_filename = 'stable-news.ezt'
template = ezt.Template()
template.parse(get_tmplfile(template_filename).read())
template.generate(sys.stdout, data)
def get_sha1info(args):
'Return a list of sha1 info for the release'
target = get_target(args)
sha1s = glob.glob(os.path.join(target, 'subversion*-%s*.sha1' % args.version))
class info(object):
pass
sha1info = []
for s in sha1s:
i = info()
# strip ".sha1"
i.filename = os.path.basename(s)[:-5]
i.sha1 = open(s, 'r').read()
sha1info.append(i)
return sha1info
def write_announcement(args):
'Write the release announcement.'
sha1info = get_sha1info(args)
siginfo = "\n".join(get_siginfo(args, True)) + "\n"
data = { 'version' : str(args.version),
'sha1info' : sha1info,
'siginfo' : siginfo,
'major-minor' : args.version.branch,
'major-minor-patch' : args.version.base,
'anchor' : args.version.get_download_anchor(),
}
if args.version.is_prerelease():
template_filename = 'rc-release-ann.ezt'
else:
data['dot-zero'] = ezt_bool(args.version.patch == 0)
# TODO: instead of requiring the RM to remember to pass --security,
# read the private repository where CVE announcements are staged,
# parse the json file that identifies which versions are affected,
# and accordingly automagically set data['security'].
data['security'] = ezt_bool(args.security)
template_filename = 'stable-release-ann.ezt'
# The template text assumes these two are mutually exclusive.
# If you ever find a reason to make a x.y.0 release with a security
# bug, just comment this out and update the template before sending.
assert not (data['dot-zero'] and data['security'])
template = ezt.Template(compress_whitespace = False)
template.parse(get_tmplfile(template_filename).read())
template.generate(sys.stdout, data)
def write_downloads(args):
'Output the download section of the website.'
sha1info = get_sha1info(args)
data = { 'version' : str(args.version),
'fileinfo' : sha1info,
}
template = ezt.Template(compress_whitespace = False)
template.parse(get_tmplfile('download.ezt').read())
template.generate(sys.stdout, data)
#----------------------------------------------------------------------
# Validate the signatures for a release
key_start = '-----BEGIN PGP SIGNATURE-----'
PUBLIC_KEY_ALGORITHMS = {
# These values are taken from the RFC's registry at:
# https://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
#
# The values are callables that produce gpg1-like key length and type
# indications, e.g., "4096R" for a 4096-bit RSA key.
1: (lambda keylen: str(keylen) + 'R'), # RSA
}
def _make_human_readable_fingerprint(fingerprint):
return re.compile(r'(....)' * 10).sub(r'\1 \2 \3 \4 \5 \6 \7 \8 \9 \10',
fingerprint)
def get_siginfo(args, quiet=False):
'Returns a list of signatures for the release.'
try:
import gnupg
except ImportError:
import security._gnupg as gnupg
gpg = gnupg.GPG()
target = get_target(args)
good_sigs = {}
fingerprints = {}
output = []
glob_pattern = os.path.join(target, 'subversion*-%s*.asc' % args.version)
for filename in glob.glob(glob_pattern):
text = open(filename).read()
keys = text.split(key_start)
if not quiet:
logging.info("Checking %d sig(s) in %s" % (len(keys[1:]), filename))
for key in keys[1:]:
fd, fn = tempfile.mkstemp()
os.write(fd, key_start + key)
os.close(fd)
verified = gpg.verify_file(open(fn, 'rb'), filename[:-4])
os.unlink(fn)
if verified.valid:
good_sigs[verified.fingerprint] = True
else:
sys.stderr.write("BAD SIGNATURE for %s\n" % filename)
if verified.key_id:
sys.stderr.write(" key id: %s\n" % verified.key_id)
sys.exit(1)
for id in good_sigs.keys():
# Most potential signers have public short keyid (32-bit) collisions in
# the https://evil32.com/ set, which has been uploaded to the
# keyservers, so generate the long keyid (see use of LONG_KEY_ID below).
#
# TODO: in the future it'd be nice to use the 'gnupg' module here.
gpg_output = subprocess.check_output(
['gpg', '--fixed-list-mode', '--with-colons', '--fingerprint', id],
stderr=subprocess.STDOUT,
)
gpg_output = gpg_output.splitlines()
# This code was added in r934990, but there was no comment (nor log
# message text) explaining its purpose. I've commented it out since
# ignoring arbitrary warnings in a verification codepath is Bad. If
# you run into warnings on your machine, feel free to uncomment it,
# but when you do so please make it match specific warnings only.
#
#gpg_output = "\n".join([ l for l in gpg_output.splitlines()
# if l[0:7] != 'Warning' ])
# Parse gpg's output. This happens to work for both gpg1 and gpg2,
# even though their outputs are slightly different.
#
# See http://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS
for line in gpg_output:
parts = line.split(':')
if parts[0] == 'pub':
keylen = int(parts[2])
keytype = int(parts[3])
formatter = PUBLIC_KEY_ALGORITHMS[keytype]
long_key_id = parts[4]
length_and_type = formatter(keylen) + '/' + long_key_id
del keylen, keytype, formatter, long_key_id
break
else:
raise RuntimeError("Failed to determine LONG_KEY_ID")
for line in gpg_output:
parts = line.split(':')
if parts[0] == 'fpr':
fingerprint = parts[9]
break
else:
raise RuntimeError("Failed to determine FINGERPRINT")
for line in gpg_output:
parts = line.split(':')
if parts[0] == 'uid':
name = parts[9].split(' <')[0]
break
else:
raise RuntimeError("Failed to determine NAME")
format_expandos = dict(
name=name,
length_and_type=length_and_type,
fingerprint=_make_human_readable_fingerprint(fingerprint),
)
del name, length_and_type, fingerprint
line = " {name} [{length_and_type}] with fingerprint:"
output.append( line.format(**format_expandos) )
line = " {fingerprint}"
output.append( line.format(**format_expandos) )
return output
def check_sigs(args):
'Check the signatures for the release.'
output = get_siginfo(args)
for line in output:
print(line)
def get_keys(args):
'Import the LDAP-based KEYS file to gpg'
# We use a tempfile because urlopen() objects don't have a .fileno()
with tempfile.SpooledTemporaryFile() as fd:
fd.write(urllib2.urlopen(KEYS).read())
fd.flush()
fd.seek(0)
subprocess.check_call(['gpg', '--import'], stdin=fd)
#----------------------------------------------------------------------
# Main entry point for argument parsing and handling
def main():
'Parse arguments, and drive the appropriate subcommand.'
# Setup our main parser
parser = argparse.ArgumentParser(
description='Create an Apache Subversion release.')
parser.add_argument('--clean', action='store_true', default=False,
help='Remove any directories previously created by %(prog)s')
parser.add_argument('--verbose', action='store_true', default=False,
help='Increase output verbosity')
parser.add_argument('--base-dir', default=os.getcwd(),
help='''The directory in which to create needed files and
folders. The default is the current working
directory.''')
subparsers = parser.add_subparsers(title='subcommands')
# Setup the parser for the build-env subcommand
subparser = subparsers.add_parser('build-env',
                    help='''Download release prerequisites, including autoconf,
libtool, and swig.''')
subparser.set_defaults(func=build_env)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('--sf-mirror', default='softlayer',
help='''The mirror to use for downloading files from
SourceForge. If in the EU, you may want to use
'kent' for this value.''')
subparser.add_argument('--use-existing', action='store_true', default=False,
help='''Attempt to use existing build dependencies before
downloading and building a private set.''')
# Setup the parser for the roll subcommand
subparser = subparsers.add_parser('roll',
help='''Create the release artifacts.''')
subparser.set_defaults(func=roll_tarballs)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('revnum', type=lambda arg: int(arg.lstrip('r')),
help='''The revision number to base the release on.''')
subparser.add_argument('--branch',
help='''The branch to base the release on,
relative to ^/subversion/.''')
subparser.add_argument('--patches',
help='''The path to the directory containing patches.''')
# Setup the parser for the sign-candidates subcommand
subparser = subparsers.add_parser('sign-candidates',
help='''Sign the release artifacts.''')
subparser.set_defaults(func=sign_candidates)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('--target',
help='''The full path to the directory containing
release artifacts.''')
# Setup the parser for the post-candidates subcommand
subparser = subparsers.add_parser('post-candidates',
help='''Commit candidates to the release development area
of the dist.apache.org repository.''')
subparser.set_defaults(func=post_candidates)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('--username',
help='''Username for ''' + dist_repos + '''.''')
subparser.add_argument('--target',
help='''The full path to the directory containing
release artifacts.''')
# Setup the parser for the create-tag subcommand
subparser = subparsers.add_parser('create-tag',
help='''Create the release tag.''')
subparser.set_defaults(func=create_tag)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('revnum', type=lambda arg: int(arg.lstrip('r')),
help='''The revision number to base the release on.''')
subparser.add_argument('--branch',
help='''The branch to base the release on,
relative to ^/subversion/.''')
subparser.add_argument('--username',
help='''Username for ''' + secure_repos + '''.''')
subparser.add_argument('--target',
help='''The full path to the directory containing
release artifacts.''')
# The clean-dist subcommand
subparser = subparsers.add_parser('clean-dist',
help='''Clean the distribution directory (and mirrors) of
all but the most recent MAJOR.MINOR release.''')
subparser.set_defaults(func=clean_dist)
subparser.add_argument('--dist-dir',
help='''The directory to clean.''')
subparser.add_argument('--username',
help='''Username for ''' + dist_repos + '''.''')
# The move-to-dist subcommand
subparser = subparsers.add_parser('move-to-dist',
                    help='''Move candidates and signatures from the temporary
release dev location to the permanent distribution
directory.''')
subparser.set_defaults(func=move_to_dist)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('--username',
help='''Username for ''' + dist_repos + '''.''')
# The write-news subcommand
subparser = subparsers.add_parser('write-news',
help='''Output to stdout template text for use in the news
section of the Subversion website.''')
subparser.set_defaults(func=write_news)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
# write-announcement
subparser = subparsers.add_parser('write-announcement',
help='''Output to stdout template text for the emailed
release announcement.''')
subparser.set_defaults(func=write_announcement)
subparser.add_argument('--security', action='store_true', default=False,
help='''The release being announced includes security
fixes.''')
subparser.add_argument('--target',
help='''The full path to the directory containing
release artifacts.''')
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
# write-downloads
subparser = subparsers.add_parser('write-downloads',
help='''Output to stdout template text for the download
table for subversion.apache.org''')
subparser.set_defaults(func=write_downloads)
subparser.add_argument('--target',
help='''The full path to the directory containing
release artifacts.''')
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
# check-sigs
subparser = subparsers.add_parser('check-sigs',
help='''Output to stdout the signatures collected for this
release''')
subparser.set_defaults(func=check_sigs)
subparser.add_argument('version', type=Version,
help='''The release label, such as '1.7.0-alpha1'.''')
subparser.add_argument('--target',
help='''The full path to the directory containing
release artifacts.''')
# get-keys
subparser = subparsers.add_parser('get-keys',
help='''Import committers' public keys to ~/.gpg/''')
subparser.set_defaults(func=get_keys)
# A meta-target
subparser = subparsers.add_parser('clean',
help='''The same as the '--clean' switch, but as a
separate subcommand.''')
subparser.set_defaults(func=cleanup)
# Parse the arguments
args = parser.parse_args()
# first, process any global operations
if args.clean:
cleanup(args)
# Set up logging
logger = logging.getLogger()
if args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# Fix up our path so we can use our installed versions
os.environ['PATH'] = os.path.join(get_prefix(args.base_dir), 'bin') + ':' \
+ os.environ['PATH']
# Make timestamps in tarballs independent of local timezone
os.environ['TZ'] = 'UTC'
# finally, run the subcommand, and give it the parsed arguments
args.func(args)
if __name__ == '__main__':
main()
| apache-2.0 | 5,809,119,848,239,719,000 | 37.987379 | 133 | 0.571064 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-gridbase/package.py | 1 | 1620 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGridbase(RPackage):
"""Integration of base and grid graphics."""
homepage = "https://cran.r-project.org/web/packages/gridBase/index.html"
url = "https://cran.r-project.org/src/contrib/gridBase_0.4-7.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/gridBase"
version('0.4-7', '6d5064a85f5c966a92ee468ae44c5f1f')
| lgpl-2.1 | -7,292,303,512,323,437,000 | 45.285714 | 78 | 0.677778 | false |
pronto/SSH-Ranking | sqlclass.py | 1 | 3027 | #classes
import sqlalchemy
from ConfigParser import SafeConfigParser
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String,VARCHAR,TEXT,DATETIME, Sequence,func,Boolean, ForeignKey
from sqlalchemy.orm import relationship
# Config files! Yay!
config = SafeConfigParser()
config.read('config.ini')
sqlserver = config.get('sql', 'sqlserv')
sqlservertype = config.get('sql', 'sqlservertype')
sqluser = config.get('sql', 'sqluser')
sqlpass = config.get('sql', 'sqlpass')
Base = declarative_base()
query_string = sqlservertype + '://' + sqluser + ':' + sqlpass + '@' + sqlserver
eng = sqlalchemy.create_engine(query_string)
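# Select the scraper's database and run a trivial query to make sure the
# connection works before building the session factory.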
eng.execute("USE db_sshrank")
eng.execute("select 1").scalar()
Session = sqlalchemy.orm.sessionmaker(bind=eng)
sqlsess = Session()
class ips(Base):
__tablename__ = 'ips_alc2'
ip = Column(VARCHAR(39))
user = Column(TEXT)
dtime = Column(DATETIME)
pk = Column(Integer,Sequence('pk'), primary_key=True)
def __init__(self,ip,user,dtime):
self.ip = ip
self.user = user
self.dtime = dtime
def __repr__(self):
return "<ip('%s','%s', '%s')>" % (self.ip, self.user, self.dtime)
class rdns(Base):
__tablename__= 'rdns_tbl'
pk = Column(Integer,Sequence('pk'), primary_key=True)
ip = Column(VARCHAR(39))
rdns = Column(TEXT)
good = Column(VARCHAR(20))
dtime = Column(DATETIME)
def __init__(self,ip,rdns,good,dtime):
self.ip = ip
self.rdns = rdns
self.good = good
self.dtime = dtime
def __repr__(self):
return "<rdns('%s','%s','%s','%s')>" % (self.ip, self.rdns, self.good, self.dtime)
class nmapSQL(Base):
__tablename__='nmapmysql'
# |ignored|
#ip, dtime, port number, State, Protocol, Owner, Service, SunRPC info, Version info
pk = Column(Integer,Sequence('pk'), primary_key=True)
ip = Column(VARCHAR(39))
dtime = Column(DATETIME)
portnum = Column(VARCHAR(5))
state = Column(VARCHAR(10))
proto = Column(VARCHAR(5))
service = Column(VARCHAR(39))
verinfo = Column(TEXT)
# 1 2 3 4 5 6 7
def __init__(self, ip, dtime, portnum, state, proto, service, verinfo):
self.ip = ip
self.dtime = dtime
self.portnum = portnum
self.state = state
self.proto = proto
self.service = service
self.verinfo = verinfo
def __repr__(self):
return "<nmapSQL>('%s','%s','%s','%s','%s','%s','%s')>" % ( self.ip, self.dtime, self.portnum, self.state, self.proto, self.service, self.verinfo)
# 1 2 3 4 5 6 7 1 2 3 4 5 6 7
#http://stackoverflow.com/questions/8839211/sqlalchemy-add-child-in-one-to-many-relationship
#from sqlclass import *
#a=ips('127.0.0.1', 'jkldj', '2013-10-28 15:10:51')
#Session.add(a)
#Session.commit()
| apache-2.0 | 223,636,774,653,269,000 | 33.011236 | 154 | 0.589693 | false |
sid88in/incubator-airflow | airflow/contrib/operators/sftp_operator.py | 1 | 5426 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.ssh_hook import SSHHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SFTPOperation(object):
PUT = 'put'
GET = 'get'
class SFTPOperator(BaseOperator):
"""
    SFTPOperator for transferring files from a remote host to local or vice versa.
    This operator uses ssh_hook to open an sftp transport channel that serves as
    the basis for file transfer.
:param ssh_hook: predefined ssh_hook to use for remote execution.
Either `ssh_hook` or `ssh_conn_id` needs to be provided.
:type ssh_hook: :class:`SSHHook`
:param ssh_conn_id: connection id from airflow Connections.
`ssh_conn_id` will be ingored if `ssh_hook` is provided.
:type ssh_conn_id: str
:param remote_host: remote host to connect (templated)
Nullable. If provided, it will replace the `remote_host` which was
defined in `ssh_hook` or predefined in the connection of `ssh_conn_id`.
:type remote_host: str
:param local_filepath: local file path to get or put. (templated)
:type local_filepath: str
:param remote_filepath: remote file path to get or put. (templated)
:type remote_filepath: str
:param operation: specify operation 'get' or 'put', defaults to put
    :type operation: str
:param confirm: specify if the SFTP operation should be confirmed, defaults to True
:type confirm: bool
"""
template_fields = ('local_filepath', 'remote_filepath', 'remote_host')
@apply_defaults
def __init__(self,
ssh_hook=None,
ssh_conn_id=None,
remote_host=None,
local_filepath=None,
remote_filepath=None,
operation=SFTPOperation.PUT,
confirm=True,
*args,
**kwargs):
super(SFTPOperator, self).__init__(*args, **kwargs)
self.ssh_hook = ssh_hook
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.local_filepath = local_filepath
self.remote_filepath = remote_filepath
self.operation = operation
self.confirm = confirm
if not (self.operation.lower() == SFTPOperation.GET or
self.operation.lower() == SFTPOperation.PUT):
raise TypeError("unsupported operation value {0}, expected {1} or {2}"
.format(self.operation, SFTPOperation.GET, SFTPOperation.PUT))
def execute(self, context):
file_msg = None
try:
if self.ssh_conn_id:
if self.ssh_hook and isinstance(self.ssh_hook, SSHHook):
self.log.info("ssh_conn_id is ignored when ssh_hook is provided.")
else:
self.log.info("ssh_hook is not provided or invalid. " +
"Trying ssh_conn_id to create SSHHook.")
self.ssh_hook = SSHHook(ssh_conn_id=self.ssh_conn_id)
if not self.ssh_hook:
raise AirflowException("Cannot operate without ssh_hook or ssh_conn_id.")
if self.remote_host is not None:
self.log.info("remote_host is provided explicitly. " +
"It will replace the remote_host which was defined " +
"in ssh_hook or predefined in connection of ssh_conn_id.")
self.ssh_hook.remote_host = self.remote_host
with self.ssh_hook.get_conn() as ssh_client:
sftp_client = ssh_client.open_sftp()
if self.operation.lower() == SFTPOperation.GET:
file_msg = "from {0} to {1}".format(self.remote_filepath,
self.local_filepath)
self.log.debug("Starting to transfer %s", file_msg)
sftp_client.get(self.remote_filepath, self.local_filepath)
else:
file_msg = "from {0} to {1}".format(self.local_filepath,
self.remote_filepath)
self.log.debug("Starting to transfer file %s", file_msg)
sftp_client.put(self.local_filepath,
self.remote_filepath,
confirm=self.confirm)
except Exception as e:
raise AirflowException("Error while transferring {0}, error: {1}"
.format(file_msg, str(e)))
return None
| apache-2.0 | 4,150,012,125,310,411,000 | 44.216667 | 90 | 0.600995 | false |
justinmk/python-client | neovim/__init__.py | 1 | 1799 | from client import Client
from script_host import ScriptHost
from plugin_host import PluginHost
from uv_stream import UvStream
from msgpack_stream import MsgpackStream
from rpc_stream import RPCStream
from time import sleep
import logging, os
__all__ = ['connect', 'start_host', 'ScriptHost', 'PluginHost']
# Required for python 2.6
class NullHandler(logging.Handler):
def emit(self, record):
pass
def connect(address=None, port=None, vim_compatible=False):
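    # Stack the transport layers (libuv stream -> msgpack framing -> RPC),
    # wrap them in a Client, fetch the running nvim's API metadata and return
    # the resulting vim object.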
client = Client(RPCStream(MsgpackStream(UvStream(address, port))),
vim_compatible)
client.discover_api()
return client.vim
def spawn(argv):
client = Client(RPCStream(MsgpackStream(UvStream(spawn_argv=argv))))
client.discover_api()
return client.vim
def start_host(address=None, port=None):
logging.root.addHandler(NullHandler())
logger = logging.getLogger(__name__)
info = logger.info
if 'NVIM_PYTHON_LOG_FILE' in os.environ:
logfile = os.environ['NVIM_PYTHON_LOG_FILE'].strip()
handler = logging.FileHandler(logfile, 'w')
handler.formatter = logging.Formatter(
'%(asctime)s [%(levelname)s @ '
'%(filename)s:%(funcName)s:%(lineno)s] %(process)s - %(message)s')
logging.root.addHandler(handler)
level = logging.INFO
if 'NVIM_PYTHON_LOG_LEVEL' in os.environ:
l = getattr(logging,
os.environ['NVIM_PYTHON_LOG_LEVEL'].strip(),
level)
if isinstance(l, int):
level = l
logger.setLevel(level)
info('connecting to neovim')
vim = connect(address, port, vim_compatible=True)
info('connected to neovim')
with PluginHost(vim, discovered_plugins=[ScriptHost]) as host:
host.run()
| apache-2.0 | 1,397,941,041,186,873,000 | 31.125 | 78 | 0.644803 | false |
mancoast/CPythonPyc_test | fail/334_test_poll.py | 1 | 6101 | # Test case for the os.poll() function
import os
import random
import select
from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.support import TESTFN, run_unittest, reap_threads
try:
select.poll
except AttributeError:
raise unittest.SkipTest("select.poll not defined -- skipping test_poll")
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = b" This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError("no pipes ready for writing")
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError("no pipes ready for reading")
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
os.close(r2w[rd]) ; os.close( rd )
p.unregister( r2w[rd] )
p.unregister( rd )
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1 << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
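        # Start a worker thread that blocks in poll(), then mutate the set of
        # registered fds and call poll() again from this thread; the second,
        # concurrent poll() invocation is expected to raise RuntimeError.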
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 | 5,694,158,091,451,612,000 | 28.906863 | 87 | 0.545976 | false |
regardscitoyens/nosfinanceslocales_scraper | localfinance/parsing/zone.py | 1 | 5072 | # -*- coding: utf-8 -*-
from .document_mapper import DocumentMapper
from .finance import (
CityFinanceParser,
EPCIFinanceParser,
DepartmentFinanceParser,
DepartmentFinance2013Parser,
RegionFinanceParser,
RegionFinance2013Parser
)
from .tax import (
CityTaxParser,
CityBefore2008TaxParser,
EPCITaxParser,
EPCI2008TaxParser,
EPCI2010TaxParser,
DepTaxParser,
DepTax2008Parser,
DepTax20092010Parser,
RegTaxParser2008,
RegTaxParser20092010,
RegTaxParserAfter2011,
)
class BaseZoneParser(object):
zone_type = ''
def __init__(self, insee_code, year, url):
self.data = {
'insee_code': insee_code,
'year': year,
'zone_type': self.zone_type,
'url': url
}
self.tax_parser = None
self.finance_parser = None
self.account = None
self.finance_table_id = 3
def parse(self, hxs):
data = self.data.copy()
data.update(self.finance_parser.parse(hxs))
data.update(self.tax_parser.parse(hxs))
return data
class RegionZoneParser(BaseZoneParser):
zone_type = 'region'
def __init__(self, insee_code, year, url):
super(RegionZoneParser, self).__init__(insee_code, year, url)
self.account = DocumentMapper("data/mapping/region_2008.yaml")
year = int(self.data['year'])
if year == 2008:
self.tax_parser = RegTaxParser2008(self.account)
self.finance_parser = RegionFinanceParser(self.account)
elif 2008 < year < 2011:
self.tax_parser = RegTaxParser20092010(self.account)
self.finance_parser = RegionFinanceParser(self.account)
elif 2010 < year < 2013:
self.tax_parser = RegTaxParserAfter2011(self.account)
self.finance_parser = RegionFinanceParser(self.account)
else:
self.account = DocumentMapper("data/mapping/region_2013.yaml")
self.tax_parser = RegTaxParserAfter2011(self.account)
self.finance_parser = RegionFinance2013Parser(self.account)
class DepartmentZoneParser(BaseZoneParser):
zone_type = 'department'
def __init__(self, insee_code, year, url):
super(DepartmentZoneParser, self).__init__(insee_code, year, url)
year = int(self.data['year'])
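        # The published data layout changed over the years, so pick the column
        # mapping file and the tax/finance parser implementations by year.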
if year >= 2013:
self.account = DocumentMapper("data/mapping/department_2013.yaml")
self.tax_parser = DepTaxParser(self.account)
self.finance_parser = DepartmentFinance2013Parser(self.account)
elif 2013 > year > 2010:
self.account = DocumentMapper("data/mapping/department_2011.yaml")
self.tax_parser = DepTaxParser(self.account)
self.finance_parser = DepartmentFinanceParser(self.account)
elif year == 2010:
self.account = DocumentMapper("data/mapping/department_2010.yaml")
self.tax_parser = DepTax20092010Parser(self.account)
self.finance_parser = DepartmentFinanceParser(self.account)
elif 2010 > year > 2008:
self.account = DocumentMapper("data/mapping/department_2009.yaml")
self.tax_parser = DepTax20092010Parser(self.account)
self.finance_parser = DepartmentFinanceParser(self.account)
elif year == 2008:
self.account = DocumentMapper("data/mapping/department_2008.yaml")
self.tax_parser = DepTax2008Parser(self.account)
self.finance_parser = DepartmentFinanceParser(self.account)
class EPCIZoneParser(BaseZoneParser):
zone_type = 'epci'
def __init__(self, insee_code, year, url, siren):
super(EPCIZoneParser, self).__init__(insee_code, year, url)
self.data['siren'] = siren
self.account = DocumentMapper("data/mapping/epci_2010.yaml")
self.finance_parser = EPCIFinanceParser(self.account)
year = int(self.data['year'])
if year < 2009:
self.account = DocumentMapper("data/mapping/epci_2008.yaml")
self.tax_parser = EPCI2008TaxParser(self.account)
elif year < 2011:
self.tax_parser = EPCI2010TaxParser(self.account)
else:
self.tax_parser = EPCITaxParser(self.account)
class CityZoneParser(BaseZoneParser):
"""Parser of city html page"""
zone_type = 'city'
def __init__(self, insee_code, year, url):
super(CityZoneParser, self).__init__(insee_code, year, url)
year = int(self.data['year'])
if year > 2010:
self.account = DocumentMapper("data/mapping/city_2011.yaml")
self.tax_parser = CityTaxParser(self.account)
elif 2008 < year < 2011:
self.account = DocumentMapper("data/mapping/city_2009.yaml")
self.tax_parser = CityTaxParser(self.account)
elif year < 2009:
self.account = DocumentMapper("data/mapping/city_2000.yaml")
self.tax_parser = CityBefore2008TaxParser(self.account)
self.finance_parser = CityFinanceParser(self.account)
| mit | 9,042,204,551,475,589,000 | 33.27027 | 78 | 0.637815 | false |
czhengsci/pymatgen | pymatgen/__init__.py | 1 | 2748 | from __future__ import unicode_literals
import sys
import os
import warnings
import ruamel.yaml as yaml
__author__ = "Pymatgen Development Team"
__email__ = "[email protected]"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ = "[email protected]"
__version__ = "2018.3.14"
SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml")
def _load_pmg_settings():
try:
with open(SETTINGS_FILE, "rt") as f:
d = yaml.safe_load(f)
except IOError:
# If there are any errors, default to using environment variables
# if present.
d = {}
for k, v in os.environ.items():
if k.startswith("PMG_"):
d[k] = v
elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]:
d["PMG_" + k] = v
clean_d = {}
for k, v in d.items():
if not k.startswith("PMG_"):
warnings.warn('With effect from pmg 5.0, all pymatgen settings are'
' prefixed with a "PMG_". E.g., "PMG_VASP_PSP_DIR" '
'instead of "VASP_PSP_DIR".')
clean_d["PMG_" + k] = v
else:
clean_d[k] = v
return clean_d
SETTINGS = _load_pmg_settings()
# Order of imports is important on some systems to avoid
# failures when loading shared libraries.
# import spglib
# from . import optimization, util
# del(spglib, optimization, util)
# Useful aliases for commonly used objects and modules.
# Allows from pymatgen import <class> for quick usage.
from pymatgen.core import *
from .electronic_structure.core import Spin, Orbital
from .ext.matproj import MPRester
from monty.json import MontyEncoder, MontyDecoder, MSONable
def get_structure_from_mp(formula):
"""
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
"""
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure
if sys.version_info < (3, 5):
warnings.warn("""
Pymatgen will drop Py2k support from v2019.1.1. Pls consult the documentation
at https://www.pymatgen.org for more details.""") | mit | 4,265,692,872,666,505,000 | 30.597701 | 80 | 0.618632 | false |
QuantGov/quantgov | tests/test_ml.py | 1 | 2638 | import pytest
import quantgov.ml
import subprocess
from pathlib import Path
PSEUDO_CORPUS_PATH = Path(__file__).resolve().parent.joinpath('pseudo_corpus')
PSEUDO_ESTIMATOR_PATH = (
Path(__file__).resolve().parent
.joinpath('pseudo_estimator')
)
def check_output(cmd):
return (
subprocess.check_output(cmd, universal_newlines=True)
.replace('\n\n', '\n')
)
def test_simple_estimator():
output = check_output(
['quantgov', 'ml', 'estimate',
str(PSEUDO_ESTIMATOR_PATH.joinpath('data', 'binary.qge')),
str(PSEUDO_CORPUS_PATH)]
)
assert output == 'file,is_world\ncfr,False\nmoby,False\n'
def test_probability_estimator():
output = check_output(
['quantgov', 'ml', 'estimate',
str(PSEUDO_ESTIMATOR_PATH.joinpath('data', 'binary.qge')),
str(PSEUDO_CORPUS_PATH), '--probability']
)
assert output == ('file,is_world_prob\ncfr,0.0899\nmoby,0.0216\n')
def test_probability_estimator_6decimals():
output = check_output(
['quantgov', 'ml', 'estimate',
str(PSEUDO_ESTIMATOR_PATH.joinpath('data', 'binary.qge')),
str(PSEUDO_CORPUS_PATH), '--probability', '--precision', '6']
)
assert output == ('file,is_world_prob\ncfr,0.089898\nmoby,0.02162\n')
def test_multiclass_probability_estimator():
output = check_output(
['quantgov', 'ml', 'estimate',
str(PSEUDO_ESTIMATOR_PATH.joinpath('data', 'multiclass.qge')),
str(PSEUDO_CORPUS_PATH), '--probability']
)
assert output == ('file,class,probability\n'
'cfr,business-and-industry,0.1765\n'
'cfr,environment,0.1294\n'
'cfr,health-and-public-welfare,0.1785\n'
'cfr,money,0.169\n'
'cfr,science-and-technology,0.147\n'
'cfr,world,0.1997\n'
'moby,business-and-industry,0.1804\n'
'moby,environment,0.1529\n'
'moby,health-and-public-welfare,0.205\n'
'moby,money,0.1536\n'
'moby,science-and-technology,0.1671\n'
'moby,world,0.141\n')
def test_multiclass_probability_oneclass_estimator():
output = check_output(
['quantgov', 'ml', 'estimate',
str(PSEUDO_ESTIMATOR_PATH.joinpath('data', 'multiclass.qge')),
str(PSEUDO_CORPUS_PATH), '--probability', '--oneclass']
)
assert output == ('file,class,probability\n'
'cfr,world,0.1997\n'
'moby,health-and-public-welfare,0.205\n')
| mit | 1,779,570,359,535,059,700 | 32.820513 | 78 | 0.572403 | false |
marksweiss/organize-m | lib/item.py | 1 | 3548 | from element import Elem
class OrganizemIllegalDataFormatException(Exception): pass
class OrganizemIllegalDataTypeException(Exception): pass
# A single Item in the data file, with a root 'item:' element and child
# elements for each of the fields (Elements) in an Item
# Only title is required and all other args are optional and any or none can
# be passed as named args (kwargs)
# Values for all elements are available directly as properties
# str() returns the YAML string serialization of the Item
# repr() returns the Item as the dict/list structure that YAML deserializes to
class Item(object):
def __init__(self, title, dict_of_elems=None):
# Store list of all elements in Item
self._elems = Elem.get_elems()
# Required elements are 'ROOT' and 'TITLE'
# Set 'root' Item Element
self.__setattr__('_' + Elem.ROOT, Elem.elem_init(Elem.ROOT, None))
# 'title' Element is required, set it first
if not title:
raise OrganizemIllegalDataFormatException("Cannot construct Item with null or empty title")
title_obj = Elem.elem_init(Elem.TITLE, title)
self.__setattr__('_' + Elem.TITLE, title_obj)
self.__setattr__(Elem.TITLE, title_obj.val)
# A little dirty, but not bad. Elem exposes method to get list of optional
# elements, with the assumption being client can call get_optional_data_elems() to
# get all elements and this to get only optional, so it can take care of
# required ones (statically, as here) and process optional ones dynamically
opt_elems = Elem.get_optional_data_elems()
for elem in opt_elems:
kwval = None
elem_obj = None
if dict_of_elems:
if elem in dict_of_elems:
kwval = dict_of_elems[elem]
elem_obj = Elem.elem_init(elem, kwval)
# Private object str(), repr() used by Item str() and repr()
self.__setattr__('_' + elem, elem_obj)
# Public getter just returns obj.val, value for the element
self.__setattr__(elem, elem_obj.val)
else:
self.__setattr__('_' + elem, Elem.elem_init(elem, None))
self.__setattr__(elem, None)
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return None
# Used for access to arbitrary element value: x = item[elem]
def __getitem__(self, elem):
return self.__getattr__(elem)
# For now str() representation is YAML. Make separate method to make client
# code more explicit and allow future change to str() without client code change
def __str__(self):
return self._to_yaml()
def _to_yaml(self):
return '\n'.join([str(self.__getattr__('_' + elem)) for elem in self._elems])
# NOTE: Used by organizem_test.py unit tests
def __repr__(self):
"""
Returns form of object matching form produced by PyYaml.#load() when it loads
the YAML item from the data file. So then PyYaml.#dump(Item.#repr()) produces
valid YAML string
"""
# Use list of elements skipping ROOT
# Iterate list of elems to create list of dicts, one for each attr
elems = [{elem : self.__getattr__(elem)} for elem in self._elems[1:]]
item_repr = {Elem.ROOT : elems}
return repr(item_repr)
| mit | 1,379,241,476,481,216,000 | 43.35 | 103 | 0.602875 | false |
macarthur-lab/xbrowse | seqr/migrations/0059_auto_20190705_1450.py | 1 | 1096 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-07-05 14:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seqr', '0058_matchmakercontactnotes'),
]
operations = [
migrations.AlterField(
model_name='family',
name='analysis_status',
field=models.CharField(choices=[(b'S', b'S'), (b'S_kgfp', b'S'), (b'S_kgdp', b'S'), (b'S_ng', b'S'), (b'Sc_kgfp', b'S'), (b'Sc_kgdp', b'S'), (b'Sc_ng', b'S'), (b'Rcpc', b'R'), (b'Rncc', b'R'), (b'C', b'C'), (b'I', b'A'), (b'Q', b'W')], default=b'Q', max_length=10),
),
migrations.AlterField(
model_name='family',
name='internal_analysis_status',
field=models.CharField(blank=True, choices=[(b'S', b'S'), (b'S_kgfp', b'S'), (b'S_kgdp', b'S'), (b'S_ng', b'S'), (b'Sc_kgfp', b'S'), (b'Sc_kgdp', b'S'), (b'Sc_ng', b'S'), (b'Rcpc', b'R'), (b'Rncc', b'R'), (b'C', b'C'), (b'I', b'A'), (b'Q', b'W')], max_length=10, null=True),
),
]
| agpl-3.0 | -1,311,480,607,405,681,700 | 42.84 | 286 | 0.516423 | false |
ahmetcemturan/SFACT | skeinforge_application/skeinforge_plugins/craft_plugins/cool.py | 1 | 19876 | """
This page is in the table of contents.
Cool is a craft tool to cool the shape.
Cool works well with a stepper extruder; it does not work well with a DC motor extruder.
If enabled, before each layer that takes less than "Minimum Layer Time" to print, the tool head will orbit around the printed area for 'Minimum Layer Time' minus 'the time it takes to print the layer' before it starts printing the layer. This is a great way to let layers with a smaller area cool before you start printing on top of them (so you do not overheat the area).
The cool manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Cool
Allan Ecker aka The Masked Retriever's has written the "Skeinforge Quicktip: Cool" at:
http://blog.thingiverse.com/2009/07/28/skeinforge-quicktip-cool/
==Operation==
The default 'Activate Cool' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Bridge Cool===
Default is one degree Celsius.
If the layer is a bridge layer, then cool will lower the temperature by 'Bridge Cool' degrees Celsius.
===Cool Type===
Default is 'Slow Down'.
====Orbit====
When selected, cool will add orbits with the extruder off to give the layer time to cool, so that the next layer is not extruded on a molten base. The orbits will be around the largest island on that layer. Orbit should only be chosen if you can not upgrade to a stepper extruder.
====Slow Down====
When selected, cool will slow down the extruder so that it will take the minimum layer time to extrude the layer. DC motors do not operate properly at slow flow rates, so if you have a DC motor extruder, you should upgrade to a stepper extruder, but if you can't do that, you can try using the 'Orbit' option.
===Maximum Cool===
Default is 2 degrees Celsius.
If it takes less time to extrude the layer than the minimum layer time, then cool will lower the temperature by the 'Maximum Cool' setting times the layer time over the minimum layer time.
===Minimum Layer Time===
Default is 60 seconds.
Defines the minimum amount of time the extruder will spend on a layer; this is an important setting.
===Minimum Orbital Radius===
Default is 10 millimeters.
When the orbit cool type is selected, if the area of the largest island is as large as the square of the "Minimum Orbital Radius" then the orbits will be just within the island. If the island is smaller, then the orbits will be in a square of the "Minimum Orbital Radius" around the center of the island. This is so that the hot extruder does not stay too close to small islands.
===Name of Alteration Files===
Cool looks for alteration files in the alterations folder in the .skeinforge folder in the home directory. Cool does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file, it then looks in the alterations folder in the skeinforge_plugins folder. The cool start and end text idea is from:
http://makerhahn.blogspot.com/2008/10/yay-minimug.html
====Name of Cool End File====
Default is cool_end.gcode.
If there is a file with the name of the "Name of Cool End File" setting, it will be added to the end of the orbits.
====Name of Cool Start File====
Default is cool_start.gcode.
If there is a file with the name of the "Name of Cool Start File" setting, it will be added to the start of the orbits.
===Orbital Outset===
Default is 2 millimeters.
When the orbit cool type is selected, the orbits will be outset around the largest island by 'Orbital Outset' millimeters. If 'Orbital Outset' is negative, the orbits will be inset instead.
===Turn Fan On at Beginning===
Default is on.
When selected, cool will turn the fan on at the beginning of the fabrication by adding the M106 command.
===Turn Fan Off at Ending===
Default is on.
When selected, cool will turn the fan off at the ending of the fabrication by adding the M107 command.
==Examples==
The following examples cool the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and cool.py.
> python cool.py
This brings up the cool dialog.
> python cool.py Screw Holder Bottom.stl
The cool tool is parsing the file:
Screw Holder Bottom.stl
..
The cool tool has created the file:
.. Screw Holder Bottom_cool.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText(fileName, text, repository=None):
'Cool a gcode linear move text.'
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText(gcodeText, repository=None):
'Cool a gcode linear move text.'
if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'cool'):
return gcodeText
if repository is None:
repository = settings.getReadRepository(CoolRepository())
if not repository.activateCool.value:
return gcodeText
return CoolSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return CoolRepository()
def writeOutput(fileName, shouldAnalyze=True):
'Cool a gcode linear move file. Chain cool the gcode if it is not already cooled.'
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'cool', shouldAnalyze)
class CoolRepository:
'A class to handle the cool settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.cool.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName(
fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Cool', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute(
'http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Cool')
self.activateCool = settings.BooleanSetting().getFromValue('Activate Cool.. but use with a fan!', self, False)
settings.LabelDisplay().getFromName('- When To use Cool?-', self )
self.minimumLayerTime = settings.FloatSpin().getFromValue(0.0, 'Use Cool if layer takes shorter than(seconds):', self, 120.0, 10.0)
self.minimumLayerFeedrate = settings.FloatSpin().getFromValue(5.0, 'Do not go slower than (mm/s):', self, 50.0, 15.0)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- What to do if Cool is necessary? -', self )
self.turnFanOnAtBeginning = settings.BooleanSetting().getFromValue('Turn Fan On at Beginning', self, True)
self.turnFanOffAtEnding = settings.BooleanSetting().getFromValue('Turn Fan Off at Ending', self, True)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Name of Macro (gmc) Files to execute -', self )
self.nameOfCoolEndFile = settings.StringSetting().getFromValue('Execute when Cool ends:', self, 'cool_end.gmc')
self.nameOfCoolStartFile = settings.StringSetting().getFromValue('Execute when Cool starts:', self, 'cool_start.gmc')
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- How to Cool? -', self )
self.coolType = settings.MenuButtonDisplay().getFromName('Cool by:', self)
self.orbit = settings.MenuRadio().getFromMenuButtonDisplay(self.coolType, 'Orbiting around Object', self, False)
self.slowDown = settings.MenuRadio().getFromMenuButtonDisplay(self.coolType, 'Slow Down during print', self, True)
settings.LabelSeparator().getFromRepository(self)
self.maximumCool = settings.FloatSpin().getFromValue(0.0, 'Maximum Cool (Celcius):', self, 10.0, 2.0)
self.bridgeCool = settings.FloatSpin().getFromValue(0.0, 'Bridge Cool (Celcius):', self, 10.0, 1.0)
self.minimumOrbitalRadius = settings.FloatSpin().getFromValue(
0.0, 'Minimum Orbital Radius (millimeters):', self, 20.0, 10.0)
settings.LabelSeparator().getFromRepository(self)
self.orbitalOutset = settings.FloatSpin().getFromValue(1.0, 'Orbital Outset (millimeters):', self, 5.0, 2.0)
self.executeTitle = 'Cool'
def execute(self):
'Cool button has been clicked.'
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(
self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class CoolSkein:
'A class to cool a skein of extrusions.'
def __init__(self):
self.boundaryLayer = None
self.coolTemperature = None
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.feedRateMinute = 960.0
self.highestZ = 1.0
self.isBridgeLayer = False
self.isExtruderActive = False
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.multiplier = 1.0
self.oldFlowRate = None
self.oldFlowRateString = None
self.oldLocation = None
self.oldTemperature = None
def addCoolOrbits(self, remainingOrbitTime):
'Add the minimum radius cool orbits.'
if len(self.boundaryLayer.loops) < 1:
return
insetBoundaryLoops = self.boundaryLayer.loops
if abs(self.repository.orbitalOutset.value) > 0.1 * abs(self.edgeWidth):
insetBoundaryLoops = intercircle.getInsetLoopsFromLoops(self.boundaryLayer.loops, -self.repository.orbitalOutset.value)
if len(insetBoundaryLoops) < 1:
insetBoundaryLoops = self.boundaryLayer.loops
largestLoop = euclidean.getLargestLoop(insetBoundaryLoops)
loopArea = euclidean.getAreaLoopAbsolute(largestLoop)
if loopArea < self.minimumArea:
center = 0.5 * (euclidean.getMaximumByComplexPath(largestLoop) + euclidean.getMinimumByComplexPath(largestLoop))
centerXBounded = max(center.real, self.boundingRectangle.cornerMinimum.real)
centerXBounded = min(centerXBounded, self.boundingRectangle.cornerMaximum.real)
centerYBounded = max(center.imag, self.boundingRectangle.cornerMinimum.imag)
centerYBounded = min(centerYBounded, self.boundingRectangle.cornerMaximum.imag)
center = complex(centerXBounded, centerYBounded)
maximumCorner = center + self.halfCorner
minimumCorner = center - self.halfCorner
largestLoop = euclidean.getSquareLoopWiddershins(minimumCorner, maximumCorner)
pointComplex = euclidean.getXYComplexFromVector3(self.oldLocation)
if pointComplex is not None:
largestLoop = euclidean.getLoopStartingClosest(self.edgeWidth, pointComplex, largestLoop)
intercircle.addOrbitsIfLarge(
self.distanceFeedRate, largestLoop, self.orbitalFeedRatePerSecond, remainingOrbitTime, self.highestZ)
def addCoolTemperature(self, remainingOrbitTime):
'Parse a gcode line and add it to the cool skein.'
layerCool = self.repository.maximumCool.value * remainingOrbitTime / self.repository.minimumLayerTime.value
if self.isBridgeLayer:
layerCool = max(self.repository.bridgeCool.value, layerCool)
if self.oldTemperature is not None and layerCool != 0.0:
self.coolTemperature = self.oldTemperature - layerCool
self.addTemperature(self.coolTemperature)
# def addFlowRate(self, flowRate):
# 'Add a multipled line of flow rate if different.'
# self.distanceFeedRate.addLine('M108 S' + euclidean.getFourSignificantFigures(flowRate))
def addGcodeFromFeedRateMovementZ(self, feedRateMinute, point, z):
'Add a movement to the output.'
self.distanceFeedRate.addLine(self.distanceFeedRate.getLinearGcodeMovementWithFeedRate(feedRateMinute, point, z))
def addOrbitsIfNecessary(self, remainingOrbitTime):
'Parse a gcode line and add it to the cool skein.'
if remainingOrbitTime > 0.0 and self.boundaryLayer is not None:
self.addCoolOrbits(remainingOrbitTime)
def addTemperature(self, temperature):
'Add a line of temperature.'
self.distanceFeedRate.addLine('M104 S' + euclidean.getRoundedToThreePlaces(temperature))
def getCoolMove(self, line, location, splitLine):
'Get cool line according to time spent on layer.'
self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
calcCoolFeedrate = self.multiplier * self.feedRateMinute
if calcCoolFeedrate >= self.repository.minimumLayerFeedrate.value*60:
coolFeedrate = calcCoolFeedrate
else:
coolFeedrate = self.repository.minimumLayerFeedrate.value*60
return self.distanceFeedRate.getLineWithFeedRate(coolFeedrate, line, splitLine)
def getCraftedGcode(self, gcodeText, repository):
'Parse gcode text and store the cool gcode.'
self.repository = repository
self.coolEndLines = settings.getAlterationFileLines(repository.nameOfCoolEndFile.value)
self.coolStartLines = settings.getAlterationFileLines(repository.nameOfCoolStartFile.value)
self.halfCorner = complex(repository.minimumOrbitalRadius.value, repository.minimumOrbitalRadius.value)
self.lines = archive.getTextLines(gcodeText)
self.minimumArea = 4.0 * repository.minimumOrbitalRadius.value * repository.minimumOrbitalRadius.value
self.parseInitialization()
self.boundingRectangle = gcodec.BoundingRectangle().getFromGcodeLines(
self.lines[self.lineIndex :], 0.5 * self.edgeWidth)
margin = 0.2 * self.edgeWidth
halfCornerMargin = self.halfCorner + complex(margin, margin)
self.boundingRectangle.cornerMaximum -= halfCornerMargin
self.boundingRectangle.cornerMinimum += halfCornerMargin
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
if repository.turnFanOffAtEnding.value:
self.distanceFeedRate.addLine('M107')
return gcodec.getGcodeWithoutDuplication('M108', self.distanceFeedRate.output.getvalue())
def getLayerTime(self):
'Get the time the extruder spends on the layer.'
feedRateMinute = self.feedRateMinute
layerTime = 0.0
lastThreadLocation = self.oldLocation
for lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(lastThreadLocation, splitLine)
feedRateMinute = gcodec.getFeedRateMinute(feedRateMinute, splitLine)
if lastThreadLocation is not None:
feedRateSecond = feedRateMinute / 60.0
layerTime += location.distance(lastThreadLocation) / feedRateSecond
lastThreadLocation = location
elif firstWord == '(<bridgeRotation>':
self.isBridgeLayer = True
elif firstWord == '(</layer>)':
return layerTime
return layerTime
def getLayerTimeActive(self):
'Get the time the extruder spends on the layer while active.'
feedRateMinute = self.feedRateMinute
isExtruderActive = self.isExtruderActive
layerTime = 0.0
lastThreadLocation = self.oldLocation
for lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(lastThreadLocation, splitLine)
feedRateMinute = gcodec.getFeedRateMinute(feedRateMinute, splitLine)
if lastThreadLocation is not None and isExtruderActive:
feedRateSecond = feedRateMinute / 60.0
layerTime += location.distance(lastThreadLocation) / feedRateSecond
lastThreadLocation = location
elif firstWord == 'M101':
isExtruderActive = True
elif firstWord == 'M103':
isExtruderActive = False
elif firstWord == '(<bridgeRotation>':
self.isBridgeLayer = True
elif firstWord == '(</layer>)':
return layerTime
return layerTime
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
# if firstWord == 'M108':
# self.oldFlowRate = float(splitLine[1][1 :])
if firstWord == '(<edgeWidth>':
self.edgeWidth = float(splitLine[1])
if self.repository.turnFanOnAtBeginning.value:
self.distanceFeedRate.addLine('M106')
elif firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('cool')
return
# elif firstWord == '(<operatingFlowRate>':
# self.oldFlowRate = float(splitLine[1])
elif firstWord == '(<orbitalFeedRatePerSecond>':
self.orbitalFeedRatePerSecond = float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
'Parse a gcode line and add it to the cool skein.'
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.highestZ = max(location.z, self.highestZ)
if self.isExtruderActive:
line = self.getCoolMove(line, location, splitLine)
self.oldLocation = location
elif firstWord == 'M101':
self.isExtruderActive = True
elif firstWord == 'M103':
self.isExtruderActive = False
elif firstWord == 'M104':
self.oldTemperature = gcodec.getDoubleAfterFirstLetter(splitLine[1])
# elif firstWord == 'M108':
# self.oldFlowRate = float(splitLine[1][1 :])
# self.addFlowRate(self.multiplier * self.oldFlowRate)
# return
elif firstWord == '(<boundaryPoint>':
self.boundaryLoop.append(gcodec.getLocationFromSplitLine(None, splitLine).dropAxis())
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('cool')
self.distanceFeedRate.addLine(line)
self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.coolStartLines)
layerTime = self.getLayerTime()
remainingOrbitTime = max(self.repository.minimumLayerTime.value - layerTime, 0.0)
self.addCoolTemperature(remainingOrbitTime)
if self.repository.orbit.value:
self.addOrbitsIfNecessary(remainingOrbitTime)
else:
self.setMultiplier(remainingOrbitTime)
# self.addFlowRate(self.multiplier * self.oldFlowRate)
z = float(splitLine[1])
self.boundaryLayer = euclidean.LoopLayer(z)
self.highestZ = max(z, self.highestZ)
self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.coolEndLines)
return
elif firstWord == '(</layer>)':
self.isBridgeLayer = False
self.multiplier = 1.0
if self.coolTemperature is not None:
self.addTemperature(self.oldTemperature)
self.coolTemperature = None
# self.addFlowRate(self.oldFlowRate)
elif firstWord == '(<nestedRing>)':
self.boundaryLoop = []
self.boundaryLayer.loops.append(self.boundaryLoop)
self.distanceFeedRate.addLine(line)
def setMultiplier(self, remainingOrbitTime):
'Set the feed and flow rate multiplier.'
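		# Scale the feed rate by active time / (active time + remaining orbit time)
		# so that, once slowed down, the layer takes roughly the minimum layer time.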
layerTimeActive = self.getLayerTimeActive()
self.multiplier = min(1.0, layerTimeActive / (remainingOrbitTime + layerTimeActive))
def main():
'Display the cool dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == '__main__':
main()
| agpl-3.0 | -5,475,630,925,524,603,000 | 45.98818 | 439 | 0.768213 | false |
bmya/odoo-support | web_support_client_issue/wizard/support_new_issue.py | 1 | 3221 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, api, models, _
from openerp.addons.base.res.res_request import referencable_models
class support_new_issue_wizzard(models.TransientModel):
_name = "support.new_issue.wizard"
_description = "Support - New Issue Wizard"
@api.model
def get_default_description(self):
default_description = """
<h4>¿Cuáles son los <b>pasos</b> para reproducir su problema?</h4>
<p>
<br/>
<br/>
</p>
<h4>¿Cuál es el problema?</h4>
<p>
<br/>
<br/>
</p>
<h4>¿Puede copiarnos uno o más links a <b>casos concretos</b> o adjuntar una
<b>captura de pantalla</b>?</h4>
<p>
<br/>
<br/>
</p>
"""
return default_description
user_id = fields.Many2one(
'res.users',
required=True,
string='Usuario afectado',
default=lambda self: self.env.user,
)
company_id = fields.Many2one(
'res.company',
required=True,
string='Compañía utilizada',
)
date = fields.Datetime(
string='Date',
required=True,
default=fields.Datetime.now
)
name = fields.Char(
string='Title',
required=True,
)
description = fields.Html(
string='Description',
required=True,
default=get_default_description,
)
attachment_ids = fields.Many2many(
'ir.attachment',
'new_issue_ir_attachments_rel'
'wizard_id', 'attachment_id',
string='Attachments',
required=False,
)
resource = fields.Reference(
selection=lambda self: referencable_models(
self, self.env.cr, self.env.uid, self.env.context),
string='Recurso afectado',
help='You can reference the model and record related to the issue, '
'this will help our technicians to resolve the issue faster',
required=False,
)
priority = fields.Selection(
[('0', 'Low'), ('1', 'Normal'), ('2', 'High')],
'Priority',
default='0',
)
@api.onchange('user_id')
def change_user(self):
self.company_id = self.user_id.company_id.id
@api.multi
def action_confirm(self):
self.ensure_one()
active_contract = self.env['support.contract'].get_active_contract()
description = self.description
if self.resource:
description += '\nResource: %s' % str(self.resource)
vals = {
'db_user': self.user_id.login,
'db_company': self.company_id.name,
'date': self.date,
'issue_description': description,
'name': self.name,
'priority': self.priority,
}
issue_id = active_contract.create_issue(vals, self.attachment_ids)
return self.env['warning_box'].info(
            title=_('Issue successfully loaded'),
            message=_('For your reference and if you contact support by '
                      'another channel, issue ID: %s') % (issue_id))
| lgpl-3.0 | 4,524,937,655,549,498,400 | 29.6 | 80 | 0.557734 | false |
dirn/Secret-Santa | tests/factories.py | 1 | 1124 | """Factories for populating models for tests."""
import factory
from factory.alchemy import SQLAlchemyModelFactory
from xmas import models
from xmas.core import db
from xmas.utils import slugify
class Event(SQLAlchemyModelFactory):
"""A factory instance of :class:`~xmas.models.Event`."""
FACTORY_FOR = models.Event
FACTORY_SESSION = db.session
id = factory.Sequence(lambda x: x)
name = factory.Sequence(lambda x: 'Event {}'.format(x))
slug = factory.LazyAttribute(lambda obj: slugify(obj.name))
class Item(SQLAlchemyModelFactory):
"""A factory instance of :class:`~xmas.models.Item`."""
FACTORY_FOR = models.Item
FACTORY_SESSION = db.session
id = factory.Sequence(lambda x: x)
name = factory.Sequence(lambda x: 'Item {}'.format(x))
class User(SQLAlchemyModelFactory):
"""A factory instance of :class:`~xmas.models.User`."""
FACTORY_FOR = models.User
FACTORY_SESSION = db.session
id = factory.Sequence(lambda x: x)
name = factory.Sequence(lambda x: 'User {}'.format(x))
email = factory.Sequence(lambda x: 'email-{}@example.org'.format(x))
| bsd-3-clause | 265,303,943,291,645,300 | 25.139535 | 72 | 0.69395 | false |
DavidCain/film_server | cgi-bin/playlist.py | 1 | 7485 | #!/usr/bin/env python
# David Cain
# RE357
# 2012-12-16
"""
A script to make an m3u bookmark playlist (playable in VLC), or an
archive of .m4v video clip files.
"""
from collections import OrderedDict
from datetime import datetime
import cgi
import csv
import os
import re
import shutil
import subprocess
import sys
import tempfile
import traceback
import zipfile
hms = "%H:%M:%S"
ms = "%M:%S"
film_dir = "/srv/ftp/"
movie_start = datetime.strptime("00:00:00", hms)
def print_m3u(clips, title, filmpath):
""" Print the contents of a .m3u playlist of clips in the film.
Note that each bookmark should probably have a value for a "bytes"
attribute, but it seems to work without it.
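
    Roughly the output this produces (illustrative values only; the film
    path follows the example path used elsewhere in this script):

        #EXTM3U
        #EXTINF:7061,East of Eden
        #EXTVLCOPT:bookmarks= {name=Opening,time=95},{name=Finale,time=5400}
        /Users/suzieq/East_of_Eden.m4v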
"""
attach_header("bookmarks.m3u")
print "#EXTM3U"
print "#EXTINF:7061,%s" % title
# Bookmarks
print "#EXTVLCOPT:bookmarks=", # trailing comma is key
bookmarks = ["{name=%s,time=%i}" % (name, seconds(start)) for start, (end, name) in clips]
print ",".join(bookmarks)
# Path to file
print filmpath
def print_zip(clips, film_title):
""" Print the contents of a .zip file of film clips. """
try:
zip_file = make_clips(clips, film_title)
except Exception, msg:
text_err(msg)
else:
attach_header(film_title + "_clips.zip")
for line in zip_file:
print line,
finally:
try:
os.remove(zip_file.name)
except OSError:
pass # If make_clips failed, file won't exist
def make_clips(clips, film_title):
""" Return a .zip file of film clips. """
temp_clip_dir = tempfile.mkdtemp(prefix=film_title)
film_path = os.path.join(film_dir, "%s.m4v" % film_title)
base, extension = os.path.splitext(film_path)
clip_files = []
for start, (end, clip_name) in clips:
if seconds(end - start) > 600:
raise Exception("Clip '%s' exceeds ten minutes." % clip_name)
running_time = str(end - start) # Will be in HMS
start = str(start)
clip_fn = clean_path(clip_name)
outfile = os.path.join(temp_clip_dir, clip_fn + extension)
cmd = ['ffmpeg', '-ss', start, '-t', running_time, '-i', film_path,
'-acodec', 'copy', '-vcodec', 'copy', '-y', outfile]
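        # The command above copies the audio/video streams without
        # re-encoding; it expands to roughly (illustrative values only):
        #   ffmpeg -ss 0:12:30 -t 0:01:30 -i /srv/ftp/East_of_Eden.m4v \
        #          -acodec copy -vcodec copy -y /tmp/<clipdir>/Opening_scene.m4v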
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
text_err("Error code %i:\n\n %s" % (e.returncode, e.output))
clip_files.append(outfile)
# Zip the clips into an archive, return file handle
zip_handle = make_zip(clip_files, film_title + "_clips")
shutil.rmtree(temp_clip_dir)
return zip_handle
def make_zip(paths, top_dir="film_clips"):
""" Return the handle to a .zip archive of the given files.
:param top_dir: Directory name to place files in
"""
fd, zip_path = tempfile.mkstemp()
archive = zipfile.ZipFile(zip_path, 'w')
for path in paths:
arcname = os.path.join(top_dir, os.path.split(path)[1])
archive.write(path, arcname)
archive.close()
os.close(fd)
return open(zip_path)
class CSVError(Exception):
pass
def get_clip_dict(csv_file, give_times=False):
""" Return a dictionary of clip names with start and end times. """
clip_dict = OrderedDict()
clips_csv = csv.reader(csv_file)
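    # Expected CSV layout, one clip per row: start,end,name (illustrative
    # rows only; times may be given as H:M:S or M:S):
    #   00:12:30,00:14:00,Opening scene
    #   1:05:10,1:07:45,Second clip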
for num, line in enumerate(clips_csv, start=1):
if len(line) > 3:
raise CSVError("Too many columns on line %i (check commas!)" % num)
elif len(line) < 3:
raise CSVError("Fewer than three columns on line %i" % num)
start, end, name = [val.strip() for val in line]
timename = "%s-%s" % (start, end)
clip_name = "%s - %s" % (timename, name) if give_times else name
start_time = get_time(start)
end_time = get_time(end)
if end_time < start_time:
raise CSVError("End time of '%s' (line %i) precedes start." % (name, num))
clip_dict[start_time] = (end_time, clip_name)
return clip_dict
def seconds(delta):
return int(delta.total_seconds())
def get_time(clip_start):
try:
bookmark_time = datetime.strptime(clip_start, hms)
except ValueError:
try:
bookmark_time = datetime.strptime(clip_start, ms)
except ValueError:
            raise ValueError("Invalid time format '%s'. "
                             "Enter time in H:M:S, or M:S" % clip_start)
return bookmark_time - movie_start
def clean_path(path):
""" Sanitize the path for sensible names.
It's not to prevent traversals, just to avoid common filename 'gotchas'
"""
path = re.sub("[:/\\\]", "-", path)
path = re.sub(" ", "_", path)
path = re.sub("[?]", "", path)
return path
def universal_file(in_file):
""" Return the handle to a file with universal EOL support.
(A hack to get around the fact that CGI handles are already open).
"""
fileno, filename = tempfile.mkstemp()
with open(filename, "w") as newline_file:
for line in in_file:
newline_file.write(line)
os.close(fileno)
return open(filename, "rU")
def attach_header(outname):
print 'Content-Type:text/enriched; filename="%s"' % outname
print 'Content-Disposition: attachment; filename="%s"\n' % outname
def text_err(msg):
print 'Content-Type:text/plain\n'
print "Error:\n"
print msg
sys.exit(1)
def html_err(msg):
print 'Content-Type:text/html\n'
print "<html>\n<body>"
print "<h1>Error:</h1>\n"
print "<p>\n%s\n</p>" % msg
print "</body>\n</html>"
sys.exit(1)
def main():
""" Read the CGI form, display any errors. Otherwise, give content. """
form = cgi.FieldStorage()
film_title = form["title"].value
movie_path = form["movie_path"].value
clip_order = form["clip_order"].value
user_csv = form["csv_file"].file
# Quit if CSV file is empty
if not (user_csv and user_csv.read()):
html_err("No CSV file given.")
user_csv.seek(0)
# Get output type
try:
output_type = form["output_type"].value
except:
html_err("No output format selected.")
# Raise error if using playlist and path is left as example path
if (output_type == "playlist" and (not movie_path or
movie_path == "/Users/suzieq/East_of_Eden.m4v")):
html_err("Playlists require the path to your film.\n"
'<a href="/gen_clips.html#full_path">'
'Getting the full path of a file'
'</a>')
csv_file = universal_file(user_csv) # Force universal line support
# Parse CSV, crash if errors
try:
clip_dict = get_clip_dict(csv_file)
except CSVError, msg:
html_err(msg)
except Exception, msg:
html_err("Error parsing CSV: %s" % msg)
finally:
os.remove(csv_file.name)
# Sort clips chronologically, if specified
if clip_order == "chronological":
clips = sorted(clip_dict.items())
else:
clips = clip_dict.items()
if len(clips) == 0:
html_err("No clips were found in the CSV file!")
# Give the result as downloadable
if output_type == "playlist":
print_m3u(clips, film_title, movie_path)
elif output_type == "clips":
print_zip(clips, film_title)
if __name__ == "__main__":
try:
main()
except SystemExit:
pass
except:
traceback.print_exc(file=sys.stdout)
| gpl-3.0 | -7,705,770,874,046,983,000 | 26.929104 | 94 | 0.604275 | false |
CMPUT410W15/cmput410-project | posts/remote.py | 1 | 2909 | """Functions for dealing with remote posts."""
from author.models import Author
from posts.models import Post, Comment
from common.util import get_request_to_json, get_nodes
from common.util import HINDLEBOOK, HINDLE_AUTH, BUBBLE, BUBBLE_AUTH
from dateutil import parser
import threading
VISIBILITY = {
'PRIVATE': 0,
'FRIEND': 1,
'FRIENDS': 2,
'FOAF': 3,
'PUBLIC': 4,
'SERVERONLY': 5,
'private': 0,
'friend': 1,
'friends': 2,
'foaf': 3,
'public': 4,
'serveronly': 5
}
CONTENT_TYPE = {
u'text/html': 0,
u'text/x-markdown': 1,
}
def get_pubdate(dictionary):
pd1 = dictionary.get('pubDate', None)
pd2 = dictionary.get('pubdate', None)
return pd1 or pd2
def add_remote_comment(comment, post, author):
comment_data = {
'uid': comment['guid'],
'content': comment['comment'],
'author': author,
'post': post
}
if not len(Comment.objects.filter(uid=comment['guid'])):
c, _ = Comment.objects.get_or_create(**comment_data)
c.published = parser.parse(get_pubdate(comment))
def add_remote_post(post, author):
post_data = {
'uid': post['guid'],
'title': post['title'],
'description': post['description'],
'content': post['content'],
'content_type': CONTENT_TYPE.get(post['content-type'], 0),
'visibility': VISIBILITY[post['visibility']],
'send_author': author
}
if not len(Post.objects.filter(uid=post_data['uid'])):
p = Post.objects.get_or_create(**post_data)[0]
p.published = parser.parse(get_pubdate(post))
else:
p = Post.objects.get(uid=post_data['uid'])
for comment in post['comments']:
try:
author = Author.objects.get(uid=comment['author']['id'])
add_remote_comment(comment, p, author)
except:
pass
def update_posts_mutex(node, author, lock):
if HINDLEBOOK in author.host:
headers = {'Uuid': author.uid}
data = get_request_to_json(node.url + 'author/posts',
headers, HINDLE_AUTH)
elif BUBBLE in author.host:
data = get_request_to_json(node.url + 'author/posts2/',
auth=BUBBLE_AUTH)
else:
data = 0
with lock:
if not isinstance(data, int):
for post in data['posts']:
uid = post['author']['id']
try:
author = Author.objects.get(uid=uid)
add_remote_post(post, author)
except:
pass
def reset_remote_posts():
lock = threading.Lock()
for node in get_nodes():
for author in Author.objects.filter(user=None):
thread = threading.Thread(target=update_posts_mutex,
args=(node, author, lock))
thread.start()
| apache-2.0 | 7,596,660,701,311,065,000 | 27.242718 | 68 | 0.559299 | false |
MikeDMorgan/gwas_pipeline | PipelineGWAS.py | 1 | 213238 | #########################################################################
#########################################################################
# Classes for handling genome-wide association input and output files, ##
# analysis and qc programs, and post-hoc analyses ##
#########################################################################
#########################################################################
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGAT.IOTools as IOTools
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
import re
import random
import os
import subprocess
import rpy2.robjects as ro
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri as py2ri
from rpy2.robjects.packages import importr
# set matplotlib non-interactive backend to Agg to
# allow running on cluster
import collections
import sqlite3 as sql
from math import *
import scipy.stats as stats
import sklearn.metrics as metrics
class FileGroup(object):
'''
An object for holding, formatting and processing files for genome-wide
association analysis including compressed and binary files
File types supported:
* plink - .ped and .map files
    * plink binary - .bim, .fam and .bed files
* variant call format - .vcf and .bcf (including gzipped vcf)
* Oxford format - .gen or .bgen with matched sample text file (must
be .sample)
* GRM_binary - genetic relationship matrix calculated in an appropriate
program in binary format. File suffixes are *.grm.bin, *.grm.N.bin
      and *.grm.id
    * GRM_gz - previously calculated gzip compressed GRM, file suffixes
are *.grm.gz and *.grm.id
Phenotypes are assumed to be contained in the relevant files, if not
    then an additional phenotypes file can be included using the
    `phenotypes` argument. Covariate files (if different from the phenotypes
    file) can also be included in the instantiation of a :FileGroup:
    object using the `covariate_files` argument.
Only the `files` and `file_format` arguments are required.
Genotype data are assumed to be raw genotype calls. This can be modified
using the `genotype_format` argument upon instantiation. Values allowed
are:
* calls - standard bi-allelic genotype calls, i.e. AA, AB, BB
* imputed_call - discrete genotype calls from imputed data,
essentially treated the same as ``calls``
* genotype_prob - posterior probabilities for each genotype class,
i.e. 0.88 0.07 0.05 corresponding to homozygote
reference, heterozygote then homozygote rare allele.
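
    Example (an illustrative sketch only; the file names and phenotype
    file used here are hypothetical):

        plink_files = FileGroup(files=["study.bed", "study.bim", "study.fam"],
                                file_format="plink_binary",
                                phenotypes="phenotypes.txt")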
'''
# Defaults for file formats
ped_file = None
map_file = None
bim_file = None
fam_file = None
bed_file = None
sample_file = None
gen_file = None
bgen_file = None
vcf_file = None
bcf_file = None
def __init__(self, files, file_format, phenotypes=None,
genotype_format="calls", covariate_files=None):
self.files = files
self.file_format = file_format
self.pheno_file = phenotypes
self.genotype_format = genotype_format
self.covariate_files = covariate_files
self.set_file_prefix(files)
def set_file_prefix(self, infiles):
'''Get file prefixes from input files. These are used across all
file formats, e.g. myfile.bed, myfile.bim, myfile.fam name=myfile.
        Only use periods, '.', to denote file suffixes. Use hyphens and
underscores for separating file names.
Set these to the appropriate attributes.
'''
file_prefixes = set()
for f in infiles:
# get all input file prefixes
if len(f.split("/")) > 1:
g = f.split("/")[-1]
fdir = f.split("/")[:-1]
fdir = "/".join(fdir)
ffile = fdir + "/" + g.split(".")[0]
file_prefixes.add(ffile)
else:
file_prefixes.add(f.split(".")[0])
# if only prefix then use this for all data files
if len(file_prefixes) == 1:
self.name = [xf for xf in file_prefixes][0]
else:
# if there are multiple prefixes then use separate
# flags for file inputs
self.name = None
# define file types by their suffix instead
if self.file_format == "plink":
self.ped_file = [pf for pf in infiles if re.search(".ped",
pf)][0]
self.map_file = [mf for mf in infiles if re.search(".map",
mf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.ped_file
except AssertionError:
raise ValueError(".ped file is missing, please "
"specify")
try:
assert self.map_file
except AssertionError:
raise ValueError(".map file is missing, please "
"specify")
elif self.file_format == "plink_binary":
self.fam_file = [ff for ff in infiles if re.search(".fam",
ff)][0]
self.bim_file = [fb for fb in infiles if re.search(".bim",
fb)][0]
self.bed_file = [bf for bf in infiles if re.search(".bed",
bf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.fam_file
except AssertionError:
raise ValueError(".fam file is missing, please "
"specify")
try:
assert self.bim_file
except AssertionError:
raise ValueError(".bim file is missing, please "
"specify")
try:
assert self.bed_file
except AssertionError:
raise ValueError(".bed file is missing, please "
"specify")
elif self.file_format == "oxford":
self.gen_file = [gf for gf in infiles if re.search(".gen",
gf)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.gen_file
except AssertionError:
raise ValueError(".gen file missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file missing, please "
"specify")
elif self.file_format == "oxford_binary":
self.bgen_file = [bg for bg in infiles if re.search(".bgen",
bg)][0]
self.sample_file = [sf for sf in infiles if re.search(".sample",
sf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bgen_file
except AssertionError:
raise ValueError(".bgen file is missing, please "
"specify")
try:
assert self.sample_file
except AssertionError:
raise ValueError(".sample file is missing, please "
"specify")
elif self.file_format == "vcf":
self.vcf_file = [vf for vf in infiles if re.search(".vcf",
vf)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.vcf_file
except AssertionError:
raise ValueError(".vcf file is missing, please "
"specify")
elif self.file_format == "bcf":
self.bcf_file = [bv for bv in infiles if re.search(".bcf",
bv)][0]
# check files exist (i.e. are not the default None values)
try:
assert self.bcf_file
except AssertionError:
raise ValueError(".bcf file is missing, please "
"specify")
elif self.file_format == "GRM_binary":
self.id_file = [ig for ig in infiles if re.search(".grm.id",
ig)][0]
self.n_file = [gn for gn in infiles if re.search(".grm.N.bin",
gn)][0]
self.bin_file = [gb for gb in infiles if re.search(".grm.bin",
gb)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.n_file
except AssertionError:
raise ValueError("grm.N file is missing, please "
"specify")
try:
assert self.bin_file
except AssertionError:
                raise ValueError("GRM genotype is missing, please "
                                 "specify")
elif self.file_format == "GRM_plink":
self.id_file = [ig for ig in infiles if re.search(".rel.id",
ig)][0]
self.rel_file = [gn for gn in infiles if re.search(".rel.N.bin",
gn)][0]
# check files exits
try:
assert self.id_file
except AssertionError:
raise ValueError("GRM ids file is missing, please "
"specify")
try:
assert self.rel_file
except AssertionError:
raise ValueError("rel.N file is missing, please "
"specify")
def set_phenotype(self, pheno_file=None, pheno=1):
'''
Set the phenotype for a set of individuals
using an external phenotypes file.
Default is to use the (n+2)th column, designated
as pheno 1.
'''
if type(pheno) == int:
pheno = str(pheno)
elif type(pheno) == str:
pass
else:
raise AttributeError("Type of pheno unknown. "
"Must be str or int.")
self.pheno_file = pheno_file
self.pheno = pheno
class GWASProgram(object):
'''
A base level object for programs designed to perform genome-wide
association analysis and operate on genome-wide genotyping data.
    Subclasses (e.g. GCTA, Plink2) implement `program_call` to construct the
    command line for their executable and `postprocess` to collect and format
    the program output; `build` chains these into a single executable
    statement.
'''
def __init__(self, executable=None, required_format=None):
self.executable = executable
self.require_format = required_format
def program_call(self, infiles, outfile):
'''build a statement to perform genome-wide
analysis using infiles
'''
return ""
def postprocess(self, infiles, outfile):
'''collect and process output files from
program - format for Result class objects'''
return ""
def build(self, infiles, outfile):
'''run analysis program'''
        cmd_program = self.program_call(infiles, outfile)
        cmd_postprocess = self.postprocess(infiles, outfile)
        if cmd_postprocess:
            # the postprocessing command must be terminated with ";" so
            # that it can be chained onto the program statement
            assert cmd_postprocess.strip().endswith(";")
else:
pass
statement = " checkpoint; ".join((cmd_program,
cmd_postprocess))
return statement
class GCTA(GWASProgram):
'''
GCTA is designed for computing genetic relationship matrices, linear
mixed model analyses and phenotype estimation/prediction.
It can also perform SNP-wise GWAS.
Files MUST be in Plink binary format
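
    Example (an illustrative sketch; `plink_binary_files` is assumed to be
    a FileGroup instance and the output prefix is hypothetical):

        gcta = GCTA(plink_binary_files)
        gcta.program_call(plink_binary_files, "study_grm")
        gcta.genetic_relationship_matrix(compression="gz", metric=None,
                                         shape="square")
        gcta.build_statement(plink_binary_files, "study_grm", threads=4)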
'''
def __init__(self, files, options=None, settings=None,
design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "gcta64"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
'''build GCTA call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
raise AttributeError("Files must be in binary plink format "
"or as a GRM to use GCTA. Please "
"convert and try again.")
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
self.statement["program"] = " ".join(statement)
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_binary" or file_format == "GRM_plink":
statement = " --grm %s " % infiles.name
else:
            raise AttributeError("file format is not defined or recognised. "
                                 "Please define the input correctly when "
                                 "instantiating a FileGroup object")
return statement
def PCA(self, n_pcs="20"):
'''
        Perform PCA analysis on a previously generated GRM and output the
        first n principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def apply_filters(self, filter_type, filter_value):
'''
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* autosome_number - for non-human species, the number of chromosomes to
be considered autosomes
* exclude_snps - text file list of variant IDs to exclude from analysis.
[file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
'''
if filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "autosome_number":
self._construct_filters(autosome_number=filter_value)
elif filter_type == "exclude_snps":
self._construct_filters(exclude_snps=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
def _construct_filters(self, **kwargs):
'''
Add filter to each GCTA run.
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
        * autosome - exclude all non-placed and non-autosomal variants.
[boolean]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
* update_gender - provide gender information in a separate text
file. [file]
* grm_threshold - remove one of a pair of individuals with
estimated relatedness greater than this value.
* ld_significance - p-value threshold for regression test
of LD significance
* genotype_call - GenCall score cut-off for calling raw
genotypes into Plink PED format
* meta_pval - p-value threshold cut-off for conditional
and joint genome-wide analysis
        * cojo_window - distance in kb beyond which SNPs are assumed
          to be in linkage equilibrium
* cojo_collinear - multiple regression R^2 on selected SNPs
value above which the testing SNP will not be selected.
* cojo_inflation - adjust COJO analysis test statistics
for genomic control. [boolean]
* reml_iterations - maximum number of iterations to use
during reml analysis. Default is 100. [int]
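
        Example (illustrative only; the filter values are invented):

            self._construct_filters(min_allele_frequency=0.01,
                                    autosome=True,
                                    grm_threshold=0.05)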
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"autosome": " --autosome ",
"autosome_number": " --autosome-num %s ",
"grm_threshold": " --grm-cutoff %s ",
"ld_significance": " --ls-sig %s ",
"genotype_call": " --gencall %s ",
"meta_pval": " --cojo-p %s ",
"cojo_window": " --cojo-wind %s ",
"cojo_collinear": " --cojo-collinear %s ",
"cojo_inflation": " --cojo-gc ",
"reml_iterations": " --reml-maxit %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.iteritems():
filter_dict[key] = value
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
                if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
pass
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def mixed_model(self, lmm_method, grm=None, qcovar=None,
dcovar=None):
'''
Run a linear mixed model with the GRM used to model
        random effects of the estimated genetic relationship
between individuals
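
        Example (an illustrative sketch; the GRM prefix and covariates file
        are hypothetical):

            self.mixed_model(lmm_method="standard", grm="study_grm",
                             qcovar="quantitative_covars.txt")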
'''
# add the mlm flag to the statement
self._run_tasks(lmm=lmm_method)
# construct the rest of mlm statement
statement = []
if qcovar:
statement.append(" --qcovar %s " % qcovar)
else:
pass
if dcovar:
statement.append(" --covar %s " % dcovar)
else:
pass
        if grm:
            statement.append(" --grm %s " % grm)
        else:
            E.warn("No GRM has been provided; the mixed model requires a "
                   "pre-computed genetic relationship matrix")
self.statement["mlm"] = " ".join(statement)
def reml_analysis(self, method, parameters, prevalence=None,
qcovariates=None, discrete_covar=None):
'''
Use REML to estimate the proportion of phenotypic variance
explained by the estimated genetic relationship between
individuals.
Arguments
---------
method: string
GCTA method to use for REML estimation of h2. Includes:
* snpBLUP - calculate the SNP BLUPs from the genotype
data and the estimated total genetic value/ breeding value
          * fixed_cor - test the bivariate genetic correlation against a
            fixed value with a likelihood ratio test
* priors - provide initial priors for the variance components
estimation
* unconstrained - allow variance estimates to fall outside
of the normal parameter space, bounded [0, ).
* GxE - estimate the contribution of GxE with covariates
to the phenotype variance
* BLUP_EBV - output individual total genetic effect/breeding
values
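
        Example (an illustrative sketch; the covariates file is
        hypothetical):

            self.reml_analysis(method="standard", parameters=None,
                               prevalence=0.01,
                               qcovariates="covariates.txt")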
'''
statement = []
try:
params = parameters.split(",")
if len(params) == 1:
params = params[0]
else:
pass
except AttributeError:
params = parameters
self._run_tasks(parameter=params,
greml=method)
if prevalence:
statement.append(" --prevalence %0.3f " % prevalence)
else:
pass
if qcovariates:
statement.append(" --qcovar %s " % qcovariates)
else:
pass
if discrete_covar:
statement.append(" --covar %s " % discrete_covar)
else:
pass
self.statement["reml"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
The principal functions of GCTA revolve around GRM estimation
and variance components analysis, such as REML estimation of
        heritability and variance components, BLUP and phenotype prediction.
It can also be used to do PCA and conditional and joint GWAS.
Tasks
-----
* pca - perform principal components analysis on a GRM
* greml - perform restricted maximum likelihood analysis
for estimation of variance components
* estimate_ld - estimate the linkage disequilibrium structure
over the genomic regions specified
* simulate_gwas - simulate genome-wide association data based
on observed genotype data
* cojo - conditional and joint genome-wide association
analysis across SNPs and covariates
* bivariate_reml - perform GREML on two traits, either both
binary, both quantitative or one of each
* lmm - perform a linear mixed model based association analysis
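
        Example (illustrative only):

            # leave-one-chromosome-out mixed model association
            self._run_tasks(lmm="loco")
            # or the first 10 principal components from a GRM
            self._run_tasks(pca=10)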
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
# put all of the other tasks as options in the calling function
task_map = {"pca": " --pca %s ",
"greml": {"standard": " --reml ",
"priors": " --reml --reml-priors %s ",
"reml_algorithm": " --reml --reml-alg %s ",
"unconstrained": " --reml --reml-no-constrain ",
"GxE": " --reml --gxe %s ",
"LRT": " --reml --reml-lrt %s ",
"BLUP_EBV": " --reml --reml-pred-rand ",
"snpBLUP": " --blup-snp %s "},
"estimate_ld": " --ld %s ",
"simulate_gwas": {"quantitative": " --simu-qt ",
"case_control": " --simu-cc %s %s "},
"cojo": {"stepwise": " --cojo-file %s --cojo-slct ",
"no_selection": " --cojo-file %s --cojo-joint ",
"snp_conditional": " --cojo-file %s --cojo-cond %s "},
"bivariate_reml": {"standard": " --reml-bivar %s ",
"no_residual": " --reml-bivar %s --reml-bivar-nocove ",
"fixed_cor": " --reml-bivar %s --reml-bivar-lrt-rg %s "},
"lmm": {"standard": " --mlma ",
"loco": " --mlma-loco ",
"no_covar": " --mlma-no-adj-covar "},
"remove_relations": {"cutoff": " --grm-cutoff %s "}}
for task, value in kwargs.iteritems():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
# LD estimation is likewise not nested
elif task == "estimate_ld":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
raise IOError("no SNP file list detected")
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
                        raise KeyError("%s task not recognised, see docs for "
                                       "details of recognised tasks" % task)
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
self.statement["tasks"] = " ".join(statement)
def genetic_relationship_matrix(self, compression="binary", metric=None,
shape="square", options=None):
'''
Calculate the estimated genetic relationship matrix from
genotyping data
* estimate_grm - estimate the realized genetic relationship
matrix between individuals from genotyping data
'''
mapf = {"binary": " --make-grm-bin ",
"gzip": " --make-grm-gz ",
"no_compress": " --make-grm ",
"X_chr": " --make-grm-chr ",
"X_chr_gz": " --make-grm-gz ",
"inbreeding": " --ibc "}
        if options == "X_chr":
            if compression == "gz":
                state = mapf["X_chr_gz"]
            else:
                state = mapf["X_chr"]
        elif options == "inbreeding":
            state = mapf["inbreeding"]
        # if no special option is requested, select the flag on compression
        elif compression == "gz":
            state = mapf["gzip"]
        elif compression in ("bin", "binary"):
            state = mapf["binary"]
        else:
            state = mapf["no_compress"]
        self.statement["matrix"] = state
def build_statement(self, infiles, outfile, threads=None,
memory=None, parallel=None):
'''
Build statement and execute from components
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["mlm"])
except KeyError:
pass
try:
statement.append(self.statement["reml"])
except KeyError:
pass
if threads:
statement.append(" --thread-num %i " % threads)
else:
pass
# add output flag
statement.append(" --out %s " % outfile)
os.system(" ".join(statement))
class Plink2(GWASProgram):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
    Requires Plink v1.9 to be in the user's PATH variable as ``plink2`` to
distinguish it from Plink v1.07.
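
    Example (an illustrative sketch; `plink_binary_files` is assumed to be
    a FileGroup instance and the filter values are invented):

        plink = Plink2(plink_binary_files)
        plink.program_call(plink_binary_files, "study_qc")
        plink.apply_filters("min_allele_frequency", "0.01")
        plink.apply_filters("genotype_rate", "0.05")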
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plink2"
self.statement = {}
self.filters = []
def program_call(self, infiles, outfile):
''' build Plink call statement on infiles'''
statement = []
statement.append(self.executable)
if infiles.name:
            inputs = self._build_single_file_input(infiles,
infiles.file_format)
statement.append(inputs)
else:
inputs = self._build_multiple_file_input(infiles,
infiles.file_format)
statement.append(inputs)
# check for the presence of an additional phenotypes file
try:
if infiles.pheno_file:
statement.append(" --pheno %s --mpheno %s " % (infiles.pheno_file,
infiles.pheno))
else:
pass
except AttributeError:
pass
self.statement["program"] = " ".join(statement)
def hamming_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using Hamming distance across all variants
'''
# check shape is compatible
if not shape:
shape = "triangle"
elif shape in ["square", "square0", "triangle"]:
pass
else:
            raise ValueError("matrix shape %s not recognised. "
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="hamming", shape=shape,
compression=compression)
self.statement["matrix"] = state
def ibs_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise similarity matrix between
individuals using proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
            raise ValueError("matrix shape %s not recognised. "
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="ibs", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genome_matrix(self, shape, compression, options):
'''
Calculate genomic pair-wise distance matrix between
individuals using 1 - proportion of IBS alleles
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
            raise ValueError("matrix shape %s not recognised. "
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if options:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression, options=options)
else:
state = self._matrices(matrix_type="genomic", shape=shape,
compression=compression)
self.statement["matrix"] = state
def genetic_relationship_matrix(self, shape, compression, metric,
options=None):
'''
Calculate genomic pair-wise distance matrix between
individuals using proportion of IBS alleles
Requires the use of the Plink2 parallelisation to run with large
cohorts of patients
'''
# check shape is compatible
if shape in ["square", "square0", "triangle"]:
pass
else:
            raise ValueError("matrix shape %s not recognised. "
"Valid options are square, square0, "
"and triangle." % shape)
# check compression is compatible
if compression in ["gz", "bin", "bin4"]:
pass
else:
raise ValueError("compression %s not recognised. Accepted "
"formats are gz, bin and bin4." % compression)
if metric in ["cov", "ibc2", "ibc3"]:
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression, options=metric)
else:
E.info("%s metric not recognised. Running with default Fhat1" % metric)
state = self._matrices(matrix_type="grm", shape=shape,
compression=compression)
self.statement["matrix"] = state
def apply_filters(self, filter_type, filter_value):
'''
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* exclude_snp - exclude this single variant
* exclude_snps - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
        * autosome - exclude all non-placed and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
'''
if filter_type == "genotype_rate":
self._construct_filters(genotype_rate=filter_value)
elif filter_type == "hwe":
self._construct_filters(hwe=filter_value)
elif filter_type == "missingness":
self._construct_filters(missingness=filter_value)
elif filter_type == "min_allele_frequency":
self._construct_filters(min_allele_frequency=filter_value)
elif filter_type == "max_allele_frequency":
self._construct_filters(max_allele_frequency=filter_value)
elif filter_type == "exclude_snp":
self._construct_filters(exclude_snp=filter_value)
elif filter_type == "exclude":
self._construct_filters(exclude=filter_value)
elif filter_type == "extract":
self._construct_filters(extract=filter_value)
elif filter_type == "chromosome":
self._construct_filters(chromosome=filter_value)
elif filter_type == "exclude_chromosome":
            self._construct_filters(exclude_chromosome=filter_value)
elif filter_type == "autosome":
self._construct_filters(autosome=filter_value)
elif filter_type == "pseudo_autosome":
self._construct_filters(pseudo_autosome=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
elif filter_type == "snp_bp_range":
self._construct_filters(snp_bp_range=filter_value)
elif filter_type == "conditional_snp":
self._construct_filters(conditional_snp=filter_value)
elif filter_type == "keep":
self._construct_filters(keep=filter_value)
elif filter_type == "remove":
self._construct_filters(remove=filter_value)
elif filter_type == "ignore_indels":
self._construct_filters(ignore_indels=filter_value)
def _build_multiple_file_input(self, infiles, file_format):
'''
internal function only. Use it to construct
the appropriate file input flags
'''
statement = None
if file_format == "oxford":
statement = " --gen %s --sample %s " % (infiles.gen_file,
infiles.sample_file)
elif file_format == "oxford_binary":
statement = " --bgen %s --sample %s " % (infiles.bgen_file,
infiles.sample_file)
elif file_format == "plink":
            statement = " --ped %s --map %s " % (infiles.ped_file,
                                                 infiles.map_file)
elif file_format == "plink_binary":
statement = " --bed %s --bim %s --fam %s " % (infiles.bed_file,
infiles.bim_file,
infiles.fam_file)
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.vcf_file
elif file_format == "bcf":
            statement = " --bcf %s " % infiles.bcf_file
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
else:
raise AttributeError("file format is not defined. Please "
"define the input file formats when "
"instantiating a FileGroup object")
return statement
def _build_single_file_input(self, infiles, file_format):
'''internal function only. Use it to construct the
file input flags with --file, --bfile or --data
'''
statement = None
if file_format == "plink":
statement = " --file %s " % infiles.name
elif file_format == "plink_binary":
statement = " --bfile %s " % infiles.name
elif file_format == "oxford" or file_format == "oxford_binary":
statement = " --data %s" % infiles.name
elif file_format == "GRM_plink":
            statement = " --grm.bin %s " % infiles.name
elif file_format == "GRM_binary":
statement = " --grm-bin %s " % infiles.name
elif file_format == "vcf":
statement = " --vcf %s.vcf.gz " % infiles.name
else:
            raise AttributeError("file format is not defined or recognised. "
                                 "Please define the input correctly when "
"instantiating a FileGroup object")
return statement
def _construct_filters(self, **kwargs):
'''
Add filter to each plink run. [data type]
The filters accepted are defined below. These are input as keyword
arguments supported by this function.
* genotype_rate - exclude SNPs with a genotyping rate below this
value. [float]
* missingness - exclude individuals with total genotype missingness
above this value. [float]
* hwe - p-value threshold for excluding SNPs deviating from
Hardy-Weinberg expectations. [float]
* min_allele_frequency - only include SNPs with cohort/case allele
frequency above this threshold. [float]
* max_allele_frequency - include all SNPs with a MAF equal to or below
this value. [float]
* mendelian_error - filter out samples/trios exceeding the error
threshold. [float]
* keep - keep individuals with matching individual and family IDs.
[file]
* remove - remove all individuals with matching individual and family
IDs. [file]
* quality_score_file - vcf file with variants and quality scores. Use
`qual_score_column` and `var_id_col` to specify which columns
correspond to the quality score and variant ID columns.
[file] <int> <int>
* min_qual_score - alters the lower bound of the quality score
threshold; default is 0.[int]
* max_qual_score - sets an upper limit on the quality scores;
default is Inf. [int]
* allow_no_sex - prevents phenotypes set to missing if there is no
gender information. [boolean]
* enforce_sex - force phenotype missing when using --make-bed, --recode
or --write-covar. [boolean]
* subset_filter - filter on a particular subset. Choices are: cases,
controls, males, females, founders, nonfounders. [str]
* extract - text file list of variant IDs to include in analysis,
ignores all others. [file]
* exclude - text file list of variant IDs to exclude from analysis.
[file]
* chromosome - exclude all variants not on the specified chromosome(s).
[str/list]
* exclude_chromosome - exclude all variants on the specified
chromosome(s). [str/list]
        * autosome - exclude all non-placed and non-autosomal variants.
[boolean]
* pseudo_autosome - include the pseudo-autosomal region of chromosome
X. [boolean]
* ignore_indels - remove all indels/multi-character allele coding
variants. [boolean]
* snp_bp_range - (from, to) range in bp of variants to include in
analysis. [tuple]
* specific_snp - only load the variant specified. [str]
* exclude_snp - exclude this single variant
* window_size - alters behaviour of `specific_snp` and `exclude_snp`
to include/exclude SNPs within +/- half of this distance (kb) are
also included. [float]
* range_resolution - sets the resolution of the (from, to) range.
Either bp, kb or mb. If set it will take the values from
`snp_bp_range`. [str/int/float]
* covariates_file - specify the covariates file with family and
individual IDs in the first two columns. Covariates are in the
(n+2)th column. Only used in conjunction with `covariate_filter`.
[file]
* covariate_filter - covariate columns value to filter on. Can be
used with non-numeric values to filter out individuals with
covariate =/= `covariate_filter` value. [str/int/float]
* covariate_column - column number to apply filtering to if more
than one covariate in the file. [int]
'''
statement = []
# map of keyword arguments recognised to Plink2 filtering flags
filter_map = {"genotype_rate": " --geno %s ",
"missingness": "--mind %s ",
"hwe": " --hwe %s ",
"min_allele_frequency": " --maf %s ",
"max_allele_frequency": " --max-maf %s ",
"mendelian_error": " --me %s ",
"keep": " --keep %s ",
"remove": " --remove %s ",
"quality_score_file": " --qual-scores %s ",
"qual_score_column": " %s ",
"var_id_col": " %s ",
"min_qual_score": " --qual-threshold %s ",
"max_qual_score": " --qual-max-threshold %s ",
"allow_no_sex": " --allow-no-sex ",
"enforce_sex": " --must-have-sex ",
"subset_filter": " --filter-%s ",
"extract": " --extract %s ",
"exclude": " --exclude %s ",
"chromosome": " --chr %s ",
"exclude_chromosome": " --not-chr %s ",
"autosome": " --autosome ",
"pseudo_autosome": " --autosome-xy ",
"ignore_indels": " --snps-only no-DI ",
"snp_id_range": " --from %s --to %s ",
"specific_snp": " --snp %s ",
"window_size": " --window %s ",
"exclude_snp": " --exclude-snp %s ",
"snp_bp_range": "--from-bp %s --to-bp %s ",
"covariates_file": " --filter %s ",
"covariate_filter": " %s ",
"covariate_column": " --mfilter %s ",
"missing_phenotype": " --prune ",
"conditional_snp": " --condition %s ",
"haplotype_size": " --blocks-max-kb %s ",
"haplotype_frequency": " --blocks-min-maf %s "}
# compile all filters together, checking for dependencies.
# use a mapping dictionary to extract the relevant flags and
# combinations to use.
filters = []
filter_dict = {}
for key, value in kwargs.iteritems():
filter_dict[key] = value
# need to check for covariates and qual scores - these
# are more complex. Deal with these first and remove
# from dictionary once complete.
try:
assert filter_dict["quality_score_file"]
assert filter_dict["qual_score_column"]
assert filter_dict["var_id_col"]
quals = []
qual_file = filter_dict["quality_score_file"]
score_col = filter_dict["qual_score_column"]
id_col = filter_dict["var_id_col"]
quals.append(filter_map["quality_score_file"] % qual_file)
quals.append(filter_map["qual_score_column"] % score_col)
quals.append(filter_map["var_id_col"] % id_col)
# remove from dictionary
filter_dict.pop("qual_score_column", None)
filter_dict.pop("var_id_col", None)
filters.append(" ".join(quals))
except KeyError:
pass
try:
assert filter_dict["covariates_file"]
assert filter_dict["covariate_filter"]
covars = []
covar_file = filter_dict["covariates_file"]
covar_val = filter_dict["covariate_filter"]
covars.append(filter_map["covariates_file"] % covar_file)
covars.append(filter_map["covariate_filter"] % covar_val)
            # check to filter on specific column number, default is 3rd file
# column, i.e. (n+2)th column
try:
assert filter_dict["covariate_column"]
covar_col = filter_dict["covariate_column"]
covars.append(filter_map["covariate_column"] % covar_col)
filter_dict.pop("covariate_column", None)
except KeyError:
pass
# remove from dictionary
filter_dict.pop("covariates_file", None)
filter_dict.pop("covariate_filter", None)
filters.append(" ".join(covars))
except KeyError:
pass
# range_resolution and snp_bp_range are used together
try:
assert filter_dict["snp_bp_range"]
flags = filter_map["snp_bp_range"]
from_pos = filter_dict["snp_bp_range"].split(",")[0]
to_pos = filter_dict["snp_bp_range"].split(",")[1]
filters.append(flags % (from_pos, to_pos))
# remove so they are not duplicated - source of bugs
filter_dict.pop("snp_bp_range", None)
except KeyError:
pass
for each in filter_dict.keys():
try:
assert filter_map[each]
# check for data type <- behaviour is type dependent
if type(filter_dict[each]) == bool:
filters.append(filter_map[each])
# handle multiple arguments in string format
elif len(filter_dict[each].split(",")) > 1:
vals = tuple(filter_dict[each].split(","))
filters.append(filter_map[each] % vals)
else:
filter_val = filter_dict[each]
filters.append(filter_map[each] % filter_val)
except KeyError:
E.warn("%s filter not recognised, please see "
"documentation for allowed filters" % each)
pass
self.filters.append(" ".join(filters))
self.statement["filters"] = " ".join(self.filters)
def calc_ld(self, ld_statistic, ld_threshold,
ld_shape="table"):
'''
Calculate linkage disequilibrium between all SNP
pairs.
Arguments
---------
ld_statistic: string
The LD statistic to report, either correlation or squared correlation
of inter-variant allele counts
ld_threshold: float
minimum value to report for pair-wise LD
ld_window: int
max distance (in Kb) between SNPs for calculating LD
ld_shape: string
shape to use for reporting LD, either a table or a matrix. If a
          matrix then either square, square with diagonal (square0) or
triangular. Square matrices are symmetric.
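
        Example (illustrative only):

            self.calc_ld(ld_statistic="r2", ld_threshold=0.2,
                         ld_shape="table")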
'''
statement = []
ld_map = {"r": " --r %s dprime ",
"r2": "--r2 %s dprime "}
shape_map = {"table": "inter-chr gz",
"square": "square gz",
"square0": "square0 gz",
"triangle": "triangle gz"}
try:
statement.append(ld_map[ld_statistic] % shape_map[ld_shape])
except KeyError:
raise ValueError("%s LD statistic not recognised. Please "
"use eithr 'r' or 'r2'" % ld_statistic)
if type(ld_threshold) == float:
statement.append(" --ld-window-r2 %0.3f " % ld_threshold)
else:
E.warn("threshold type not recognised, setting to default "
"value of 0.2")
self.statement["tasks"] = " ".join(statement)
def _run_tasks(self, parameter=None, **kwargs):
'''
Plink2 is capable of much more than just running basic association
analyses.
        These include file processing, reformatting, filtering, data summaries,
PCA, clustering, GRM calculation (slow and memory intense), etc.
        Multiple tasks can be added by separate calls to this function.
        For instance, adding phenotype and gender information using the
        update_samples task whilst changing the file format.
Tasks
-----
* change_format - convert from input format to an alternative format
after applying filters.
* change_missing_values - alters the genotype or phenotype missing
value into the value supplied.
* update_variants - use this to fill in missing variant IDs, useful
for data from exome or whole-genome sequencing that have
non-standard IDs.
* update_samples - update phenotype and sample information
* flip_strands - flip the strand for alleles, swaps A for T and
C for G.
* flip_scan - use the LD-based scan to check SNPs have not had
incorrect strand assignment. Particularly useful if cases and
controls were genotyped separately, or the cohort was genotyped
in different batches.
* sort - sort files by individual and/or family IDs
* merge - merge new filesets with reference fileset.
* merge_mode - handling of missing values and overwriting values
* find_duplicates - find and output duplicate variants based on bp position,
or variant ID. Useful to output for the --exclude filtering flag.
* remove_relations - remove one of a pair of individuals with IBS >=
a threshold. Recommended minimum is 3rd cousins (IBS >= 0.03125).
* check_gender - check imputed gender from non-pseudoautosomal X
          chromosome genotypes against self-reported gender
* estimate_haplotypes - assign SNPs to haplotype blocks and get
positional information
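
        Example (illustrative only; the gender file is hypothetical):

            # convert the input to binary plink format
            self._run_tasks(change_format="plink_binary")
            # add gender information from a separate file
            self._run_tasks(update_samples="gender",
                            parameter="gender_info.txt")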
'''
statement = []
# set up a dictionary of recognised tasks with key word argument
# values as further dictionaries. Use the parameter argument
# to pass arguments by value to string formatting
task_map = {'change_format': {"plink_binary": " --make-bed ",
"plink": " --recode ",
"oxford": " --recode oxford ",
"oxford_binary": " --recode oxford gen-gz ",
"raw": " --recode A tabx "},
"change_missing_values": {"genotype": " --missing-genotype %s ",
"phenotype": " --missing-phenotype %s "},
"update_variants": {"variant_ids": " --set-missing-var-ids %s ",
"missing_id": " --mising-var-code %s ",
"chromosome": " --update-chr %s ",
"centimorgan": " --update-cm %s ",
"name": " --update-name %s ",
"alleles": " --update-alleles %s ",
"map": " --update-map %s "},
"update_samples": {"sample_ids": " --update-ids %s ",
"parents": " --update-parents %s ",
"gender": " --update-sex %s %s "},
"flip_strands": {"all_samples": " --flip %s ",
"subset": " --flip-subset %s "},
"flip_scan": {"default": " --flip-scan verbose ",
"window": "--flip-scan --flip-scan-window %s ",
"kb": " --flip-scan --flip-scan-window-kb %s ",
"threshold": " --flip-scan --flip-scan-threshold %s "},
"sort": {"none": " --indiv-sort %s ",
"natural": " --indiv-sort %s ",
"ascii": " --indiv-sort %s ",
"file": " --indiv-sort %s "},
"merge": {"plink": " --merge %s ",
"binary_plink": " --bmerge %s "},
"merge_mode": {"default": " --merge-mode 1 ",
"orginal_missing": " --merge-mode 2 ",
"new_nonmissing": " --merge-mode 3 ",
"no_overwrite": " --merge-mode 4 ",
"force": " --merge-mode 5 ",
"report_all": " --merge-mode 6 ",
"report_nonmissing": " --merge-mode 7"},
"find_duplicates": {"same_ref": " --list-duplicate-vars require-same-ref ",
"id_match": " --list-duplicate-vars ids-only ",
"suppress_first": " --list-duplicate-vars suppress-first"},
"remove_relations": {"cutoff": " --rel-cutoff %s "},
"check_gender": " --check-sex ",
"pca": " --pca %s ",
"estimate_haplotypes": " --blocks "}
for task, value in kwargs.iteritems():
# check for PCA first as it is not nested in task_map
if task == "pca":
try:
state = task_map[task] % value
statement.append(state)
except TypeError:
statement.append(task_map[task])
elif task == "check_gender":
statement.append(task_map[task])
elif task == "estimate_haplotypes":
statement.append(task_map[task])
elif task != "parameter":
try:
# sub_task is a nested dictionary
sub_task = task_map[task]
try:
assert sub_task[value]
try:
# gender has two string formats
if value == "gender":
gcol = 1
statement.append(sub_task[value] % (parameter,
gcol))
else:
# some tasks do not contain task values for the
# parameter argument - catch these with the TypeError
# exception
statement.append(sub_task[value] % parameter)
# the default for parameter is None, check this is appropriate
if not parameter:
E.warn("Parameter value is set to NoneType. "
"Please check this is an appropriate value "
"to pass for this task")
else:
pass
except TypeError:
statement.append(sub_task[value])
except KeyError:
raise KeyError("No sub task found, see docs for details of "
"recognised tasks")
except KeyError:
raise KeyError("Task not recognised, see docs for details of "
"recognised tasks")
else:
pass
# handle multiple tasks for a single run
try:
curr_tasks = self.statement["tasks"]
new_tasks = " ".join(statement)
self.statement["tasks"] = " ".join([curr_tasks, new_tasks])
except KeyError:
self.statement["tasks"] = " ".join(statement)
def _output_statistics(self, **kwargs):
'''
Summary statistics are written to specific files dictated by the
type of statistic
Statistics
----------
* allele_frequency - writes out MAF to `plink`.frq, this can be
modified with specific keywords.
* missing_data - generates a report of data missingness, can be subset
into within family and/or cluster reports
* hardy_weinberg - calculates all HWE p-values using exact test
statistics. For case/control studies reports are written for case,
controls and combined.
* mendel_errors - generates a Mendelian error report across all trios.
There are 10 different codes responding to different Mendelian error
scenarios.
* inbreeding - calculate observed and expected homozygosity across
individuals and F statistics. If the sample size is small then a
file of MAFs is required. Inbreeding coefficients can also be
reported on request using inbreeding_coef.
* gender_checker - checks gender assignment against X chromosome
genotypes. Gender values can also be imputed based on genotype
information using gender_impute.
* wrights_fst - calculate Wright's Fst statistic given a set of
subpopulations for each autosomal diploid variant. Used in
conjunction with the --within flag.
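        Examples
        --------
        A minimal sketch; the fileset name is hypothetical:

        >>> plink = PlinkDev(files="study_genotypes")
        >>> plink._output_statistics(allele_frequency="gz",
        ...                          missing_data="gz")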
'''
stats_map = {"allele_frequency": " --freq %s ",
"missing_data": " --missing %s ",
"hardy_weinberg": " --hardy midp ",
"mendel_errors": " --mendel %s ",
"inbreeding": " --het %s ",
"inbreeding_coef": " --ibc ",
"gender_checker": " --check-sex ",
"gender_impute": " --impute-sex ",
"wrights_fst": " --fst --within %s ",
"case_control_fst": "--fst %s "}
statement = []
for key, value in kwargs.iteritems():
if value:
try:
assert stats_map[key]
statement.append(stats_map[key] % value)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
else:
try:
assert stats_map[key]
flag = stats_map[key].rstrip("%s ")
statement.append(flag)
except KeyError:
raise KeyError("statistic not recognised. Please "
"consult the documentation for allowed "
"options.")
self.statement["stats"] = " ".join(statement)
def run_association(self, association=None, model=None,
run_options=None,
permutation=False, n_perms=None,
random_seed=None, permutation_options=None,
covariates_file=None, covariates=None):
'''
Construct a statement for a plink2 association analysis.
QC filters are constructed from input during instantiation.
run options include redirecting logging output, using parallelisation,
defining number of threads to use, etc
The default association uses the --assoc flag. Plink will check
phenotype coding, if it is not case/control it assumes
it is a continuous trait and uses linear regression.
Alternative regression models that include covariates can be used,
i.e. logistic and linear regression.
key
***
{CC} - applies to case/control analysis only
{quant} - applies to quantitative trait only
{CC/quant} - applies to both
run_options
-----------
``--assoc``:
* `fisher | fisher-midp` - uses Fisher's exact test to calculate
association p-values or applies Lancaster's mid-p adjustment. {CC}
* `counts` - causes --assoc to report allele counts instead of
frequencies. {CC}
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `qt-means` - generates a .qassoc.means file reporting trait means
and standard deviations by genotype. {quant}
        * `lin` - causes the Lin et al (2006) statistic to be reported. If
multiple testing adjustments and/or permutation is also used, they
will be based on this statistic. {quant}
``--model``:
* `fisher | fisher-midp | trend-only` - uses Fisher's exact test
to calculate association p-values or applies Lancaster's mid-p
adjustment. trend-only forces only a trend test to be performed.
{CC}
* `dom | rec | gen | trend` - use the specified test as the basis
for the model permutation. If none are defined the result with the
smallest p-value is reported. {CC}
        * `--cell` - sets the minimum number of observations per cell in the
          2x3 contingency table. The default is 0 with the Fisher and
          Fisher-midp tests, otherwise 5. {CC}
``--linear/logistic``:
* `set-test` - implements and tests the significance of variant
sets. See documentation below. {CC/quant}
* `hide-covar` - removes the covariate specific sections from the
          results output. {CC/quant}
* `sex | no-x-sex` - `sex` adds sex as covariate to all models,
          whilst `no-x-sex` does not include gender in X-chromosome SNP
models. {CC/quant}
* `interaction` - adds in genotype X covariate interaction terms
          into the model. Can only be used with permutation if ``--tests``
is also specified. {CC/quant}
* `beta` - reports the beta coefficients instead of the OR in a
logistic model. {CC}
* `standard-beta` - standardizes the phenotype and all predictor
variables to zero mean and unit variance prior to regression
(separate for each variant analysed). {quant}
* `intercept` - includes the intercept in the output results.
{quant}
model
-----
* `recessive` - `recessive` specifies the model assuming the A1 allele
as recessive. {CC/quant}
* `dominant` - `dominant` specifies the model assuming the A1 allele is
dominant. {CC/quant}
* `genotype` - `genotype` adds an additive effect/dominance deviation
2df joint test with two genotype variables in the test (coded 0/1/2
and 0/1/0). {CC/quant}
* `trend` - forces a trend test to be performed. {CC/quant}
* `hethom` - `hethom` uses 0/0/1 and 0/1/0 instead of the genotype
coding. With permutation it will be based on the joint test instead
          of just the additive effects. This can be overridden using the
`--tests` flag. {CC/quant}
* `no-snp` - `no-snp` defines a regression of phenotype on covariates
          without reference to genotype data, except where `--condition{-list}`
          is specified. If used with permutation, test results will be reported
for every covariate. {CC/quant}
permutation
-----------
If permutation is True, run an adaptive Monte Carlo permutation test.
        If n_perms is set, this will run a max(T) permutation test with n
replications. A random seed will need to be provided.
* `perm-count` - this alters the permutation output report to include
counts instead of frequencies
covariates
----------
These should be provided in a separate file. Specifying which
covariates to include can be done as either a comma-separated list
of covariate names or numbers. These numbers will correspond to the
(n+2)th covariate file column as per the plink documentation.
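        Examples
        --------
        A minimal sketch of a covariate-adjusted logistic model with a
        max(T) permutation test; file and covariate names are hypothetical:

        >>> plink = PlinkDev(files="study_genotypes")
        >>> plink.run_association(association="logistic",
        ...                       model="dominant",
        ...                       permutation=True,
        ...                       n_perms=10000,
        ...                       random_seed=42,
        ...                       covariates_file="covariates.txt",
        ...                       covariates="age,sex")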
'''
# model map maps common option effects onto specific syntax
model_map = {"--logistic": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--linear": {"recessive": "recssive",
"dominant": "dominant",
"genotype": "genotypic"},
"--model": {"recessive": "rec",
"dominant": "dom",
"genotype": "gen"}}
statement = []
# construct analysis flags
# add model, i.e. additive, recessive, dominant, etc.
# see docstring for details. Make sure correct modifier is used
# with a mapping dictionary
if association == "logistic":
statement.append(" --logistic ")
m_map = model_map["--logistic"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "linear":
statement.append(" --linear ")
m_map = model_map["--linear"]
if model:
statement.append(m_map[model])
else:
pass
elif association == "model":
statement.append(" --model ")
m_map = model_map["--model"]
statement.append(m_map[model])
else:
statement.append(" --assoc ")
# add in run options. These need to be in their correct
# format already
if run_options:
modifiers = " ".join(run_options)
statement.append(modifiers)
else:
pass
# permutation should have a random seed set by the user. Allow
# this to set it's own seed if one not provided, but report it in
# the log file
if permutation:
try:
assert random_seed
except AssertionError:
                random_seed = random.randint(0, 100000000)
                E.warn("No seed is provided for the permutation test. "
                       "Setting seed to %s. Record this for future "
                       "replicability" % random_seed)
if n_perms:
statement.append(" mperm=%i --seed %s " % (n_perms,
random_seed))
else:
statement.append(" perm --seed %s " % (random_seed))
else:
pass
# if using linear or logistic, covariates can be added into the model
# to adjust for their effects - assumes fixed effects of covariates
# mixed models are not yet implemented in Plink2.
        if covariates:
            # values from splitting on "," are always strings, so test
            # whether they are numeric column indices or covariate names
            covars = covariates.split(",")
            if all([cx.isdigit() for cx in covars]):
                m_covar = " --covar-number %s " % covariates
            elif all([not cx.isdigit() for cx in covars]):
                m_covar = " --covar-name %s " % covariates
            else:
                # a mix of names and numbers is ambiguous - don't adjust
                # the model for any covariates and log a warning
                E.warn("Covariate headers or numbers are not recognised. "
                       "No covariates will be included in the model. "
                       "Please specify them exactly")
                covariates = None
                covariates_file = None
if covariates and covariates_file:
statement.append(" --covar %s %s " % (covariates_file,
m_covar))
        elif covariates and not covariates_file:
E.warn("No covariate file specified. None included in model.")
elif covariates_file and not covariates:
E.warn("No covariates specified to include in the model."
"None included")
else:
pass
self.statement["assoc"] = " ".join(statement)
def PCA(self, n_pcs="20"):
'''
        Perform PCA analysis on a previously generated GRM and output the
        first n principal components, default = 20
'''
self._run_tasks(pca=n_pcs)
def _dimension_reduction(self, **kwargs):
'''
Use PCA to perform dimensionality reduction on
input samples. A PCA can be calculated using
a subset of samples which can then be projected on
to other samples.
'''
# FINISH ME!!!!
def _detect_interactions(self, method=None, modifier=None,
set_file=None, set_mode=None,
report_threshold=None,
sig_threshold=None,
covariates_file=None, covariates=None):
'''
Detect epistatic interactions between SNPs using either an inaccurate
scan (fast-epistasis) or a fully saturated linear model
Methods
-------
fast_epistasis - uses an "imprecise but fast" scan of all 3x3 joint genotype
count tables to test for interactions. Can be modified to use a likelihood
        ratio test `boost` or a joint-effects test `joint-effects`. Default is
`joint-effects`.
epistasis - uses a linear model to test for interactions between additive
effects after main effects. Logistic regression for case/control and
linear regression for quantitative traits.
two_locus - tests a single interaction between two variants using joint genotype
counts and frequencies.
adjusted - allows adjustment for covariates in the interaction test, and also adjusts
        for main effects from both the test and target SNP. Requires an R plugin script.
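        Examples
        --------
        A minimal sketch of a fast epistasis scan; the fileset name and
        threshold are illustrative:

        >>> plink = PlinkDev(files="study_genotypes")
        >>> plink._detect_interactions(method="fast_epistasis",
        ...                            modifier="joint-effects",
        ...                            report_threshold=0.001)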
'''
interact_map = {"fast_epistasis": " --fast-epistasis %s ",
"epistasis": " --epistasis %s ",
"two_locus": " --twolocus %s ",
"adjusted": " --R %s "}
statement = []
if modifier:
statement.append(interact_map[method] % modifier)
else:
modifier = ""
statement.append(interact_map[method] % modifier)
if covariates_file:
statement.append("--covar %s --covar-name %s " % (covariates_file,
covariates))
else:
pass
if set_mode and set_file:
# does not work with two-locus test
if method == "two_locus" and set_mode:
E.warn("Two locus test cannot be used in conjunction "
"with a set-based test.")
elif set_mode:
statement.append(" %s --set %s " % (set_mode, set_file))
else:
pass
else:
pass
# alter reporting of significant interactions and significance
# level of interactions
if report_threshold:
statement.append(" --epi1 %0.3f " % float(report_threshold))
else:
pass
if sig_threshold:
statement.append(" --epi2 %0.3f " % float(sig_threshold))
else:
pass
self.statement["epistasis"] = " ".join(statement)
def _matrices(self, matrix_type, shape="triangle", compression=None, options=None):
'''
Calculate a number of different distance matrices:
realised genetic relationship matrix
relationship covariance matrix
identity by descent/state matrix
hamming distance matrix
* matrix_type - matrix to compute. Can be either IBS, 1 - IBS,
Hamming, GRM
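        Examples
        --------
        A minimal sketch returning the flags for a gzipped square IBS
        distance matrix; the fileset name is hypothetical:

        >>> plink = PlinkDev(files="study_genotypes")
        >>> dist_flags = plink._matrices(matrix_type="ibs",
        ...                              shape="square",
        ...                              compression="gz")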
'''
statement = []
if matrix_type == "hamming":
flag = " --distance "
elif matrix_type == "ibs":
flag = " --distance ibs "
elif matrix_type == "genomic":
flag = " --distance 1-ibs "
elif matrix_type == "grm":
flag = " --make-grm-bin "
        # filter out unset (None) components before joining, otherwise
        # " ".join raises a TypeError when compression or options are not set
        if options:
            statement.append(" ".join([op for op in
                                       [flag, shape, compression, options] if op]))
        elif matrix_type == "grm":
            statement.append(flag)
        else:
            statement.append(" ".join([op for op in
                                       [flag, shape, compression] if op]))
return " ".join(statement)
def _qc_methods(self, parameter=None, **kwargs):
        '''
Perform QC on genotyping data, SNP-wise and sample-wise.
All arguments are passed as key word arguments, except
cases detailed in `Parameters` where they are passed with
the ``parameter`` argument.
Methods
-------
* ld_prune - generate a list of SNPs in linkage equilibrium by
pruning SNPs on either an LD statistic threshold, i.e. r^2,
or use a variance inflation factor (VIF) threshold
* heterozygosity - calculate average heterozygosity from each
individual across a set of SNPs, threshold on individuals
with deviation from expected proportions
* ibd - calculate the genetic relationship of individuals to
infer relatedness between individuals, threshold on given
degree of relatedness, e.g. IBD > 0.03125, 3rd cousins
* genetic_gender - estimate the gender of an individual
from the X chromosome genotypes - correlate with reported
gender and output discrepancies
* ethnicity_pca - perform PCA using a subset of independent
SNPs to infer genetic ancestry. Compare and contrast this
          to individuals' reported ancestry. Report discrepancies
and individuals greater than a threshold distance away
from a reference population.
* homozygosity - identifies sets of runs of homozygosity
within individuals. These may be indicative of inbreeding,
systematic genotyping errors or regions under selection.
Parameters
----------
Method parameters can also be passed through this function
as keyword=value pairs.
* ld_prune:
`kb` - this modifier changes the window resolution to kb
rather than bp.
`r2` - the r^2 threshold above which SNPs are to be removed
`vif` - the VIF threshold over which SNPs will be removed
`window` - window size to calculate pair-wise LD over
`step` - step size to advance window by
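        Examples
        --------
        A minimal sketch of LD pruning on an r^2 threshold; the values
        are illustrative only:

        >>> plink = PlinkDev(files="study_genotypes")
        >>> plink._qc_methods(ld_prune="R2", window="50", kb=True,
        ...                   step="5", threshold="0.2")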
'''
qc_dict = {"ld_prune": {"R2": " --indep-pairwise %s %s %s ",
"VIF": " --indep %s %s %s "},
"heterozygosity": {"gz": " --het gz",
"raw": " --het "},
"ibd": {"relatives": " --genome gz rel-check ",
"full": " --genome gz full ",
"norm": " --genome gz "},
"genetic_gender": "none",
"ethnicity_pca": "none",
"homozygosity": {"min_snp": " --homozyg-snp %s ",
"min_kb": " --homozyg-kb %s ",
"default": " --homozyg ",
"density": " --homozyg-density ",
"set_gap": " --homozyg-gap ",
"snp_window": " --homozyg-window-snp %s ",
"het_max": " --homozyg-het %s "}}
task_dict = {}
state = []
# put everything in an accessible dictionary first
for task, value in kwargs.iteritems():
task_dict[task] = value
# LD pruning can be passed multiple parameters,
# handle this separately
try:
sub_task = task_dict["ld_prune"]
ld_prune_task = qc_dict["ld_prune"]
try:
step = task_dict["step"]
except KeyError:
raise AttributeError("No step size found, please "
"pass a step size to advance the "
"window by")
try:
window = task_dict["window"]
try:
task_dict["kb"]
window = "".join([window, "kb"])
task_dict.pop("kb", None)
except KeyError:
pass
except KeyError:
raise AttributeError("No window size found. Please input "
"a window size to prune over")
try:
threshold = task_dict["threshold"]
except KeyError:
raise AttributeError("No threshold value, please input "
"a value to LD prune SNPs on")
# add in the kb if it is passed as an argument
state.append(ld_prune_task[sub_task] % (window, step, threshold))
task_dict.pop("threshold", None)
task_dict.pop("ld_prune", None)
task_dict.pop("window", None)
task_dict.pop("step", None)
except KeyError:
pass
        for task, value in task_dict.iteritems():
try:
sub_task = qc_dict[task]
try:
state.append(sub_task[value] % parameter)
except TypeError:
state.append(sub_task[value])
except KeyError:
raise AttributeError("Task not found, please see "
"documentation for available features")
self.statement["QC"] = " ".join(state)
def build_statement(self, infiles, outfile, threads=None,
memory="60G", parallel=None):
'''
Build statement and execute from components
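        Examples
        --------
        A minimal sketch, assuming the input-handling code of the class has
        already set ``self.statement["program"]`` for the fileset; paths are
        hypothetical:

        >>> plink = PlinkDev(files="study_genotypes")
        >>> plink._run_tasks(change_format="plink_binary")
        >>> plink.build_statement(infiles="study_genotypes",
        ...                       outfile="/tmp/study_qcd",
        ...                       threads=4)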
'''
statement = []
exec_state = self.executable
# calls to function add to the self.statement dictionary
try:
statement.append(self.statement["program"])
except KeyError:
raise AttributeError("Input files and format not detected")
try:
statement.append(self.statement["QC"])
except KeyError:
pass
try:
statement.append(self.statement["filters"])
except KeyError:
pass
try:
statement.append(self.statement["tasks"])
except KeyError:
pass
try:
statement.append(self.statement["stats"])
except KeyError:
pass
try:
statement.append(self.statement["assoc"])
except KeyError:
pass
try:
statement.append(self.statement["matrix"])
except KeyError:
pass
try:
statement.append(self.statement["epistasis"])
except KeyError:
pass
if threads:
statement.append(" --threads %i " % threads)
else:
pass
if not memory:
pass
elif memory != "60G":
memory = int(memory.strip("G")) * 1000
statement.append(" --memory %i " % memory)
else:
statement.append(" --memory 60000 ")
# add output flag
# outfile needs to be complete path for Plink to save
# results properly - check if it starts with '/',
# if so is already a full path
if not parallel:
if os.path.isabs(outfile):
statement.append(" --out %s " % outfile)
else:
outpath = "/".join([os.getcwd(), outfile])
statement.append(" --out %s " % outpath)
os.system(" ".join(statement))
else:
# parallelisation only really applies to GRM calculation
# at the moment <- need to generalise
# if parallelisation is used, invoke temp files
# then agglomerate files
statements = []
if os.path.isabs(outfile):
outpath = outfile
else:
outpath = "/".join([os.getcwd(), outfile])
for i in range(1, parallel+1):
p_state = statement[:] # copy list, assigning just makes a pointer
p_state.append(" --parallel %i %i " % (i, parallel))
p_state.append(" --out %s.%i " % (outpath, i))
statements.append(" ".join(p_state))
os.system(";".join(statements))
class PlinkDev(Plink2):
'''
Run various Plink functions and analysis, including file processing, GRM
calculation, PCA and other GWA tasks
    Requires Plink v1.9_devel to be in the user's PATH variable as ``plinkdev`` to
distinguish it from Plink v1.07 and v1.9.
Currently uses Nov 11 development build.
'''
def __init__(self, files, options=None,
settings=None, design=None):
self.infiles = files
self.options = options
self.settings = settings
self.design = design
self.executable = "plinkdev"
self.statement = {}
self.filters = []
class GWASResults(object):
'''
A class for handling the results from a GWA, used for plotting
and post-analysis QC
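    Examples
    --------
    A minimal sketch; the results file name is hypothetical:

    >>> gwas_results = GWASResults(assoc_file="study.assoc.logistic")
    >>> results_df = gwas_results.results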
'''
def __init__(self, assoc_file, **kwargs):
# if the assoc_file is a list of multiple files,
        # then merge them into a single file
if type(assoc_file) == list and len(assoc_file) > 1:
E.info("multiple results files detected")
self.infiles = assoc_file
self.infile = None
self.results = self.parse_genome_wide(assoc_file)
else:
E.info("single results file detected")
self.infile = assoc_file
self.infiles = None
# results is a pandas dataframe to operate on
self.results = self.get_results(assoc_file, **kwargs)
def parse_genome_wide(self, association_files):
'''
Accept a list of results files, merge them together
and output as a single dataframe
Will this take a lot of memory??
'''
file0 = association_files.pop(0)
df = self.get_results(file0)
for afile in association_files:
_df = self.get_results(afile)
df = df.append(_df)
df["CHR"] = df["CHR"].astype(np.int64)
df.sort_values(by=["CHR", "BP"], inplace=True)
return df
def get_results(self, association_file,
epistasis=False):
'''
Parse a GWA results file and return the table
'''
# use Pandas for now - try something different later
# SQLite DB maybe?
# inconsistent number of white spaces between
# fields means Pandas parsing breaks down
# fields need to be the correct data type,
# i.e. BP = int, P = float, SNP = str, etc
# if the file has already been parsed and processed
# just assign it instead
# epistasis results don't have a header
try:
peek = pd.read_table(association_file, nrows=5,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
peek = pd.read_table(association_file, nrows=5,
sep="\t", header=0,
index_col=None)
if epistasis:
try:
results_frame = pd.read_table(association_file,
sep="\s*", header=0,
index_col=None)
except StopIteration:
results_frame = pd.read_table(association_file,
sep="\t", header=None,
index_col=None)
# results from fast epistasis are different to others
if results_frame.shape[1] == 7:
results_frame.columns = ["CHR1", "SNP1", "CHR",
"SNP", "OR", "STAT", "P"]
else:
results_frame.columns = ["CHR", "SNP", "BP", "A1", "OR",
"SE", "STAT", "P"]
results_frame.loc[:, "BP"] = pd.to_numeric(results_frame["BP"],
errors="coerce")
results_frame.loc[:, "P"] = pd.to_numeric(results_frame["P"],
errors="coerce")
return results_frame
else:
try:
assert peek["log10P"].any()
results_frame = pd.read_table(association_file,
sep="\t", header=0,
index_col=None,
dtype={"BP": np.int64,
"NMISS": np.int64})
return results_frame
except KeyError:
pass
l_count = 0
E.info("parsing file: %s" % association_file)
with open(association_file, "r") as ifile:
for line in ifile:
# check if spacing is whitespace or tab
if len(line.split(" ")) > 1:
parsed = line.split(" ")
elif len(line.split("\t")) > 1:
parsed = line.split("\t")
else:
raise IOError("file separator not recognised. "
"Must be whitespace or tab")
# remove multiple blank spaces
for i in range(parsed.count('')):
parsed.remove('')
# get rid of the newline
try:
parsed.remove('\n')
except ValueError:
parsed = [(px).rstrip("\n") for px in parsed]
if l_count == 0:
header = [iy.upper() for ix, iy in enumerate(parsed)]
head_idx = [ix for ix, iy in enumerate(parsed)]
map_dict = dict(zip(head_idx, header))
res_dict = dict(zip(header, [[] for each in header]))
l_count += 1
else:
col_idx = [lx for lx, ly in enumerate(parsed)]
col = [ly for lx, ly in enumerate(parsed)]
for i in col_idx:
res_dict[map_dict[i]].append(col[i])
l_count += 1
        # subtract one from the index for the header column
df_idx = range(l_count-1)
results_frame = pd.DataFrame(res_dict, index=df_idx)
results_frame.fillna(value=1.0, inplace=True)
try:
results_frame = results_frame[results_frame["TEST"] == "ADD"]
except KeyError:
pass
# need to handle NA as strings
results_frame["P"][results_frame["P"] == "NA"] = 1.0
results_frame["BP"] = [int(bx) for bx in results_frame["BP"]]
results_frame["P"] = [np.float64(fx) for fx in results_frame["P"]]
try:
results_frame["STAT"][results_frame["STAT"] == "NA"] = 1.0
results_frame["STAT"] = [np.float64(sx) for sx in results_frame["STAT"]]
except KeyError:
try:
results_frame["CHISQ"][results_frame["CHISQ"] == "NA"] = 1.0
results_frame["CHISQ"] = [np.float64(sx) for sx in results_frame["CHISQ"]]
except KeyError:
try:
results_frame["T"][results_frame["T"] == "NA"] = 1.0
results_frame["T"] = [np.float64(sx) for sx in results_frame["T"]]
except KeyError:
pass
try:
results_frame["F_U"][results_frame["F_U"] == "NA"] = 0.0
results_frame["F_U"] = [np.float64(ux) for ux in results_frame["F_U"]]
except KeyError:
pass
try:
results_frame["F_A"][results_frame["F_A"] == "NA"] = 0.0
results_frame["F_A"] = [np.float64(ax) for ax in results_frame["F_A"]]
except KeyError:
pass
try:
results_frame["FREQ"][results_frame["FREQ"] == "NA"] = 0.0
results_frame["FREQ"] = [np.float64(fx) for fx in results_frame["FREQ"]]
except KeyError:
pass
try:
results_frame["OR"][results_frame["OR"] == "NA"] = 1.0
results_frame["OR"] = [np.float64(ox) for ox in results_frame["OR"]]
except KeyError:
try:
results_frame["BETA"][results_frame["BETA"] == "NA"] = 1.0
results_frame["BETA"] = [np.float64(ox) for ox in results_frame["BETA"]]
except KeyError:
results_frame["B"][results_frame["B"] == "NA"] = 0.0
results_frame["B"] = [np.float64(ox) for ox in results_frame["B"]]
return results_frame
def plotManhattan(self, save_path, resolution="chromosome",
write_merged=True):
'''
Generate a basic manhattan plot of the association results
Just deal with chromosome-by-chromosome for now.
'''
# use the python ggplot plotting package
# need to calculate -log10P values separately
self.results["log10P"] = np.log10(self.results["P"])
# or using rpy2
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
R('''sink(file="sink.text")''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
if resolution == "chromosome":
R('''assoc.df$CHR <- factor(assoc.df$CHR, '''
'''levels=levels(ordered(unique(assoc.df$CHR))),'''
'''labels=unique(paste0("chr", assoc.df$CHR)))''')
R('''nchrom <- length(unique(assoc.df$CHR))''')
R('''myCols <- rep(c("#ca0020", "#404040"), nchrom)[1:nchrom]''')
R('''names(myCols) <- sort(unique(assoc.df$CHR))''')
R('''colScale <- scale_colour_manual(name = "CHR", values=myCols)''')
            R('''bp_indx <- seq_len(nrow(assoc.df))''')
R('''assoc.df$BPI <- bp_indx''')
R('''p <- ggplot(assoc.df, aes(x=BPI, y=-log10(P), colour=CHR)) + '''
'''geom_point(size=1) + colScale + '''
'''geom_hline(yintercept=8, linetype="dashed", colour="blue") + '''
'''theme_bw() + labs(x="Chromosome position (bp)", '''
'''y="-log10 P-value") + facet_grid(~CHR, scale="free_x") + '''
'''theme(axis.text.x = element_text(size=8))''')
R('''png("%s", res=90, unit="in", height=8, width=12)''' % save_path)
R('''print(p)''')
R('''dev.off()''')
elif resolution == "genome_wide":
R('''nchroms <- length(unique(assoc.df$CHR))''')
R('''png("%s", width=720, height=540)''' % save_path)
R('''p <- manhattan(assoc.df, main="Manhattan plot",'''
'''ylim=c(0, 50), cex=0.9, suggestiveline=T,'''
'''genomewideline=-log10(5e-8), chrlabs=c(1:nchroms), '''
'''col=c("#8B1A1A","#8470FF"))''')
R('''print(p)''')
R('''dev.off()''')
R('''sink(file=NULL)''')
if write_merged:
return self.results
else:
return False
def plotQQ(self, save_path, resolution="chromosome"):
'''
Generate a QQ-plot of expected vs. observed
test statistics
'''
self.results["log10P"] = np.log(self.results["P"])
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
R('''suppressPackageStartupMessages(library(qqman))''')
r_df = py2ri.py2ri_pandasdataframe(self.results)
R.assign("assoc.df", r_df)
R('''png("%s", width=720, height=540)''' % save_path)
R('''qq(assoc.df$P)''')
R('''dev.off()''')
def plotEpistasis(self, save_path, resolution="chromosome"):
'''
Generate both manhattan plot of the SNPs tested for
epistasis with their target SNP, and a QQplot
of the association test p-values
'''
# plot QQplot
qq_save = "_".join([save_path, "qqplot.png"])
self.plotQQ(qq_save)
manhattan_save = "_".join([save_path, "manhattan.png"])
self.plotManhattan(manhattan_save,
resolution=resolution,
write_merged=False)
def getHits(self, threshold=0.00000005):
'''
Pull out regions of association by selecting
all SNPs with association p-values less than
        a certain threshold. Default is genome-wide
        significance, p < 5x10^-8.
Then select region +/- 1.5Mb of the index SNP.
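        Examples
        --------
        A minimal sketch; ``getHits`` is a generator yielding
        (chromosome, region dataframe) pairs and the file name is
        hypothetical:

        >>> gwas_results = GWASResults(assoc_file="study.assoc.logistic")
        >>> for contig, region_df in gwas_results.getHits(threshold=5e-8):
        ...     print contig, region_df.shape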
'''
hits_df = self.results[self.results["P"] <= threshold]
# find the range of SNPs with 3Mb of each index SNP
contig_group = hits_df.groupby(["CHR"])
# there may be multiple independent hits on a given
# chromosome. Need to identify independent regions.
# Independent regions are defined by their statistical
# independence, not distance. Just take all SNPs
# in 3Mb of the lead SNP for each signal
        # this will create overlaps of association signals
for contig, region in contig_group:
region.index = region["BP"]
chr_df = self.results[self.results["CHR"] == contig]
chr_df.index = chr_df["BP"]
# find independent regions and output consecutively
# if only a single SNP above threshold then there is
# only one independent region!!
if len(region) > 1:
independents = self.findIndependentRegions(region)
indi_group = independents.groupby("Group")
else:
region["Group"] = 1
indi_group = region.groupby("Group")
for group, locus in indi_group:
# if there is only a single variant should
# the region be kept? Likely a false
# positive
if min(locus["BP"]) == max(locus["BP"]):
pass
else:
try:
try:
locus.loc[:, "STAT"] = abs(locus["STAT"])
locus.sort_values(by="STAT", inplace=True)
except KeyError:
locus.loc[:, "T"] = abs(locus["T"])
                            locus.sort_values(by="T", inplace=True)
except KeyError:
locus.sort_values(by="CHISQ", inplace=True)
index_bp = locus.iloc[0]["BP"]
E.info("Lead SNP for regions is: {}".format(locus.iloc[0]["SNP"]))
left_end = min(chr_df.loc[chr_df.index >= index_bp - 1500000, "BP"])
right_end = max(chr_df.loc[chr_df.index <= index_bp + 1500000, "BP"])
range_df = chr_df.loc[left_end : right_end, :]
max_stat = max(abs(range_df["STAT"]))
yield contig, range_df
def extractSNPs(self, snp_ids):
'''
Extract a specific set of SNP results
Arguments
---------
snp_ids: list
a list of SNP IDs to extract from the
GWAS results
Returns
-------
        snp_results: pandas.Core.DataFrame
'''
self.results.index = self.results["SNP"]
snp_results = self.results.loc[snp_ids]
return snp_results
def findIndependentRegions(self, dataframe):
'''
Find the number of independent regions on
        a chromosome. Uses R distance and tree
cutting functions
'''
        # move the dataframe into the R environment
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(dataframe)
R.assign("rdf", r_df)
R('''mat <- as.matrix(rdf$BP)''')
        # get distances then cluster, cut the tree at 1Mb (1e6 bp)
R('''dist.mat <- dist(mat, method="euclidean")''')
R('''clusts <- hclust(dist.mat, "average")''')
R('''cut <- cutree(clusts, h=1e6)''')
R('''out.df <- rdf''')
R('''out.df$Group <- cut''')
regions_df = py2ri.ri2py_dataframe(R["out.df"])
return regions_df
def mergeFrequencyResults(self, freq_dir, file_regex):
'''
Merge GWAS results with frequency information,
and format for GCTA joint analysis input
'''
# create a dummy regex to compare
# file_regex type against
test_re = re.compile("A")
if type(file_regex) == str:
file_regex = re.compile(file_regex)
elif type(file_regex) == type(test_re):
pass
else:
raise TypeError("Regex type not recognised. Must"
"be string or re.SRE_Pattern")
all_files = os.listdir(freq_dir)
freq_files = [fx for fx in all_files if re.search(file_regex, fx)]
gwas_df = self.results
df_container = []
for freq in freq_files:
freq_file = os.path.join(freq_dir, freq)
E.info("Adding information from {}".format(freq_file))
# files may or may not be tab-delimited
try:
_df = pd.read_table(freq_file,
sep="\s*", header=0,
index_col=None,
engine='python')
except StopIteration:
_df = pd.read_table(freq_file,
sep="\t", header=0,
index_col=None)
merge_df = pd.merge(self.results, _df,
left_on=["CHR", "SNP"],
right_on=["CHR", "SNP"],
how='left')
df_container.append(merge_df)
count = 0
for df in df_container:
if not count:
gwas_df = df
count += 1
else:
gwas_df = gwas_df.append(df)
E.info("Calculating Z scores and SEs")
        z_scores = -0.862 + np.sqrt(0.743 - 2.404 *
                                    np.log(gwas_df.loc[:, "P"]))
se = np.log(gwas_df.loc[:, "OR"])/z_scores
gwas_df.loc[:, "Z"] = z_scores
gwas_df.loc[:, "SE"] = se
gwas_df.loc[:, "logOR"] = np.log(gwas_df.loc[:, "OR"])
out_cols = ["SNP", "A1_x", "A2", "MAF", "logOR", "SE", "P", "NMISS"]
out_df = gwas_df[out_cols]
# need to remove duplicates, especially those
# that contain NaN for A2 and MAF
out_df = out_df.loc[~np.isnan(out_df["MAF"])]
return out_df
##########################################################
# unbound methods that work on files and data structures #
##########################################################
def plotMapPhenotype(data, coords, coord_id_col, lat_col,
long_col, save_path, xvar, var_type,
xlabels=None, level=None):
'''
Generate a map of the UK, with phenotype data overlaid
'''
# merge co-ordinate data with phenotype data
merged_df = pd.merge(left=coords, right=data, left_on=coord_id_col,
right_on=coord_id_col, how='inner')
# pheno column and set level of categorical variable
if xlabels and var_type == "categorical":
# convert to string type as a categorical variable
# drop NA observations from the merged data frame
na_mask = pd.isnull(merged_df.loc[:, xvar])
merged_df = merged_df[~na_mask]
rvar = merged_df.loc[:, xvar].copy()
nvar = pd.Series(np.nan_to_num(rvar), dtype=str)
var = [v for v in set(nvar)]
var.sort()
# recode the variables according to the input labels
xlabs = xlabels.split(",")
lbls = [str(xlabs[ix]) for ix in range(len(var))]
for xv in range(len(var)):
nvar[nvar == var[xv]] = lbls[xv]
merged_df.loc[:, "cat_var"] = nvar
else:
pass
if level:
lvar = merged_df.loc[:, "cat_var"].copy()
mask = lvar.isin([level])
lvar[mask] = 1
lvar[~mask] = 0
lvar = lvar.fillna(0)
merged_df.loc[:, "dichot_var"] = lvar
else:
pass
# push the df into the R env
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(merged_df)
R.assign("pheno.df", r_df)
# setup the map and plot the points
R('''suppressPackageStartupMessages(library(maps))''')
R('''suppressPackageStartupMessages(library(mapdata))''')
R('''uk_map <- map("worldHires", c("UK", "Isle of Wight",'''
'''"Ireland", "Isle of Man", "Wales:Anglesey"), '''
'''xlim=c(-11, 3), ylim=c(50, 60.9), plot=F)''')
# colour by reference, or a colour for each discrete value
if level:
R('''red <- rep("#FF0000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 1]))''')
R('''black <- rep("#000000", '''
'''times=length(pheno.df$dichot_var[pheno.df$dichot_var == 0]))''')
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
        R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 1], '''
          '''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 1], pch=".", col=red)''' % locals())
R('''points((pheno.df[,"%(long_col)s"])[pheno.df$dichot_var == 0], '''
'''(pheno.df[,"%(lat_col)s"])[pheno.df$dichot_var == 0], pch=".", col=black)''' % locals())
R('''legend('topleft', legend=c("not-%(level)s", "%(level)s"),'''
'''fill=c("#000000", "#FF0000"))''' % locals())
R('''dev.off()''')
else:
R('''png("%(save_path)s", width=540, height=540, res=90)''' % locals())
R('''map(uk_map)''')
R('''points(pheno.df[,"%(long_col)s"], pheno.df[,"%(lat_col)s"], pch=".", '''
'''col=factor(pheno.df$cat_var))''' % locals())
R('''legend('topleft', legend=unique(pheno.df$cat_var),'''
'''fill=unique(pheno.df$cat_var))''' % locals())
R('''dev.off()''')
def plotPhenotype(data, plot_type, x, y=None, group=None,
save_path=None, labels=None, xlabels=None,
ylabels=None, glabels=None, var_type="continuous"):
'''
Generate plots of phenotypes using ggplot
'''
# change data format if necessary and convert nan/NA to missing
if not y and var_type == "categorical":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "integer":
var = np.nan_to_num(data.loc[:, x].copy())
data.loc[:, x] = pd.Series(var, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif not y and var_type == "continuous":
var = data.loc[:, x].copy()
data.loc[:, x] = pd.Series(var, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "categorical":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=str)
data.loc[:, y] = pd.Series(yvar, dtype=str)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "integer":
xvar = np.nan_to_num(data.loc[:, x].copy())
yvar = np.nan_to_num(data.loc[:, y].copy())
data.loc[:, x] = pd.Series(xvar, dtype=np.int64)
data.loc[:, y] = pd.Series(yvar, dtype=np.int64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
elif y and var_type == "continuous":
# NAs and NaNs should be handled by ggplot
xvar = data.loc[:, x].copy()
yvar = data.loc[:, y].copy()
data.loc[:, x] = pd.Series(xvar, dtype=np.float64)
data.loc[:, y] = pd.Series(yvar, dtype=np.float64)
if group:
gvar = np.nan_to_num(data.loc[:, group].copy())
data.loc[:, group] = pd.Series(gvar, dtype=str)
else:
pass
R('''suppressPackageStartupMessages(library(ggplot2))''')
# put the pandas dataframe in to R with rpy2
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(data)
R.assign("data_f", r_df)
# plotting parameters, including grouping variables and labels
# axis labels
try:
labs = labels.split(",")
except AttributeError:
labs = []
# if variable labels have been provided then assume they are
# categorical/factor variables.
# assume variable labels are input in the correct order
if xlabels:
try:
unique_obs = len(set(data.loc[:, x]))
xfact = len(xlabels.split(","))
if xfact == unique_obs:
R('''lvls <- unique(data_f[,"%(x)s"])''' % locals())
lbls = ro.StrVector([ri for ri in xlabels.split(",")])
R.assign("lbls", lbls)
R('''lvls <- lvls[order(lvls, decreasing=F)]''')
R('''data_f[,"%(x)s"] <- ordered(data_f[,"%(x)s"], '''
'''levels=lvls, labels=lbls)''' % locals())
else:
E.warn("the number of labels does not match the "
"number of unique observations, labels not "
"used.")
pass
except AttributeError:
xlabels = None
else:
pass
if glabels:
unique_obs = len(set(data.loc[:, group]))
gfact = len(glabels.split(","))
if gfact == unique_obs:
R('''lvls <- unique(data_f[, "%(group)s"])''' % locals())
lbls = ro.StrVector([rg for rg in glabels.split(",")])
R.assign("lbls", lbls)
R('''lvls <- lvls[order(lvls, decreasing=F)]''')
R('''data_f[,"%(group)s"] <- ordered(data_f[,"%(group)s"], '''
'''levels=lvls, labels=lbls)''' % locals())
else:
E.warn("the number of labels does not match the "
"number of unique observations, labels not "
"used.")
pass
# start constructing the plot
# if X and Y are specified, assume Y is a variable to colour
# observations by, unless group is also set.
# If Y and group then colour by group and split by Y
if y:
R('''p <- ggplot(aes(x=%s, y=%s), data=data_f)''' % (x, y))
if plot_type == "histogram":
if group:
R('''p <- p + geom_histogram(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_histogram(aes(colour=%(y)s))''' % locals())
elif plot_type == "barplot":
if group:
R('''p <- p + geom_bar(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_bar(aes(colour=%(y)s))''' % locals())
elif plot_type == "density":
if group:
R('''p <- p + geom_density(aes(colour=%(group)s)) + '''
'''facet_grid(. ~ %(y)s)''' % locals())
else:
R('''p <- p + geom_density(aes(colour=%(y)s))''' % locals())
elif plot_type == "boxplot":
if group:
R('''p <- p + geom_boxplot(group=%(group)s,'''
'''aes(x=factor(%(x)s), y=%(y)s, fill=%(group)s))''' % locals())
else:
R('''p <- p + geom_boxplot(aes(colour=%(x)s))''' % locals())
elif plot_type == "scatter":
if group:
R('''p <- p + geom_point(size=1, aes(colour=%(group)s))''' % locals())
else:
R('''p <- p + geom_point(size=1)''')
if len(labs) == 1:
xlab = labs[0]
R('''p <- p + labs(x="%s")''' % xlab)
elif len(labs) == 2:
xlab = labs[0]
ylab = labs[1]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s")''' % locals())
elif len(labs) == 3:
xlab = labs[0]
ylab = labs[1]
title = labs[2]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s", '''
'''title="%(title)s")''' % locals())
elif len(labs) == 4:
xlab = labs[0]
ylab = labs[1]
glab = labs[2]
title = labs[3]
R('''p <- p + labs(x="%(xlab)s", y="%(ylab)s",'''
'''title="%(title)s")''' % locals())
# need to add in guide/legend title
else:
R('''p <- ggplot(data=data_f)''')
if plot_type == "histogram":
if group:
R('''p <- p + geom_histogram(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_histogram(aes(%s))''' % x)
elif plot_type == "barplot":
if group:
R(''' p <- p + geom_bar(aes(%(x)s)) + '''
                  '''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_bar(aes(%s))''' % x)
elif plot_type == "density":
if group:
R('''p <- p + geom_density(aes(%(x)s)) + '''
'''facet_grid(. ~ %(group)s)''' % locals())
else:
R('''p <- p + geom_density(aes(%s))''' % x)
elif plot_type == "boxplot":
if group:
R('''p <- p + geom_boxplot(aes(y=%(x)s, '''
'''x=factor(%(group)s)))''' % locals())
else:
raise AttributeError("Y or group variable is missing")
if len(labs) == 1:
xlab = labs[0]
R('''p <- p + labs(x="%s")''' % xlab)
elif len(labs) == 2:
xlab = labs[0]
title = labs[1]
R('''p <- p + labs(x="%(xlab)s", '''
'''title="%(title)s")''' % locals())
elif len(labs) == 3:
if group:
xlab = labs[0]
glab = labs[1]
title = labs[2]
R('''p <- p + labs(x="%(glab)s", y="%(xlab)s",'''
'''title="%(title)s")''' % locals())
else:
E.warn("too many labels provided, assume first is X, "
"and second is plot title")
xlab = labs[0]
title = labs[1]
R('''p <- p + labs(x="%(xlab)s", '''
'''title="%(title)s")''' % locals())
# the default theme is bw
R('''p <- p + theme_bw()''')
R('''png("%(save_path)s")''' % locals())
R('''print(p)''')
R('''dev.off()''')
def parseFlashPCA(pcs_file, fam_file):
'''
Parse the principal components file from FlashPCA
and match with individual identifiers. This
assumes the output order of FlashPCA is the same
as the input order in the .fam file
'''
pc_df = pd.read_table(pcs_file, sep=None,
header=None, index_col=None)
    # FlashPCA output has no header; label the columns PC1..PCn
    headers = ["PC%i" % (n + 1) for n in range(len(pc_df.columns))]
    pc_df.columns = headers
fam_df = pd.read_table(fam_file, sep="\t",
header=None, index_col=None)
fam_df.columns = ["FID", "IID", "PAR", "MAT", "GENDER",
"PHENO"]
pc_df[["FID", "IID"]] = fam_df.iloc[:, :2]
return pc_df
def plotPCA(data, nPCs, point_labels, save_path,
headers, metadata=None, multiplot=False):
'''
Plot N principal components from a PCA either as
a single plot of the first 2 PCs, a grid plot of
N PCs.
Arguments
---------
    data: pandas.Core.DataFrame
      dataframe of principal components, e.g. the output of
      parseFlashPCA, containing FID and IID columns
nPCs: int
number of principal components to plot. If this
value is > 2, then multiplot will be enabled
automatically
point_labels: vector
      a vector of labels of length corresponding to
the number of rows in the data file. These are
used to colour the points in the plot with relevant
metadata. Alternatively, can be the column header
in the metadata file that corresponds to annotations
save_path: string
An absolute PATH to save the plot(s) to
headers: boolean
      whether the `data` file contains a header delineating the
columns
metadata: string
file containing metadata to annotate plot with, includes
point_labels data
multiplot: boolean
If True, generate a grid of scatter plots with successive
PCs plotted against each other
Returns
-------
None
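    Examples
    --------
    A minimal sketch using the output of parseFlashPCA; file and column
    names are hypothetical and the metadata file is assumed to contain
    FID, IID and the point_labels column:

    >>> pc_df = parseFlashPCA(pcs_file="flashpca.pcs",
    ...                       fam_file="study.fam")
    >>> plotPCA(data=pc_df, nPCs=2, point_labels="ethnicity",
    ...         save_path="pca_plot.png", headers=False,
    ...         metadata="sample_metadata.tsv")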
'''
py2ri.activate()
    if metadata:
        meta_df = pd.read_table(metadata, sep="\t", header=0,
                                index_col=None)
    else:
        raise ValueError("a metadata file is required to annotate "
                         "points by %s" % point_labels)
    labels = meta_df[["FID", "IID", point_labels]]
    merged = pd.merge(data, labels, left_on="FID",
                      right_on="FID", how='inner')
# TO DO: enable multiplotting of many PCs
r_df = py2ri.py2ri_pandasdataframe(merged)
R.assign("pc.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''pc.df[["%(point_labels)s"]] <- as.factor(pc.df[["%(point_labels)s"]])''' % locals())
R('''p_pcs <- ggplot(pc.df, aes(x=PC1, y=PC2, colour=%s)) + '''
'''geom_point(size=1) + theme_bw() + '''
'''labs(x="PC1", y="PC2", title="PC1 vs. PC2 LD trimmed genotypes")''' % point_labels)
R('''png("%s")''' % save_path)
R('''print(p_pcs)''')
R('''dev.off()''')
def countByVariantAllele(ped_file, map_file):
'''
Count the number of individuals carrying the variant allele
for each SNP.
Count the number of occurences of each allele with the variant
allele of each other SNP.
Requires ped file genotyping to be in format A1(minor)=1, A2=2
'''
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = variants.keys()
# store genotype matrix as an array
# rows and columns are variant IDs
homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
tcount = 0
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = indiv_split[4]
phen = indiv_split[5]
genos = indiv_split[6:]
#genos = ["".join([alleles[i],
# alleles[i+1]]) for i in range(0, len(alleles), 2)]
tcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
homA1[i, i] += 1
elif genos[i] == "12":
het[i, i] += 1
else:
homA2[i, i] += 1
allele_counts = ((2 * homA2) + het)/float(2 * tcount)
mafs = 1 - allele_counts.diagonal()
maf_df = pd.DataFrame(zip(variant_ids, mafs), columns=["SNP", "MAF"],
index=[x for x,y in enumerate(variant_ids)])
maf_df["A2_HOMS"] = (2 * homA1).diagonal()
maf_df["A2_HETS"] = het.diagonal()
maf_df.index = maf_df["SNP"]
maf_df.drop(["SNP"], axis=1, inplace=True)
E.info("allele frequencies calculated over %i SNPs and "
"%i individuals" % (len(genos), tcount))
return maf_df
def calcMaxAlleleFreqDiff(ped_file, map_file, group_file,
test=None, ref=None):
'''
Calculate the allele frequency difference between
two groups of individuals based upon some prior
assignment.
Arguments
---------
ped_file: string
plink text format .ped file - see Plink documentation
for details (https://www.cog-genomics.org/plink2/input#ped)
map_file: string
plink test format .map file - see Plink documentation
for details (https://www.cog-genomics.org/plink2/input#ped)
group_file: string
a file containing grouping information, must be in standard
Plink format with IID, FID, GROUP as the columns
test: string
group label to use as the test case for calculating
allele frequency differences. If this isn't set, then
the first non-ref value encountered will be set as test
ref: string
group label to use as the reference case for calculating
allele frequency differences. If not set, then the first
      value encountered will be used as the reference.
Returns
-------
freq_diffs: pandas.Core.DataFrame
dataframe of SNP information and allele frequency difference
between group labels
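    Examples
    --------
    A minimal sketch; file names and group labels are hypothetical:

    >>> freq_diffs = calcMaxAlleleFreqDiff(ped_file="study.ped",
    ...                                    map_file="study.map",
    ...                                    group_file="batches.tsv",
    ...                                    ref="batch1",
    ...                                    test="batch2")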
'''
# group labels need to be of the same type, convert all
# group values to string
group_df = pd.read_table(group_file, sep="\t", header=0,
index_col=None,
converters={"GROUP": str,
"FID": str,
"IID": str})
group_df["GROUP"] = [str(xg) for xg in group_df["GROUP"]]
try:
assert ref
E.info("Reference label set to %s" % ref)
except AssertionError:
        ref = list(set(group_df["GROUP"]))[0]
E.info("Reference label not provided. Setting "
"reference label to %s" % ref)
try:
assert test
E.info("Test label set to %s" % test)
except AssertionError:
        test = [tx for tx in set(group_df["GROUP"]) if tx != ref][0]
E.info("Test label not provided, setting test "
"label to %s." % test)
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = variants.keys()
# store genotype matrix as an array
# rows and columns are variant IDs
ref_homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
ref_homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
ref_het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_homA1 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_homA2 = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
test_het = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.int64)
tcount = 0
rcount = 0
ncount = 0
ref_ids = group_df["IID"][group_df["GROUP"] == ref].values
test_ids = group_df["IID"][group_df["GROUP"] == test].values
total = len(group_df)
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = indiv_split[4]
phen = indiv_split[5]
genos = indiv_split[6:]
#genos = ["".join([alleles[i],
# alleles[i+1]]) for i in range(0, len(alleles), 2)]
# check for ref and test conditions
# ignore individuals in neither camp
if iid in test_ids:
tcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
test_homA1[i, i] += 1
elif genos[i] == "12":
test_het[i, i] += 1
else:
test_homA2[i, i] += 1
elif iid in ref_ids:
rcount += 1
# get genotype counts
for i in range(len(genos)):
# missing genotypes are coded '00' in plink format
if genos[i] == "00":
pass
elif genos[i] == "11":
ref_homA1[i, i] += 1
elif genos[i] == "12":
ref_het[i, i] += 1
else:
ref_homA2[i, i] += 1
else:
ncount += 1
            # report progress at approximately each quarter of the cohort
            prop_counted = round(float(tcount + rcount + ncount)/total, 2)
            if prop_counted == 0.25:
                E.info("%i samples counted. "
                       "Approximately 25%% of samples counted" % (tcount + rcount + ncount))
            elif prop_counted == 0.50:
                E.info("%i samples counted. "
                       "Approximately 50%% of samples counted" % (tcount + rcount + ncount))
            elif prop_counted == 0.75:
                E.info("%i samples counted. "
                       "Approximately 75%% of samples counted" % (tcount + rcount + ncount))
E.info("Counted alleles for %i test cases, %i ref cases,"
" %i neither reference nor test." % (tcount, rcount,
ncount))
ref_allele_counts = ((2 * ref_homA2) + ref_het)/float(2 * rcount)
test_allele_counts = ((2 * test_homA2) + test_het)/float(2 * tcount)
ref_mafs = 1 - ref_allele_counts.diagonal()
    test_mafs = 1 - test_allele_counts.diagonal()
ref_maf_df = pd.DataFrame(zip(variant_ids, ref_mafs),
columns=["SNP", "ref_MAF"],
index=[x for x,y in enumerate(variant_ids)])
ref_maf_df["ref_A2_HOMS"] = (2 * ref_homA1).diagonal()
ref_maf_df["ref_A2_HETS"] = ref_het.diagonal()
ref_maf_df.index = ref_maf_df["SNP"]
ref_maf_df.drop(["SNP"], axis=1, inplace=True)
test_maf_df = pd.DataFrame(zip(variant_ids, test_mafs),
columns=["SNP", "test_MAF"],
index=[x for x,y in enumerate(variant_ids)])
test_maf_df["test_A2_HOMS"] = (2 * test_homA1).diagonal()
test_maf_df["test_A2_HETS"] = test_het.diagonal()
test_maf_df.index = test_maf_df["SNP"]
test_maf_df.drop(["SNP"], axis=1, inplace=True)
freq_diffs = pd.merge(ref_maf_df, test_maf_df,
left_index=True, right_index=True,
how='inner')
freq_diffs["MAF_diff"] = freq_diffs["ref_MAF"] - freq_diffs["test_MAF"]
E.info("allele frequencies calculated over %i SNPs and "
"%i individuals" % (len(genos), tcount + rcount))
return freq_diffs
def calcPenetrance(ped_file, map_file, mafs=None,
subset=None, snpset=None):
'''
Calculate the proportion of times an allele is observed
    in the phenotype subset vs its allele frequency.
This is the penetrance of the allele
i.e. if observed in 100% of affected individuals and 0%
of controls, then penetrance is 100%
Generates a table of penetrances for each variants/allele
and a plot of MAF vs # cases carrying the allele
Generates a heatmap of compound heterozygotes, and homozygotes
with penetrances.
Outputs a table of SNPs, homozygote and heterozygote counts
    among subset individuals, and the proportion of the subset
phenotype explained by homozygotes and heterozygotes
Requires alleles are coded A1(minor)=1, A2=2
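    Examples
    --------
    A minimal sketch; file names are hypothetical and ``mafs`` points to a
    tab-separated table of MAFs such as that built by countByVariantAllele:

    >>> summary_df, pen_df = calcPenetrance(ped_file="study.ped",
    ...                                     map_file="study.map",
    ...                                     mafs="study_mafs.tsv",
    ...                                     subset="cases")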
'''
# check subset is set, if not then throw an error
# cannot calculate penetrance without a phenotype
if not subset:
raise ValueError("Cannot calculate penetrance of alleles "
"without a phenotype to subset in")
else:
pass
# parse the ped file - get the variant column headers from
# the map file - no headers with these files
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
if snpset:
with IOTools.openFile(snpset, "r") as sfile:
snps = sfile.readlines()
snps = [sx.rstrip("\n") for sx in snps]
variant_ids = [ks for ks in variants.keys() if ks in snps]
else:
variant_ids = variants.keys()
var_idx = [si for si, sj in enumerate(variant_ids)]
case_mat = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.float64)
all_mat = np.zeros((len(variant_ids), len(variant_ids)),
dtype=np.float64)
tcount = 0
ncases = 0
# missing phenotype individuals must be ignored, else
# they will cause the number of individuals explained
# to be underestimated
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
indiv = indiv.strip("\n")
indiv_split = indiv.split("\t")
fid = indiv_split[0]
iid = indiv_split[1]
mid = indiv_split[2]
pid = indiv_split[3]
gender = int(indiv_split[4])
phen = int(indiv_split[5])
if phen != -9:
if subset == "cases":
select = phen
elif subset == "gender":
select = gender
else:
select = None
genos = np.array(indiv_split[6:])
genos = genos[var_idx]
#genos = ["".join([alleles[i],
# alleles[i+1]]) for i in range(0, len(alleles), 2)]
tcount += 1
het = np.zeros(len(genos), dtype=np.float64)
hom = np.zeros(len(genos), dtype=np.float64)
for i in range(len(genos)):
# missing values are coded '00' in plink format
                    # A1 (minor allele) homozygotes are coded '11' in plink format
if genos[i] == "11":
hom[i] += 1
elif genos[i] == "12":
het[i] += 1
else:
pass
hom_mat = np.outer(hom, hom)
het_mat = np.outer(het, het)
homs = hom_mat.diagonal()
het_mat[np.diag_indices(len(genos))] = homs
gen_mat = het_mat
# separate matrix for subset
# reference is always level 2 for plink files,
# either cases or females
if select == 2:
case_mat += gen_mat
all_mat += gen_mat
ncases += 1
else:
all_mat += gen_mat
else:
pass
E.info("alleles counted over %i SNPs "
"and %i individuals, of which %i are "
"in the %s subset" % (len(genos), tcount, ncases, subset))
penetrance = np.divide(case_mat, all_mat)
# round for the sake of aesthetics
penetrance = np.round(penetrance, decimals=5)
pen_df = pd.DataFrame(penetrance, columns=variant_ids,
index=variant_ids)
pen_df = pen_df.fillna(0.0)
case_df = pd.DataFrame(case_mat, columns=variant_ids,
index=variant_ids)
all_df = pd.DataFrame(all_mat, columns=variant_ids,
index=variant_ids)
# plot heatmap of penetrances as percentages
indf = pen_df * 100
py2ri.activate()
# only plot penetrances > 0%
r_pen = py2ri.py2ri_pandasdataframe(indf)
r_cases = py2ri.py2ri_pandasdataframe(case_df)
r_all = py2ri.py2ri_pandasdataframe(all_df)
R.assign("pen.df", r_pen)
R.assign("case.df", r_cases)
R.assign("all.df", r_all)
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
# penetrances
E.info("plotting penetrance matrix")
R('''hmcol <- colorRampPalette(brewer.pal(9, "BuGn"))(100)''')
R('''rowpen <- pen.df[rowSums(pen.df) > 0,]''')
R('''colpen <- rowpen[,colSums(rowpen) > 0]''')
R('''png("%s/penetrance-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colpen), trace="none", col=hmcol,'''
'''dendrogram="none", Colv=colnames(colpen), key=FALSE, '''
'''Rowv=rownames(colpen), margins=c(10,10), cellnote=round(colpen),'''
'''notecol="white")''')
R('''dev.off()''')
E.info("plotting case counts matrix")
R('''rowcase <- case.df[rowSums(case.df) > 0,]''')
R('''colcase <- rowcase[,colSums(rowcase) > 0]''')
R('''png("%s/cases-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colcase), trace="none", col=rep("#F0F8FF", 100),'''
'''dendrogram="none", Colv=colnames(colcase), key=FALSE, '''
'''colsep=seq(1:length(colnames(colcase))), '''
'''rowsep=seq(1:length(rownames(colcase))),'''
'''Rowv=rownames(colcase), margins=c(10,10), cellnote=round(colcase),'''
'''notecol="black")''')
R('''dev.off()''')
E.info("plotting all individuals matrix")
R('''rowall <- all.df[rownames(colcase),]''')
R('''colall <- rowall[,colnames(colcase)]''')
R('''png("%s/all-matrix.png", width=720, height=720)''' % os.getcwd())
R('''heatmap.2(as.matrix(colall), trace="none", col=rep("#F0F8FF", 100),'''
'''dendrogram="none", Colv=colnames(colall), key=FALSE, '''
'''colsep=seq(1:length(colnames(colall))), '''
'''rowsep=seq(1:length(rownames(colall))), '''
'''Rowv=rownames(colall), margins=c(10,10), cellnote=round(colall),'''
'''notecol="black")''')
R('''dev.off()''')
# plot MAF vs homozygosity
maf_df = pd.read_table(mafs, sep="\t", header=0, index_col=0)
plot_df = pd.DataFrame(columns=["MAF"],
index=maf_df.index)
plot_df["MAF"] = maf_df["MAF"]
homs = case_mat.diagonal()
hom_series = pd.Series({x: y for x, y in zip(variant_ids,
homs)})
plot_df["explained_by_homozygotes"] = hom_series
plot_df["SNP"] = plot_df.index
plot_df.index = [ix for ix, iy in enumerate(plot_df.index)]
plotPenetrances(plotting_df=plot_df)
out_df = summaryPenetrance(maf_df=maf_df,
case_counts=case_mat,
variants=variant_ids,
n_cases=ncases,
n_total=tcount)
return out_df, pen_df
def summaryPenetrance(maf_df, case_counts,
variants, n_cases, n_total):
'''
Summarise genotype counts and proportion of cases explained
by the observed homozygotes and compound heterozygotes.
This is a function of the total population size and
population allele frequency - does this assume 100%
penetrance of each allele?
'''
# homozygous individuals are on the
# diagonal of the case_counts array
homozyg_cases = case_counts.diagonal()
homozyg_series = pd.Series({x: y for x, y in zip(variants,
homozyg_cases)})
# heterozygotes are on the off-diagonal elements
# get all off diagonal elements by setting diagonals to zero
# matrix is diagonal symmetric
np.fill_diagonal(case_counts, 0)
het_counts = np.sum(case_counts, axis=0)
het_series = pd.Series({x: y for x, y in zip(variants,
het_counts)})
out_df = pd.DataFrame(columns=["homozygote_cases",
"heterozygote_cases"],
index=maf_df.index)
out_df["MAF"] = maf_df["MAF"]
out_df["homozygote_cases"] = np.round(homozyg_series, 1)
out_df["expected_cases"] = np.round(((out_df["MAF"] ** 2) * n_total), 3)
out_df["heterozygote_cases"] = het_series
out_df["hom_prop_explained"] = np.round(homozyg_series/float(n_cases), 3)
out_df["het_prop_explained"] = np.round(het_series/float(n_cases), 3)
return out_df
def plotPenetrances(plotting_df):
'''
Plot the proportion of cases/phenotype explained by
individuals carrying allele vs. population allele frequency.
Generate final output summary table (should be in separate function)
'''
# only need to plot variants with MAF >= 0.01
low_frq = plotting_df["MAF"] < 0.01
hi_df = plotting_df[~low_frq]
# get into R and use ggplot for MAF vs homozygosity amongs cases
r_plot = py2ri.py2ri_pandasdataframe(hi_df)
R.assign("hom.df", r_plot)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''png("%s/penetrance-plot.png", height=720, width=720)''' % os.getcwd())
R('''pen_p <- ggplot(hom.df, aes(x=explained_by_homozygotes, y=MAF, colour=SNP)) + '''
'''geom_point(size=4) + theme_bw() + '''
'''geom_text(aes(label=explained_by_homozygotes),'''
'''colour="black",vjust=0.5, hjust=0.5) + '''
'''labs(x="Number of Red haired homozygotes", y="MAF") + '''
'''theme(axis.title=element_text(size=10, colour="black"))''')
R('''print(pen_p)''')
R('''dev.off()''')
def findDuplicateVariants(bim_file, take_last=False):
'''
identify variants with duplicate position and reference
alleles
'''
# count the number of lines first to get
# the necessary array sizes
E.info("getting number of variants")
    # count from zero so the index arrays are sized to the exact
    # number of variants in the .bim file
    lines = 0
with open(bim_file, "r") as bfile:
for line in bfile.readlines():
lines += 1
E.info("%i variants found" % lines)
# setup index arrays
var_array = np.empty(lines, dtype=object)
ref_alleles = np.empty(lines, dtype=object)
pos_array = np.zeros(lines, dtype=np.int64)
minor_alleles = np.empty(lines, dtype=object)
idx = 0
# find duplicates on position
with open(bim_file, "r") as bfile:
for line in bfile.readlines():
line = line.rstrip("\n")
varline = line.split("\t")
var = varline[1]
pos = int(varline[3])
ref_allele = varline[-1]
minor_allele = varline[-2]
var_array[idx] = var
ref_alleles[idx] = ref_allele
minor_alleles[idx] = minor_allele
pos_array[idx] = pos
idx += 1
# find duplicates using pandas series
pos_series = pd.Series(pos_array)
    dup_last = pos_series[pos_series.duplicated(keep="last")]
    dup_first = pos_series[pos_series.duplicated(keep="first")]
var_series = pd.Series(var_array)
ref_series = pd.Series(ref_alleles)
alt_series = pd.Series(minor_alleles)
# a few variants have duplicate IDs - count these as duplicates
# and add to the exclusion list - these won't be identified
# based on shared position necessarily - force add them
    ref_first = ref_series[ref_series.duplicated(keep="first")]
    ref_last = ref_series[ref_series.duplicated(keep="last")]
ref_dups = set(ref_first.index).union(ref_last.index)
# union of take first and take last
dup_all = set(dup_last.index).union(set(dup_first.index))
dup_complete = dup_all.union(ref_dups)
dup_idx = np.array([sx for sx in dup_complete])
dup_idx.sort()
# make a dataframe to hold all triallelic and duplicate variants
dup_dict = {"SNP": var_series[dup_idx],
"BP": pos_series[dup_idx],
"REF": ref_series[dup_idx],
"VAR": alt_series[dup_idx]}
dup_df = pd.DataFrame(dup_dict)
# some variants may have more than one ID/entry
# step through using pandas groupby - group on position
E.info("looking for duplicates and triallelic variants")
tri_alleles = []
dups_alleles = []
overlap_vars = []
for names, groups in dup_df.groupby(["BP"]):
# if there is only one reference allele, indicates a
# triallelic variant, otherwise its probably a duplicate
# or overlaping INDEL and SNV
var_lens = groups["VAR"].apply(len)
if groups.shape[0] == 1:
pass
elif np.mean(var_lens) > 1:
# probably overlapping variants, exclude, but report
# separately
over_vars = groups["SNP"].values.tolist()
for ovs in over_vars:
overlap_vars.append(ovs)
elif len(set(groups["REF"])) == 1:
tri_vars = groups["SNP"].values.tolist()
for tri in tri_vars:
tri_alleles.append(tri)
else:
dup_vars = groups["SNP"].values.tolist()
for dup in dup_vars:
dups_alleles.append(dup)
E.info("%i triallelic variants found" % len(tri_alleles))
E.info("%i duplicate position variants found" % len(dups_alleles))
E.info("%i overlapping SNVs and INDELs found" % len(overlap_vars))
return dups_alleles, tri_alleles, overlap_vars
def flagExcessHets(hets_file, plot=True, plot_path=None):
'''
Take output from Plink 1.9 --het command
calculate heterozygosity rate and flag individuals
with heterozygosity > 3 s.d. from the mean
value.
This assumes all individuals are from the same
population, and thus form a homogenous cluster,
with only outliers at the extremes.
Visualise the data, if there are multiple apparent
clusters then filter for ethnicity/ancestry first
'''
if hets_file.endswith("gz"):
compression = "gzip"
else:
compression = None
het_df = pd.read_table(hets_file, header=0, index_col=None,
sep="\t", compression=compression)
nmiss = pd.Series(het_df.loc[:, "N(NM)"], dtype=np.float64)
nhoms = het_df.loc[:, "O(HOM)"]
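    # heterozygosity rate = (non-missing genotypes - observed homozygotes) /
    # non-missing genotypes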
het_df["het_rate"] = (nmiss - nhoms) / nmiss
# get mean value and std, set upper and lower thresholds
mean_het = np.mean(het_df.loc[:, "het_rate"].values)
sd_het = np.std(het_df.loc[:, "het_rate"].values)
upper = mean_het + (3 * sd_het)
lower = mean_het - (3 * sd_het)
hi_hets = het_df[het_df["het_rate"] > upper]
lo_hets = het_df[het_df["het_rate"] < lower]
E.info("%i individuals with high heterozygosity" % len(hi_hets))
E.info("%i individuals with low heterozygosity" % len(lo_hets))
hi_hets["exclude"] = "high_heterozygosity"
lo_hets["exclude"] = "low_heterozygosity"
all_flags = lo_hets.append(hi_hets)
if plot:
E.info("plotting heterozygosity rate distribution")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(het_df)
R.assign("het.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(het.df, aes(het_rate)) + '''
'''geom_histogram() + '''
'''labs(title="Distribution of heterozygosity rate") + '''
'''theme_bw() + '''
'''geom_vline(xintercept=c(%0.3f, %0.3f), '''
'''linetype=2, col="#838B83")''' % (lower, upper))
R('''png("%s/het_rate-hist.png")''' % plot_path)
R('''print(p)''')
R('''dev.off()''')
return all_flags
def flagGender(gender_file, plot=True, plot_path=None):
'''
Parse the .sexcheck output report from Plink
--sex-check and flag gender discordant individuals.
Arguments
---------
gender_file: string
the .sexcheck output report file from Plink --sex-check
plot: boolean
generate a histogram of F values distributions showing male and
female clusters, split by reported gender
plot_path: string
PATH to save F coefficient histogram
Returns
-------
discords: pandas.Core.DataFrame
a pandas dataframe of individuals that are gender discordant
'''
gender_df = pd.read_table(gender_file, header=0,
index_col=None, sep=None)
genders = lambda x: "male" if x == 1 else "female"
gender_df["GENDER"] = gender_df["PEDSEX"].apply(genders)
E.info("checking individuals for discordance")
discords = gender_df[gender_df["STATUS"] != "OK"]
discords.drop(labels=["PEDSEX", "SNPSEX", "STATUS", "F",
"GENDER"],
axis=1, inplace=True)
E.info("%i individuals with discordant gender" % len(discords))
if plot:
E.info("plotting F gender coefficient distributions")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(gender_df)
R.assign("gender.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(gender.df, aes(F, fill=GENDER)) + '''
'''geom_histogram() + '''
'''labs(title="F coefficient distributions for gender") + '''
'''theme_bw() + facet_grid(. ~ GENDER)''')
R('''png("%s/gender_check-hist.png")''' % plot_path)
R('''print(p)''')
R('''dev.off()''')
else:
pass
return discords
def _compare_ibds(ibd_entry, threshold=0.03125):
'''
Just for internal use in `flagRelated` function.
To compare IBD estimates and flag up related
individuals
Arguments
---------
ibd_entry: pandas.Core.Series
a single line entry from an IBD estimates
file
threshold: float
the threshold at which to flag an individual as related
Returns
-------
flag: boolean
True if related, else false
'''
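    # 0.03125 = 2 ** -5, the expected genome-wide proportion shared IBD
    # between distantly related (cousin-level) pairs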
if ibd_entry["PI_HAT"] < threshold:
return False
else:
return True
def flagRelated(ibd_file, chunk_size=None,
threshold=0.03125, plot=True,
plotting_path=None):
'''
Use IBS estimates to find pairs of related individuals
above a threshold.
This will also flag up the number of duplicated/monozygotic
twin pairs (matrix diagonals).
Arguments
---------
ibd_file: string
file containing IBS estimates between pairs from Plink
or GCTA.
chunk_size: int
the file chunk size to read in at a time, should correspond
to the number of individuals. If not set, the whole file
is read in. Not recommend for large (>2GB) files.
threshold: float
IBS threshold, above which individuals will be flagged
as related. Default is 3rd cousins.
plot: boolean
generate a histogram of the distribution of IBS values.
Default = True
plotting_path: string
PATH to plot histogram to
Returns
-------
flagged: pandas.Core.DataFrame
dataframe of individuals to remove, with the estimated
relationship to another individual.
'''
# need to make this faster
# sequentially add new IDs only
related_list = []
ibds = []
    if ibd_file.endswith("gz"):
        comp = "gzip"
    else:
        comp = None
E.info("reading file in chunks of %i lines" % chunk_size)
if chunk_size:
# read in and operate on chunks
df_iter = pd.read_table(ibd_file, header=0, index_col=None,
delim_whitespace=True, compression=comp,
chunksize=chunk_size)
count = 0
for chunk in df_iter:
count += 1
entrys = chunk[["FID1", "IID1",
"FID2", "IID2",
"PI_HAT"]]
ibds.append(entrys)
relate_mask = entrys.apply(_compare_ibds, axis=1)
related = entrys[relate_mask]
E.info("%i relations found" % len(related))
related_list.append(related)
else:
pass
df = pd.concat(ibds, axis=0, keys=None)
if plot:
# for lots of observations, plot log counts
E.info("plotting pair-wise IBD distribution")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(df)
R.assign("relate.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(relate.df, aes(PI_HAT+0.5)) + '''
'''geom_histogram(binwidth=0.01) + '''
'''labs(title="Proportion of IBD shared distribution") + '''
'''theme_bw() + scale_y_log10() + '''
'''geom_vline(xintercept=%(threshold)f, '''
'''linetype=4, colour="#838B83")''' % locals())
R('''png("%s/IBD-hist.png")''' % plotting_path)
R('''print(p)''')
R('''dev.off()''')
else:
pass
return related_list
def flagInbred(inbred_file, inbreeding_coefficient,
ibc_threshold=0.05,
plot=True, plot_path=None):
'''
Use Plink or GCTA's estimate of F, inbreeding coefficient
to flag individuals that are highly inbred.
Arguments
---------
inbred_file: string
file containing estimates of F
inbreeding_coefficient: string
coefficient to use to identify inbred individuals. This name
should correspond to one of the columns in `inbred_file`.
ibc_threshold: float
the threshold above which individuals will be flagged as inbred
plot: boolean
generate a histogram of the distribution of F coefficients
plotting_path: string
PATH to directoru for plotting F coefficient distribution
Returns
-------
inbreds: padas.Core.DataFrame
dataframe of inbred individuals to exclude from analysis
'''
inbreed_df = pd.read_table(inbred_file, header=0,
index_col=None, sep="\t")
E.info("Identifing individuals with inbreeding coefficient"
" greater than %0.3f" % ibc_threshold)
inbreds = inbreed_df[inbreed_df[inbreeding_coefficient] > ibc_threshold]
inbreds = inbreds[["FID", "IID"]]
E.info("%i individuals with high inbreeding "
"coefficient" % len(inbreds))
if plot:
E.info("plotting F coefficient distributions")
py2ri.activate()
r_df = py2ri.py2ri_pandasdataframe(inbreed_df)
R.assign("inbreed.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p <- ggplot(inbreed.df, aes(%(inbreeding_coefficient)s)) + '''
'''geom_histogram(binwidth=0.01) + '''
'''labs(title="Inbreeding coefficient, %(inbreeding_coefficient)s,'''
'''distribution") + theme_bw() + '''
'''geom_vline(xintercept=%(ibc_threshold)0.3f, '''
'''linetype=4, colour="#838B83")''' % locals())
R('''png("%s/inbreeding-hist.png")''' % plot_path)
R('''print(p)''')
R('''dev.off()''')
else:
pass
return inbreds
def mergeQcExclusions(hets_file=None, inbred_file=None,
related_file=None, gender_file=None,
mask_file=None):
'''
Merge sets of excluded individuals into a single file for
downstream analysis, processing, etc
Arguments
---------
hets_file: string
file containing individuals to remove due to excessive or
reduced heterozygosity
inbred_file: string
file of individuals highly related to themselves for
exclusion
related_file: string
file of IDs of individuals pruned due to greater relatedness
than an arbitrary threshold
gender_file: string
individuals with discordant reported vs. genetic gender
mask_file: string
individuals to be excluded from analyses, unrelated
for reasons to QC (i.e. mask out category of individuals)
Returns
-------
exclusions: pandas.Core.DataFrame
A dataframe of FID and IIDs of the unique set of excluded
individuals
'''
if hets_file:
hets_df = pd.read_table(hets_file, sep="\t",
header=0, index_col=None)
E.info("%i exclusions due to "
"heterozygosity deviation" % len(hets_df))
else:
hets_df = None
E.warn("No heterozygosity exclusion file")
if inbred_file:
inbred_df = pd.read_table(inbred_file, sep="\t",
header=0, index_col=None)
E.info("%i exclusions due "
"to consanguinuity" % len(inbred_df))
else:
inbred_df = None
E.warn("No inbred exclusions")
if related_file:
related_df = pd.read_table(related_file, delim_whitespace=True,
header=None, index_col=None)
related_df.columns = ["FID", "IID"]
E.info("%i individuals excluded due "
"to high relatedness" % len(related_df))
else:
related_df = None
E.warn("No individuals excluded on relatedness")
if gender_file:
gender_df = pd.read_table(gender_file, sep="\t",
header=0, index_col=None)
E.info("%i individuals with discordant "
"gender recorded" % len(gender_df))
else:
gender_df = None
E.warn("No individuals exclued with "
"discordant gender")
if mask_file:
mask_df = pd.read_table(mask_file, sep="\t",
header=None, index_col=None)
E.info("%i individuals to be excluded "
"for additional reasons" % len(gender_df))
mask_df.columns = ["FID", "IID"]
else:
mask_df = None
df_list = [hets_df, inbred_df, related_df, gender_df,
mask_df]
    # at least one QC exclusion file must be supplied
    if all(x is None for x in df_list):
        raise ValueError("no QC files detected - do some QC!!")
else:
pass
# assume all df have FID and IID columns
real_df = [x for x in df_list if x is not None]
real_df = [x[["FID", "IID"]] for x in real_df]
full_df = pd.concat(real_df, keys=None, axis=0)
    exclusions = full_df.drop_duplicates(subset=["FID",
                                                 "IID"],
                                         keep="last",
                                         inplace=False)
return exclusions
def selectLdFromTabix(ld_dir, chromosome, snp_pos,
ld_threshold=0.01):
'''
Select all LD values from a tabix indexed BGZIP
file of LD. Assumes Plink format.
Arguments
---------
ld_dir: string
path to directory containing LD data
chromosome: string
chromosome of SNP to pull out LD values
assumes chrN format
snp_pos: int
bp mapping position of the SNP on the same
genome build as the LD was calculated
ld_threshold: float
minimum LD value to return
Returns
-------
ld_df: pandas.Core.DataFrame
Pandas dataframe containing LD values over
target range.
'''
tab_dir = [td for td in os.listdir(ld_dir) if re.search(".bgz$", td)]
contig = int(chromosome.lstrip("chr"))
start = snp_pos
end = snp_pos
tab_query = """
tabix %(ld_dir)s/%(tab_indx)s %(contig)i:%(start)i-%(end)i |
awk '{if($7 >= %(ld_threshold)s) print $0}'"""
tab_indx = [tx for tx in tab_dir if re.search(chromosome,
tx)][-1]
E.info("Retrieving LD values at bp: %i" % snp_pos)
proc = subprocess.Popen(tab_query % locals(),
shell=True,
stdout=subprocess.PIPE)
ld_dict = {}
count = 0
for line in proc.stdout:
snp_dict = {}
parse = line.split("\t")
snp_dict["CHR_A"] = int(parse[0])
snp_dict["BP_A"] = int(parse[1])
snp_dict["SNP_A"] = parse[2]
snp_dict["CHR_B"] = int(parse[3])
snp_dict["BP_B"] = int(parse[4])
snp_dict["SNP_B"] = parse[5]
snp_dict["R2"] = float(parse[6])
snp_dict["DP"] = float(parse[7])
count += 1
ld_dict[count] = snp_dict
ld_df = pd.DataFrame(ld_dict).T
# ld Dataframe may be empty, return
# empty dataframe
try:
ld_df.index = ld_df["SNP_B"]
ld_df.drop_duplicates(subset="SNP_B",
keep="last",
inplace=True)
except KeyError:
E.info("No SNPs detected in LD "
"with r^2 > {}".format(ld_threshold))
ld_df = pd.DataFrame(0.0,
index=[snp_pos],
columns=["SNP_A",
"R2"])
return ld_df
def selectLdFromDB(database, table_name,
index_snp,
index_label=None,
ld_threshold=None):
'''
Select LD values from an SQL table over
a specific range. Large regions will consume
large memory and queries may take several
minutes to complete.
Arguments
---------
database: sql.connection
An SQL database connection to the DB
containing the LD values
table_name: string
The table to query containing LD information
index_snp: string
SNP ID to select LD values from the SQL
database on
index_label: str
Column label in SQL database to use as the
index in the output dataframe
ld_threshold: float
minimum LD value to return
Returns
-------
ld_df: pandas.Core.DataFrame
Pandas dataframe containing LD values over
target range.
'''
# UTF-8 codec struggles to decode ';' in some columns
database.text_factory = str
if ld_threshold:
state = '''
select SNP_A,SNP_B,R2 FROM %s where %s = "%s" AND
R2 > %0.3f;
''' % (table_name, index_label,
index_snp, ld_threshold)
else:
state = '''
select SNP_A,SNP_B,R2 FROM %s where %s = "%s";
''' % (table_name, index_label, index_snp)
ld_df = pdsql.read_sql(sql=state, con=database,
index_col=index_label)
return ld_df
def calcLdScores(ld_table, snps,
scale=False):
'''
Calculate the LD scores for SNPs across a chromosome,
stored in a SQL database.
Arguments
---------
ld_table: pandas.Core.DataFrame
Pandas dataframe in table format containing LD
values between SNPs. Columns are `SNP_A`, `SNP_B`
and `R2`.
snps: list
the snps over which to calculate LD scores
scale: bool
Whether to scale LD score by the number of SNPs
used to calculate the score. Useful if used
as a weighting for other SNP scores.
Returns
-------
ld_scores: float
LD scores for each SNP
'''
if len(ld_table) > 0:
ld_score = sum(ld_table["R2"])
else:
ld_score = 0
if scale:
ld_scores = ld_score/len(ld_table)
else:
ld_scores = ld_score
return ld_scores
def calcWeightedEffects(gwas_results, snps, calc_se=True,
scale=False):
'''
Calculate the standard error weighted effect sizes score
for each SNP:
score = sum(ln(OR) * se)
Arguments
---------
gwas_results: pandas.Core.DataFrame
A dataframe of the results from a genome_wide association
study. Assumes SNP IDs are the index column.
snps: list
the snps over which to calculate the total weighted
effect size score.
calc_se: boolean
Calculate the standard error from the p-values and
effect sizes:
SE = ln(OR)/Z
Z = -0.862 + sqrt(0.743 - 2.404 * ln(P))
scale: boolean
Scale the sum of standard error weighted effect sizes
by the number of SNPs
Returns
-------
es_score: float
sum of SE weighted effect sizes
'''
# calculate standard error of effect size based on
# p-value and effect size
if calc_se:
        # p-values that = 0 are set to the smallest positive floating
        # point value instead (np.finfo(...).tiny); using .min would give
        # a negative value and break the log transform
        gwas_results["P"][gwas_results["P"] == 0] = np.finfo(np.float64).tiny
z_func = lambda x: - 0.862 + sqrt(0.743 - 2.404 * np.log(x))
gwas_results["Z"] = gwas_results["P"].apply(z_func)
gwas_results["SE"] = abs(np.log(gwas_results["OR"])/gwas_results["Z"])
else:
E.warn("Standard errors have not been calculated, please "
"make sure they exist in this results table")
es_score = sum((abs(np.log(gwas_results["OR"])) * gwas_results["SE"]).fillna(0))
if scale and len(gwas_results):
return es_score/len(gwas_results)
else:
return es_score
def snpPriorityScore(gwas_results, chromosome, ld_dir=None,
clean=True, database=None, table_name=None):
'''
Generate SNP scores based on the amount of genetic variation
they capture and the sum of the weighted effect sizes for
the trait of interest.
This score can then be integrated with a score based on
the overlap with functional annotation features
of interest.
Arguments
---------
gwas_results: string
Results from a GWAS, assumed to be in Plink format.
ld_dir: string
directory containing tabix index LD files from Plink
database: string
Path to an SQL database containing LD values in
table format
table_name: string
Specific table, often referring to a specific
chromosome, that contains LD values with columns
SNP_A, SNP_B, BP_A, BP_B and R2.
chromosome: string
A chromosome to select from the gwas_results
file.
clean: boolean
Whether the results table has been pre-cleaned to
remove results not relevant to SNPs. e.g. if
covariates had been included in the regression
model these should be removed.
Returns
-------
SNP_scores: pd.Core.DataFrame
A pandas dataframe of LDscores, weight effect size
scores and SNP priority score.
'''
E.info("Reading association results from %s" % gwas_results)
    # the results table only needs to be read once; the separator depends
    # on whether the table has been pre-cleaned
    if clean:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\t", header=0)
else:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\s*", header=0)
gwas_df = gwas_df[gwas_df["TEST"] == "ADD"]
gwas_df.index = gwas_df["SNP"]
# in order to reduce the computational load it is
# necessary to break up the SNPs into regions.
# The logical way would be to use recombination
# hotspots, however, this will still leave
# some very large windows
# Use a moving window over the chromosome of
# ~250Kb, with 25kb overlap.
chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)]
# duplicates cause selection of individual SNPs
# to break - why are there still duplicates??
chr_df.drop_duplicates(subset="BP", keep="last",
inplace=True)
priority_list = []
ld_scores = {}
es_scores = {}
priority_scores = {}
snp_set = chr_df.index
if database:
dbh = sql.connect(database)
else:
pass
# iterate over SNPs
for snp in snp_set:
if database:
ld_values = selectLdFromDB(dbh,
table_name=table_name,
index_snp=snp,
index_label="SNP_B")
elif ld_dir:
snp_pos = int(chr_df.loc[snp, "BP"])
ld_values = selectLdFromTabix(ld_dir=ld_dir,
chromosome=chromosome,
snp_pos=snp_pos)
        ldsnps = ld_values.loc[:, "SNP_A"].values
ldsnps = {sx for sx in ldsnps}
ldscore = calcLdScores(ld_table=ld_values,
snps=ldsnps,
scale=False)
ld_scores[snp] = ldscore
try:
gwas_results = chr_df.loc[ldsnps]
escore = calcWeightedEffects(gwas_results=gwas_results,
snps=ldsnps,
calc_se=True,
scale=True)
except KeyError:
gwas_results = chr_df.loc[snp]
if gwas_results["P"] == 0:
gwas_results["P"] = np.finfo(np.float64).min
else:
pass
z_func = lambda x: - 0.862 + sqrt(0.743 - 2.404 * np.log(x))
gwas_results["Z"] = z_func(gwas_results["P"])
gwas_results["SE"] = abs(np.log(gwas_results["OR"])/gwas_results["Z"])
escore = gwas_results["SE"] * abs(np.log(gwas_results["OR"]))
es_scores[snp] = escore
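        # the priority score combines the SE-weighted effect-size score of
        # the SNP's LD partners with the amount of local LD it tags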
weight = escore * ldscore
priority_scores[snp] = weight
SNP_scores = pd.DataFrame([pd.Series(ld_scores),
pd.Series(es_scores),
pd.Series(priority_scores)]).T
SNP_scores.columns = ["LDScore", "WeightEffectSize", "PriorityScore"]
SNP_scores.sort_values(by="PriorityScore", inplace=True)
return SNP_scores
def fitPrior(value, distribution, dist_params):
'''
Fit a prior probability given a value,
distribution and distribution parameters.
You are responsible for defining the appropriate
distribution and parameters
Arguments
---------
Value: float
A value to calculate a prior probability from
distribution: string
A distribution from which to calculate a probability.
Current values are "normal", "t", "gamma",
"lognormal", "exponential".
dist_params: tuple
parameters to define the distribution,
* normal: (mean, std)
* t: (df, ncp)
* gamma: (k, theta)
* lognormal: (ln(mean), std)
* exponential: (lambda)
Returns
-------
prior: float
Prior probability attached to input value
'''
# distribution parameters should be passed
# explicitly
if distribution == "normal":
prior = stats.norm(*dist_params).pdf(value)
elif distribution == "t":
prior = stats.t(*dist_params).pdf(value)
elif distribution == "gamma":
prior = stats.gamma(*dist_params).pdf(value)
elif distribution == "lognormal":
prior = stats.lognorm(*dist_params).pdf(value)
elif distribution == "exponential":
prior = stats.expon(*dist_params).pdf(value)
else:
raise ValueError("Distrubtion %s not "
"implemented" % distribution)
return prior
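# Example (illustrative values only): the prior for an LD score of 1.2 under
# a normal distribution with mean 1.0 and standard deviation 0.5 would be
# fitPrior(value=1.2, distribution="normal", dist_params=(1.0, 0.5))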
def calcPriorsOnSnps(snp_list, distribution, params=None):
'''
Calculate prior probabilities on SNPs based
on a predefined value, a distribution and
parameters to describe the distribution.
This relies inherently on the correct and
appropriate distribution to be defined, i.e.
that it is conjugate to the marginal likelihood
distribution.
TO DO: introduce robust Bayesian modelling
Arguments
---------
snp_list: dict
SNPs with score/value attached to determine
the prior probability
distribution: string
the distribution from which to draw probabilities
params: tuple
parameters to describe the appropriate
distribution.
Returns
-------
prior_probs: dict
dictionary of priors for SNPs
'''
prior_probs = {}
# if there is no score for that SNP then use an
# uninformative or Jeffrey's prior
for snp in snp_list.keys():
if snp_list[snp] != 0:
prior_probs[snp] = fitPrior(value=snp_list[snp],
distribution=distribution,
dist_params=params)
else:
prior_probs[snp] = 0.5
return prior_probs
def estimateDistributionParameters(data,
distribution,
fscale=None,
floc=None,
**kwargs):
'''
Use maximum likelihood to estimate the parameters
of the defined distribution.
Arguments
---------
data: pd.Series/np.array
data used to estimate parameters from
distribution: string
distribution assumed to underlie the data
generation process
fscale: float
scale parameter of the distribution to fix
floc: float
location parameter of the distribution to
fix
**kwargs: float
additional kwargs to pass as fixed parameters
Returns
-------
est_params: tuple
estimated distribution parameters
'''
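    # scipy's .fit() returns any shape parameters first, followed by loc
    # and scale; the tuples below are passed back positionally to the
    # matching frozen distribution in fitPrior, so the order must be kept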
if distribution == "normal":
mu, sigma = stats.norm.fit(data)
est_params = (mu, sigma,)
elif distribution == "t":
df, mu, sigma = stats.t.fit(data)
est_params = (df, mu, sigma,)
elif distribution == "gamma":
k, theta, mu = stats.gamma.fit(data)
est_params = (k, theta, mu,)
elif distribution == "lognormal":
exp_mu, sigma, theta = stats.lognorm.fit(data)
est_params = (exp_mu, sigma, theta,)
elif distribution == "exponential":
beta, lambda_x = stats.expon.fit(data)
est_params = (beta, lambda_x,)
else:
raise ValueError("Distrubtion %s not "
"implemented" % distribution)
return est_params
def calculatePicsValues(snp_id, index_log10p, ld_values,
priors=None, k=2):
'''
Use the PICS method to assign probability to SNPs as
being causal for association signals at a locus,
given the strength of their association (log10 P-value),
and linkage disequilbrium with the lead SNP (smallest
p-value at the locus).
This method allows the prioritisation of SNPs, including
those where there are multiple independent signals. It
requires that these independent signals are however
input as separate SNPs.
NB::
Fahr et al initially use k=6.4 based on their observation
that altering k=[6,8] does not have an appreciable impact
on the PICS values. However, when comparing this
implementation to the PICS webserver, k=2 gives more
similar values based on the current Phase3 1000 Genomes
LD values and SNPs.
Arguments
---------
snp_id: string
rs ID of the lead SNP from the associated region/
independent signal of association
index_log10p: float
the negative log10(p-value) of association with
the phenotype of interest
ld_values: pd.Core.DataFrame
A pandas dataframe of LD values between the index SNP
and all other SNPs given an arbitrary threshold. The
expected columns are index SNP, SNP of interest, r^2.
priors: dict
the prior value to attach to each SNP. Can be used to
integrate functional information into the PICS
calculations. EXPERIMENTAL
k: float
The power to raise the correlation of alleles to. When
k=2, this scales the standard deviation of the sample
distribution for the marignal likelihood by the residual
LD. Increasing k downweights the LD difference between
the index SNP and SNP of interest.
Returns
-------
PICS: pandas.Core.Series
A pandas series of SNPs and calculated PICS scores.
'''
# assume the SNPs of interest are all contained in the
# ld_values table index
top_p = stats.norm(index_log10p,
sqrt(index_log10p)/2).cdf(index_log10p)
prob_dict = {}
prob_dict[snp_id] = top_p
E.info("calculating scores for %i SNPs" % len(ld_values))
# If a SNP is in perfect LD with the index SNP this forces
# the standard deviation to be 0, add a small correction
# to allow the calculation of marginal likelihood value
# e.g. 0.0001
for snp in ld_values.index:
try:
r2 = ld_values.loc[snp]["R2"]
r = sqrt(r2)
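            # under PICS the expected -log10(p) of a SNP in LD with the
            # index SNP is r^2 * the index -log10(p), with a standard
            # deviation that shrinks as LD with the index SNP increases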
mu = r2 * index_log10p
sigma = sqrt(1 - (r ** k)) * (sqrt(index_log10p)/2)
if sigma == 0:
sigma = 0.0001
else:
pass
# use log likelihoods, these are more numerically
# stable and avoid the multiplication of very small
# numbers
# if priors are not set, force uninformative prior
# i.e. if not conjugate with likelihood
likelihood = np.log(stats.norm(mu, sigma).pdf(index_log10p))
try:
prior = np.log(priors[snp])
            # priors may be None or missing this SNP; fall back to a
            # flat (uninformative) prior
            except (KeyError, TypeError):
prior = np.log(1.0)
prob_dict[snp] = np.exp(likelihood + prior)
except KeyError:
E.warn("SNP %s not found in LD with %s" % (snp,
snp_id))
# calculate normalized probabilities, where sum of all probs=1
# use numpy sum to handle NaN values
sum_probs = np.sum(prob_dict.values())
pics_dict = {}
for snp_p in prob_dict.keys():
pics_dict[snp_p] = prob_dict[snp_p]/sum_probs
pics_series = pd.Series(pics_dict)
PICS = pics_series.sort_values(ascending=False)
return PICS
def getLdValues(database, table_name, index_snp, ld_threshold=0.5):
'''
Get all LD values for the index SNP above a given r^2
threshold
Arguments
---------
database: sql.connection
An SQL database connection to the DB
containing the LD values
table_name: string
The table to query containing LD information
index_snp: string
SNP ID to select LD values from the SQL
database on
ld_threshold: float
a threshold above which to select LD values
with the index SNP
Returns
-------
ld_df: pandas.Core.DataFrame
Pandas dataframe containing LD values over
target range.
'''
E.info("executing SQL query on table: %s" % table_name)
ld_a = selectLdFromDB(database=database,
table_name=table_name,
index_snp=index_snp,
index_label="SNP_B",
ld_threshold=ld_threshold)
ld_a.columns = ["SNP", "R2"]
ld_b = selectLdFromDB(database=database,
table_name=table_name,
index_snp=index_snp,
index_label="SNP_A",
ld_threshold=ld_threshold)
ld_b.columns = ["SNP", "R2"]
ld_df = ld_a.append(ld_b)
ld_df.index = ld_df["SNP"]
# drop duplicate indices
ld_df.drop_duplicates(subset="SNP",
keep="last",
inplace=True)
E.info("%i records found matching query" % len(ld_df))
return ld_df
def PICSscore(gwas_results, chromosome, database=None,
table_name=None, priors=None, clean=True,
ld_threshold=0.5, ld_dir=None):
'''
Prioritise SNPs based on the conditional probability
of being the causal SNP at an associated region given
the strength of association and LD with surrounding SNPs.
Originally described in::
Fahr et al Nature 518 (2015) pp337
The current implementation does not allow the integration
of a prior probability - this will come in the future.
Arguments
---------
gwas_results: string
Results from a GWAS, assumed to be in Plink format.
ld_dir: string
directory containing tabix index LD files from Plink
database: string
Path to an SQL database containing LD values in
table format
table_name: string
Specific table, often referring to a specific
chromosome, that contains LD values with columns
SNP_A, SNP_B, BP_A, BP_B and R2.
chromosome: string
A chromosome to select from the gwas_results
file.
priors: dict
the prior value to attach to each SNP. Can be used to
integrate functional information into the PICS
calculations. EXPERIMENTAL
clean: boolean
Whether the results table has been pre-cleaned to
remove results not relevant to SNPs. e.g. if
covariates had been included in the regression
model these should be removed.
ld_threshold: float
Threshold above which to select SNPs in LD
with the lead SNP
Returns
-------
PICS_scores: pd.Core.DataFrame
A pandas dataframe of PICS scores for SNPs.
'''
E.info("Reading association results from %s" % gwas_results)
if clean:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\t", header=0)
else:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\s*", header=0)
gwas_df = gwas_df[gwas_df["TEST"] == "ADD"]
gwas_df.index = gwas_df["SNP"]
E.info("subsetting data on chromosome %s" % chromosome)
chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)]
try:
chr_df.loc[:, "STAT"] = abs(chr_df["STAT"])
chr_df.sort_values(by="STAT", inplace=True, ascending=False)
except KeyError:
chr_df.sort_values(by="CHISQ", inplace=True, ascending=False)
chr_df.loc[:, "P"][chr_df["P"] == 0] = 1.79769e-308
chr_df["P"].fillna(1.0)
chr_df.loc[:, "log10P"] = np.log10(chr_df["P"])
index_snp = chr_df.iloc[0]["SNP"]
try:
indexp = -chr_df.iloc[0]["log10P"]
except KeyError:
indexp = -np.log10(chr_df.iloc[0]["P"])
E.info("index SNP is %s with -log10(p)= %0.3f" % (index_snp,
indexp))
if database:
dbh = sql.connect(database)
ld_values = getLdValues(database=dbh,
table_name=table_name,
index_snp=index_snp,
ld_threshold=ld_threshold)
elif ld_dir:
snp_pos = int(chr_df.loc[index_snp]["BP"])
ld_values = selectLdFromTabix(ld_dir=ld_dir,
chromosome=chromosome,
snp_pos=snp_pos)
PICS_scores = calculatePicsValues(snp_id=index_snp,
index_log10p=indexp,
ld_values=ld_values,
priors=priors,
k=2)
return PICS_scores
def LdRank(gwas_results, chromosome,
ld_dir=None, database=None,
table_name=None, ld_threshold=0.8,
top_snps=0.01, clean=True):
'''
Rank SNPs based on the LD with the lead SNP
from the association region. Take the top
N% SNPs as the SNP set.
Arguments
---------
gwas_results: string
Results from a GWAS, assumed to be in Plink format.
ld_dir: string
directory containing tabix index LD files from Plink
database: string
Path to an SQL database containing LD values in
table format
table_name: string
Specific table, often referring to a specific
chromosome, that contains LD values with columns
SNP_A, SNP_B, BP_A, BP_B and R2.
chromosome: string
A chromosome to select from the gwas_results
file.
ld_threshold: float
Threshold above which to select SNPs in LD
with the lead SNP
top_snps: float
% SNPs to select, ranked on LD with the lead
SNP
Returns
-------
'''
E.info("Reading association results from %s" % gwas_results)
    # read the results table once; the separator depends on pre-cleaning
    if clean:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\t", header=0)
else:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\s*", header=0)
gwas_df = gwas_df[gwas_df["TEST"] == "ADD"]
gwas_df.index = gwas_df["SNP"]
E.info("subsetting data on chromosome %s" % chromosome)
chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)]
try:
chr_df.loc[:, "STAT"] = abs(chr_df["STAT"])
chr_df.sort_values(by="STAT", inplace=True, ascending=False)
except KeyError:
chr_df.sort_values(by="CHISQ", inplace=True, ascending=False)
chr_df.loc[:, "P"][chr_df["P"] == 0] = 1.79769e-308
chr_df["P"].fillna(1.0)
chr_df.loc[:, "log10P"] = np.log10(chr_df["P"])
index_snp = chr_df.iloc[0]["SNP"]
if database:
dbh = sql.connect(database)
ld_values = getLdValues(database=dbh,
table_name=table_name,
index_snp=index_snp,
ld_threshold=ld_threshold)
elif ld_dir:
snp_pos = int(chr_df.loc[index_snp]["BP"])
ld_values = selectLdFromTabix(ld_dir=ld_dir,
chromosome=chromosome,
snp_pos=snp_pos)
# rank on LD with index SNP
E.info("sort and rank top %0.3f SNPs in "
"r2 > %0.3f with SNP %s" % (top_snps,
ld_threshold,
index_snp))
index_series = pd.DataFrame(
{"SNP": index_snp,
"R2": 1.00},
index=[index_snp])
if len(ld_values):
ld_values = ld_values.append(index_series)
else:
ld_values = index_series
ld_values.columns = ["SNP", "R2"]
ld_values.sort_values(by="R2", inplace=True,
ascending=False)
size = len(ld_values)
# use the math module ceil function to get
# smallest integer greater than or equal to
# the top %n SNPs
top = int(ceil(size * top_snps))
top_ld = ld_values.iloc[0:top,]
return top_ld
def calcApproxBayesFactor(log_or, standard_error,
prior_variance):
'''
Calculate the approximate Bayes Factor (ABF) from Wakefield
Am. J. Hum. Genet.(2015) for a SNP. The ABF is calculated
from the effect size (log OR), variance (Standard error ^2)
and a prior weight on the variance (W).
Arguments
---------
log_or: float
The natural logarithm of the odds ratio or the effect
size estimate on the observed scale.
standard_error: float
The standard error estimate on the effect size from
the appropriate regression model
prior_variance: float
A prior variance weighting to apply to the variance for
calculating the ABF.
Returns
-------
ABF: float
The calculated Approximate Bayes Factor
'''
# the variance on the MLE log OR is the squared standard error
variance = standard_error ** 2
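    # Wakefield ABF = sqrt((W + V) / V) * exp(-(Z^2 / 2) * W / (W + V)),
    # where V is the variance of the log OR, W the prior variance and
    # Z^2 = log_or^2 / V; it is assembled in the three steps below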
_top = sqrt((prior_variance + variance)/variance)
_exp_left = -((log_or ** 2)/variance)/2.0
_exp_right = prior_variance/(prior_variance + variance)
ABF = _top * exp(_exp_left * _exp_right)
return ABF
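# Example (hypothetical values): a SNP with log OR 0.25 and standard error
# 0.05 under the default prior variance of 0.04 would be scored as
# calcApproxBayesFactor(log_or=0.25, standard_error=0.05, prior_variance=0.04)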
def ABFScore(gwas_results, region_size, chromosome,
prior=None,
prior_variance=0.04, clean=True):
'''
Using approximate Bayes factors calculate the posterior
association signal for each variant. Credible intervals
will be constructed later.
Arguments
---------
gwas_results: string
Results from a GWAS, assumed to be in Plink format.
region_size: int
The region (in bp) by which to extend around the
association signal index SNP - taken as the
fine-mapping region. Region is index bp +/-
region_size/2
chromosome: string
A chromosome to select from the gwas_results
file.
prior: float
Prior probability NOT YET IMPLEMENTED
prior_variance: float
The variance prior that weights the standard error
clean: boolean
Whether the results table has been pre-cleaned to
remove results not relevant to SNPs. e.g. if
covariates had been included in the regression
model these should be removed.
Returns
-------
out_df: pandas.Core.DataFrame
All input SNPs in the fine-mapping interval with their
approximate Bayes Factors and posterior probabilities
'''
E.info("Reading association results from %s" % gwas_results)
try:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\s*", header=0)
except StopIteration:
gwas_df = pd.read_table(gwas_results, index_col=None,
sep="\t", header=0)
if clean:
pass
else:
gwas_df = gwas_df[gwas_df["TEST"] == "ADD"]
gwas_df.index = gwas_df["SNP"]
E.info("subsetting data on chromosome %s" % chromosome)
chr_df = gwas_df[gwas_df["CHR"] == int(chromosome)]
try:
try:
chr_df.loc[:, "STAT"] = abs(chr_df["STAT"])
chr_df.sort_values(by="STAT", inplace=True, ascending=False)
except KeyError:
chr_df.loc[:, "T"] = abs(chr_df["T"])
chr_df.sort_values(by="T", inplace=True, ascending=False)
except KeyError:
chr_df.sort_values(by="CHISQ", inplace=True, ascending=False)
# set p = 0 to minimum float value, ~1.79x10-308
chr_df.loc[:, "P"][chr_df["P"] == 0] = 1.79769e-308
chr_df["P"].fillna(1.0)
chr_df.loc[:, "log10P"] = np.log10(chr_df["P"])
# get the index SNP and calculate standard errors
# used to calculate the approximate Bayes factor
E.info("calculating standard errors from association "
"p-values")
index_snp = chr_df.iloc[0]["SNP"]
E.info("The lead SNP is {}".format(index_snp))
index_bp = chr_df.iloc[0]["BP"]
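    # approximate |Z| from the two-sided p-value and recover the standard
    # error of the log OR as ln(OR)/|Z|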
z_func = lambda x: - 0.862 + sqrt(0.743 - (2.404 * np.log(x)))
chr_df["Z"] = abs(chr_df["P"].apply(z_func))
chr_df["SE"] = np.log(chr_df["OR"])/abs(chr_df["Z"])
start = index_bp - region_size/2
end = index_bp + region_size/2
chr_df.index = chr_df["BP"]
E.info("Fine mapping region defined as %i - %i "
"on chromosome %i" % (start, end, int(chromosome)))
# subsetting on range will create many NaNs due to
# pandas broadcasting and filling in rows of DF
sig_df = chr_df.loc[range(start, end+1)]
sig_df.dropna(axis=0, how='all', inplace=True)
sig_df.drop_duplicates(subset="SNP", inplace=True)
sig_df.index = sig_df["SNP"]
# calculate the approximate bayes factor for
# each SNP
E.info("calculating approximate Bayes Factors")
bayes = {}
# test overriding the prior on the variance
# use the standard error on the medina log OR
med_logor = np.log(np.median(sig_df["OR"]))
std_logor = np.std(np.log(sig_df["OR"]))
prior_variance = std_logor/np.sqrt(sig_df.shape[0])
E.info("The prior variance for this fine-mapping"
" interval is {}, and the median log OR"
" is {:f}".format(prior_variance,
med_logor))
for snp in sig_df.index:
logor = np.log(sig_df.loc[snp]["OR"])
se = abs(sig_df.loc[snp]["SE"])
abf = calcApproxBayesFactor(log_or=logor,
standard_error=se,
prior_variance=prior_variance)
bayes[snp] = abf
sum_bayes = np.nansum(bayes.values())
# calculate posterior probabilities as the proportion
# of bayes factor/ sum all bayes factors
E.info("calculating posterior probabilities")
bayes_rank = pd.Series(bayes)
bayes_rank.sort_values(inplace=True, ascending=False)
bayes_rank = bayes_rank.fillna(0.0)
posteriors = bayes_rank/sum_bayes
posteriors.sort_values(ascending=False,
inplace=True)
# side effect - write all ABFs and Posteriors out to file
out_df = pd.DataFrame({"Posterior": posteriors,
"ApproxBayesFactor": bayes_rank,
"SNP": posteriors.index})
out_df.index = out_df["SNP"]
out_df.drop(["SNP"], axis=1, inplace=True)
    out_df.sort_values(by="Posterior", inplace=True, ascending=False)
index_bayes = out_df.loc[index_snp]["ApproxBayesFactor"]
index_p = sig_df.loc[index_snp]["log10P"]
index_or = sig_df.loc[index_snp]["OR"]
index_se = sig_df.loc[index_snp]["SE"]
E.info("Bayes factor for lead SNP {} is {}, "
"p-value {}, OR {} and SE {}".format(index_snp,
index_bayes,
index_p,
index_or,
index_se))
return out_df
def getSnpIds(snp_set):
'''
Parse a text file with SNP IDs,
one per row. Remove duplicates.
Arguments
---------
snp_set: string
file containing SNP IDs
Returns
-------
snp_list: set
set of unique SNP IDs
'''
E.info("Parsing SNP set IDs")
with IOTools.openFile(snp_set, "r") as sfile:
snps = [sn.split("\t")[0] for sn in sfile.readlines()]
snpset = set(snps)
snp_list = [s.rstrip("\n") for s in snpset]
return snp_list
def getEigenScores(eigen_dir, bim_file, snp_file):
'''
Extract Eigen scores from tabix-index files
for all SNPs in a provided .bim file
Arguments
---------
eigen_dir: string
PATH to directory containing eigen scores, with
suffix .tab.bgz
bim_file: string
plink .bim file containing SNP co-ordinates
and alleles - assumes minor allele is A2
snp_file: string
file containing SNP IDs, one per line
Returns
-------
snp_dict: dict
SNP eigen scores
'''
# setup a generic tabix query to reduce number
# of operations
tab_query = """
tabix %(eigen_dir)s/%(tab_indx)s %(contig)i:%(start)i-%(end)i |
awk '{if($4 == "%(A1)s") print $0}'
"""
tab_dir = [td for td in os.listdir(eigen_dir) if re.search(".bgz$", td)]
snp_list = getSnpIds(snp_file)
E.info("SNP set of %i SNPs" % len(snp_list))
snp_dict = {}
E.info("Parsing SNP co-ordinates")
# tried straightforward file parsing, took too long
# as average .bim file contains millions of lines
# read in chunks in to pandas DataFrame, return
# a generator
header = ["CHR", "SNP", "cM", "BP", "A1", "A2"]
file_iterator = pd.read_table(bim_file, sep="\t",
chunksize=50000,
header=None,
index_col=None,
names=header)
for dataframe in file_iterator:
dataframe.index = dataframe["SNP"]
try:
snp_frame = dataframe.loc[snp_list]
# not all SNPs will appear together in a chunk
# remove NA rows and duplicates
snp_frame.dropna(axis=0, how='all',
inplace=True)
snp_frame.drop_duplicates(subset="SNP", keep="last",
inplace=True)
snp_frame.loc[:, "CHR"] = snp_frame["CHR"].astype(np.int64)
contig = snp_frame["CHR"][0]
recontig = re.compile("chr%i" % contig)
tab_indx = [tx for tx in tab_dir if re.search(recontig,
tx)][-1]
# redefine float types as int for output
# prettify and reduce downstream bugs with assumed
# data types
snp_frame.loc[:, "BP"] = snp_frame["BP"].astype(np.int64)
for snp in snp_frame.index:
# open a process with query, process on the fly
A1 = snp_frame.loc[snp, "A1"]
A2 = snp_frame.loc[snp, "A2"]
start = snp_frame.loc[snp, "BP"]
end = start
proc = subprocess.Popen(tab_query % locals(),
shell=True,
stdout=subprocess.PIPE)
score_line = proc.stdout.readlines()
if len(score_line):
eigen_score = score_line[0].split("\t")[-1].rstrip("\n")
else:
eigen_score = np.nan
score_dict = {"CHR": contig,
"BP": start,
"A1": A1,
"A2": A2,
"SCORE": eigen_score}
snp_dict[snp] = score_dict
E.info("Eigen scores found for %i SNPs" % len(snp_dict))
except KeyError:
pass
return snp_dict
def getSNPs(map_file, snp_list):
'''
Given a SNP list with GWAS results,
extract the relevant index
Arguments
---------
map_file: string
plink format .map file with SNP positions
in same order as .ped file
snp_list: list
list of SNP rs IDs with GWAS results
Returns
-------
snp_index: dict
dict of SNP, indices key,value pairs to select
'''
# variant order in the map file matters, use an ordered dict
variants = collections.OrderedDict()
with open(map_file, "r") as mfile:
for snp in mfile.readlines():
attrs = snp.split("\t")
snpid = attrs[1]
variants[snpid] = {"chr": attrs[0],
"pos": attrs[-1].strip("\n")}
variant_ids = [vj for vi, vj in enumerate(variants.keys()) if vj in snp_list]
variant_idx = [i for i,j in enumerate(variants.keys()) if j in snp_list]
var_idx = dict(zip(variant_ids, variant_idx))
return var_idx
def flipRiskAlleles(snp_index, snp_results, genos):
'''
Given an OR of a SNP on a binary phenotype,
convert minor alleles to "risk" alleles, i.e.
where OR > 1, if not, then invert allele
Arguments
---------
snp_index: list
list of snp indices with GWAS results
snp_results: dict
snp:OR key, value pairs of SNPs and GWAS
results
genos: np.ndarray
array of genotypes in format "11", "12" or
"22" where 1 = minor allele, 2 = major allele.
Returns
-------
risk_genos: np.ndarray
Genotypes where all "1" alleles are risk alleles,
not major alleles.
'''
genarray = np.array(genos)
# find SNP alleles to flip
flip = []
for snp in snp_results.keys():
if snp_results[snp] < 1:
flip.append(snp_index[snp])
else:
pass
E.info("Flipped alleles: %i" % len(flip))
# swap alleles for SNPs where the minor (A1) allele
# is protective
# use intermediate values to avoid overwriting values
flip_array = genarray[:, flip]
np.place(flip_array, flip_array == "22", ["88"])
np.place(flip_array, flip_array == "11", ["99"])
np.place(flip_array, flip_array == "88", ["11"])
np.place(flip_array, flip_array == "99", ["22"])
genarray[:, flip] = flip_array
return genarray
def parsePed(ped_file, delim="\t", compound_geno=False):
'''
Parse a plink .ped file into a dataframe
Arguments
---------
ped_file: string
Path to a plink .ped file
delim: string
delimiter that separates columns
in ped_file
compound_geno: boolean
Whether alleles of genotype
are separated by a whitespace or not.
Returns
-------
ped_frame: pd.Core.DataFrame
pandas dataframe representation of
the ped_file. Genotypes are presented
as a numpy array.
'''
samples = []
# parse the ped file, return a dataframe
with open(ped_file, "r") as pfile:
for indiv in pfile.readlines():
ped_dict = {}
indiv = indiv.strip("\n")
indiv_split = indiv.split(delim)
ped_dict["FID"] = indiv_split[0]
ped_dict["IID"] = indiv_split[1]
ped_dict["SEX"] = int(indiv_split[4])
ped_dict["PHEN"] = int(indiv_split[5])
ped_dict["GENOS"] = np.array(indiv_split[6:])
samples.append(ped_dict)
ped_frame = pd.DataFrame(samples)
return ped_frame
def countRiskAlleles(ped_frame, snp_index, report, flag):
'''
Count the number of risk alleles per individual
and calculate the probability of the phenotype
Arguments
---------
ped_frame: pd.Core.DataFrame
Dataframe of SNP genotypes and phenotype information
snp_index: list
list of snp indices denoting which columns of
ped_frame are the relevant genotypes
report: string
either `cases_explained` - the proportion of cases
explained by risk allele carriage, or
`probability_phenotype` - the probability (frequency)
of the binary phenotype amongst all individuals given
the risk allele carriage
flag: boolean
output individuals explained by carriage of 2
risk alleles
Returns
-------
count_freq: np.ndarray
cumulative frequency array of #risk alleles
'''
    # the maximum number of risk alleles is 2 * number of SNPs, so the
    # count arrays need 2n + 1 bins (0..2n inclusive)
    case_freq = np.zeros(shape=len(snp_index)*2 + 1,
                         dtype=np.float64)
    cntrl_freq = np.zeros(shape=len(snp_index)*2 + 1,
                          dtype=np.float64)
# group by phenotype column
phen_groups = ped_frame.groupby(by="PHEN")
for name, group in phen_groups:
genos = group.loc[:,snp_index]
# convert to 0,1,2 coding for ease of counting
# treat 00 as missing/NA
genos.replace({"22": 0,
"12": 1,
"11": 2,
"00": np.nan},
inplace=True)
risk_sums = np.nansum(genos, axis=1)
for val in risk_sums:
if name == 1:
cntrl_freq[val] += 1
elif name == 2:
case_freq[val] += 1
if flag:
            explained = pd.DataFrame(risk_sums)
            explained.index = group["FID"]
            explained["IID"] = explained.index
            # the first column holds the risk allele counts, the second
            # the individual IDs
            explained.columns = ["riskAlleles", "IID"]
            explained = explained[explained["riskAlleles"] == 2.0]
explained.to_csv("/".join([os.getcwd(), "cases_explained.tsv"]),
sep="\t", index_label="FID")
else:
pass
if report == "cases_explained":
# express as the proportion of cases explained
cumulative = np.cumsum(case_freq)/np.nansum(case_freq)
freqs = case_freq/np.nansum(case_freq)
elif report == "probability_phenotype":
cumulative = np.cumsum(case_freq + cntrl_freq)/np.nansum(case_freq + cntrl_freq)
freqs = case_freq/(case_freq + cntrl_freq)
freqs[np.isnan(freqs)] = 0
E.info("Individuals with pheno 1: %i" % np.nansum(cntrl_freq))
E.info("Individuals with pheno 2: %i" % np.nansum(case_freq))
res_dict = {"freqs": freqs, "cumulative": cumulative,
"cases": case_freq,
"controls": cntrl_freq}
return res_dict
def plotRiskFrequency(bins, frequencies, savepath=None, ytitle=None):
'''
Generate a plot of #risk alleles vs.
P(binary phenotype).
Arguments
---------
bins: list
list of histogram bins, i.e. #risk
alleles
frequencies: list
list of frequencies of binary phenotype
corresponding to #risk allele bins
Returns
-------
None - plot is generated
'''
hist_df = pd.DataFrame({"bins": bins,
"freq": frequencies})
py2ri.activate()
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''suppressPackageStartupMessages(library(scales))''')
r_df = py2ri.py2ri_pandasdataframe(hist_df)
R.assign("hist.df", r_df)
R('''p_hist <- ggplot(hist.df, aes(x=bins, y=freq)) + '''
'''geom_point() + theme_bw() + '''
'''xlim(c(0, dim(hist.df)[1])) + ylim(c(0, 1)) + '''
'''labs(x="Number of Risk Alleles", '''
'''y="%(ytitle)s")''' % locals())
R('''png("%(savepath)s")''' % locals())
R('''print(p_hist)''')
R('''dev.off()''')
return hist_df
def makeCredibleSet(probs_file, credible_set=0.95, lead_snp_indx=2,
filename_sep="_", snp_column=0, probs_column=1):
'''
Construct an N% credible set from a list of
SNPs with posterior probabilities attached.
If the top SNP has posterior prob >= 80%, then just this SNP
will be output.
Otherwise the N% credible set is output.
In addition to the output credible set, this function
also outputs several pieces of important information for
the credible set:
* The lead SNP
* The SNP with the highest posterior probability, and whether
this is also the lead SNP
* The size of the credible set
Arguments:
----------
probs_file: string
Path to a file containing SNP IDs and probabilities. It
must have these two columns, any others are optional and
will be ignored
credible_set: float
percentage of posterior probability signal to capture in
the credible set
lead_snp_indx: int
0-based index of the lead SNP for the associated region.
Used in the output file name and summary information
filename_sep: string
single character delimiter in the filename that can be
used to extract the information, i.e. chromosome, position
and lead SNP.
snp_column: int
0-based column number in the input file containing SNP
IDs.
probs_column: int
1-based column number in the input file containing the
posterior probabilities
Returns:
--------
posterior_set: pandas.Core.DataFrame
data frame of the N% credible set containing SNP IDs and posterior
probabilities
'''
df = pd.read_table(probs_file,
index_col=None, sep="\t",
header=None)
prob_df = df.iloc[:, [snp_column, probs_column]]
prob_df.columns = ["SNP", "Posterior"]
# some files may have header, others may not
if prob_df.iloc[0, 0] == "SNP":
prob_df = prob_df.iloc[1:, :]
else:
pass
# check probabilities have been properly interpreted as floats
#prob_df["Posterior"].astype(np.float64)
prob_df.loc[:, "Posterior"] = pd.to_numeric(prob_df["Posterior"])
# need to allow for non-rs IDs. check length of split file name
# expectation is 4, if longer then 2-5 together
split_name = probs_file.split("/")[-1].split(filename_sep)
if len(split_name) > 4:
lead_snp = filename_sep.join(split_name[lead_snp_indx:-1])
else:
lead_snp = split_name[lead_snp_indx]
E.info("Lead SNP is {}".format(lead_snp))
# sort by posterior signal then create credible set
    prob_df.sort_values(by="Posterior", inplace=True,
                        ascending=False)
top_snp = prob_df.iloc[0, 0]
top_prob = prob_df.iloc[0, 1]
E.info("Top posterior signal SNP is {} with P = {}".format(top_snp,
top_prob))
if top_snp == lead_snp:
E.info("Lead SNP is the same as top posterior signal SNP")
else:
pass
# often if the top SNP posterior probability is >= 80%
# the remaining variants have extremely small probs
# in that case we're only practically interested in the
# top variant
if top_prob >= 0.8:
posterior_set = prob_df[:1]
posterior_set.index = posterior_set.loc[:, "SNP"]
posterior_set.drop(["SNP"], inplace=True, axis=1)
E.info("Size of {}% credible set: 1".format(credible_set * 100))
else:
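        # accumulate SNPs in descending order of posterior probability
        # until the requested proportion of the total signal is captured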
set_indx = []
prob_set = 0.0
for ix in range(len(prob_df.index)):
prob_set += prob_df.iloc[ix, 1]
set_indx.append(ix)
if prob_set >= credible_set:
break
else:
continue
posterior_set = prob_df.iloc[set_indx]
posterior_set.index = posterior_set.iloc[:, 0]
posterior_set = pd.DataFrame(posterior_set.iloc[:, 1])
posterior_set.columns = ["Posterior"]
E.info("Size of {}% credible set: {}".format(credible_set * 100,
posterior_set.shape[0]))
return posterior_set
def summariseResults(file_list):
'''
Take a list of files from SNP prioritsation
and collate into a single table
Arguments
---------
file_list: list
list container of input files to collate
into the results table. File names are expected
to follow the format:
<contig>_<position>_<lead_snp>_<method>.tsv
Returns:
--------
summary_table: pandas.Core.DataFrame
pandas dataframe with columns:
* lead SNP
* credible set size
* top set SNP
* top set SNP probability
* chromosome
* lead SNP position
'''
# extract info from file name
# read in file as temporary dataframe
# extract info into a dictionary
# convert dict into a dataframe
name_re = re.compile(r"(?P<contig>\w{2,5})_(?P<position>\d+)_(?P<snp_id>\w+)_(?P<method>\w+).tsv")
snp_dicts = []
for path in file_list:
filename = re.search(name_re, path.split("/")[-1])
contig = filename.group("contig")
position = filename.group("position")
snp_id = filename.group("snp_id")
with open(path, "r") as ofile:
lines = ofile.readlines()
components = [xl.split("\t") for xl in lines[1:]]
# snp id is index 0 in first component
top_snp = components[0][0]
top_prob = components[0][1].rstrip("\n")
size = len(components)
file_dict = {"Lead_SNP": snp_id,
"Credible_set_size": size,
"Top_set_SNP": top_snp,
"Top_set_prob": top_prob,
"Chr": contig,
"Position": position}
snp_dicts.append(file_dict)
summary_table = pd.DataFrame(snp_dicts, index=range(len(snp_dicts)))
summary_table.index = summary_table["Lead_SNP"]
summary_table.drop(["Lead_SNP"], axis=1, inplace=True)
return summary_table
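# Hedged sketch of the file-name convention summariseResults() expects;
# the example file name below is invented purely for illustration.
def filenameSketch():
    name_re = re.compile(r"(?P<contig>\w{2,5})_(?P<position>\d+)_(?P<snp_id>\w+)_(?P<method>\w+).tsv")
    example = "chr12_112591686_rs3184504_credible.tsv"
    match = re.search(name_re, example)
    # -> {'contig': 'chr12', 'position': '112591686',
    #     'snp_id': 'rs3184504', 'method': 'credible'}
    return match.groupdict()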
| mit | -427,224,829,082,830,100 | 35.841396 | 102 | 0.53317 | false |
halexan/RouteManagement | src/RMServer/cfg.py | 1 | 1228 | __author__ = 'Zhang Shaojun'
import logging
import sys
from logging.handlers import TimedRotatingFileHandler
# log
LOG = logging.getLogger('tflc')
LOG.setLevel(logging.DEBUG)
# LOG.addHandler(logging.StreamHandler(sys.stderr))
log_path = '/home/mimic/mpp/RouteManagement/logs/RMServer.log'
fmt = "%(asctime)-15s - %(levelname)s - %(message)s"
formatter = logging.Formatter(fmt)
lf = TimedRotatingFileHandler(filename=log_path, when="midnight", interval=1, backupCount=2)
lf.setLevel(logging.DEBUG)
lf.setFormatter(formatter)
LOG.addHandler(lf)
# version of TransFormed Layered Controller
TFLC_VERSION_1 = 1
# listening address and port
CC_LISTEN_HOST = ''
CC_LISTEN_PORT = 16633
# WSGI REST service address
WSGI_API_HOST = ''
WSGI_API_PORT = 8080
# state of local controller
LC_STATE_HANDSHAKE = "handshake"
LC_STATE_CONFIG = "config"
LC_STATE_LIVE = "live"
LC_STATE_DEAD = "dead"
# max message id
MAX_XID = 0xffffffff
# max local controller id
MAX_LC_ID = 0xffffffff
# flow dispatch option
FLOW_DISPATCH_STEPBYSTEP = 1
# echo_request interval
ECHO_REQ_INTERVAL = 100
# master-slave-switching interval
MS_SWITCH_INTERVAL = 300
# number of veths
NUM_VETH = 2
| mit | 9,022,306,108,233,804,000 | 19.964286 | 92 | 0.715798 | false |
scttcper/hangry-py | hangrypy/__init__.py | 1 | 1419 | from bs4 import BeautifulSoup
from .default_recipe_parser import recipe_parser
from .foodnetwork import foodnetwork
from .recipe import Recipe
from .schema_org_recipe_parser import schema_org_recipe_parser, use_schema_org
# messy python 3 support
try:
from urllib.request import urlopen, quote
from urllib.parse import urlunsplit, urlsplit
except ImportError:
from urllib2 import urlopen, quote
from urlparse import urlsplit, urlunsplit
parsers = {'schema_org_recipe_parser': schema_org_recipe_parser}
non_standard = {'foodnetwork.com': foodnetwork}
def url_setup(url):
scheme, netloc, path, qs, anchor = urlsplit(url)
domain = '.'.join(netloc.split('.')[-2:])
path = quote(path, '/%')
# remove everything after path
url = urlunsplit((scheme, netloc, path, '', ''))
return url, domain
def select_parser(html, parser, domain):
if parser:
return parsers[parser]
if domain in non_standard:
return non_standard[domain]
if use_schema_org(html):
return schema_org_recipe_parser
return recipe_parser
def Hangry(url, html=None, parser=None):
# open url or use passed
if not html:
html = urlopen(url).read()
soup = BeautifulSoup(html, 'html5lib')
url, domain = url_setup(url)
parser = select_parser(html, parser, domain)(soup)
recipe = Recipe(parser, domain, url)
return recipe
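# Hedged usage sketch: the URL below is a placeholder, and the attributes on
# the returned Recipe object depend on which parser was selected.
def example_usage():
    # fetch and parse a recipe page; pre-fetched HTML could be passed via
    # the html= keyword to skip the network call
    recipe = Hangry('http://www.foodnetwork.com/recipes/some-recipe.html')
    return recipe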
| mit | -7,977,861,761,553,599,000 | 28.5625 | 78 | 0.681466 | false |
codylane/python_twiddle | test_twiddle.py | 1 | 12348 | #!/usr/bin/env python
from __future__ import print_function
import twiddle
import sys
def get_database_connection_maximum(host, port=twiddle.DEFAULT_PORT):
'''
Returns the current maximum total connections for the database pool.
'''
result = twiddle.connect_factory(host, 'bean', 'datasource', 'MaxPoolSize', port)
return int(result)
def get_database_connection_minimum(host, port=twiddle.DEFAULT_PORT):
'''
    Returns the current minimum total connections for the database pool.
'''
result = twiddle.connect_factory(host, 'bean', 'datasource', 'MinPoolSize', port)
return int(result)
def get_database_connection_current_used(host, port=twiddle.DEFAULT_PORT):
'''
    Returns the current number of used connections for the database pool.
'''
result = twiddle.connect_factory(host, 'bean', 'datasource', 'NumBusyConnections', port)
return int(result)
def get_database_connection_current_idle(host, port=twiddle.DEFAULT_PORT):
'''
    Returns the current number of idle connections for the database pool.
'''
result = twiddle.connect_factory(host, 'bean', 'datasource', 'NumIdleConnections', port)
return int(result)
def calculate_percentage_used(host, port=twiddle.DEFAULT_PORT, decimals=0):
'''
    Calculate the percentage of used database connections relative to
    the configured maximum, rounded to the requested number of decimals.
    Note that because many decimal fractions have no exact binary
    representation, a value such as 1.6 at one decimal may come back as
    1.6000000000000001; that is expected behaviour, not a bug.
'''
if decimals < 0: decimals = 0
max = float(get_database_connection_maximum(host, port))
used = float(get_database_connection_current_used(host, port))
result = (used / max) * 100
return round(result, decimals)
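# Worked example of the arithmetic above with hypothetical pool numbers
# (no live endpoint involved): 20 busy connections out of a maximum of 50
# is (20 / 50.0) * 100 = 40.0 percent used.
def _percentage_sketch():
    maximum, used = 50.0, 20.0
    return round((used / maximum) * 100, 0)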
def calculate_percentage_idle(host, port=twiddle.DEFAULT_PORT, decimals=0):
'''
    Calculate the percentage of idle database connections relative to
    the configured maximum, rounded to the requested number of decimals.
    Note that because many decimal fractions have no exact binary
    representation, a value such as 1.6 at one decimal may come back as
    1.6000000000000001; that is expected behaviour, not a bug.
'''
max = float(get_database_connection_maximum(host, port))
idle = float(get_database_connection_current_idle(host, port))
result = (idle / max) * 100
return round(result, decimals)
def validate_required_options():
'''
Ensures that all required command line options are present.
If not, exits with error message.
'''
# check for required options
if options.host is None:
print('ERR: required option --host', file=sys.stderr)
sys.exit(1)
if options.port is None:
print('ERR: required option --port', file=sys.stderr)
sys.exit(1)
def add_additional_options():
parser = twiddle.create_default_cmdline_options()
parser.add_option(
'--max-connections',
action='store_true',
default=False,
metavar='MAXCONNECTIONS',
dest='maxconnections',
help='Returns the amount of maximum connections'
)
parser.add_option(
'--min-connections',
action='store_true',
default=False,
metavar='MINCONNECTIONS',
dest='minconnections',
help='Returns the amount of minimum connections'
)
parser.add_option(
'--idle-connections',
action='store_true',
default=False,
metavar='IDLECONNECTIONS',
dest='idleconnections',
help='Returns the amount of idle connections if ' \
'-w and -c are not present. ' \
             'Otherwise this option is required with -w and -c'
)
parser.add_option(
'--used-connections',
action='store_true',
default=False,
metavar='USEDCONNECTIONS',
dest='usedconnections',
help='Returns the amount of used connections if ' \
'-w and -c are not present. ' \
'Otherwise this option is required with -w and -c'
)
parser.add_option(
'--idle-connection-percent',
action='store_true',
default=False,
metavar='IDLECONNECTIONPERCENT',
dest='idleconnectionpercent',
help='Returns the percentage amount of idle connections'
)
parser.add_option(
'--used-connection-percent',
action='store_true',
default=False,
metavar='USEDCONNECTIONPERCENT',
dest='usedconnectionpercent',
help='Returns the percentage amount of used connections'
)
parser.add_option(
'--operator',
        action='store',
default='>=',
metavar='OPERATOR',
dest='operator',
help='Sets the operator that is used when calculating thresholds'
)
return parser
def critical_alarm(alarm_type, datasource, operator, retrieved_value, tresh_value):
'''
Constructs a critical alarm message that would look like the following
alarm_type --------|
datasource --------|---------|
operator ----------|---------|----------------------------|------------------|
retrieved_value ---|---------|----------------------------|--------------| |
thresh_value ------|---------|----------------------------|--------------|---|--|
V V V V V V
CRITICAL: The percentage of used database connections is >= threshold [60.0 >= 40]
@alarm_type The type of the alarm, example [percentage, number]
@datasource The datasource attribute for the alarm: example [used]
@operator The boolean operator for the alarm in string form, example: [>=, <=, <, >]
@retrieved_value The retrieved value that we got from the endpoint, example [60.0]
@thres_value The threshold value that was breached, example: [40]
'''
print('CRITICAL: The %s of %s database connections is %s threshold [%s %s %s]' \
%(alarm_type, datasource, operator, retrieved_value, operator, tresh_value),\
file=sys.stderr)
def warning_alarm(alarm_type, datasource, operator, retrieved_value, tresh_value):
'''
Constructs a warning alarm message that would look like the following
alarm_type --------|
datasource --------|---------|
operator ----------|---------|----------------------------|------------------|
retrieved_value ---|---------|----------------------------|--------------| |
thresh_value ------|---------|----------------------------|--------------|---|--|
V V V V V V
WARNING: The percentage of used database connections is >= threshold [60.0 >= 40]
@alarm_type The type of the alarm, example [percentage, number]
@datasource The datasource attribute for the alarm: example [used]
@operator The boolean operator for the alarm in string form, example: [>=, <=, <, >]
@retrieved_value The retrieved value that we got from the endpoint, example [60.0]
'''
print('WARNING: The %s of %s database connections is %s threshold [%s %s %s]' \
%(alarm_type, datasource, operator, retrieved_value, operator, tresh_value),
file=sys.stderr)
def process_thresholds(crit_thresh, warn_thresh, idle_pcnt, used_pcnt, used, idle):
    '''
    Compare the retrieved connection figures against the warning and
    critical thresholds.

    Thresholds ending in '%' (e.g. '80%') are checked against the
    idle/used percentage values; plain numbers are checked against the
    raw idle/used connection counts.  Exits with status 2 when the
    critical threshold is breached, 1 when the warning threshold is
    breached, and returns normally otherwise.
    '''
calc_crit_percentage = False
calc_warn_percentage = False
if crit_thresh is not None:
calc_crit_percentage = crit_thresh.endswith('%')
crit_thresh = int(crit_thresh.rstrip('%'))
if warn_thresh is not None:
calc_warn_percentage = warn_thresh.endswith('%')
warn_thresh = int(warn_thresh.rstrip('%'))
    print('DEBUG: crit_threshold ', crit_thresh, ' calc_crit_percentage ', calc_crit_percentage)
    print('DEBUG: warn_threshold ', warn_thresh, ' calc_warn_percentage ', calc_warn_percentage)
if calc_crit_percentage:
print('DEBUG: calculating critical threshold percentages')
print('DEBUG: used_pcnt ', used_pcnt)
print('DEBUG: idle_pcnt ', idle_pcnt)
if used_pcnt and used_pcnt >= crit_thresh:
critical_alarm('percentage', 'used', '>=', used_pcnt, crit_thresh)
sys.exit(2)
elif idle_pcnt and idle_pcnt >= crit_thresh:
critical_alarm('percentage', 'idle', '>=', idle_pcnt, crit_thresh)
sys.exit(2)
else:
print('DEBUG: calculating critical threshold numbers')
print('DEBUG: used ', used)
print('DEBUG: idle ', idle)
if used and used >= crit_thresh:
critical_alarm('number', 'used', '>=', used, crit_thresh)
sys.exit(2)
elif idle and idle >= crit_thresh:
critical_alarm('number', 'idle', '>=', idle, crit_thresh)
sys.exit(2)
if calc_warn_percentage:
print('DEBUG: calculating warning threshold percentages')
print('DEBUG: used_pcnt ', used_pcnt)
print('DEBUG: idle_pcnt ', idle_pcnt)
if used_pcnt and used_pcnt >= warn_thresh:
warning_alarm('percentage', 'used', '>=', used_pcnt, warn_thresh)
sys.exit(1)
elif idle_pcnt and idle_pcnt >= warn_thresh:
warning_alarm('percentage', 'idle', '>=', idle_pcnt, warn_thresh)
sys.exit(1)
else:
print('DEBUG: calculating warning threshold numbers')
print('DEBUG: used ', used)
print('DEBUG: idle ', idle)
if used and used >= warn_thresh:
            warning_alarm('number', 'used', '>=', used, warn_thresh)
sys.exit(1)
elif idle and idle >= warn_thresh:
            warning_alarm('number', 'idle', '>=', idle, warn_thresh)
sys.exit(1)
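# Hedged example of how process_thresholds() behaves (values invented):
#   process_thresholds(crit_thresh='80%', warn_thresh='60%',
#                      idle_pcnt=10.0, used_pcnt=90.0,
#                      used=None, idle=None)
# would raise a CRITICAL alarm (90.0 >= 80) and exit with status 2, while a
# plain threshold such as '80' would be compared against the raw counts.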
decimals = 0
parser = add_additional_options()
(options, args) = parser.parse_args()
# ensure all required options are present
validate_required_options()
cmdline_results = {}
cmdline_results['max'] = None
cmdline_results['min'] = None
cmdline_results['used'] = None
cmdline_results['idle'] = None
cmdline_results['idle%'] = None
cmdline_results['used%'] = None
cmdline_results['warning'] = options.warning
cmdline_results['critical'] = options.critical
if options.maxconnections:
cmdline_results['max'] = get_database_connection_maximum(options.host)
if options.minconnections:
cmdline_results['min'] = get_database_connection_minimum(options.host)
if options.usedconnections:
cmdline_results['used'] = get_database_connection_current_used(options.host)
if options.idleconnections:
cmdline_results['idle'] = get_database_connection_current_idle(options.host)
if options.idleconnectionpercent:
cmdline_results['idle%'] = calculate_percentage_idle(options.host, options.port, decimals)
if options.usedconnectionpercent:
cmdline_results['used%'] = calculate_percentage_used(options.host, options.port, decimals)
if options.warning or options.critical:
    if (options.warning and options.warning.endswith('%')) or \
            (options.critical and options.critical.endswith('%')):
if cmdline_results.get('used%') is None:
cmdline_results['used%'] = calculate_percentage_used(options.host, options.port, decimals)
if cmdline_results.get('idle%') is None:
cmdline_results['idle%'] = calculate_percentage_idle(options.host, options.port, decimals)
if options.warning or options.critical:
process_thresholds( \
crit_thresh = cmdline_results.get('critical'), \
warn_thresh = cmdline_results.get('warning'), \
idle_pcnt = cmdline_results.get('idle%'), \
used_pcnt = cmdline_results.get('used%'), \
used = cmdline_results.get('used'), \
idle = cmdline_results.get('idle')
)
#if cmdline_results.get('idle') is None and cmdline_results.get('used') is None:
# print('ERR: You cannot specify a warning percentage without --idle-connections or --used-connections')
# sys.exit(1)
print(cmdline_results)
| mit | 4,603,005,909,222,156,300 | 39.090909 | 107 | 0.604227 | false |
jcfr/girder | tests/cases/file_test.py | 1 | 17570 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import io
import os
import shutil
import urllib
import zipfile
from hashlib import sha512
from .. import base
from girder.constants import SettingKey
from girder.models import getDbConnection
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
chunk1, chunk2 = ('hello ', 'world')
class FileTestCase(base.TestCase):
"""
Tests the uploading, downloading, and storage of files in each different
type of assetstore.
"""
def setUp(self):
base.TestCase.setUp(self)
user = {
'email': '[email protected]',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
}
self.user = self.model('user').createUser(**user)
folders = self.model('folder').childFolders(
parent=self.user, parentType='user', user=self.user)
for folder in folders:
if folder['public'] is True:
self.publicFolder = folder
else:
self.privateFolder = folder
secondUser = {
'email': '[email protected]',
'login': 'secondlogin',
'firstName': 'Second',
'lastName': 'User',
'password': 'secondpassword'
}
self.secondUser = self.model('user').createUser(**secondUser)
def _testEmptyUpload(self, name):
"""
Uploads an empty file to the server.
"""
resp = self.request(
path='/file', method='POST', user=self.user, params={
'parentType': 'folder',
'parentId': self.privateFolder['_id'],
'name': name,
'size': 0
})
self.assertStatusOk(resp)
file = resp.json
self.assertHasKeys(file, ['itemId'])
self.assertEqual(file['size'], 0)
self.assertEqual(file['name'], name)
self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
return file
def _testUploadFile(self, name):
"""
Uploads a non-empty file to the server.
"""
# Initialize the upload
resp = self.request(
path='/file', method='POST', user=self.user, params={
'parentType': 'folder',
'parentId': self.privateFolder['_id'],
'name': name,
'size': len(chunk1) + len(chunk2),
'mimeType': 'text/plain'
})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
# Uploading with no user should fail
fields = [('offset', 0), ('uploadId', uploadId)]
files = [('chunk', 'helloWorld.txt', chunk1)]
resp = self.multipartRequest(
path='/file/chunk', fields=fields, files=files)
self.assertStatus(resp, 401)
# Uploading with the wrong user should fail
fields = [('offset', 0), ('uploadId', uploadId)]
files = [('chunk', 'helloWorld.txt', chunk1)]
resp = self.multipartRequest(
path='/file/chunk', user=self.secondUser, fields=fields,
files=files)
self.assertStatus(resp, 403)
# Sending the first chunk should fail because the default minimum chunk
# size is larger than our chunk.
self.model('setting').unset(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE)
fields = [('offset', 0), ('uploadId', uploadId)]
files = [('chunk', 'helloWorld.txt', chunk1)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'message': 'Chunk is smaller than the minimum size.'
})
# Send the first chunk
self.model('setting').set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 0)
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
# Attempting to send second chunk with incorrect offset should fail
fields = [('offset', 0), ('uploadId', uploadId)]
files = [('chunk', name, chunk2)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatus(resp, 400)
# Ask for completion before sending second chunk should fail
resp = self.request(path='/file/completion', method='POST',
user=self.user, params={'uploadId': uploadId})
self.assertStatus(resp, 400)
# Request offset from server (simulate a resume event)
resp = self.request(path='/file/offset', method='GET', user=self.user,
params={'uploadId': uploadId})
self.assertStatusOk(resp)
# Trying to send too many bytes should fail
currentOffset = resp.json['offset']
fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
files = [('chunk', name, "extra_"+chunk2+"_bytes")]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'message': 'Received too many bytes.'
})
# The offset should not have changed
resp = self.request(path='/file/offset', method='GET', user=self.user,
params={'uploadId': uploadId})
self.assertStatusOk(resp)
self.assertEqual(resp.json['offset'], currentOffset)
files = [('chunk', name, chunk2)]
# Now upload the second chunk
fields = [('offset', resp.json['offset']), ('uploadId', uploadId)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
file = resp.json
self.assertHasKeys(file, ['itemId'])
self.assertEqual(file['assetstoreId'], str(self.assetstore['_id']))
self.assertEqual(file['name'], name)
self.assertEqual(file['size'], len(chunk1 + chunk2))
return file
def _testDownloadFile(self, file, contents):
"""
Downloads the previously uploaded file from the server.
:param file: The file object to download.
:type file: dict
:param contents: The expected contents.
:type contents: str
"""
resp = self.request(path='/file/%s/download' % str(file['_id']),
method='GET', user=self.user, isJson=False)
self.assertStatusOk(resp)
if contents:
self.assertEqual(resp.headers['Content-Type'],
'text/plain;charset=utf-8')
self.assertEqual(contents, resp.collapse_body())
# Test downloading with an offset
resp = self.request(path='/file/%s/download' % str(file['_id']),
method='GET', user=self.user, isJson=False,
params={'offset': 1})
self.assertStatusOk(resp)
self.assertEqual(contents[1:], resp.collapse_body())
# Test downloading with a name
resp = self.request(
path='/file/%s/download/%s' % (
str(file['_id']), urllib.quote(file['name']).encode('utf8')),
method='GET', user=self.user, isJson=False)
self.assertStatusOk(resp)
if contents:
self.assertEqual(resp.headers['Content-Type'],
'text/plain;charset=utf-8')
self.assertEqual(contents, resp.collapse_body())
def _testDownloadFolder(self):
"""
Test downloading an entire folder as a zip file.
"""
# Create a subfolder
resp = self.request(
path='/folder', method='POST', user=self.user, params={
'name': 'Test',
'parentId': self.privateFolder['_id']
})
test = resp.json
contents = os.urandom(1024 * 1024) # Generate random file contents
# Upload the file into that subfolder
resp = self.request(
path='/file', method='POST', user=self.user, params={
'parentType': 'folder',
'parentId': test['_id'],
'name': 'random.bin',
'size': len(contents)
})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
# Send the file contents
fields = [('offset', 0), ('uploadId', uploadId)]
files = [('chunk', 'random.bin', contents)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
# Download the folder
resp = self.request(
path='/folder/%s/download' % str(self.privateFolder['_id']),
method='GET', user=self.user, isJson=False)
self.assertEqual(resp.headers['Content-Type'], 'application/zip')
zip = zipfile.ZipFile(io.BytesIO(resp.collapse_body()), 'r')
self.assertTrue(zip.testzip() is None)
extracted = zip.read('Private/Test/random.bin')
self.assertEqual(extracted, contents)
def _testDeleteFile(self, file):
"""
Deletes the previously uploaded file from the server.
"""
resp = self.request(
path='/file/%s' % str(file['_id']), method='DELETE', user=self.user)
self.assertStatusOk(resp)
def testFilesystemAssetstore(self):
"""
Test usage of the Filesystem assetstore type.
"""
self.assetstore = self.model('assetstore').getCurrent()
root = self.assetstore['root']
# Clean out the test assetstore on disk
shutil.rmtree(root)
# First clean out the temp directory
tmpdir = os.path.join(root, 'temp')
if os.path.isdir(tmpdir):
for tempname in os.listdir(tmpdir):
os.remove(os.path.join(tmpdir, tempname))
# Upload the two-chunk file
file = self._testUploadFile('helloWorld1.txt')
# Test editing of the file info
resp = self.request(path='/file/{}'.format(file['_id']), method='PUT',
user=self.user, params={'name': ' newName.json'})
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], 'newName.json')
# We want to make sure the file got uploaded correctly into
# the assetstore and stored at the right location
hash = sha512(chunk1 + chunk2).hexdigest()
self.assertEqual(hash, file['sha512'])
self.assertFalse(os.path.isabs(file['path']))
abspath = os.path.join(root, file['path'])
self.assertTrue(os.path.isfile(abspath))
self.assertEqual(os.stat(abspath).st_size, file['size'])
# Make sure access control is enforced on download
resp = self.request(
path='/file/{}/download'.format(file['_id']), method='GET')
self.assertStatus(resp, 401)
resp = self.request(
path='/folder/{}/download'.format(self.privateFolder['_id']),
method='GET')
self.assertStatus(resp, 401)
self._testDownloadFile(file, chunk1 + chunk2)
self._testDownloadFolder()
# Test updating of the file contents
newContents = 'test'
resp = self.request(
path='/file/{}/contents'.format(file['_id']), method='PUT',
user=self.user, params={'size': len(newContents)})
self.assertStatusOk(resp)
# Old contents should not be immediately destroyed
self.assertTrue(os.path.isfile(abspath))
# Send the first chunk
fields = (('offset', 0), ('uploadId', resp.json['_id']))
files = (('chunk', 'newName.json', newContents),)
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
file = resp.json
# Old contents should now be destroyed, new contents should be present
self.assertFalse(os.path.isfile(abspath))
abspath = os.path.join(root, file['path'])
self.assertTrue(os.path.isfile(abspath))
self._testDownloadFile(file, newContents)
# Test updating an empty file
resp = self.request(
path='/file/{}/contents'.format(file['_id']), method='PUT',
user=self.user, params={'size': 1})
self.assertStatusOk(resp)
self._testDeleteFile(file)
self.assertFalse(os.path.isfile(abspath))
# Upload two empty files to test duplication in the assetstore
empty1 = self._testEmptyUpload('empty1.txt')
empty2 = self._testEmptyUpload('empty2.txt')
hash = sha512().hexdigest()
abspath = os.path.join(root, empty1['path'])
self.assertEqual((hash, hash), (empty1['sha512'], empty2['sha512']))
self.assertTrue(os.path.isfile(abspath))
self.assertEqual(os.stat(abspath).st_size, 0)
self._testDownloadFile(empty1, '')
# Deleting one of the duplicate files but not the other should
# leave the file within the assetstore. Deleting both should remove it.
self._testDeleteFile(empty1)
self.assertTrue(os.path.isfile(abspath))
self._testDeleteFile(empty2)
self.assertFalse(os.path.isfile(abspath))
def testGridFsAssetstore(self):
"""
Test usage of the GridFS assetstore type.
"""
# Clear any old DB data
base.dropGridFSDatabase('girder_assetstore_test')
# Clear the assetstore database
conn = getDbConnection()
conn.drop_database('girder_assetstore_test')
self.model('assetstore').remove(self.model('assetstore').getCurrent())
assetstore = self.model('assetstore').createGridFsAssetstore(
name='Test', db='girder_assetstore_test')
self.assetstore = assetstore
chunkColl = conn['girder_assetstore_test']['chunk']
# Upload the two-chunk file
file = self._testUploadFile('helloWorld1.txt')
hash = sha512(chunk1 + chunk2).hexdigest()
self.assertEqual(hash, file['sha512'])
# We should have two chunks in the database
self.assertEqual(chunkColl.find({'uuid': file['chunkUuid']}).count(), 2)
self._testDownloadFile(file, chunk1 + chunk2)
self._testDownloadFolder()
# Delete the file, make sure chunks are gone from database
self._testDeleteFile(file)
self.assertEqual(chunkColl.find({'uuid': file['chunkUuid']}).count(), 0)
empty = self._testEmptyUpload('empty.txt')
self.assertEqual(sha512().hexdigest(), empty['sha512'])
self._testDownloadFile(empty, '')
self._testDeleteFile(empty)
def testLinkFile(self):
params = {
'parentType': 'folder',
'parentId': self.privateFolder['_id'],
'name': 'My Link Item',
'linkUrl': 'javascript:alert("x");'
}
# Try to create a link file with a disallowed URL, should be rejected
resp = self.request(
path='/file', method='POST', user=self.user, params=params)
self.assertValidationError(resp, 'linkUrl')
# Create a valid link file
params['linkUrl'] = ' http://www.google.com '
resp = self.request(
path='/file', method='POST', user=self.user, params=params)
self.assertStatusOk(resp)
file = resp.json
self.assertEqual(file['assetstoreId'], None)
self.assertEqual(file['name'], 'My Link Item')
self.assertEqual(file['linkUrl'], params['linkUrl'].strip())
# Attempt to download the link file, make sure we are redirected
resp = self.request(
path='/file/{}/download'.format(file['_id']), method='GET',
isJson=False, user=self.user)
self.assertStatus(resp, 303)
self.assertEqual(resp.headers['Location'], params['linkUrl'].strip())
# Download containing folder as zip file
resp = self.request(
path='/folder/{}/download'.format(self.privateFolder['_id']),
method='GET', user=self.user, isJson=False)
self.assertEqual(resp.headers['Content-Type'], 'application/zip')
body = ''.join([str(_) for _ in resp.body])
zip = zipfile.ZipFile(io.BytesIO(body), 'r')
self.assertTrue(zip.testzip() is None)
# The file should just contain the URL of the link
extracted = zip.read('Private/My Link Item')
self.assertEqual(extracted, params['linkUrl'].strip())
| apache-2.0 | -6,590,420,022,936,276,000 | 36.703863 | 80 | 0.581673 | false |
32bitmicro/EDA | python/eda/eda/dump.py | 1 | 2948 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Paweł Wodnicki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the 32bitmicro nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from edautils import *
from eda import *
CRLF = "\n"
class CDump:
" Dump class "
def __init__(self, sch=None,brd=None):
self.name=""
self.sch=sch
self.brd=brd
def dumpNet(self,net):
ns = ''
for node in net.nodes:
ns += " pin " + str(node.pin.num) + " - " + node.pin.name + " dev " + node.dev.refid + CRLF
return ns
def dumpNets(self, design):
ns = ''
ns += "NETS: " + CRLF
ns += "" + CRLF
for netname in design.nets:
net = design.nets[netname]
ns += " " + netname + CRLF
ns += self.dumpNet(net)
ns += "" + CRLF
return ns
def dumpDevice(self, dev):
ns = ''
for pinnum in dev.pins:
pin = dev.pins[pinnum]
ns += " pin " + str(pin.num) + " - " + pin.name + " net " + pin.netname + CRLF
return ns
def dumpDevices(self, design):
ns = ''
ns += "Devices: " + CRLF
ns += "" + CRLF
for devname in design.devices:
dev = design.devices[devname]
ns += " " + devname + CRLF
ns += self.dumpDevice(dev)
ns += "" + CRLF
return ns
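# Hedged usage sketch (not part of the original module): "design" is assumed
# to expose the .nets and .devices dictionaries the dump methods iterate over.
def dump_design(design, sch=None, brd=None):
    dumper = CDump(sch, brd)
    return dumper.dumpNets(design) + dumper.dumpDevices(design)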
| bsd-3-clause | 4,909,801,486,996,867,000 | 34.071429 | 111 | 0.598099 | false |
djangocon/2017.djangocon.eu | conference/schedule/models.py | 1 | 5351 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import datetime as dt
from autoslug import AutoSlugField
from autoslug.utils import slugify
from django.core.exceptions import ValidationError
from django.db import models
from django.template.defaultfilters import truncatechars_html
from django.utils.translation import gettext_lazy as _
from filer.fields.image import FilerImageField
from filer.models import ThumbnailOption
from meta.models import ModelMeta
from conference.cfp.models import Submission, WorkshopSubmission
class Slot(ModelMeta, models.Model):
"""
Model for conference time slots. It can be for a talk, a workshop, or a custom time slot (i. e. coffee break)
"""
talk = models.ForeignKey(
Submission, related_name='talks', limit_choices_to={'selected': True}, null=True, blank=True
)
slug = AutoSlugField(
_('Slug'), max_length=400, blank=True, populate_from='generated_slug', always_update=True
)
workshop = models.ForeignKey(
WorkshopSubmission, related_name='workshops', limit_choices_to={'selected': True}, null=True, blank=True
)
name = models.CharField(
_('Name'), max_length=250, null=True, blank=True,
help_text=_('Field for time slots that does not relate to a Talk or a Workshop.')
)
    mugshot = FilerImageField(verbose_name=_('Speaker mugshot'), null=True, blank=True)
twitter = models.CharField(_('Twitter'), max_length=200, default='', blank=True)
schedule_abstract = models.TextField(_('Schedule abstract'), blank=True, null=True)
day = models.DateField(_('Date'))
start = models.TimeField(_('Start'))
duration = models.DurationField(_('Duration'))
sprint_days = models.BooleanField(_('Part of sprint days'), default=False)
show_end_time = models.BooleanField(_('Show end time in schedule'), default=False)
slides = models.URLField(_('Speaker slides'), blank=True, null=True)
video = models.URLField(_('Talk video'), blank=True, null=True)
_metadata = {
'title': 'title',
'description': 'get_meta_abstract',
'image': 'get_image',
}
class Meta:
verbose_name = _('Time slot')
verbose_name_plural = _('Time slots')
ordering = ('day', 'start')
def clean(self):
# ensure talk and workshop are NOT filled at the same time
if self.talk and self.workshop:
message = _('Please, select either a Talk or a Workshop, not both.')
raise ValidationError({
'talk': ValidationError(message=message, code='invalid'),
'workshop': ValidationError(message=message, code='invalid'),
})
def get_image(self):
if self.mugshot:
return self.mugshot.url
else:
return None
def get_meta_abstract(self):
return truncatechars_html(self.abstract, 180)
@property
def title(self):
if self.talk_id:
return self.talk.proposal_title
elif self.workshop_id:
return self.workshop.proposal_title
elif self.name:
return self.name
return ''
@property
def author(self):
if self.talk:
return self.talk.author
elif self.workshop:
return self.workshop.author
return ''
@property
def generated_slug(self):
return slugify(self.title)
@property
def twitter_split(self):
if self.twitter:
return self.twitter.split(',')
return ''
@property
def abstract(self):
if self.schedule_abstract:
return self.schedule_abstract
if self.talk:
return self.talk.proposal_abstract
elif self.workshop:
return self.workshop.proposal_abstract
return ''
@property
def bio(self):
if self.is_talk() and self.talk.author_bio and len(self.talk.author_bio) > 3:
return self.talk.author_bio
if self.is_workshop() and self.workshop.author_bio and len(self.workshop.author_bio) > 3:
return self.workshop.author_bio
return ''
@property
def parsed_duration(self):
minutes = self.duration.seconds//60
hours = minutes//60
if hours:
minutes -= hours * 60
if minutes:
return '{}h {}min'.format(hours, minutes)
return '{}h'.format(hours)
return '{}min'.format(minutes)
@property
def end_time(self):
combined = dt.datetime.combine(dt.date.today(), self.start)
end_time = combined + self.duration
return end_time.time()
@property
def height(self):
return self.duration.total_seconds() / 100 * 6
@property
def thumbnail_option(self):
return ThumbnailOption.objects.get(name__icontains='speaker').as_dict
def is_talk(self):
return True if self.talk else False
is_talk.short_description = _('Talk')
is_talk.boolean = True
def is_workshop(self):
return True if self.workshop else False
is_workshop.short_description = _('Workshop')
is_workshop.boolean = True
def is_custom(self):
return True if self.name else False
is_custom.short_description = _('Custom')
is_custom.boolean = True
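# Hedged helper sketch (not part of the original models): thanks to
# Meta.ordering = ('day', 'start'), slots for a single conference day come
# back already ordered by start time.
def slots_for_day(day):
    return Slot.objects.filter(day=day, talk__isnull=False)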
| bsd-3-clause | -7,714,352,984,290,550,000 | 32.44375 | 113 | 0.63091 | false |
zliau/pivotalpy | pivotalpy/story.py | 1 | 1404 | import json
from project import Project
class Story(object):
def __init__(self, pivotal):
self.pivotal = pivotal
self.stories_url = pivotal.base_url + 'projects/%s/stories/'
# Get stories matching the query in @params
def get_all(self, project_id, params=None):
url = self.stories_url % (str(project_id))
r = self.pivotal.make_api_request(url, 'GET', params=params)
return r.json()
# Create new story with @data
def create(self, project_id, data):
url = self.stories_url % (str(project_id))
r = self.pivotal.make_api_request(url, 'POST', data=data)
return r.json()
# Get story specified by @story_id
def get(self, project_id, story_id):
url = self.stories_url % (str(project_id)) + story_id + '/'
r = self.pivotal.make_api_request(url, 'GET')
return r.json()
# Update story specified by @story_id
def update(self, project_id, story_id, data):
url = self.stories_url % (str(project_id)) + story_id + '/'
r = self.pivotal.make_api_request(url, 'PUT', data=data)
return r.json()
# Post comment on story specified by @story_id
def post_comment(self, project_id, story_id, data):
url = self.stories_url % (str(project_id)) + story_id + '/comments/'
r = self.pivotal.make_api_request(url, 'POST', data=data)
return r.json()
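# Hedged usage sketch: "pivotal" is assumed to be the client object built
# elsewhere in this package (it must expose base_url and make_api_request);
# the project id and the filter value are placeholders.
def list_started_stories(pivotal, project_id='123456'):
    stories = Story(pivotal)
    return stories.get_all(project_id, params={'filter': 'state:started'})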
| mit | 8,934,251,698,385,529,000 | 36.945946 | 76 | 0.611823 | false |
xuru/pyvisdk | pyvisdk/do/vmfs_datastore_info.py | 1 | 1042 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmfsDatastoreInfo(vim, *args, **kwargs):
'''Information details about a VMFS datastore.'''
obj = vim.client.factory.create('ns0:VmfsDatastoreInfo')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % len(args))
required = [ 'freeSpace', 'maxFileSize', 'name', 'url' ]
optional = [ 'vmfs', 'timestamp', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
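# Hedged usage sketch: "vim" is assumed to be a connected pyvisdk service
# instance and every value below is invented for illustration.
def _example_vmfs_datastore_info(vim):
    return VmfsDatastoreInfo(vim,
                             freeSpace=10 * 1024 ** 3,
                             maxFileSize=2 * 1024 ** 4,
                             name='datastore1',
                             url='ds:///vmfs/volumes/example-volume/')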
| mit | -1,899,883,520,336,038,100 | 30.606061 | 124 | 0.592131 | false |
steinbep/Meraki-API---Python | mx_fw_rules.py | 1 | 1190 | #!/usr/bin/python
import meraki
import json
import csv
#
# Python Script Using Meraki API to collect all MX L3 Firewall Rules in all Networks to CSV file.
# Returns Site, Comment, Policy, Protocol, Source port and CIDR, Destination port and CIDR.
#
# Enter User's API Key
apikey = 'xxxxxx'
# Enter Organization ID Here
organizationid = 'xxxxxxxxx'
#User Input of filename
print('Enter a file name below,\nthe .csv will be appended to the name given')
filename = input('Name: ')
#Network lookup
networks = meraki.getnetworklist(apikey, organizationid, suppressprint=True)
# print(format(str(networks)))
#Loop through Network
for row in networks:
    # Firewall rules lookup for this network
rules = meraki.getmxl3fwrules(apikey, row['id'], suppressprint=True)
# print(format(str(rules)))
for rule in rules:
# print (rule)
try:
with open(filename + '.csv', 'a', newline='') as wr:
a = csv.writer(wr, delimiter=',' )
data = [str(row['name']), str(rule['comment']), str(rule['policy']), str(rule['protocol']), str(rule['srcPort']), str(rule['srcCidr']), str(rule['destPort']), str(rule['destCidr'])]
a.writerow(data)
except:
pass
| gpl-3.0 | 3,407,995,353,311,586,000 | 29.512821 | 197 | 0.647899 | false |
yashchandak/GNN | Sample_Run/Seq_att_Q/blogDWdata.py | 1 | 10658 | from __future__ import generators, print_function
import numpy as np
from random import shuffle
from scipy.io import loadmat
from copy import deepcopy
import functools
import Queue
#from multiprocessing import Process, Queue, Manager, Pool
import threading
import time
from collections import defaultdict
def async_prefetch_wrapper(iterable, buffer=100):
"""
wraps an iterater such that it produces items in the background
uses a bounded queue to limit memory consumption
"""
done = 'DONE'# object()
def worker(q, it):
for item in it:
q.put(item)
q.put(done)
# launch a thread to fetch the items in the background
queue = Queue.Queue(buffer)
#pool = Pool()
#m = Manager()
#queue = m.Queue()
it = iter(iterable)
#workers = pool.apply_async(worker, (queue, it))
thread = threading.Thread(target=worker, args=(queue, it))
#thread = Process(target=worker, args=(queue, it))
thread.daemon = True
thread.start()
# pull the items of the queue as requested
while True:
item = queue.get()
if item == 'DONE':#done:
return
else:
yield item
#pool.close()
#pool.join()
def async_prefetch(func):
"""
decorator to make generator functions fetch items in the background
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
return async_prefetch_wrapper(func(*args, **kwds))
return wrapper
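# Hedged usage sketch: decorating a generator with async_prefetch makes a
# background thread fill the bounded queue while the consumer works; the
# generator below is made up purely for illustration.
@async_prefetch
def _slow_numbers(n=5):
    for i in range(n):
        time.sleep(0.01)  # stand-in for an expensive item to produce
        yield i
# Consuming it is unchanged: `for value in _slow_numbers(): ...`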
class DataSet(object):
def __init__(self, cfg):
"""Construct a DataSet.
"""
self.cfg = cfg
self.all_walks, self.node_seq = self.get_walks(cfg.walks_dir) # reverse the sequence
#self.node_seq = self.all_walks[:, -1] # index by ending node
self.all_labels = self.get_labels(cfg.label_dir)
self.all_features= self.get_fetaures(cfg.features_dir)
#Increment the positions by 1 and mark the 0th one as False
self.train_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'train_ids.npy')))
self.val_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'val_ids.npy')))
self.test_nodes = np.concatenate(([False], np.load(cfg.label_fold_dir + 'test_ids.npy')))
# [!!!IMP!!]Assert no overlap between test/val/train nodes
self.change = 0
self.label_cache, self.update_cache = {0:self.all_labels[0]}, {}
#self.label_cache = defaultdict(lambda:self.all_labels[0], self.label_cache)
self.adj = loadmat(cfg.adj_dir)['adjmat'].toarray()
self.wce = self.get_wce()
def get_walks(self, path):
#Reverse sequences and padding in beginning
#return np.fliplr(np.loadtxt(path, dtype=np.int))
walks = np.fliplr(np.loadtxt(path, dtype=np.int)) # reverse the sequence
seq = deepcopy(walks[:,-1])
#rotate around the sequences, such that ends are padded with zeros
for i in range(np.shape(walks)[0]):
non_zeros = np.sum(walks[i] > 0)
walks[i] = np.roll(walks[i], non_zeros)
return walks, seq
def get_update_cache(self):
updated = {}
for k,v in self.update_cache.items():
updated[k] = v[0]/v[1]
return updated
def get_wce(self):
if self.cfg.solver.wce:
valid = self.train_nodes + self.val_nodes
tot = np.dot(valid, self.all_labels)
wce = 1/(len(tot) * (tot*1.0/np.sum(tot)))
else:
wce = [1]*self.all_labels.shape[1]
print("Cross-Entropy weights: ",wce)
return wce
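    @staticmethod
    def _wce_sketch():
        # Worked illustration of the weighting above with hypothetical
        # label totals for a 2-class problem: the rare class receives the
        # larger weight, 1 / (2 * [0.9, 0.1]) = [~0.56, 5.0].
        tot = np.array([90.0, 10.0])
        return 1 / (len(tot) * (tot / np.sum(tot)))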
def get_fetaures(self, path):
# Serves 2 purpose:
# a) add feature for dummy node 0 a.k.a <EOS> and <unlabeled>
# b) increments index of all features by 1, thus aligning it with indices in walks
all_features = np.load(path)
all_features = all_features.astype(np.float32, copy=False) # Required conversion for Python3
all_features = np.concatenate(([np.zeros(all_features.shape[1])], all_features), 0)
return all_features
def get_labels(self, path):
# Labels start with node '0'; Walks_data with node '1'
# To get corresponding mapping, increment the label node number by 1
# add label for dummy node 0 a.k.a <EOS> and <unlabeled>
all_labels = np.load(path)
all_labels = np.concatenate(([np.zeros(all_labels.shape[1])], all_labels), 0)
return all_labels
def accumulate_label_cache(self, labels, nodes):
#Aggregates all the labels for the corresponding nodes
#and tracks the count of updates made
default = (self.all_labels[0], 0) #Initial estimate -> all_zeros
if self.cfg.data_sets.binary_label_updates:
#Convert to binary and keep only the maximum value as 1
amax = np.argmax(labels, axis = 1)
labels = np.zeros(labels.shape)
for idx, pos in enumerate(amax):
labels[idx,pos] = 1
for idx, node in enumerate(nodes):
prv_label, prv_count = self.update_cache.get(node, default)
new_label = prv_label + labels[idx]
new_count = prv_count + 1
self.update_cache[node] = (new_label, new_count)
def update_label_cache(self):
#Average all the predictions made for the corresponding nodes and reset cache
alpha = self.cfg.solver.label_update_rate
        if len(self.label_cache) <= 1: alpha = 1
for k, v in self.update_cache.items():
old = self.label_cache.get(k, self.all_labels[0])
new = (1-alpha)*old + alpha*(v[0]/v[1])
self.change += np.mean((new - old) **2)
self.label_cache[k] = new
print("\nChange in label: :", np.sqrt(self.change/self.cfg.data_sets._len_vocab)*100)
self.change = 0
self.update_cache = {}
def get_nodes(self, dataset):
nodes = []
if dataset == 'train':
nodes = self.train_nodes
elif dataset == 'val':
nodes = self.val_nodes
elif dataset == 'test':
nodes = self.test_nodes
elif dataset == 'all':
# Get all the nodes except the 0th node
nodes = [True]*len(self.train_nodes)
nodes[0] = False
else:
raise ValueError
return nodes
#@async_prefetch
def next_batch(self, dataset, batch_size, shuffle=True):
nodes = self.get_nodes(dataset)
label_len = np.shape(self.all_labels)[1]
max_len = self.all_walks.shape[1]
# Get position of all walks ending with desired set of nodes
pos = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
pos.extend(temp)
seq.extend([node]*len(temp))
pos = np.array(pos)
seq = np.array(seq)
if shuffle:
indices = np.random.permutation(len(pos))
pos = pos[indices]
seq = seq[indices]
if batch_size == -1:
batch_size = len(pos)
tot = len(pos)//batch_size
for i in range(0, len(pos), batch_size):
x = self.all_walks[pos[i: i + batch_size]]
#get number of nodes per path
lengths = np.sum(np.array(x)>0, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.all_labels[0]) for item in row] for row in x]
#y = [self.all_labels[item] for item in x[-1]]
y = [self.all_labels[item] for item in seq[i: i+batch_size]]
# get features for all data points
x1 = [[self.all_features[item] for item in row] for row in x]
#print(x,y, lengths, seq[i: i + batch_size])
yield (x1, x2, seq[i: i + batch_size], y, tot, lengths)
@async_prefetch
def next_batch_same(self, dataset, node_count=1):
nodes = self.get_nodes(dataset)
pos = []
counts = []
seq = []
for node in np.where(nodes)[0]:
temp = np.where(self.node_seq == node)[0]
counts.append(len(temp))
seq.append(node)
pos.extend(temp)
pos = np.array(pos)
start = 0
max_len = self.all_walks.shape[1]
# Get a batch of all walks for 'node_count' number of node
for idx in range(0, len(counts), node_count):
#print(idx)
stop = start + np.sum(counts[idx:idx+node_count]) #start + total number of walks to be considered this time
x = self.all_walks[pos[start:stop]] #get the walks corresponding to respective positions
temp = np.array(x)>0 #get locations of all zero inputs
lengths = np.sum(temp, axis=1)
x = np.swapaxes(x, 0, 1) # convert from (batch x step) to (step x batch)
#"""
#original
# get labels for valid data points, for others: select the 0th label
x2 = [[self.label_cache.get(item, self.all_labels[0]) for item in row] for row in x]
            y = [self.all_labels[item] for item in x[-1,:]]  # Not useful, only present for the sake of the placeholder
# get features for all data points
x1 = [[self.all_features[item] for item in row] for row in x]
#"""
"""
#Unique based
u, inv = np.unique(x, return_inverse=True)
u2, inv2 = np.unique(x[-1:], return_inverse=True)
x2 = np.array([self.label_cache.get(item, self.all_labels[0]) for item in u])[inv]#.reshape(x.shape)
x1 = np.array([self.all_features[item] for item in u])[inv]#.reshape(x.shape)
y = np.array([self.all_labels[item] for item in u2])[inv2]
"""
"""
# Vectorized
# get labels for valid data points, for others: select the 0th label
x2 = np.vectorize(self.label_cache.get)(x)
x1 = np.vectorize(self.all_features.__getitem__)(x)
y = np.vectorize(self.all_labels.__getitem__)(x[-1:])
"""
start = stop
yield (x, x1, x2, seq[idx:idx+node_count], counts[idx:idx+node_count], y, lengths)
def testPerformance(self):
start = time.time()
step =0
for a,b,c,d,e,f,g in self.next_batch_same('all'):
step += 1
if step%500 == 0: print(step)
print ('total time: ', time.time()-start) | mit | -3,953,667,238,478,411,300 | 34.768456 | 119 | 0.572528 | false |
SergeySatskiy/codimension | codimension/ui/runparamsdlg.py | 1 | 35213 | # -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2010-2017 Sergey Satskiy <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Run parameters dialog"""
import os
import os.path
import copy
from utils.runparams import RunParameters, RUN, PROFILE, DEBUG
from utils.run import parseCommandLineArguments, checkOutput
from .qt import (Qt, QDoubleValidator, QDialog, QDialogButtonBox, QVBoxLayout,
QSizePolicy, QLabel, QGridLayout, QHBoxLayout, QRadioButton,
QGroupBox, QPushButton, QFileDialog, QLineEdit, QTreeWidget,
QAbstractItemView, QTreeWidgetItem, QCheckBox)
from .itemdelegates import NoOutlineHeightDelegate
class EnvVarDialog(QDialog):
"""Single environment variable add/edit dialog"""
def __init__(self, name='', value='', parent=None):
QDialog.__init__(self, parent)
self.name = name
self.value = value
self.__nameEdit = None
self.__valueEdit = None
self.__OKButton = None
self.__createLayout()
self.setWindowTitle("Environment variable")
self.setMaximumHeight(self.sizeHint().height())
self.setMaximumHeight(self.sizeHint().height())
self.__nameEdit.setText(name)
self.__valueEdit.setText(value)
self.__nameEdit.setEnabled(name == "")
self.__OKButton.setEnabled(name != "")
def __createLayout(self):
"""Creates the dialog layout"""
self.resize(300, 50)
self.setSizeGripEnabled(True)
# Top level layout
layout = QVBoxLayout(self)
gridLayout = QGridLayout()
nameLabel = QLabel("Name")
gridLayout.addWidget(nameLabel, 0, 0)
valueLabel = QLabel("Value")
gridLayout.addWidget(valueLabel, 1, 0)
self.__nameEdit = QLineEdit()
self.__nameEdit.textChanged.connect(self.__nameChanged)
gridLayout.addWidget(self.__nameEdit, 0, 1)
self.__valueEdit = QLineEdit()
self.__valueEdit.textChanged.connect(self.__valueChanged)
gridLayout.addWidget(self.__valueEdit, 1, 1)
layout.addLayout(gridLayout)
buttonBox = QDialogButtonBox(self)
buttonBox.setOrientation(Qt.Horizontal)
buttonBox.setStandardButtons(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self.__OKButton = buttonBox.button(QDialogButtonBox.Ok)
self.__OKButton.setDefault(True)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.close)
layout.addWidget(buttonBox)
def __nameChanged(self, newName):
"""Triggered when a variable name is changed"""
strippedName = str(newName).strip()
self.__OKButton.setEnabled(strippedName != "" and
' ' not in strippedName)
self.name = strippedName
def __valueChanged(self, newValue):
"""Triggered when a variable value is changed"""
self.value = newValue
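# Hedged usage sketch for the dialog above; not part of the original file.
def getEnvVarFromUser(parent=None, name='', value=''):
    """Runs EnvVarDialog modally and returns the edited (name, value)
    pair, or None when the user cancels."""
    dlg = EnvVarDialog(name, value, parent)
    if dlg.exec_() == QDialog.Accepted:
        return dlg.name, dlg.value
    return None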
class RunDialog(QDialog):
"""Run parameters dialog implementation"""
ACTION_TO_VERB = {RUN: 'Run',
PROFILE: 'Profile',
DEBUG: 'Debug'}
# See utils.run for runParameters
def __init__(self, path, runParameters,
profilerParams, debuggerParams,
action, parent=None):
QDialog.__init__(self, parent)
# Used as a return value
self.runParams = copy.deepcopy(runParameters)
self.profilerParams = copy.deepcopy(profilerParams)
self.debuggerParams = copy.deepcopy(debuggerParams)
self.__action = action
# Avoid pylint complains
self.__argsEdit = None
self.__scriptWDRButton = None
self.__dirRButton = None
self.__dirEdit = None
self.__dirSelectButton = None
self.__inheritParentRButton = None
self.__inheritParentPlusRButton = None
self.__inhPlusEnvTable = None
self.__addInhButton = None
self.__delInhButton = None
self.__editInhButton = None
self.__specificRButton = None
self.__specEnvTable = None
self.__addSpecButton = None
self.__delSpecButton = None
self.__editSpecButton = None
self.__runButton = None
self.__nodeLimitEdit = None
self.__edgeLimitEdit = None
self.__debugChildCheckBox = None
self.__edgeLimitValidator = None
self.__nodeLimitValidator = None
self.__intSelectButton = None
self.__intEdit = None
self.__redirectedRButton = None
self.__customIntRButton = None
self.__customTermRButton = None
self.__stopAtFirstCheckBox = None
self.__traceInterpreterCheckBox = None
self.__autoforkCheckBox = None
self.__reportExceptionCheckBox = None
self.__termEdit = None
self.__inheritedInterpreterRButton = None
self.__createLayout()
self.setWindowTitle(RunDialog.ACTION_TO_VERB[action] +
' parameters for ' + path)
self.__populateValues()
def __populateValues(self):
"""Populates the dialog UI controls"""
self.__argsEdit.setText(self.runParams['arguments'])
self.__populateWorkingDir()
self.__populateEnvironment()
self.__populateInterpreter()
self.__populateIO()
if self.__action == PROFILE:
self.__populateProfile()
elif self.__action == DEBUG:
self.__populateDebug()
self.__setRunButtonProps()
def __populateWorkingDir(self):
"""Populates the working directory"""
if self.runParams['useScriptLocation']:
self.__scriptWDRButton.setChecked(True)
self.__dirEdit.setEnabled(False)
self.__dirSelectButton.setEnabled(False)
else:
self.__dirRButton.setChecked(True)
self.__dirEdit.setEnabled(True)
self.__dirSelectButton.setEnabled(True)
self.__dirEdit.setText(self.runParams['specificDir'])
def __populateEnvironment(self):
"""Populates the environment variables"""
self.__populateTable(self.__inhPlusEnvTable,
self.runParams['additionToParentEnv'])
self.__populateTable(self.__specEnvTable,
self.runParams['specificEnv'])
if self.runParams['envType'] == RunParameters.InheritParentEnv:
self.__inheritParentRButton.setChecked(True)
self.__setEnabledInheritedPlusEnv(False)
self.__setEnabledSpecificEnv(False)
elif self.runParams['envType'] == RunParameters.InheritParentEnvPlus:
self.__inheritParentPlusRButton.setChecked(True)
self.__setEnabledSpecificEnv(False)
else:
self.__specificRButton.setChecked(True)
self.__setEnabledInheritedPlusEnv(False)
def __populateInterpreter(self):
"""Populates the interpreter"""
if self.runParams['useInherited']:
self.__inheritedInterpreterRButton.setChecked(True)
self.__intEdit.setEnabled(False)
self.__intSelectButton.setEnabled(False)
else:
self.__customIntRButton.setChecked(True)
self.__intEdit.setEnabled(True)
self.__intSelectButton.setEnabled(True)
self.__intEdit.setText(self.runParams['customInterpreter'])
def __populateIO(self):
"""Populate I/O"""
if self.runParams['redirected']:
self.__redirectedRButton.setChecked(True)
self.__termEdit.setEnabled(False)
else:
self.__customTermRButton.setChecked(True)
self.__termEdit.setEnabled(True)
self.__termEdit.setText(self.runParams['customTerminal'])
self.__termEdit.setToolTip(
'Use ${prog} substitution if needed.\n'
'Otherwise the command line is attached at the end.\n'
'E.g.: xterm -e /bin/bash -c "${prog}; /bin/bash" &')
def __populateProfile(self):
"""Populates profile"""
if self.profilerParams.nodeLimit < 0.0 or \
self.profilerParams.nodeLimit > 100.0:
self.profilerParams.nodeLimit = 1.0
self.__nodeLimitEdit.setText(str(self.profilerParams.nodeLimit))
if self.profilerParams.edgeLimit < 0.0 or \
self.profilerParams.edgeLimit > 100.0:
self.profilerParams.edgeLimit = 1.0
self.__edgeLimitEdit.setText(str(self.profilerParams.edgeLimit))
def __populateDebug(self):
"""Populates debug"""
self.__reportExceptionCheckBox.setChecked(
self.debuggerParams.reportExceptions)
self.__traceInterpreterCheckBox.setChecked(
self.debuggerParams.traceInterpreter)
self.__stopAtFirstCheckBox.setChecked(
self.debuggerParams.stopAtFirstLine)
self.__autoforkCheckBox.setChecked(self.debuggerParams.autofork)
self.__debugChildCheckBox.setChecked(self.debuggerParams.followChild)
self.__debugChildCheckBox.setEnabled(self.debuggerParams.autofork)
@staticmethod
def __populateTable(table, dictionary):
"""Populates the given table"""
for key, value in dictionary.items():
item = QTreeWidgetItem([key, value])
table.addTopLevelItem(item)
if dictionary:
table.setCurrentItem(table.topLevelItem(0))
def __setEnabledInheritedPlusEnv(self, value):
"""Disables/enables 'inherited and add' section controls"""
self.__inhPlusEnvTable.setEnabled(value)
self.__addInhButton.setEnabled(value)
self.__delInhButton.setEnabled(value)
self.__editInhButton.setEnabled(value)
def __setEnabledSpecificEnv(self, value):
"""Disables/enables 'specific env' section controls"""
self.__specEnvTable.setEnabled(value)
self.__addSpecButton.setEnabled(value)
self.__delSpecButton.setEnabled(value)
self.__editSpecButton.setEnabled(value)
def __createLayout(self):
"""Creates the dialog layout"""
self.resize(650, 300)
self.setSizeGripEnabled(True)
layout = QVBoxLayout(self) # top level layout
layout.addLayout(self.__getArgLayout())
layout.addWidget(self.__getWorkingDirGroupbox())
layout.addWidget(self.__getEnvGroupbox())
layout.addWidget(self.__getInterpreterGroupbox())
layout.addWidget(self.__getIOGroupbox())
if self.__action == PROFILE:
layout.addWidget(self.__getProfileLimitsGroupbox())
elif self.__action == DEBUG:
layout.addWidget(self.__getDebugGroupbox())
# Buttons at the bottom
buttonBox = QDialogButtonBox(self)
buttonBox.setOrientation(Qt.Horizontal)
buttonBox.setStandardButtons(QDialogButtonBox.Cancel)
self.__runButton = buttonBox.addButton(
RunDialog.ACTION_TO_VERB[self.__action],
QDialogButtonBox.AcceptRole)
self.__runButton.setDefault(True)
self.__runButton.clicked.connect(self.onAccept)
layout.addWidget(buttonBox)
buttonBox.rejected.connect(self.close)
def __getArgLayout(self):
"""Provides the arguments layout"""
argsLabel = QLabel("Command line arguments")
self.__argsEdit = QLineEdit()
self.__argsEdit.textChanged.connect(self.__argsChanged)
argsLayout = QHBoxLayout()
argsLayout.addWidget(argsLabel)
argsLayout.addWidget(self.__argsEdit)
return argsLayout
@staticmethod
def __getSizePolicy(item):
"""Provides a common size policy"""
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(item.sizePolicy().hasHeightForWidth())
return sizePolicy
def __getWorkingDirGroupbox(self):
"""Provides the working dir groupbox"""
workDirGroupbox = QGroupBox('Working Directory', self)
workDirGroupbox.setSizePolicy(self.__getSizePolicy(workDirGroupbox))
gridLayoutWD = QGridLayout(workDirGroupbox)
self.__scriptWDRButton = QRadioButton("&Use script location",
workDirGroupbox)
gridLayoutWD.addWidget(self.__scriptWDRButton, 0, 0)
self.__scriptWDRButton.clicked.connect(lambda: self.__wdDir(True))
self.__dirRButton = QRadioButton("Select &directory", workDirGroupbox)
gridLayoutWD.addWidget(self.__dirRButton, 1, 0)
self.__dirRButton.clicked.connect(lambda: self.__wdDir(False))
self.__dirEdit = QLineEdit(workDirGroupbox)
gridLayoutWD.addWidget(self.__dirEdit, 1, 1)
self.__dirEdit.textChanged.connect(self.__workingDirChanged)
self.__dirSelectButton = QPushButton("...", workDirGroupbox)
gridLayoutWD.addWidget(self.__dirSelectButton, 1, 2)
self.__dirSelectButton.clicked.connect(self.__selectDirClicked)
return workDirGroupbox
def __getEnvGroupbox(self):
"""Provides the environment groupbox"""
envGroupbox = QGroupBox('Environment', self)
envGroupbox.setSizePolicy(self.__getSizePolicy(envGroupbox))
layoutEnv = QVBoxLayout(envGroupbox)
self.__inheritParentRButton = QRadioButton("Inherit &parent",
envGroupbox)
self.__inheritParentRButton.clicked.connect(self.__inhClicked)
layoutEnv.addWidget(self.__inheritParentRButton)
self.__inheritParentPlusRButton = QRadioButton(
"Inherit parent and add/&modify", envGroupbox)
self.__inheritParentPlusRButton.clicked.connect(self.__inhPlusClicked)
layoutEnv.addWidget(self.__inheritParentPlusRButton)
hInhPlusLayout = QHBoxLayout()
self.__inhPlusEnvTable = QTreeWidget()
self.__inhPlusEnvTable.itemActivated.connect(
lambda x, y: self.__editInhClicked())
self.__tuneTable(self.__inhPlusEnvTable)
hInhPlusLayout.addWidget(self.__inhPlusEnvTable)
vInhPlusLayout = QVBoxLayout()
self.__addInhButton = QPushButton('Add')
self.__addInhButton.clicked.connect(self.__addInhClicked)
vInhPlusLayout.addWidget(self.__addInhButton)
self.__delInhButton = QPushButton('Delete')
self.__delInhButton.clicked.connect(self.__delInhClicked)
vInhPlusLayout.addWidget(self.__delInhButton)
self.__editInhButton = QPushButton("Edit")
self.__editInhButton.clicked.connect(self.__editInhClicked)
vInhPlusLayout.addWidget(self.__editInhButton)
hInhPlusLayout.addLayout(vInhPlusLayout)
layoutEnv.addLayout(hInhPlusLayout)
self.__specificRButton = QRadioButton("&Specific", envGroupbox)
self.__specificRButton.clicked.connect(self.__specClicked)
layoutEnv.addWidget(self.__specificRButton)
hSpecLayout = QHBoxLayout()
self.__specEnvTable = QTreeWidget()
self.__specEnvTable.itemActivated.connect(
lambda x, y: self.__editSpecClicked())
self.__tuneTable(self.__specEnvTable)
hSpecLayout.addWidget(self.__specEnvTable)
vSpecLayout = QVBoxLayout()
self.__addSpecButton = QPushButton('Add')
self.__addSpecButton.clicked.connect(self.__addSpecClicked)
vSpecLayout.addWidget(self.__addSpecButton)
self.__delSpecButton = QPushButton('Delete')
self.__delSpecButton.clicked.connect(self.__delSpecClicked)
vSpecLayout.addWidget(self.__delSpecButton)
self.__editSpecButton = QPushButton("Edit")
self.__editSpecButton.clicked.connect(self.__editSpecClicked)
vSpecLayout.addWidget(self.__editSpecButton)
hSpecLayout.addLayout(vSpecLayout)
layoutEnv.addLayout(hSpecLayout)
return envGroupbox
def __getInterpreterGroupbox(self):
"""Creates the interpreter groupbox"""
interpreterGroupbox = QGroupBox('Python Interpreter', self)
interpreterGroupbox.setSizePolicy(
self.__getSizePolicy(interpreterGroupbox))
gridLayoutInt = QGridLayout(interpreterGroupbox)
self.__inheritedInterpreterRButton = QRadioButton(
"&Inherited", interpreterGroupbox)
gridLayoutInt.addWidget(self.__inheritedInterpreterRButton, 0, 0)
self.__inheritedInterpreterRButton.clicked.connect(
lambda: self.__interpreter(True))
self.__customIntRButton = QRadioButton(
"Select interpreter (series &3)", interpreterGroupbox)
gridLayoutInt.addWidget(self.__customIntRButton, 1, 0)
self.__customIntRButton.clicked.connect(
lambda: self.__interpreter(False))
self.__intEdit = QLineEdit(interpreterGroupbox)
gridLayoutInt.addWidget(self.__intEdit, 1, 1)
self.__intEdit.textChanged.connect(self.__interpreterChanged)
self.__intSelectButton = QPushButton("...", interpreterGroupbox)
gridLayoutInt.addWidget(self.__intSelectButton, 1, 2)
self.__intSelectButton.clicked.connect(self.__selectIntClicked)
return interpreterGroupbox
def __getIOGroupbox(self):
"""Creates the interpreter groupbox"""
ioGroupbox = QGroupBox('Input/output', self)
ioGroupbox.setSizePolicy(self.__getSizePolicy(ioGroupbox))
gridLayoutInt = QGridLayout(ioGroupbox)
self.__redirectedRButton = QRadioButton("&Redirected I/O", ioGroupbox)
gridLayoutInt.addWidget(self.__redirectedRButton, 0, 0)
self.__redirectedRButton.clicked.connect(
lambda: self.__redirected(True))
self.__customTermRButton = QRadioButton("Custom terminal string",
ioGroupbox)
gridLayoutInt.addWidget(self.__customTermRButton, 1, 0)
self.__customTermRButton.clicked.connect(
lambda: self.__redirected(False))
self.__termEdit = QLineEdit(ioGroupbox)
gridLayoutInt.addWidget(self.__termEdit, 1, 1)
self.__termEdit.textChanged.connect(self.__customTermChanged)
return ioGroupbox
def __getProfileLimitsGroupbox(self):
"""Creates the profile limits groupbox"""
limitsGroupbox = QGroupBox('Profiler diagram limits (IDE wide)', self)
limitsGroupbox.setSizePolicy(self.__getSizePolicy(limitsGroupbox))
layoutLimits = QGridLayout(limitsGroupbox)
self.__nodeLimitEdit = QLineEdit()
self.__nodeLimitEdit.textEdited.connect(self.__setRunButtonProps)
self.__nodeLimitValidator = QDoubleValidator(0.0, 100.0, 2, self)
self.__nodeLimitValidator.setNotation(
QDoubleValidator.StandardNotation)
self.__nodeLimitEdit.setValidator(self.__nodeLimitValidator)
nodeLimitLabel = QLabel("Hide nodes below")
self.__edgeLimitEdit = QLineEdit()
self.__edgeLimitEdit.textEdited.connect(self.__setRunButtonProps)
self.__edgeLimitValidator = QDoubleValidator(0.0, 100.0, 2, self)
self.__edgeLimitValidator.setNotation(
QDoubleValidator.StandardNotation)
self.__edgeLimitEdit.setValidator(self.__edgeLimitValidator)
edgeLimitLabel = QLabel("Hide edges below")
layoutLimits.addWidget(nodeLimitLabel, 0, 0)
layoutLimits.addWidget(self.__nodeLimitEdit, 0, 1)
layoutLimits.addWidget(QLabel("%"), 0, 2)
layoutLimits.addWidget(edgeLimitLabel, 1, 0)
layoutLimits.addWidget(self.__edgeLimitEdit, 1, 1)
layoutLimits.addWidget(QLabel("%"), 1, 2)
return limitsGroupbox
def __getDebugGroupbox(self):
"""Creates the debug settings groupbox"""
dbgGroupbox = QGroupBox('Debugger (IDE wide)', self)
dbgGroupbox.setSizePolicy(self.__getSizePolicy(dbgGroupbox))
dbgLayout = QVBoxLayout(dbgGroupbox)
self.__reportExceptionCheckBox = QCheckBox("Report &exceptions")
self.__reportExceptionCheckBox.stateChanged.connect(
self.__onReportExceptionChanged)
self.__traceInterpreterCheckBox = QCheckBox("T&race interpreter libs")
self.__traceInterpreterCheckBox.stateChanged.connect(
self.__onTraceInterpreterChanged)
self.__stopAtFirstCheckBox = QCheckBox("Stop at first &line")
self.__stopAtFirstCheckBox.stateChanged.connect(
self.__onStopAtFirstChanged)
self.__autoforkCheckBox = QCheckBox("&Fork without asking")
self.__autoforkCheckBox.stateChanged.connect(self.__onAutoforkChanged)
self.__debugChildCheckBox = QCheckBox("Debu&g child process")
self.__debugChildCheckBox.stateChanged.connect(self.__onDebugChild)
dbgLayout.addWidget(self.__reportExceptionCheckBox)
dbgLayout.addWidget(self.__traceInterpreterCheckBox)
dbgLayout.addWidget(self.__stopAtFirstCheckBox)
dbgLayout.addWidget(self.__autoforkCheckBox)
dbgLayout.addWidget(self.__debugChildCheckBox)
return dbgGroupbox
@staticmethod
def __tuneTable(table):
"""Sets the common settings for a table"""
table.setAlternatingRowColors(True)
table.setRootIsDecorated(False)
table.setItemsExpandable(False)
table.setUniformRowHeights(True)
table.setSelectionMode(QAbstractItemView.SingleSelection)
table.setSelectionBehavior(QAbstractItemView.SelectRows)
table.setItemDelegate(NoOutlineHeightDelegate(4))
table.setHeaderLabels(["Variable", "Value"])
header = table.header()
header.setSortIndicator(0, Qt.AscendingOrder)
header.setSortIndicatorShown(True)
header.setSectionsClickable(True)
table.setSortingEnabled(True)
def __wdDir(self, useScriptLocation):
"""Working dir radio selection changed"""
self.__dirEdit.setEnabled(not useScriptLocation)
self.__dirSelectButton.setEnabled(not useScriptLocation)
self.runParams['useScriptLocation'] = useScriptLocation
self.__setRunButtonProps()
def __interpreter(self, useInherited):
"""Interpreter radio selection changed"""
self.__intEdit.setEnabled(not useInherited)
self.__intSelectButton.setEnabled(not useInherited)
self.runParams['useInherited'] = useInherited
self.__setRunButtonProps()
def __redirected(self, redirected):
"""I/O radio button changed"""
self.__termEdit.setEnabled(not redirected)
self.runParams['redirected'] = redirected
self.__setRunButtonProps()
def __customTermChanged(self, value):
"""Triggered when a custom terminal string changed"""
value = str(value).strip()
self.runParams['customTerminal'] = value
self.__setRunButtonProps()
def __argsChanged(self, value):
"""Triggered when cmd line args are changed"""
value = str(value).strip()
self.runParams['arguments'] = value
self.__setRunButtonProps()
def __workingDirChanged(self, value):
"""Triggered when a working dir value is changed"""
value = str(value)
self.runParams['specificDir'] = value
self.__setRunButtonProps()
def __interpreterChanged(self, value):
"""Triggered when an interpreter is changed"""
value = str(value).strip()
self.runParams['customInterpreter'] = value
self.__setRunButtonProps()
def __onCloseChanged(self, state):
"""Triggered when the close terminal check box changed"""
self.runParams['closeTerminal'] = state != 0
def __onReportExceptionChanged(self, state):
"""Triggered when exception report check box changed"""
self.debuggerParams.reportExceptions = state != 0
def __onTraceInterpreterChanged(self, state):
"""Triggered when trace interpreter changed"""
self.debuggerParams.traceInterpreter = state != 0
def __onStopAtFirstChanged(self, state):
"""Triggered when stop at first changed"""
self.debuggerParams.stopAtFirstLine = state != 0
def __onAutoforkChanged(self, state):
"""Triggered when autofork changed"""
self.debuggerParams.autofork = state != 0
self.__debugChildCheckBox.setEnabled(self.debuggerParams.autofork)
def __onDebugChild(self, state):
"""Triggered when debug child changed"""
self.debuggerParams.followChild = state != 0
def __argumentsOK(self):
"""Returns True if the arguments are OK"""
try:
parseCommandLineArguments(self.runParams['arguments'])
return True
except:
return False
def __dirOK(self):
"""Returns True if the working dir is OK"""
if self.__scriptWDRButton.isChecked():
return True
return os.path.isdir(self.__dirEdit.text())
def __interpreterOK(self):
"""Checks if the interpreter is OK"""
if self.__inheritedInterpreterRButton.isChecked():
return True
path = self.__intEdit.text().strip()
if not path:
return 'No executable specified'
try:
code = "from __future__ import print_function; " \
"import sys; print(sys.version_info.major)"
output = checkOutput(path + ' -c "' + code + '"', useShell=True)
output = output.strip()
if output != '3':
return 'Only python series 3 is supported ' \
'(provided: series ' + output + ')'
except:
return 'Error checking the provided interpreter'
def __ioOK(self):
"""Checks if the IO is correct"""
if self.__redirectedRButton.isChecked():
return True
term = self.__termEdit.text().strip()
if not term:
return 'No custom terminal line specified'
def __setRunButtonProps(self, _=None):
"""Enable/disable run button and set its tooltip"""
if not self.__argumentsOK():
self.__runButton.setEnabled(False)
self.__runButton.setToolTip("No closing quotation in arguments")
return
if not self.__dirOK():
self.__runButton.setEnabled(False)
self.__runButton.setToolTip("The given working "
"dir is not found")
return
interpreterOK = self.__interpreterOK()
if isinstance(interpreterOK, str):
self.__runButton.setEnabled(False)
self.__runButton.setToolTip('Invalid interpreter. ' +
interpreterOK)
return
ioOK = self.__ioOK()
if isinstance(ioOK, str):
self.__runButton.setEnabled(False)
self.__runButton.setToolTip('Invalid terminal. ' + ioOK)
return
if self.__nodeLimitEdit is not None:
txt = self.__nodeLimitEdit.text().strip()
try:
value = float(txt)
if value < 0.0 or value > 100.0:
raise Exception("Out of range")
except:
self.__runButton.setEnabled(False)
self.__runButton.setToolTip("The given node limit "
"is out of range")
return
if self.__edgeLimitEdit is not None:
txt = self.__edgeLimitEdit.text().strip()
try:
value = float(txt)
if value < 0.0 or value > 100.0:
raise Exception("Out of range")
except:
self.__runButton.setEnabled(False)
self.__runButton.setToolTip("The given edge limit "
"is out of range")
return
self.__runButton.setEnabled(True)
self.__runButton.setToolTip(
"Save parameters and " +
RunDialog.ACTION_TO_VERB[self.__action].lower() + " script")
def __selectDirClicked(self):
"""Selects the script working dir"""
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog | QFileDialog.ShowDirsOnly
dirName = QFileDialog.getExistingDirectory(
self, "Select the script working directory",
self.__dirEdit.text(), options=options)
if dirName:
self.__dirEdit.setText(os.path.normpath(dirName))
def __selectIntClicked(self):
"""Selects a python interpreter"""
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
path, _ = QFileDialog.getOpenFileName(
self, "Select python series 3 interpreter",
options=options)
if path:
self.__intEdit.setText(os.path.normpath(path))
self.__setRunButtonProps()
def __inhClicked(self):
"""Inerit parent env radio button clicked"""
self.__setEnabledInheritedPlusEnv(False)
self.__setEnabledSpecificEnv(False)
self.runParams['envType'] = RunParameters.InheritParentEnv
def __inhPlusClicked(self):
"""Inherit parent and add radio button clicked"""
self.__setEnabledInheritedPlusEnv(True)
self.__setEnabledSpecificEnv(False)
self.runParams['envType'] = RunParameters.InheritParentEnvPlus
if self.__inhPlusEnvTable.selectedIndexes():
self.__delInhButton.setEnabled(True)
self.__editInhButton.setEnabled(True)
else:
self.__delInhButton.setEnabled(False)
self.__editInhButton.setEnabled(False)
def __specClicked(self):
"""Specific env radio button clicked"""
self.__setEnabledInheritedPlusEnv(False)
self.__setEnabledSpecificEnv(True)
self.runParams['envType'] = RunParameters.SpecificEnvironment
if self.__specEnvTable.selectedIndexes():
self.__delSpecButton.setEnabled(True)
self.__editSpecButton.setEnabled(True)
else:
self.__delSpecButton.setEnabled(False)
self.__editSpecButton.setEnabled(False)
@staticmethod
def __delAndInsert(table, name, value):
"""Deletes an item by name if so; insert new; highlight it"""
for index in range(table.topLevelItemCount()):
item = table.topLevelItem(index)
if str(item.text(0)) == name:
table.takeTopLevelItem(index)
break
item = QTreeWidgetItem([name, value])
table.addTopLevelItem(item)
table.setCurrentItem(item)
return item
def __addInhClicked(self):
"""Add env var button clicked"""
dlg = EnvVarDialog()
if dlg.exec_() == QDialog.Accepted:
name = str(dlg.name)
value = str(dlg.value)
self.__delAndInsert(self.__inhPlusEnvTable, name, value)
self.runParams['additionToParentEnv'][name] = value
self.__delInhButton.setEnabled(True)
self.__editInhButton.setEnabled(True)
def __addSpecClicked(self):
"""Add env var button clicked"""
dlg = EnvVarDialog()
if dlg.exec_() == QDialog.Accepted:
name = str(dlg.name)
value = str(dlg.value)
self.__delAndInsert(self.__specEnvTable, name, value)
self.runParams['specificEnv'][name] = value
self.__delSpecButton.setEnabled(True)
self.__editSpecButton.setEnabled(True)
def __delInhClicked(self):
"""Delete the highlighted variable"""
if self.__inhPlusEnvTable.topLevelItemCount() == 0:
return
name = self.__inhPlusEnvTable.currentItem().text(0)
for index in range(self.__inhPlusEnvTable.topLevelItemCount()):
item = self.__inhPlusEnvTable.topLevelItem(index)
if name == item.text(0):
self.__inhPlusEnvTable.takeTopLevelItem(index)
break
del self.runParams['additionToParentEnv'][str(name)]
if self.__inhPlusEnvTable.topLevelItemCount() == 0:
self.__delInhButton.setEnabled(False)
self.__editInhButton.setEnabled(False)
else:
self.__inhPlusEnvTable.setCurrentItem(
self.__inhPlusEnvTable.topLevelItem(0))
def __delSpecClicked(self):
"""Delete the highlighted variable"""
if self.__specEnvTable.topLevelItemCount() == 0:
return
name = self.__specEnvTable.currentItem().text(0)
for index in range(self.__specEnvTable.topLevelItemCount()):
item = self.__specEnvTable.topLevelItem(index)
if name == item.text(0):
self.__specEnvTable.takeTopLevelItem(index)
break
del self.runParams['specificEnv'][str(name)]
if self.__specEnvTable.topLevelItemCount() == 0:
self.__delSpecButton.setEnabled(False)
self.__editSpecButton.setEnabled(False)
else:
self.__specEnvTable.setCurrentItem(
self.__specEnvTable.topLevelItem(0))
def __editInhClicked(self):
"""Edits the highlighted variable"""
if self.__inhPlusEnvTable.topLevelItemCount() == 0:
return
item = self.__inhPlusEnvTable.currentItem()
dlg = EnvVarDialog(str(item.text(0)), str(item.text(1)), self)
if dlg.exec_() == QDialog.Accepted:
name = str(dlg.name)
value = str(dlg.value)
self.__delAndInsert(self.__inhPlusEnvTable, name, value)
self.runParams['additionToParentEnv'][name] = value
def __editSpecClicked(self):
"""Edits the highlighted variable"""
if self.__specEnvTable.topLevelItemCount() == 0:
return
item = self.__specEnvTable.currentItem()
dlg = EnvVarDialog(str(item.text(0)), str(item.text(1)), self)
if dlg.exec_() == QDialog.Accepted:
name = str(dlg.name)
value = str(dlg.value)
self.__delAndInsert(self.__specEnvTable, name, value)
self.runParams['specificEnv'][name] = value
def onAccept(self):
"""Saves the selected terminal and profiling values"""
if self.__action == PROFILE:
self.profilerParams.nodeLimit = float(
self.__nodeLimitEdit.text())
self.profilerParams.edgeLimit = float(
self.__edgeLimitEdit.text())
self.accept()
| gpl-3.0 | 4,505,693,968,886,528,000 | 39.708671 | 78 | 0.63221 | false |
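# A minimal, standalone sketch of the "series 3" probe performed by __interpreterOK
# above: run the candidate interpreter and accept it only when it reports major
# version 3.  The dialog's checkOutput helper is assumed to behave like
# subprocess.check_output; this version uses the standard library directly.
import subprocess
def interpreter_is_python3(path):
    """Return None if 'path' runs Python 3, otherwise an error message."""
    code = "import sys; print(sys.version_info.major)"
    try:
        output = subprocess.check_output([path, "-c", code]).decode().strip()
    except Exception:
        return 'Error checking the provided interpreter'
    if output != '3':
        return 'Only python series 3 is supported (provided: series ' + output + ')'
    return None
# interpreter_is_python3('/usr/bin/python3') -> None on a typical installation.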
rouxcode/django-text-ckeditor | text_ckeditor/utils.py | 1 | 3968 | import random
import re
from lxml.html import fragments_fromstring, fragment_fromstring, tostring
from django.apps import apps
from django.utils.safestring import mark_safe
from . import conf
try:
basestring
except NameError:
basestring = str
class CKEditorHtml(object):
# TODO replace data-djangolink="true" constant
link_model = apps.get_model(
app_label=conf.LINK_MODULE,
model_name=conf.LINK_MODEL_NAME
)
def __init__(self, input):
self.input = input
self.empty_link = self.link_model()
def render(self):
output = ''
fragments = fragments_fromstring(self.input)
for fragment in fragments:
output += self._render_fragment(fragment)
if conf.CKEDITOR_HTML_MARK_SAFE:
output = mark_safe(output)
return output
def _render_fragment(self, fragment):
if isinstance(fragment, basestring):
fragment = fragment_fromstring('<p>' + fragment + '</p>')
django_links = fragment.cssselect('a[data-djangolink="true"]')
for link in django_links:
self._alter_link(link, fragment)
return tostring(fragment, encoding='unicode')
def _alter_link(self, link, fragment):
link.attrib.pop('data-djangolink')
kwargs = {}
for key, value in link.items():
if key.startswith('data-'):
field = key.replace('data-', '', 1)
value = link.attrib.pop(key)
if hasattr(self.empty_link, field) and value:
# TODO find a proper way to do this
try:
value = int(value)
field = '{0}_id'.format(field)
except Exception:
pass
kwargs.update({field: value})
obj = self.link_model(**kwargs)
href = obj.get_link()
if hasattr(obj, 'get_css_class'):
css_class = obj.get_css_class()
else:
css_class = ''
if 'mailto:' in href and conf.CKEDITOR_HTML_PROTECT_MAILTO:
if hasattr(obj, 'get_email'):
href = obj.get_email()
else:
href = href.replace('mailto:', '')
if link.text:
text = link.text
else:
text = href
mail = mail_to_js(href, link_text=text, css_class=css_class)
link_new = fragment_fromstring(mail)
link.addnext(link_new)
link.getparent().remove(link)
else:
link.set('href', href)
if hasattr(obj, 'get_target'):
link.set('target', obj.get_target())
if css_class:
link.set('class', css_class)
def mail_to_js(email, *args, **kwargs):
result = ''
text = kwargs.get('link_text', email)
css_class = kwargs.get('css_class', '')
email_array_content = ''
text_array_content = ''
def r(c): return '"' + str(ord(c)) + '",' # NOQA
for c in email:
email_array_content += r(c)
for c in text:
text_array_content += r(c)
id = "_tyjsdfss-" + str(random.randint(1000, 999999999999999999))
re_email = re.sub(r',$', '', email_array_content)
re_text = re.sub(r',$', '', text_array_content)
result = (
'<span id="%s"><script>'
'var _tyjsdf=[%s];'
'var _qplmks=[%s];'
'var content=('
'\'<a class="%s" href="mailto:\''
');'
'for(_i=0;_i<_tyjsdf.length;_i++){'
'content+=("&#"+_tyjsdf[_i]+";");'
'}'
'content+=(\'">\');'
'for(_i=0;_i<_qplmks.length;_i++){'
'content+=(\'&#\'+_qplmks[_i]+\';\');'
'}'
'content+=(\'</a>\');'
'document.getElementById(\'%s\').innerHTML=content;'
'</script></span>'
) % (id, re_email, re_text, css_class, id)
return mark_safe(result)
| mit | -5,744,339,242,675,329,000 | 32.066667 | 78 | 0.519153 | false |
chrisb87/advent_of_code_2016 | day11/day11.py | 1 | 3413 | import pdb
import itertools
def layout_to_grid(layout, elevator, objects):
text = []
for floor in reversed(xrange(len(layout))):
floor_text = ["F%d" % (floor + 1)]
if floor == elevator:
floor_text.append("E ")
else:
floor_text.append(". ")
floor_objects = [objects[n] if n in layout[floor] else ". "
for n, i in enumerate(objects)]
text.append(' '.join(floor_text + floor_objects))
return '\n'.join(text)
def next_moves(layout, current_floor):
results = []
next_floors = []
# can move up?
if current_floor < (len(layout) - 1):
next_floors.append(current_floor + 1)
# can move down?
if current_floor > 0:
next_floors.append(current_floor - 1)
for next_floor in next_floors:
for moved_objects in itertools.chain(
itertools.combinations(layout[current_floor], 1),
itertools.combinations(layout[current_floor], 2)
):
new_floor_layout = layout[next_floor] + moved_objects
if valid_floor(new_floor_layout):
new_layout = []
for floor_number in xrange(len(layout)):
if floor_number == current_floor:
new_layout.append(tuple(filter(
lambda o: o not in moved_objects,
[m for m in layout[floor_number]])))
elif floor_number == next_floor:
new_layout.append(new_floor_layout)
else:
new_layout.append(layout[floor_number])
results.append((tuple(new_layout), next_floor))
return results
def solve(floors, objects, max_depth):
elevator = 0
queue = []
seen = set()
path = [(floors, elevator)]
nexts = next_moves(floors, elevator)
queue.append((path, nexts))
seen.add(seen_hash(floors, elevator))
while queue:
path, nexts = queue.pop(0)
for floors_i, elevator_i in nexts:
hsh = seen_hash(floors_i, elevator_i)
if hsh in seen:
continue
else:
seen.add(hsh)
new_path = path + [(floors_i, elevator_i)]
if is_solution(floors_i, elevator_i):
return new_path[1:]
if len(new_path) < max_depth:
new_nexts = next_moves(floors_i, elevator_i)
queue.append((new_path, new_nexts))
def is_solution(layout, elevator):
if elevator != (len(layout) - 1):
return False
for floor in xrange(len(layout) - 1):
if len(layout[floor]) > 0:
return False
return True
def valid_floor(floor_layout):
generators = filter(lambda i: i%2==0, floor_layout)
chips = filter(lambda i: i%2==1, floor_layout)
unpaired_generators = []
unpaired_chips = []
for generator in generators:
if (generator + 1) not in chips:
unpaired_generators.append(generator)
for chip in chips:
if (chip - 1) not in generators:
unpaired_chips.append(chip)
if (len(unpaired_chips) > 0) and (len(unpaired_generators) > 0):
return False
else:
return True
def seen_hash(layout, elevator):
pairs = {}
for f_n, floor in enumerate(layout):
for obj in floor:
k = obj / 2
if k not in pairs:
pairs[k] = []
pairs[k].append(f_n)
pairs = sorted(map(lambda p: tuple(p), pairs.values()))
return (elevator, tuple(pairs))
if __name__ == '__main__':
objects = (
'PG', 'PM',
'TG', 'TM',
'MG', 'MM',
'RG', 'RM',
'CG', 'CM',
'EG', 'EM',
'DG', 'DM'
)
layout = (
(0,2,3,4,6,7,8,9,10,11,12,13),
(1,5),
(),
()
)
elevator = 0
print layout_to_grid(layout, elevator, objects)
print ""
solution = solve(layout, objects, max_depth = 100)
if solution:
print "%d step solution found" % len(solution)
else:
print "no solution"
| unlicense | -7,786,298,473,346,507,000 | 19.93865 | 65 | 0.63639 | false |
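# A standalone sketch of the pruning rule in valid_floor above: objects are indexed
# so that a generator sits at an even index and its chip at the next odd index
# (PG=0, PM=1, TG=2, TM=3, ...), and a floor is rejected when it holds both a
# generator missing its chip and a chip missing its generator.
def floor_is_safe(floor):
    generators = [i for i in floor if i % 2 == 0]
    chips = [i for i in floor if i % 2 == 1]
    unpaired_generators = [g for g in generators if (g + 1) not in chips]
    unpaired_chips = [c for c in chips if (c - 1) not in generators]
    return not (unpaired_generators and unpaired_chips)
assert floor_is_safe((0, 1))       # a chip together with its own generator
assert floor_is_safe((1, 3))       # chips alone on a floor
assert not floor_is_safe((0, 3))   # unpaired chip 3 next to unpaired generator 0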
germtb/LightBeam | src/medium.py | 1 | 1262 | from math import pi
from drawable import Drawable
from matrix2D import Matrix
from ray import Ray
class Medium(Drawable):
def __init__(self, refractive_index, polygon):
self.refractiveIndex = refractive_index
self.polygon = polygon
def on_hit(self, ray, hit_point):
pass
def draw(self, resolution=100):
self.polygon.draw(resolution)
class Detector(Medium):
def __init__(self, refractive_index, polygon):
super().__init__(refractive_index, polygon)
self.detections = {}
def on_hit(self, ray, hit_point):
if hit_point not in self.detections.keys():
self.detections[hit_point] = []
self.detections[hit_point].append(ray)
return []
class Reflector(Medium):
def __init__(self, refractive_index, polygon):
super().__init__(refractive_index, polygon)
def on_hit(self, ray, hit_point):
line = filter(lambda l: l.contains(hit_point), self.polygon.lines()).__next__()
alpha = line.angle
if alpha > pi:
alpha -= pi
reflection_matrix = Matrix.reflection_matrix(alpha)
new_direction = reflection_matrix.dot(ray.direction)
return [Ray(new_direction, hit_point, ray.energy, ray.phase)] | mit | 5,808,206,299,481,627,000 | 27.704545 | 87 | 0.631537 | false |
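# A standalone sketch of the bounce in Reflector.on_hit above: a 2-D direction is
# mirrored about a line at angle alpha using the standard reflection matrix
# [[cos 2a, sin 2a], [sin 2a, -cos 2a]].  Matrix.reflection_matrix is assumed to
# encode the same transform; numpy stands in for it here.
import numpy as np
def reflect(direction, alpha):
    c, s = np.cos(2 * alpha), np.sin(2 * alpha)
    return np.array([[c, s], [s, -c]]).dot(direction)
print(reflect(np.array([0.0, -1.0]), 0.0))  # a ray falling straight down leaves straight up: [0. 1.]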
zstars/weblabdeusto | server/src/weblab/core/data_retriever.py | 1 | 11519 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
import threading
import time
import voodoo.log as log
from weblab.data.experiments import CommandSent, ExperimentUsage, FileSent
import weblab.core.file_storer as file_storer
import weblab.data.command as Command
class TemporalInformationRetriever(threading.Thread):
"""
    This class continuously retrieves information about initial and finished experiments.
"""
PRINT_ERRORS = True
def __init__(self, cfg_manager, initial_store, finished_store, commands_store, completed_store, db_manager):
threading.Thread.__init__(self)
self.cfg_manager = cfg_manager
self.keep_running = True
self.initial_store = initial_store
self.finished_store = finished_store
self.commands_store = commands_store
self.completed_store = completed_store
self.iterations = 0
self.db_manager = db_manager
self.timeout = None # Take the default of TemporalInformationStore
self.entry_id2command_id = {}
self.entry_id2command_id_lock = threading.Lock()
self.setDaemon(True)
def run(self):
while self.keep_running:
try:
self.iterations += 1
self.iterate()
except:
if self.PRINT_ERRORS:
import traceback
traceback.print_exc()
log.log( TemporalInformationRetriever, log.level.Critical, "Exception iterating in TemporalInformationRetriever!!!")
log.log_exc( TemporalInformationRetriever, log.level.Critical )
def stop(self):
self.keep_running = False
def iterate(self):
self.iterate_initial()
if self.keep_running and self.commands_store.empty() and self.completed_store.empty():
self.iterate_finish()
if self.keep_running:
self.iterate_command()
if self.keep_running:
self.iterate_completed()
def iterate_initial(self):
initial_information = self.initial_store.get(timeout=self.timeout)
if initial_information is not None:
initial_timestamp = time.mktime(initial_information.initial_time.timetuple()) + initial_information.initial_time.microsecond / 1e6
end_timestamp = time.mktime(initial_information.end_time.timetuple()) + initial_information.end_time.microsecond / 1e6
request_info = initial_information.request_info
from_ip = request_info.pop('from_ip','<address not found>')
try:
username = request_info.pop('username')
except:
log.log( TemporalInformationRetriever, log.level.Critical, "Provided information did not contain some required fields (such as username or role). This usually means that the reservation has previously been expired. Provided request_info: %r; provided data: %r" % (request_info, initial_information), max_size = 10000)
log.log_exc( TemporalInformationRetriever, log.level.Critical )
return
usage = ExperimentUsage()
usage.start_date = initial_timestamp
usage.from_ip = from_ip
usage.experiment_id = initial_information.experiment_id
usage.reservation_id = initial_information.reservation_id
usage.coord_address = initial_information.exp_coordaddr
usage.request_info = initial_information.request_info
command_request = CommandSent(
Command.Command("@@@initial::request@@@"), initial_timestamp,
Command.Command(str(initial_information.client_initial_data)), end_timestamp)
command_response = CommandSent(
Command.Command("@@@initial::response@@@"), initial_timestamp,
Command.Command(str(initial_information.initial_configuration)), end_timestamp)
usage.append_command(command_request)
usage.append_command(command_response)
self.db_manager.store_experiment_usage(username, usage)
def iterate_completed(self):
completed_information = self.completed_store.get(timeout=self.timeout)
if completed_information is not None:
username, usage, callback = completed_information
self.db_manager.store_experiment_usage(username, usage)
callback()
def iterate_finish(self):
information = self.finished_store.get(timeout=self.timeout)
if information is not None:
reservation_id, obj, initial_time, end_time = information
if not self.commands_store.empty() or not self.completed_store.empty():
# They have higher priority
self.finished_store.put(reservation_id, obj, initial_time, end_time)
return
initial_timestamp = time.mktime(initial_time.timetuple()) + initial_time.microsecond / 1e6
end_timestamp = time.mktime(end_time.timetuple()) + end_time.microsecond / 1e6
command = CommandSent(
Command.Command("@@@finish@@@"), initial_timestamp,
Command.Command(str(obj)), end_timestamp)
if not self.db_manager.finish_experiment_usage(reservation_id, initial_timestamp, command):
# If it could not be added because the experiment id
# did not exist, put it again in the queue
self.finished_store.put(reservation_id, obj, initial_time, end_time)
time.sleep(0.01)
def iterate_command(self):
information = self.commands_store.get(timeout=self.timeout)
if information is not None:
all_information = [ information ]
# Retrieve all the remaining information to ensure that it it finally empty,
# with a maximum of 1000 registries per request
max_registries = 1000
counter = 0
while not self.commands_store.empty() and counter < max_registries:
counter += 1
information = self.commands_store.get(timeout=0)
if information is not None:
all_information.append(information)
command_pairs = []
command_responses = []
command_requests = {}
file_pairs = []
file_responses = []
file_requests = {}
backup_information = {}
backup_information_responses = {}
# Process
for information in all_information:
if information.is_command:
if information.is_before:
backup_information[information.entry_id] = information
command_requests[information.entry_id] = (information.reservation_id, CommandSent( information.payload, information.timestamp))
else:
backup_information_responses[information.entry_id] = information
command_request = command_requests.pop(information.entry_id, None)
if command_request is not None:
reservation_id, command_sent = command_request
complete_command = CommandSent(
command_sent.command, command_sent.timestamp_before,
information.payload, information.timestamp)
command_pairs.append((reservation_id, information.entry_id, complete_command))
else:
with self.entry_id2command_id_lock:
command_id = self.entry_id2command_id.pop(information.entry_id, None)
if command_id is None:
self.commands_store.put(information)
else:
command_responses.append((information.entry_id, command_id, information.payload, information.timestamp))
else:
if information.is_before:
backup_information[information.entry_id] = information
file_requests[information.entry_id] = (information.reservation_id, information.payload)
else:
backup_information_responses[information.entry_id] = information
file_request = file_requests.pop(information.entry_id, None)
if file_request is not None:
reservation_id, file_sent = file_request
if file_sent.is_loaded():
storer = file_storer.FileStorer(self.cfg_manager, reservation_id)
stored = storer.store_file(self, file_sent.file_content, file_sent.file_info)
file_path = stored.file_path
file_hash = stored.file_hash
else:
file_path = file_sent.file_path
file_hash = file_sent.file_hash
complete_file = FileSent(file_path, file_hash, file_sent.timestamp_before,
information.payload, information.timestamp)
file_pairs.append((reservation_id, information.entry_id, complete_file))
else:
with self.entry_id2command_id_lock:
command_id = self.entry_id2command_id.pop(information.entry_id, None)
if command_id is None:
self.commands_store.put(information)
else:
file_responses.append((information.entry_id, command_id, information.payload, information.timestamp))
# At this point, we have all the information processed and
# ready to be passed to the database in a single commit
mappings = self.db_manager.store_commands(command_pairs, command_requests, command_responses, file_pairs, file_requests, file_responses)
elements_to_backup = []
with self.entry_id2command_id_lock:
for entry_id in mappings:
command_id = mappings[entry_id]
if command_id is not None and command_id is not False:
self.entry_id2command_id[entry_id] = mappings[entry_id]
else:
elements_to_backup.append(entry_id)
for entry_id in elements_to_backup:
if entry_id in backup_information:
self.commands_store.put(backup_information[entry_id])
if entry_id in backup_information_responses:
self.commands_store.put(backup_information_responses[entry_id])
| bsd-2-clause | 2,681,901,111,945,592,000 | 47.805085 | 333 | 0.575534 | false |
HiroyukiSakai/Contour | contour/views.py | 1 | 25693 | # Contour Copyright (C) 2013-2014 Hiroyuki Sakai
#
# This file is part of Contour.
#
# Contour is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Contour is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Contour. If not, see <http://www.gnu.org/licenses/>.
"""Describes the views used in Contour.
.. moduleauthor:: Hiroyuki Sakai <[email protected]>
"""
import base64, os, pdb, urllib
from PIL import Image as PImage
from django.core.files import File
from django.http import Http404, HttpResponse
from django.shortcuts import render
import flickrapi, numpy as np
from scipy import misc, ndimage
from skimage import filter, io, transform
import secret
from .. import settings
from .forms import *
from .models import *
from .set_metrics import *
from .util import slugify
MISSING_PIXELS_PENALTY_FACTOR = 1.
SUPERFLUOUS_PIXELS_PENALTY_FACTOR = .1
def create_session(request, view_name, id):
"""Creates a user session.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param view_name: The name of the view to which the session should be associated.
:type view_name: string.
:param id: The id of the :class:`.models.Image` or :class:`.models.Track`.
:type id: int.
"""
if not request.session.get('is_playing'):
request.session['is_playing'] = True
request.session['view_name'] = view_name
request.session['id'] = id
def clear_session(request):
"""Clears all varibles of the user session.
:param request: The request object containing the user session.
:type request: :class:`django.http.HttpRequest`.
"""
request.session['is_playing'] = False
request.session['view_name'] = None
request.session['id'] = None
request.session['image_id'] = None
request.session['image_index'] = None
request.session['track_session_id'] = None
request.session['drawing_id'] = None
request.session['last_drawing_id'] = None
def destroy_session(request):
"""Destroys a currently running user session if such a request has been sent.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:returns: bool -- `True` if the session was cleared, otherwise `None`.
"""
if request.session.get('is_playing') and request.method == 'POST':
form = DiscardSessionForm(request.POST)
if form.is_valid() and form.cleaned_data['discard_session']:
clear_session(request)
return True
return
def check_session(request, view_name=None, id=None):
"""Checks if the requested URL is in canon with the currently running session. The user will be asked if he wants to discad his session if there's a discrepancy.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param view_name: The name of the requested view to which the session should be associated.
:type view_name: string.
:param id: The id of the requested :class:`.models.Image` or :class:`.models.Track`.
:type id: int.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
if request.session.get('is_playing') and (view_name != request.session.get('view_name') or id != request.session.get('id')):
return render(request, 'confirm_discard.html', {
'form': DiscardSessionForm(),
'view_name': request.session.get('view_name'),
'id': request.session.get('id'),
        })
def get_player(name):
"""Returns a :class:`.models.Player` object. A new player will be created if the requested player doesn't exist.
:param view_name: The name of the requested player.
:type view_name: string.
:returns: :class:`models.Player` -- The requested player.
"""
try:
player = Player.objects.get(name=name)
except Player.DoesNotExist:
player = Player(name=name)
player.save()
return player
def save_session(request):
"""Saves a track session. This function is called as soon as the player chooses to save his scores.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
"""
if request.session.get('is_playing') and request.method == 'POST':
form = SaveSessionForm(request.POST)
if form.is_valid() and form.cleaned_data['save_session']:
if request.session.get('drawing_id'):
drawing = Drawing.objects.get(id=request.session.get('drawing_id'))
player = get_player(form.cleaned_data['name'])
drawing.player = player
drawing.save()
elif request.session.get('track_session_id'):
track_session = TrackSession.objects.get(id=request.session.get('track_session_id'))
player = get_player(form.cleaned_data['name'])
track_session.player = player
track_session.save()
for drawing in Drawing.objects.filter(track_session=track_session):
drawing.player = player
drawing.save()
clear_session(request)
def process_image(request, image):
"""Creates an edge image and calculates the values needed for the score calculation if necessary. This function is called as soon as an image is requested.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param image: The image to be processed.
:type image: :class:`models.Image`.
"""
# detect edges
if not image.edge_image:
greyscale_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.image.name), as_grey=True)
# resize image
height = len(greyscale_image)
width = len(greyscale_image[0])
factor = 768.0 / height
greyscale_image = transform.resize(greyscale_image, [height * factor, width * factor])
# detect edges
edges = filter.canny(greyscale_image, sigma=image.canny_sigma, low_threshold=image.canny_low_threshold, high_threshold=image.canny_high_threshold)
# save edge image
temp_filename = '/tmp/' + request.session.session_key + '.png'
io.imsave(temp_filename, ~edges * 1.)
image.edge_image.save(slugify(os.path.splitext(os.path.basename(image.image.name))[0]) + '.png', File(open(temp_filename)))
os.remove(temp_filename)
if not image.dilated_edge_image:
edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.edge_image.name), as_grey=True)
edge_image = edge_image.astype(np.float64)
if edge_image.max() > 1.:
edge_image /= 255.
# map values greater .5 as edge
edge_image = (1. - edge_image) / .5
# save dilated edge image
temp_filename = '/tmp/' + request.session.session_key + '.png'
io.imsave(temp_filename, ~ndimage.binary_dilation(edge_image, iterations=2) * 1.)
image.dilated_edge_image.save(slugify(os.path.splitext(os.path.basename(image.image.name))[0]) + '.png', File(open(temp_filename)))
os.remove(temp_filename)
# save maximum distance (needed for score calculation)
if not image.max_distance:
ones = np.ones(image.edge_image.height * image.edge_image.width).reshape((image.edge_image.height, image.edge_image.width))
dilated_edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.dilated_edge_image.name), as_grey=True)
dilated_edge_image = dilated_edge_image.astype(np.float64)
if dilated_edge_image.max() > 1.:
dilated_edge_image /= 255.
image.max_distance = np.sum(np.absolute(ones - dilated_edge_image))
image.save()
def handle_finished_drawing(request):
"""This function is called as soon as the user finishes his drawing. It saves and associates his drawing to the running track session. It also assesses the score of the drawing.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:returns: :class:`models.Drawing` -- The created drawing object.
"""
if request.session.get('is_playing'):
if request.method == 'POST':
form = FinishDrawingForm(request.POST)
if form.is_valid() and form.cleaned_data['finish_drawing']:
# save drawing
image_data = base64.b64decode(request.POST['image'])
temp_filename = '/tmp/' + request.session.session_key + '.png'
file = open(temp_filename, 'wb')
file.write(image_data)
file.close()
im = PImage.open(temp_filename)
im = im.convert("RGB")
im.save(temp_filename, "PNG")
image = Image.objects.get(id=request.session.get('image_id'))
# calculate distance
greyscale_drawing = io.imread(temp_filename, as_grey=True)
greyscale_drawing = misc.imresize(greyscale_drawing, (image.edge_image.height, image.edge_image.width), mode='F')
dilated_edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.dilated_edge_image.name), as_grey=True)
greyscale_drawing = greyscale_drawing.astype(np.float64)
dilated_edge_image = dilated_edge_image.astype(np.float64)
# correct ranges of images if necessary
if greyscale_drawing.max() > 1.:
greyscale_drawing /= 255.
if dilated_edge_image.max() > 1.:
dilated_edge_image /= 255.
missing_pixels = np.clip(greyscale_drawing - dilated_edge_image, 0., 1.)
overlapping_pixels = np.clip((1. - greyscale_drawing) * (1. - dilated_edge_image), 0., 1.)
superfluous_pixels = np.clip(dilated_edge_image - greyscale_drawing, 0., 1.)
# number of pixels in the edge image which are not covered
distance = np.sum(missing_pixels) * MISSING_PIXELS_PENALTY_FACTOR;
# number of pixels in the drawing which are misplaced
distance += np.sum(superfluous_pixels) * SUPERFLUOUS_PIXELS_PENALTY_FACTOR;
score = max((image.max_distance - distance) / image.max_distance * 100, 0.)
# save drawing
drawing = Drawing(image=image, distance=distance, score=score)
drawing.drawing.save(request.session.session_key + '.png', File(open(temp_filename)))
# generate and save score image
score_image = np.zeros((image.edge_image.height, image.edge_image.width, 3), dtype=np.float64)
score_image[:, :, 0] += missing_pixels
score_image[:, :, 1] += missing_pixels
score_image[:, :, 2] += missing_pixels
score_image[:, :, 0] += superfluous_pixels
score_image[:, :, 1] += overlapping_pixels
io.imsave(temp_filename, score_image * 1.)
drawing.score_image.save(request.session.session_key + '_score.png', File(open(temp_filename)))
drawing.save()
# delete temporary file
os.remove(temp_filename)
return drawing
return
def handle_finished_edge_image(request):
"""This function is called as soon as the admin finishes his drawing.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:returns: :class:`models.Drawing` -- The created drawing object.
"""
if request.method == 'POST':
form = FinishEdgeImageForm(request.POST)
if form.is_valid() and form.cleaned_data['finish_edge_image']:
# save new edge image
image_data = base64.b64decode(request.POST['image'])
temp_filename = '/tmp/' + request.session.session_key + '.png'
file = open(temp_filename, 'wb')
file.write(image_data)
file.close()
image = Image.objects.get(id=form.cleaned_data['image_id'])
im = PImage.open(temp_filename)
im = im.convert("RGB")
im.save(temp_filename, "PNG")
edge_image = io.imread(temp_filename, as_grey=True)
edge_image = misc.imresize(edge_image, (image.edge_image.height, image.edge_image.width), mode='F')
edge_image = edge_image.astype(np.float64)
# correct ranges of images if necessary
if edge_image.max() > 1.:
edge_image /= 255.
# save edge image
image.edge_image.save(image.edge_image.name, File(open(temp_filename)))
# delete old computed values
image.max_distance = None
image.dilated_edge_image.delete()
image.save()
# delete temporary file
os.remove(temp_filename)
return image.edge_image
return
def handle_uploaded_file(request, form):
"""This function is called as soon as the user uploads a file. It saves his image on the filesystem.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:returns: :class:`models.Image` -- The created image object.
"""
file = request.FILES['file']
sigma = form.cleaned_data['sigma']
# save file
temp_filename = '/tmp/' + request.session.session_key
with open(temp_filename, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
image = Image(title=slugify(file.name), canny_sigma=sigma)
split = os.path.splitext(os.path.basename(file.name))
image.image.save(slugify(split[0]) + split[1], File(open(temp_filename)))
image.save()
os.remove(temp_filename)
return image
def handle_flickr_search(request, form):
"""This function is called as soon as the user submits a Flickr search query. It saves the found image on the filesystem.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:returns: :class:`models.Image` -- The created image object.
"""
query = form.cleaned_data['query']
sigma = form.cleaned_data['sigma']
flickr = flickrapi.FlickrAPI(secret.FLICKR_API_KEY)
for photo in flickr.walk(text=query, extras='1'):
temp_filename = '/tmp/' + request.session.session_key + '.jpg'
urllib.urlretrieve('http://farm' + photo.get('farm') + '.staticflickr.com/' + photo.get('server') + '/' + photo.get('id') + '_' + photo.get('secret') + '.jpg', temp_filename)
title = slugify(str(photo.get('title')))
image = Image(title=title, url='http://www.flickr.com/photos/' + photo.get('owner') + '/' + photo.get('id'), canny_sigma=sigma)
image.image.save(title[:64] + '.jpg', File(open(temp_filename)))
image.save()
os.remove(temp_filename)
return image
def index(request):
"""This is the view function for the home page.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
clear_canvas = destroy_session(request)
save_session(request)
discard_session_page = check_session(request)
if discard_session_page:
return discard_session_page
tracks = Track.objects.all()
track_highscores = {}
for track in tracks:
track_highscores[track.id] = {
'title': track.title,
'highscores': TrackSession.objects.filter(player__isnull=False, track=track).order_by('-score'),
}
return render(request, 'main_menu.html', {
'upload_file_form': UploadFileForm(),
'search_flickr_form': SearchFlickrForm(),
'tracks': tracks,
'track_highscores': track_highscores,
'single_drawing_highscores': Drawing.objects.filter(player__isnull=False, track_session__isnull=True),
'clear_canvas': clear_canvas,
})
def canvas(request, id=None):
"""This is the view function for a single drawing canvas. It is called for the file upload and Flickr game modes.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param id: The id of the requested :class:`.models.Image`.
:type id: int.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
view_name = 'Contour.contour.views.canvas'
if id:
id = long(id)
elif view_name == request.session.get('view_name'):
id = request.session.get('id')
    image = None
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
image = handle_uploaded_file(request, form)
if image:
id = image.id
form = SearchFlickrForm(request.POST)
if form.is_valid():
image = handle_flickr_search(request, form)
if image:
id = image.id
if not image:
try:
image = Image.objects.get(id=id)
except Image.DoesNotExist:
raise Http404
clear_canvas = destroy_session(request)
discard_session_page = check_session(request, view_name, id)
if discard_session_page:
return discard_session_page
if not request.session.get('is_playing'):
request.session['image_id'] = id
request.session['image_index'] = 0
create_session(request, view_name, id)
drawing = handle_finished_drawing(request)
if drawing:
request.session['last_drawing_id'] = drawing.id
request.session['dont_show_welcome'] = True
request.session['drawing_id'] = drawing.id
request.session['image_index'] = request.session.get('image_index') + 1
return HttpResponse(True)
last_drawing = None
if request.session.get('last_drawing_id'):
try:
last_drawing = Drawing.objects.get(id=request.session.get('last_drawing_id'))
except Drawing.DoesNotExist:
None
if request.method == 'POST':
form = RetryDrawingForm(request.POST)
if form.is_valid() and form.cleaned_data['retry_drawing']:
request.session['last_drawing_id'] = None
request.session['image_index'] = request.session.get('image_index') - 1
last_drawing.delete()
last_drawing = None
if request.session.get('image_index'):
return render(request, 'completed.html', {
'retry_drawing_form': RetryDrawingForm(),
'save_session_form': SaveSessionForm(),
'discard_session_form': DiscardSessionForm(),
'drawing': Drawing.objects.get(id=request.session.get('drawing_id')),
'last_drawing': last_drawing,
})
process_image(request, image)
return render(request, 'game.html', {
'finish_drawing_form': FinishDrawingForm(),
'retry_drawing_form': RetryDrawingForm(),
'image': image,
'score': 0,
'clear_canvas': clear_canvas,
'show_welcome': not request.session.get('dont_show_welcome', False),
'last_drawing': last_drawing,
})
def track(request, id):
"""This is the view function for track sessions.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param id: The id of the requested :class:`.models.Track`.
:type id: int.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
view_name = 'Contour.contour.views.track'
if id:
id = long(id)
try:
track = Track.objects.get(id=id)
except Track.DoesNotExist:
raise Http404
clear_canvas = destroy_session(request)
discard_session_page = check_session(request, view_name, id)
if discard_session_page:
return discard_session_page
if not request.session.get('is_playing'):
track_session = TrackSession(session_key=request.session.session_key, track=track, score=0)
track_session.save()
request.session['image_index'] = 0
request.session['track_session_id'] = track_session.id
else:
track_session = TrackSession.objects.get(id=request.session.get('track_session_id'))
create_session(request, view_name, id)
drawing = handle_finished_drawing(request)
if drawing:
drawing.track_session = track_session
drawing.track_session_index = request.session.get('image_index')
drawing.save()
request.session['last_drawing_id'] = drawing.id
request.session['dont_show_welcome'] = True
request.session['image_index'] = request.session.get('image_index') + 1
track_session.score += drawing.score
track_session.save()
return HttpResponse(True)
last_drawing = None
if request.session.get('last_drawing_id'):
try:
last_drawing = Drawing.objects.get(id=request.session.get('last_drawing_id'))
except Drawing.DoesNotExist:
None
if request.method == 'POST':
form = RetryDrawingForm(request.POST)
if form.is_valid() and form.cleaned_data['retry_drawing']:
request.session['last_drawing_id'] = None
request.session['image_index'] = request.session.get('image_index') - 1
track_session.score -= last_drawing.score
track_session.save()
last_drawing.delete()
last_drawing = None
try:
image = TrackImage.objects.filter(track=track)[request.session.get('image_index')].image
except IndexError:
return render(request, 'completed.html', {
'retry_drawing_form': RetryDrawingForm(),
'save_session_form': SaveSessionForm(),
'discard_session_form': DiscardSessionForm(),
'track_session': track_session,
'drawings': Drawing.objects.filter(track_session=track_session),
'last_drawing': last_drawing,
})
request.session['image_id'] = image.id
process_image(request, image)
return render(request, 'game.html', {
'finish_drawing_form': FinishDrawingForm(),
'retry_drawing_form': RetryDrawingForm(),
'image': image,
'score': track_session.score,
'clear_canvas': clear_canvas,
'image_number': request.session.get('image_index') + 1,
'image_count': TrackImage.objects.filter(track=track).count(),
'show_welcome': not request.session.get('dont_show_welcome', False),
'last_drawing': last_drawing,
})
def drawing(request, id):
"""This is the view function to view the score summary of single drawings.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param id: The id of the requested :class:`.models.Drawing`.
:type id: int.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
if id:
id = long(id)
try:
drawing = Drawing.objects.get(id=id, player__isnull=False)
except Drawing.DoesNotExist:
raise Http404
return render(request, 'drawing.html', {
'drawing': drawing,
})
def session(request, id):
"""This is the view function to view the score summary of track sessions.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param id: The id of the requested :class:`.models.TrackSession`.
:type id: int.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
if id:
id = long(id)
try:
track_session = TrackSession.objects.get(id=id, player__isnull=False)
except TrackSession.DoesNotExist:
raise Http404
return render(request, 'session.html', {
'track_session': track_session,
'drawings': Drawing.objects.filter(track_session=track_session),
})
def admin_edge_image(request, id):
"""This is the view function to edit the edge images in the admin section.
:param request: The request object containing the user request.
:type request: :class:`django.http.HttpRequest`.
:param id: The id of the requested :class:`.models.Image`.
:type id: int.
:returns: :class:`django.http.HttpResponse` -- The rendered template as response.
"""
if id:
id = long(id)
try:
image = Image.objects.get(id=id)
except Image.DoesNotExist:
raise Http404
process_image(request, image)
edge_image = handle_finished_edge_image(request)
if edge_image:
return HttpResponse(True)
return render(request, 'admin/edge_image.html', {
'form': FinishEdgeImageForm(),
'image': image,
})
| gpl-3.0 | -2,998,517,735,398,188,500 | 35.392351 | 182 | 0.63313 | false |
jdfekete/progressivis | progressivis/core/changemanager_literal.py | 1 | 1960 | "Change Manager for literal values (supporting ==)"
from .bitmap import bitmap
from .index_update import IndexUpdate
from .changemanager_base import BaseChangeManager
class LiteralChangeManager(BaseChangeManager):
"""
Manage changes that occured in a literal value between runs.
"""
VALUE = bitmap([0])
def __init__(self,
slot,
buffer_created=True,
buffer_updated=False,
buffer_deleted=True,
buffer_exposed=False,
buffer_masked=False):
super(LiteralChangeManager, self).__init__(
slot,
buffer_created,
buffer_updated,
buffer_deleted,
buffer_exposed,
buffer_masked)
self._last_value = None
def reset(self, name=None):
super(LiteralChangeManager, self).reset(name)
self._last_value = None
def compute_updates(self, data):
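        # The literal is modelled as a single pseudo-row (index 0): a change of
        # value is reported as created, updated or deleted on that row.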
last_value = self._last_value
changes = IndexUpdate()
if last_value == data:
return changes
if last_value is None:
if self.created.buffer:
changes.created.update(self.VALUE)
elif data is None:
if self.deleted.buffer:
changes.deleted.update(self.VALUE)
elif self.updated.buffer:
changes.updated.update(self.VALUE)
self._last_value = data
return changes
def update(self, run_number, data, mid):
# pylint: disable=unused-argument
assert isinstance(data, bitmap)
if run_number != 0 and run_number <= self._last_update:
return
changes = self.compute_updates(data)
self._last_update = run_number
self._row_changes.combine(changes,
self.created.buffer,
self.updated.buffer,
self.deleted.buffer)
| bsd-2-clause | 7,233,266,110,331,283,000 | 31.131148 | 64 | 0.557653 | false |
dteal/dteal-site | pages/links.py | 1 | 2392 | title = 'Links'
class Link:
def __init__(self, section, url, name):
self.section = section; self.url = url; self.name = name
links = [
Link('News', 'https://hackaday.com', 'Hackaday : viz, articles on crazy engineering projects'),
Link('News', 'https://news.ycombinator.com', 'Hacker News : a mostly-software-related news aggregator'),
Link('News', 'https://slashdot.org', 'Slashdot : "News for nerds, stuff that matters."'),
Link('News', 'https://www.nytimes.com', 'The New York Times : the canonical world news source'),
Link('References', 'https://en.wikipedia.org/wiki/Main_Page', 'Wikipedia : the nascent <em>Encyclopedia Galactica</em>'),
Link('References', 'https://oeis.org', 'The On-Line Encyclopedia of Integer Sequences (OEIS).'),
Link('Vendors', 'https://www.mcmaster.com/#', 'McMaster-Carr : <em>the</em> American hardware vendor'),
Link('Vendors', 'https://www.digikey.com', 'Digi-Key : McMaster-Carr for electronics'),
Link('Vendors', 'https://www.pololu.com', 'Pololu : small robot parts'),
Link('Vendors', 'https://www.dreamhost.com', 'Dreamhost : excellent web hosting'),
Link('Vendors', 'https://darksky.net', 'Dark Sky : sufficiently modern weather forecasting'),
Link('Journals', 'https://www.nature.com/nature/', 'Nature : the premier scientific journal'),
Link('Journals', 'https://www.nature.com/nnano/', 'Nature Nanotechnology : nanotechnology from the Nature publishing group'),
Link('Journals', 'https://onlinelibrary.wiley.com/journal/15214095', 'Advanced Materials : best of materials science'),
Link('Journals', 'https://onlinelibrary.wiley.com/journal/16163028', 'Advanced Functional Materials : more materials science'),
Link('Journals', 'https://pubs.acs.org/journal/ancac3', 'ACS Nano : nanoscience'),
Link('Libraries', 'https://www.gutenberg.org', 'Project Gutenberg : public domain e-books'),
Link('Libraries', 'https://pixabay.com', 'Pixabay : public domain images'),
Link('Libraries', 'http://magnatune.com', 'Magnatune : DRM-free music'),
]
content="""<header>
<h1>Links / Daniel Teal</h1>
<p>Herein lie portals to stupendously awesome websites I find useful.</p>
</header>"""
sections = []
for link in links:
if not link.section in sections:
sections.append(link.section)
content += '<div class="section">' + link.section + '</div>\n'
content += '<a href="' + link.url + '" class="link">' + link.name + '</a>\n'
| cc0-1.0 | 3,724,751,775,368,633,000 | 60.333333 | 127 | 0.693562 | false |
iotile/coretools | iotiletest/setup.py | 1 | 2754 | # This file is adapted from python code released by WellDone International
# under the terms of the LGPLv3. WellDone International's contact information is
# [email protected]
# http://welldone.org
#
# Modifications to this file from the original created at WellDone International
# are copyright Arch Systems Inc.
# Caveats and possible issues
# Mac OS X
# - when using a virtualenv, readline is not properly installed into the virtualenv
# and cannot be imported. You need to install it using easy_install as described here
# http://calvinx.com/tag/readline/
from setuptools import setup, find_packages
import version
setup(
name="iotile-test",
packages=find_packages(exclude=("test",)),
version=version.version,
license="LGPLv3",
description="IOTile Test Infrastructure",
install_requires=[
"iotile-core>=5.2",
],
python_requires=">=3.7,<4",
entry_points={'iotile.virtual_device': ['simple = iotile.mock.devices.simple_virtual_device:SimpleVirtualDevice',
'report_test = iotile.mock.devices.report_test_device:ReportTestDevice',
'realtime_test = iotile.mock.devices.realtime_test_device:RealtimeTestDevice',
'no_app = iotile.mock.devices.noapp:NoAppVirtualDevice',
'tracing_test = iotile.mock.devices.tracing_test_device:TracingTestDevice',
'sg_test = iotile.mock.devices.sg_test_device:SensorGraphTestDevice'],
'iotile.proxy': ['simple = iotile.mock.devices.simple_virtual_proxy',
'report_test = iotile.mock.devices.report_test_device_proxy'],
'console_scripts': ['prepare_device = iotile.test_scripts.prepare_device:main']},
author="Arch",
author_email="[email protected]",
url="https://github.com/iotile/coretools/iotilecore",
keywords=["iotile", "arch", "embedded", "hardware"],
classifiers=[
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
long_description="""\
IOTileTest
----------
A python package for testing IOTile based infrastructure including mocks for major portions.
See https://www.arch-iot.com.
"""
)
| gpl-3.0 | 5,863,704,992,430,129,000 | 44.147541 | 122 | 0.632171 | false |
rzzzwilson/morse_trainer | tests/test_proficiency.py | 1 | 1854 | #!/usr/bin/env python3
"""
Test the 'show proficiency' widget.
"""
import sys
sys.path.append('..')
from random import randint
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton,
QHBoxLayout, QVBoxLayout)
from proficiency import Proficiency
import utils
class ProficiencyExample(QWidget):
"""Application to demonstrate the Morse Trainer 'display' widget."""
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.alphabet_status = Proficiency(utils.Alphabetics)
self.numbers_status = Proficiency(utils.Numbers)
self.punctuation_status = Proficiency(utils.Punctuation)
redisplay_button = QPushButton('Redisplay', self)
hbox1 = QHBoxLayout()
hbox1.addWidget(self.alphabet_status)
hbox1.addWidget(self.numbers_status)
hbox1.addWidget(self.punctuation_status)
hbox2 = QHBoxLayout()
hbox2.addWidget(redisplay_button)
vbox = QVBoxLayout()
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
self.setLayout(vbox)
redisplay_button.clicked.connect(self.redisplayButtonClicked)
self.setGeometry(100, 100, 800, 200)
self.setWindowTitle('Example of Proficiency widget')
self.show()
def redisplayButtonClicked(self):
"""Regenerate some data (random) and display it."""
for gd in (self.alphabet_status,
self.numbers_status, self.punctuation_status):
# generate random data
new = {}
for char in gd.data:
new[char] = randint(0,100)/100
# set first character to 0
new[gd.data[0]] = 0
# redisplay
gd.setState(new)
app = QApplication(sys.argv)
ex = ProficiencyExample()
sys.exit(app.exec())
| mit | -354,986,496,323,980,800 | 27.090909 | 72 | 0.625674 | false |
UpSea/midProjects | BasicOperations/01_01_PyQt4/StandardSaveAndOpen.py | 1 | 1219 | from PyQt4 import QtGui,QtCore
import os
#from PyQt4.QtWidgets import QFileDialog
class MyWindow(QtGui.QWidget):
def __init__(self):
super(MyWindow,self).__init__()
self.myButton = QtGui.QPushButton(self)
self.myButton.setObjectName("myButton")
self.myButton.setText("Test")
self.myButton.clicked.connect(self.msg)
def msg(self):
        #directory1 = QtGui.QFileDialog.getExistingDirectory(self, "Select Folder", os.getcwd())  # starting path
        #print(directory1)
        #fileName1 = QtGui.QFileDialog.getOpenFileName(self, "Select File", os.getcwd(), "All Files (*);;Text Files (*.txt)")  # extension filter; separate patterns with double semicolons
        #print(fileName1)
        #files = QtGui.QFileDialog.getOpenFileNames(self, "Select Multiple Files", os.getcwd(), "All Files (*);;Text Files (*.txt)")
        #print(files)
        fileName2 = QtGui.QFileDialog.getSaveFileName(self, "Save File", os.getcwd(), "All Files (*);;Text Files (*.txt)")
print(fileName2)
if __name__=="__main__":
import sys
app=QtGui.QApplication(sys.argv)
myshow=MyWindow()
myshow.show()
sys.exit(app.exec_()) | mit | 6,061,444,577,844,532,000 | 41.296296 | 142 | 0.617003 | false |
muk-it/muk_dms | muk_dms_actions/__manifest__.py | 1 | 1913 | ###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Documents Actions
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
{
"name": "MuK Documents Actions",
"summary": """Custom File Operations""",
"version": '12.0.2.0.1',
"category": 'Document Management',
"license": "LGPL-3",
"website": "http://www.mukit.at",
'live_test_url': 'https://mukit.at/r/SgN',
"author": "MuK IT",
"contributors": [
"Mathias Markl <[email protected]>",
],
"depends": [
"muk_dms",
],
"data": [
"security/ir.model.access.csv",
"template/assets.xml",
"views/action.xml",
"views/file.xml",
"views/server_actions.xml",
"views/res_config_settings.xml",
],
"demo": [
"demo/action.xml",
],
"qweb": [
"static/src/xml/*.xml",
],
"images": [
'static/description/banner.png'
],
"external_dependencies": {
"python": [],
"bin": [],
},
"application": False,
"installable": True,
} | lgpl-3.0 | 1,862,036,526,688,027,000 | 30.377049 | 83 | 0.544694 | false |
debugger06/MiroX | tv/lib/frontends/widgets/gtk/tableviewcells.py | 1 | 10798 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""tableviewcells.py - Cell renderers for TableView."""
import gobject
import gtk
import pango
from miro import signals
from miro import infolist
from miro.frontends.widgets import widgetconst
from miro.frontends.widgets.gtk import drawing
from miro.frontends.widgets.gtk import wrappermap
from miro.frontends.widgets.gtk.base import make_gdk_color
class CellRenderer(object):
"""Simple Cell Renderer
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
def __init__(self):
self._renderer = self.build_renderer()
self.want_hover = False
def build_renderer(self):
return gtk.CellRendererText()
def setup_attributes(self, column, attr_map):
column.add_attribute(self._renderer, 'text', attr_map['value'])
def set_align(self, align):
if align == 'left':
self._renderer.props.xalign = 0.0
elif align == 'center':
self._renderer.props.xalign = 0.5
elif align == 'right':
self._renderer.props.xalign = 1.0
else:
raise ValueError("unknown alignment: %s" % align)
def set_color(self, color):
self._renderer.props.foreground_gdk = make_gdk_color(color)
def set_bold(self, bold):
font_desc = self._renderer.props.font_desc
if bold:
font_desc.set_weight(pango.WEIGHT_BOLD)
else:
font_desc.set_weight(pango.WEIGHT_NORMAL)
self._renderer.props.font_desc = font_desc
def set_text_size(self, size):
if size == widgetconst.SIZE_NORMAL:
self._renderer.props.scale = 1.0
elif size == widgetconst.SIZE_SMALL:
# FIXME: on 3.5 we just ignored the call. Always setting scale to
# 1.0 basically replicates that behavior, but should we actually
# try to implement the semantics of SIZE_SMALL?
self._renderer.props.scale = 1.0
else:
raise ValueError("unknown size: %s" % size)
def set_font_scale(self, scale_factor):
self._renderer.props.scale = scale_factor
class ImageCellRenderer(CellRenderer):
"""Cell Renderer for images
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
def build_renderer(self):
return gtk.CellRendererPixbuf()
def setup_attributes(self, column, attr_map):
column.add_attribute(self._renderer, 'pixbuf', attr_map['image'])
class GTKCheckboxCellRenderer(gtk.CellRendererToggle):
def do_activate(self, event, treeview, path, background_area, cell_area,
flags):
iter = treeview.get_model().get_iter(path)
self.set_active(not self.get_active())
wrappermap.wrapper(self).emit('clicked', iter)
gobject.type_register(GTKCheckboxCellRenderer)
class CheckboxCellRenderer(signals.SignalEmitter):
"""Cell Renderer for booleans
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
def __init__(self):
signals.SignalEmitter.__init__(self)
self.create_signal("clicked")
self._renderer = GTKCheckboxCellRenderer()
wrappermap.add(self._renderer, self)
self.want_hover = False
def set_control_size(self, size):
pass
def setup_attributes(self, column, attr_map):
column.add_attribute(self._renderer, 'active', attr_map['value'])
class GTKCustomCellRenderer(gtk.GenericCellRenderer):
"""Handles the GTK hide of CustomCellRenderer
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
def on_get_size(self, widget, cell_area=None):
wrapper = wrappermap.wrapper(self)
widget_wrapper = wrappermap.wrapper(widget)
style = drawing.DrawingStyle(widget_wrapper, use_base_color=True)
# NOTE: CustomCellRenderer.cell_data_func() sets up its attributes
# from the model itself, so we don't have to worry about setting them
# here.
width, height = wrapper.get_size(style, widget_wrapper.layout_manager)
x_offset = self.props.xpad
y_offset = self.props.ypad
width += self.props.xpad * 2
height += self.props.ypad * 2
if cell_area:
x_offset += cell_area.x
            y_offset += cell_area.y
extra_width = max(0, cell_area.width - width)
extra_height = max(0, cell_area.height - height)
x_offset += int(round(self.props.xalign * extra_width))
y_offset += int(round(self.props.yalign * extra_height))
return x_offset, y_offset, width, height
def on_render(self, window, widget, background_area, cell_area, expose_area,
flags):
widget_wrapper = wrappermap.wrapper(widget)
cell_wrapper = wrappermap.wrapper(self)
selected = (flags & gtk.CELL_RENDERER_SELECTED)
if selected:
if widget.flags() & gtk.HAS_FOCUS:
state = gtk.STATE_SELECTED
else:
state = gtk.STATE_ACTIVE
else:
state = gtk.STATE_NORMAL
if cell_wrapper.IGNORE_PADDING:
area = background_area
else:
xpad = self.props.xpad
ypad = self.props.ypad
area = gtk.gdk.Rectangle(cell_area.x + xpad, cell_area.y + ypad,
cell_area.width - xpad * 2, cell_area.height - ypad * 2)
context = drawing.DrawingContext(window, area, expose_area)
if (selected and not widget_wrapper.draws_selection and
widget_wrapper.use_custom_style):
# Draw the base color as our background. This erases the gradient
# that GTK draws for selected items.
window.draw_rectangle(widget.style.base_gc[state], True,
background_area.x, background_area.y,
background_area.width, background_area.height)
context.style = drawing.DrawingStyle(widget_wrapper,
use_base_color=True, state=state)
widget_wrapper.layout_manager.update_cairo_context(context.context)
hotspot_tracker = widget_wrapper.hotspot_tracker
if (hotspot_tracker and hotspot_tracker.hit and
hotspot_tracker.column == self.column and
hotspot_tracker.path == self.path):
hotspot = hotspot_tracker.name
else:
hotspot = None
if (self.path, self.column) == widget_wrapper.hover_info:
hover = widget_wrapper.hover_pos
hover = (hover[0] - xpad, hover[1] - ypad)
else:
hover = None
# NOTE: CustomCellRenderer.cell_data_func() sets up its attributes
# from the model itself, so we don't have to worry about setting them
# here.
widget_wrapper.layout_manager.reset()
cell_wrapper.render(context, widget_wrapper.layout_manager, selected,
hotspot, hover)
def on_activate(self, event, widget, path, background_area, cell_area,
flags):
pass
def on_start_editing(self, event, widget, path, background_area,
cell_area, flags):
pass
gobject.type_register(GTKCustomCellRenderer)
class CustomCellRenderer(CellRenderer):
"""Customizable Cell Renderer
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
IGNORE_PADDING = False
def __init__(self):
CellRenderer.__init__(self)
wrappermap.add(self._renderer, self)
def build_renderer(self):
return GTKCustomCellRenderer()
def setup_attributes(self, column, attr_map):
column.set_cell_data_func(self._renderer, self.cell_data_func,
attr_map)
def cell_data_func(self, column, cell, model, iter, attr_map):
cell.column = column
cell.path = model.get_path(iter)
row = model[iter]
        # Set attributes on self instead of cell. This works because cell is just
# going to turn around and call our methods to do the rendering.
for name, index in attr_map.items():
setattr(self, name, row[index])
def hotspot_test(self, style, layout, x, y, width, height):
return None
class ItemListRenderer(CustomCellRenderer):
"""Custom Renderer for InfoListModels
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
def cell_data_func(self, column, cell, model, it, attr_map):
item_list = wrappermap.wrapper(model).item_list
row = model.row_of_iter(it)
self.info = item_list.get_row(row)
self.attrs = item_list.get_attrs(self.info.id)
self.group_info = item_list.get_group_info(row)
cell.column = column
cell.path = row
class ItemListRendererText(CellRenderer):
"""Renderer for InfoListModels that only display text
https://develop.participatoryculture.org/index.php/WidgetAPITableView"""
def setup_attributes(self, column, attr_map):
column.set_cell_data_func(self._renderer, self.cell_data_func,
attr_map)
def cell_data_func(self, column, cell, model, it, attr_map):
item_list = wrappermap.wrapper(model).item_list
info = item_list.get_row(model.row_of_iter(it))
cell.set_property("text", self.get_value(info))
def get_value(self, info):
"""Get the text to render for this cell
:param info: ItemInfo to render.
"""
| gpl-2.0 | -5,552,584,922,404,404,000 | 39.291045 | 80 | 0.655214 | false |
kvar/ansible | lib/ansible/plugins/action/nxos.py | 1 | 8406 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import re
import sys
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.nxos.nxos import nxos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split('.')[-1]
self._config_module = True if module_name == 'nxos_config' else False
socket_path = None
persistent_connection = self._play_context.connection.split('.')[-1]
if (persistent_connection == 'httpapi' or self._task.args.get('provider', {}).get('transport') == 'nxapi') \
and module_name in ('nxos_file_copy', 'nxos_nxapi'):
return {'failed': True, 'msg': "Transport type 'nxapi' is not valid for '%s' module." % (module_name)}
if module_name == 'nxos_file_copy':
self._task.args['host'] = self._play_context.remote_addr
self._task.args['password'] = self._play_context.password
if self._play_context.connection == 'network_cli':
self._task.args['username'] = self._play_context.remote_user
elif self._play_context.connection == 'local':
self._task.args['username'] = self._play_context.connection_user
if module_name == 'nxos_install_os':
persistent_command_timeout = 0
persistent_connect_timeout = 0
connection = self._connection
if connection.transport == 'local':
persistent_command_timeout = C.PERSISTENT_COMMAND_TIMEOUT
persistent_connect_timeout = C.PERSISTENT_CONNECT_TIMEOUT
else:
persistent_command_timeout = connection.get_option('persistent_command_timeout')
persistent_connect_timeout = connection.get_option('persistent_connect_timeout')
display.vvvv('PERSISTENT_COMMAND_TIMEOUT is %s' % str(persistent_command_timeout), self._play_context.remote_addr)
display.vvvv('PERSISTENT_CONNECT_TIMEOUT is %s' % str(persistent_connect_timeout), self._play_context.remote_addr)
if persistent_command_timeout < 600 or persistent_connect_timeout < 600:
msg = 'PERSISTENT_COMMAND_TIMEOUT and PERSISTENT_CONNECT_TIMEOUT'
msg += ' must be set to 600 seconds or higher when using nxos_install_os module.'
msg += ' Current persistent_command_timeout setting:' + str(persistent_command_timeout)
msg += ' Current persistent_connect_timeout setting:' + str(persistent_connect_timeout)
return {'failed': True, 'msg': msg}
if persistent_connection in ('network_cli', 'httpapi'):
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(nxos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'nxos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.nxapi_implementation(provider, self._play_context)
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
# Match prompts ending in )# except those with (maint-mode)#
config_prompt = re.compile(r'^.*\((?!maint-mode).*\)#$')
out = conn.get_prompt()
while config_prompt.match(to_text(out, errors='surrogate_then_replace').strip()):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
@staticmethod
def nxapi_implementation(provider, play_context):
provider['transport'] = 'nxapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
if provider.get('use_ssl'):
provider['port'] = 443
else:
provider['port'] = 80
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('use_ssl') is None:
provider['use_ssl'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = True
return provider
| gpl-3.0 | 4,933,697,414,735,714,000 | 47.589595 | 138 | 0.621342 | false |
intelmakers/candy_machine | Python/listen.py | 1 | 1677 | #!/usr/bin/python
import collections
import mraa
import os
import sys
import time
# Import things for pocketsphinx
import pyaudio
import wave
import pocketsphinx as ps
import sphinxbase
my_dir = os.path.dirname(__file__)
dict_name = 8670
# Parameters for pocketsphinx
LMD = "{0}/dict/{1}.lm".format(my_dir, dict_name)
DICTD = "{0}/dict/{1}.dic".format(my_dir, dict_name)
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
RECORD_SECONDS = 3
PATH = 'output'
# if "ALL" in words:
# print "ALL"
if not os.path.exists(PATH):
os.makedirs(PATH)
pya = pyaudio.PyAudio()
speech_rec = ps.Decoder(lm=LMD, dict=DICTD)
def decodeSpeech(speech_rec, wav_file):
wav_file = file(wav_file,'rb')
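    # Skip the 44-byte RIFF/WAV header so pocketsphinx sees only raw PCM samples.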
wav_file.seek(44)
speech_rec.decode_raw(wav_file)
result = speech_rec.get_hyp()
return result[0]
def doListen():
# Record audio
stream = pya.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
#pya.terminate()
# Write .wav file
fn = "o.wav"
wf = wave.open(os.path.join(PATH, fn), 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(pya.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# Decode speech
wav_file = os.path.join(PATH, fn)
recognised = decodeSpeech(speech_rec, wav_file)
rec_words = recognised.split()
print "Recognized: {0}".format(recognised)
# Playback recognized word(s)
cm = 'espeak "'+recognised+'"'
os.system(cm)
return recognised
| cc0-1.0 | -399,519,975,128,383,940 | 20.5 | 100 | 0.698271 | false |
kietdlam/Dator | data_api/models.py | 1 | 7208 | from uuid import uuid4
from django.db import models
from django.contrib.auth.models import Group
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from data_api import file_provider
import pandas as pd
import django.utils.timezone as tmz
import pytz
import delorean
class SystemModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
uuid = models.CharField(max_length=128, db_index=True)
class Meta:
abstract = True
class Event(SystemModel):
"""
An event is used to record controller specific events for correlation with data signals.
"""
group = models.ManyToManyField(Group)
type = models.CharField(max_length=32)
info = models.TextField(null=True)
local_computer = models.ForeignKey('LocalComputer', null=True)
system = models.ForeignKey('System', null=True)
def __unicode__(self):
return "{}:{}".format(self.local_computer_id, self.type)
class System(SystemModel):
"""
A system is a group of related LocalComputers that coordinate actions and signals with each other.
"""
group = models.ManyToManyField(Group)
name = models.CharField(max_length=128)
timezone = models.CharField(max_length=32)
shifts = models.ManyToManyField('Shift')
def __unicode__(self):
return self.name
class Shift(SystemModel):
"""
A Shift is used to record the beginning and the end of an experiment
"""
name = models.CharField(max_length=128)
ended_at = models.DateTimeField(null=True)
class LocalComputer(SystemModel):
"""
    A LocalComputer is a CPU capable of loading a program, recording data from sensors and operating actuators.
"""
group = models.ManyToManyField(Group)
name = models.CharField(max_length=128)
registration_token = models.CharField(max_length=128)
secret_uuid = models.CharField(max_length=128)
system = models.ForeignKey('System', null=True)
command_refresh_sec = models.IntegerField(default=10)
is_running = models.BooleanField(default=False)
def __unicode__(self):
return self.name
# No-op
COMMAND_NOOP = 0
# shut down local_computer listener
COMMAND_DONE = 1
# load and start the indicated program on the local computer
COMMAND_LOAD_PROGRAM = 2
# stop the indicated program on the local computer
COMMAND_STOP_PROGRAM = 3
class Command(SystemModel):
"""
Commands are enumerated json messages for LocalComputers.
    When a command has been successfully executed, the is_executed flag is set to True.
"""
local_computer = models.ForeignKey('LocalComputer')
type = models.IntegerField(default=COMMAND_NOOP, db_index=True)
    json_command = models.CharField(max_length=512, null=True)
is_executed = models.BooleanField(default=False, db_index=True)
def __unicode__(self):
return "{}:{}:{}".format(self.local_computer_id, self.type, self.created_at)
class Program(SystemModel):
"""
A loadable script/code file that can be run on a local computer.
    A program will be run periodically with a pause of the indicated
    sleep_time between successive runs.
"""
group = models.ManyToManyField(Group)
code = models.TextField(null=True)
description = models.TextField(null=True)
name = models.CharField(max_length=128)
sleep_time_sec = models.FloatField(default=1.0)
def __unicode__(self):
return self.name
class Map(SystemModel):
"""
A map is a list of known signals with semantic meaning.
"""
group = models.ManyToManyField(Group)
name = models.CharField(max_length=128)
controller = models.ForeignKey('LocalComputer')
def __unicode__(self):
return self.name
ACTUATOR = 1
SENSOR = 2
class MapPoint(SystemModel):
map = models.ForeignKey('Map')
point_type = models.IntegerField(default=SENSOR)
name = models.CharField(max_length=128)
path = models.CharField(max_length=128)
controller = models.ForeignKey('LocalComputer')
def __unicode__(self):
return self.name
SIGNAL_PROVIDER = file_provider
BLOB_PROVIDER = file_provider
class Signal(SystemModel):
"""
A time signal of floats.
"""
group = models.ManyToManyField(Group)
name = models.CharField(max_length=128, db_index=True)
system = models.ForeignKey('System', null=True)
local_computer = models.ForeignKey('LocalComputer', null=True)
def __unicode__(self):
return self.name
def add_points(self, data_points):
"""Add points to the signal
:param data_points [[<float value>,<time in millisec>],...]
"""
SIGNAL_PROVIDER.startup()
SIGNAL_PROVIDER.append_data(self.uuid,
''.join(["[{:.15},{:.15}]".format(float(datum[0]),float(datum[1])) for datum in data_points]))
def get_data(self):
SIGNAL_PROVIDER.startup()
data = SIGNAL_PROVIDER.get_blob(self.uuid)
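        # The blob is a concatenation of "[value,timestamp]" pairs with no
        # separator between pairs, so split on the closing bracket.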
tokens = data.split("]")
points = []
for token in tokens:
if token != '':
ts = token[1:].split(",")
points.append((float(ts[0]), float(ts[1])))
return points
@classmethod
def millisec_to_utc(cls, millisec):
return tmz.datetime.fromtimestamp(float(millisec), tz=pytz.UTC)
@classmethod
def utc_to_millisec(cls, dt):
return delorean.Delorean(dt, timezone="UTC").epoch()
def get_time_series(self):
        points = self.get_data()
        values = [value for value, _ in points]
        dates = [self.millisec_to_utc(millisec) for _, millisec in points]
        return pd.TimeSeries(values, index=dates)
def clear(self):
SIGNAL_PROVIDER.startup()
SIGNAL_PROVIDER.clear(self.uuid)
class Setting(SystemModel):
group = models.ManyToManyField(Group)
key = models.CharField(max_length=128, db_index=True)
value = models.CharField(max_length=128)
local_computer = models.ForeignKey('LocalComputer', null=True)
system = models.ForeignKey('System', null=True)
def __unicode__(self):
return '{},{}'.format(self.key, self.value)
class Blob(SystemModel):
group = models.ManyToManyField(Group)
name = models.CharField(max_length=128, db_index=True)
system = models.ForeignKey('System', null=True)
local_computer = models.ForeignKey('LocalComputer', null=True)
def __unicode__(self):
return self.name
def get_data(self):
BLOB_PROVIDER.startup()
data = BLOB_PROVIDER.get_blob(self.uuid)
return data
def set_data(self, json_data):
BLOB_PROVIDER.startup()
BLOB_PROVIDER.write_blob(self.uuid, json_data)
@receiver(pre_save, sender=Command)
@receiver(pre_save, sender=LocalComputer)
@receiver(pre_save, sender=Map)
@receiver(pre_save, sender=MapPoint)
@receiver(pre_save, sender=Program)
@receiver(pre_save, sender=Shift)
@receiver(pre_save, sender=Signal)
@receiver(pre_save, sender=System)
@receiver(pre_save, sender=Setting)
@receiver(pre_save, sender=Event)
@receiver(pre_save, sender=Blob)
def set_uuid(sender, instance, **kwargs):
"""
Register all SystemModel derived classes to set uuid
"""
if not instance.uuid:
instance.uuid = str(uuid4())
| mit | -4,165,941,014,930,690,000 | 30.203463 | 130 | 0.677026 | false |
4dsolutions/Python5 | get_movie.py | 1 | 1400 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 11 2016
@author: Kirby Urner
Uses API documented at http://www.omdbapi.com/ to query
IMDB movie database.
"""
import requests
# from collections import namedtuple
import json
# Movie = namedtuple("Movie", "status_code content")
class Movie:
def __init__(self, status_code, json_data):
self.status = status_code
self.content = json.loads(str(json_data, encoding="UTF-8"))
def __str__(self):
obj = self.content # decoded json, a dict
the_title = "Title: {:^40}\n".format(obj["Title"])
the_actors = "Actors: \n"
for actor in obj["Actors"].split(","):
the_actors += ".....{:>30}\n".format(actor)
the_story = ("Story: \n")
the_story += obj["Plot"]
return the_title + the_actors + the_story
def __repr__(self):
return "Movie(Title={}, Released={})".format(self.content["Title"],
self.content["Released"])
def get_movie(url):
r = requests.get(url)
return Movie(r.status_code, r.content)
# test
the_url = "http://www.omdbapi.com/?i=tt0120338&plot=full&r=json" # GET
result = get_movie(the_url)
print(result.content)
print("-----------")
the_url = "http://www.omdbapi.com/?t=Titanic&y=1997&plot=full&r=json"
result = get_movie(the_url)
print(result)
| mit | -994,236,702,834,751,500 | 26.45098 | 76 | 0.575 | false |
kdeldycke/smile_openerp_matrix_widget | smile_matrix_demo/smile_profile.py | 1 | 1180 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 Smile. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class smile_activity_profile(osv.osv):
_name = 'smile.activity.profile'
_columns = {
'name': fields.char('Name', size=32, required=True),
}
smile_activity_profile()
| gpl-3.0 | -3,846,043,479,337,647,600 | 34.757576 | 78 | 0.605085 | false |
peterbe/headsupper | headsupper/base/models.py | 1 | 1493 | from django.db import models
from django.utils import timezone
from django.conf import settings
from jsonfield import JSONField
class Project(models.Model):
# e.g. mozilla/socorro
github_full_name = models.CharField(max_length=200)
# This'll match '^Headsup: ...'
trigger_word = models.CharField(default='Headsup', max_length=100)
case_sensitive_trigger_word = models.BooleanField(default=False)
github_webhook_secret = models.CharField(max_length=100)
# email(s)
send_to = models.TextField()
send_cc = models.TextField(blank=True, null=True)
send_bcc = models.TextField(blank=True, null=True)
# If this is set to true, don't react to individual commit
# payloads, but only on commits that are tags, and then
# find all the commits in that tag range.
on_tag_only = models.BooleanField(default=False)
on_branch = models.CharField(default='master', blank=True, max_length=200)
creator = models.ForeignKey(settings.AUTH_USER_MODEL)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.github_full_name)
class Payload(models.Model):
project = models.ForeignKey(Project, null=True)
payload = JSONField()
http_error = models.IntegerField()
messages = JSONField()
date = models.DateTimeField(default=timezone.now)
def replay(self):
raise NotImplementedError
| mpl-2.0 | 7,377,486,451,298,321,000 | 32.177778 | 78 | 0.701273 | false |
robtherad/BC-Mod | tools/search_privates.py | 1 | 4456 | #!/usr/bin/env python3
# Created by ACE 3 team, modified by BC: https://github.com/acemod/ACE3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
def get_private_declare(content):
priv_declared = []
srch = re.compile('private.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
srch = re.compile('params \[.*\]|PARAMS_[0-9].*|EXPLODE_[0-9]_PVT.*|DEFAULT_PARAM.*|KEY_PARAM.*|IGNORE_PRIVATE_WARNING.*')
priv_srch_declared = srch.findall(content)
priv_srch_declared = sorted(set(priv_srch_declared))
priv_dec_str = ''.join(priv_srch_declared)
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')
priv_split = srch.findall(priv_dec_str)
priv_split = sorted(set(priv_split))
priv_declared += priv_split;
srch = re.compile('(?i)[\s]*local[\s]+(_[\w\d]*)[\s]*=.*')
priv_local = srch.findall(content)
priv_local_declared = sorted(set(priv_local))
priv_declared += priv_local_declared;
return priv_declared
def check_privates(filepath):
bad_count_file = 0
def pushClosing(t):
closingStack.append(closing.expr)
closing << Literal( closingFor[t[0]] )
def popClosing():
closing << closingStack.pop()
with open(filepath, 'r') as file:
content = file.read()
    priv_use = []
# Regex search privates
srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ =,\^\-\+\/\*\%\}\]\)";]')
priv_use = srch.findall(content)
priv_use = sorted(set(priv_use))
# Private declaration search
priv_declared = get_private_declare(content)
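    # Drop SQF magic variables that never need an explicit private declaration.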
if '_this' in priv_declared: priv_declared.remove('_this')
if '_this' in priv_use: priv_use.remove('_this')
if '_x' in priv_declared: priv_declared.remove('_x')
if '_x' in priv_use: priv_use.remove('_x')
if '_forEachIndex' in priv_declared: priv_declared.remove('_forEachIndex')
if '_forEachIndex' in priv_use: priv_use.remove('_forEachIndex')
if '_foreachIndex' in priv_declared: priv_declared.remove('_foreachIndex')
if '_foreachIndex' in priv_use: priv_use.remove('_foreachIndex')
if '_foreachindex' in priv_declared: priv_declared.remove('_foreachindex')
if '_foreachindex' in priv_use: priv_use.remove('_foreachindex')
missing = []
for s in priv_use:
if s.lower() not in map(str.lower,priv_declared):
if s.lower() not in map(str.lower,missing):
missing.append(s)
if len(missing) > 0:
print (filepath)
private_output = 'private[';
first = True
for bad_priv in missing:
if first:
first = False
private_output = private_output + '"' + bad_priv
else:
private_output = private_output + '", "' + bad_priv
private_output = private_output + '"];';
print (private_output)
for bad_priv in missing:
print ('\t' + bad_priv)
bad_count_file = bad_count_file + 1
return bad_count_file
def main():
print("#########################")
print("# Search your Privates #")
print("#########################")
sqf_list = []
bad_count = 0
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".")
args = parser.parse_args()
for root, dirnames, filenames in os.walk('../addons' + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.sqf'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
bad_count = bad_count + check_privates(filename)
print ("Bad Count {0}".format(bad_count))
if __name__ == "__main__":
main()
| gpl-3.0 | -4,066,306,607,683,146,000 | 31.75 | 126 | 0.540638 | false |
ssebastianj/ia2013-tpi-rl | src/gui/qtgen/codetailsdialog.py | 1 | 5881 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Sebastian\Mis documentos\Programacion\Proyectos\IA2013TPIRL\gui\qt\IA2013TPIRLGUI\codetailsdialog.ui'
#
# Created: Tue Jul 09 15:27:46 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CODetailsDialog(object):
def setupUi(self, CODetailsDialog):
CODetailsDialog.setObjectName(_fromUtf8("CODetailsDialog"))
CODetailsDialog.setWindowModality(QtCore.Qt.WindowModal)
CODetailsDialog.resize(345, 490)
self.gridLayout_2 = QtGui.QGridLayout(CODetailsDialog)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_3 = QtGui.QLabel(CODetailsDialog)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.tblSecuenciaEstados = QtGui.QTableWidget(CODetailsDialog)
self.tblSecuenciaEstados.setObjectName(_fromUtf8("tblSecuenciaEstados"))
self.tblSecuenciaEstados.setColumnCount(3)
self.tblSecuenciaEstados.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tblSecuenciaEstados.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tblSecuenciaEstados.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tblSecuenciaEstados.setHorizontalHeaderItem(2, item)
self.verticalLayout.addWidget(self.tblSecuenciaEstados)
self.gridLayout_2.addLayout(self.verticalLayout, 2, 0, 1, 2)
self.btnCerrar = QtGui.QPushButton(CODetailsDialog)
self.btnCerrar.setDefault(True)
self.btnCerrar.setObjectName(_fromUtf8("btnCerrar"))
self.gridLayout_2.addWidget(self.btnCerrar, 4, 1, 1, 1)
self.line = QtGui.QFrame(CODetailsDialog)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_2.addWidget(self.line, 3, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 4, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
self.gridLayout_2.addItem(spacerItem1, 1, 0, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(CODetailsDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(125, 0))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.lblCOCantidadEstados = QtGui.QLabel(CODetailsDialog)
self.lblCOCantidadEstados.setObjectName(_fromUtf8("lblCOCantidadEstados"))
self.gridLayout.addWidget(self.lblCOCantidadEstados, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(CODetailsDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setMinimumSize(QtCore.QSize(125, 0))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.lblCOSumValQ = QtGui.QLabel(CODetailsDialog)
self.lblCOSumValQ.setObjectName(_fromUtf8("lblCOSumValQ"))
self.gridLayout.addWidget(self.lblCOSumValQ, 1, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 2)
self.retranslateUi(CODetailsDialog)
QtCore.QObject.connect(self.btnCerrar, QtCore.SIGNAL(_fromUtf8("clicked()")), CODetailsDialog.accept)
QtCore.QMetaObject.connectSlotsByName(CODetailsDialog)
def retranslateUi(self, CODetailsDialog):
CODetailsDialog.setWindowTitle(_translate("CODetailsDialog", "Detalles de camino óptimo", None))
self.label_3.setText(_translate("CODetailsDialog", "Secuencia de estados:", None))
item = self.tblSecuenciaEstados.horizontalHeaderItem(0)
item.setText(_translate("CODetailsDialog", "Estado", None))
item = self.tblSecuenciaEstados.horizontalHeaderItem(1)
item.setText(_translate("CODetailsDialog", "Coordenadas", None))
item = self.tblSecuenciaEstados.horizontalHeaderItem(2)
item.setText(_translate("CODetailsDialog", "Valor Q", None))
self.btnCerrar.setText(_translate("CODetailsDialog", "&Cerrar", None))
self.label.setText(_translate("CODetailsDialog", "Cantidad de estados:", None))
self.lblCOCantidadEstados.setText(_translate("CODetailsDialog", "-", None))
self.label_2.setText(_translate("CODetailsDialog", "Sumatoria de valores Q:", None))
self.lblCOSumValQ.setText(_translate("CODetailsDialog", "-", None))
| mit | 4,508,036,040,088,869,400 | 52.944954 | 158 | 0.713435 | false |
CraigBryan/pellinglab_twitter_microscope | src/production_files/receivers/gui_receiver.py | 1 | 2402 | '''
Created on Sep 6, 2013
@author: Craig Bryan
'''
from receiver import Receiver
class GuiReceiver(Receiver):
"""
A receiver that allows the router to receive and send data to a local GUI.
This is to allow local requests for images, without using the Twitter-based
interface. No GUI is currently implemented, so this acts as a hook for later.
Attributes:
gui: The gui this receiver is communicating with. The gui must have a post
method that allows data to be displayed, and a retreive_requests
method that allows pulling of a list of requests for images or information.
"""
def __init__(self, router, r_id, gui):
"""
Args:
router: A reference to the router that this receiver is associated with.
r_id: The string that the router refers to this receiver with.
gui: The gui this receiver is communicating with.
"""
super(GuiReceiver, self).__init__(router, r_id)
self.gui = gui
def process_transaction(self, transaction):
"""
The method the router calls when a transaction is routed to this receiver.
Args:
transaction: The transaction that is being processed by the receiver.
Commands:
            update: Pull any new requests from the GUI and turn them into new
                transactions.
post: Send data to the GUI to display.
"""
if transaction.command == "update":
requests = self.gui.retrieve_requests()
while len(requests) > 0:
#requests are routed to the translator
self.router.create_transaction(origin = self.r_id, to_id = "translator",
command = "parse", command_arg = requests.popleft())
transaction.process(success = True, finished = True, allow_log = False)
elif transaction.command == "post":
self.gui.send_data(transaction.message)
transaction.process(success = True, finished = True)
else:
transaction.log(info = "Unknown command passed to gui receiver: %s"
% transaction.command)
| mit | -4,373,891,099,005,391,400 | 37.126984 | 94 | 0.56453 | false |
leppa/home-assistant | homeassistant/components/idteck_prox/__init__.py | 1 | 2205 | """Component for interfacing RFK101 proximity card readers."""
import logging
from rfk101py.rfk101py import rfk101py
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "idteck_prox"
EVENT_IDTECK_PROX_KEYCARD = "idteck_prox_keycard"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_NAME): cv.string,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the IDTECK proximity card component."""
conf = config[DOMAIN]
for unit in conf:
host = unit[CONF_HOST]
port = unit[CONF_PORT]
name = unit[CONF_NAME]
try:
reader = IdteckReader(hass, host, port, name)
reader.connect()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, reader.stop)
except OSError as error:
_LOGGER.error("Error creating %s. %s", name, error)
return False
return True
class IdteckReader:
"""Representation of an IDTECK proximity card reader."""
def __init__(self, hass, host, port, name):
"""Initialize the reader."""
self.hass = hass
self._host = host
self._port = port
self._name = name
self._connection = None
def connect(self):
"""Connect to the reader."""
self._connection = rfk101py(self._host, self._port, self._callback)
def _callback(self, card):
"""Send a keycard event message into HASS whenever a card is read."""
self.hass.bus.fire(
EVENT_IDTECK_PROX_KEYCARD, {"card": card, "name": self._name}
)
def stop(self):
"""Close resources."""
if self._connection:
self._connection.close()
self._connection = None
| apache-2.0 | -3,721,587,121,090,566,000 | 24.941176 | 77 | 0.559637 | false |
django-nonrel/django-nonrel | docs/conf.py | 1 | 8711 | # -*- coding: utf-8 -*-
#
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["djangodocs"]
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.7'
# The full version, including alpha/beta/rc tags.
release = '1.3.7'
# The next version to be released
django_next_version = '1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Sphinx will recurse into subversion configuration folders and try to read
# any document file within. These should be ignored.
# Note: exclude_dirnames is new in Sphinx 0.5
exclude_dirnames = ['.svn']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
html_translator_class = "djangodocs.DjangoHTMLTranslator"
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
#latex_documents = []
latex_documents = [
('contents', 'django.tex', u'Django Documentation',
u'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'django', 'Django Documentation', ['Django Software Foundation'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Django'
epub_author = u'Django Software Foundation'
epub_publisher = u'Django Software Foundation'
epub_copyright = u'2010, Django Software Foundation'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| bsd-3-clause | -8,922,529,584,521,349,000 | 31.3829 | 85 | 0.711514 | false |
magenta-aps/mox | python_agents/notification_service/notify_to_amqp_service.py | 1 | 2455 | # Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk.
# Contact: [email protected].
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" Simple class to relay messages from PostgreSQL notifications
into an AMQP-queue """
import select
import json
import pika
import psycopg2
from os import getenv
def AMQPNotifier(database, user, password, host):
""" Main notification thread.
:param database: The PostgreSQL database
:param user: The PostgreSQL user
:param password: The PostgreSQL password
:param host: The PostgreSQL hostname
"""
pg_conn = psycopg2.connect(database=database, user=user,
password=password, host=host)
pg_cursor = pg_conn.cursor()
pika_params = pika.ConnectionParameters('localhost')
pika_connection = pika.BlockingConnection(pika_params)
amqp = pika_connection.channel()
amqp.queue_declare(queue='mox.notifications')
pg_cursor.execute("LISTEN mox_notifications;")
pg_conn.poll()
pg_conn.commit()
while True:
if select.select([pg_conn], [], [], 60) == ([], [], []):
pass
else:
pg_conn.poll()
pg_conn.commit()
while pg_conn.notifies:
notify = pg_conn.notifies.pop(0)
payload_dict = json.loads(notify.payload)
table = payload_dict['table']
objekttype = table[0:table.find('registrering')-1]
objektID = payload_dict['data'][objekttype + '_id']
registrering = payload_dict['data']['registrering']
livscykluskode = registrering['livscykluskode']
amqp_payload = {'beskedtype': 'Notification',
'objektID': objektID,
'objekttype': objekttype,
'livscykluskode': livscykluskode}
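                # Illustrative shape of the published message (the field values
                # shown here are placeholders, not taken from a real event):
                # {'beskedtype': 'Notification',
                #  'objektID': '<id of the registered object>',
                #  'objekttype': '<table name without the "_registrering" suffix>',
                #  'livscykluskode': '<lifecycle code from the registration>'}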
amqp.basic_publish(exchange='',
routing_key='mox.notifications',
body=json.dumps(amqp_payload))
if __name__ == '__main__':
amqp_notifier = AMQPNotifier(
database=getenv("DB_NAME", "mox"),
user=getenv("DB_USER", "mox"),
password=getenv("DB_PASS", "mox"),
host=getenv("DB_HOST", "localhost")
)
| mpl-2.0 | 3,567,605,309,189,501,400 | 35.102941 | 69 | 0.58167 | false |
NervanaSystems/neon | examples/ssd/layer.py | 1 | 15145 | from neon.layers.layer import Layer, ParameterLayer
import numpy as np
from neon.transforms import Softmax
from neon.initializers.initializer import Constant
import math
from collections import OrderedDict
class Normalize(ParameterLayer):
def __init__(self, init=Constant(20.0), name=None):
super(Normalize, self).__init__(name=name, init=init)
self.bottom_data = None
self.norm_data = None
self.owns_outputs = True
def allocate(self, shared_outputs=None):
super(Normalize, self).allocate()
self.outputs_view = self.outputs.reshape(self.channels, -1)
def configure(self, in_obj):
self.prev_layer = in_obj
self.in_shape = in_obj.out_shape
self.out_shape = in_obj.out_shape
assert len(self.in_shape) == 3, "Normalize layer must have (C, H, W) input"
self.channels = self.in_shape[0]
self.weight_shape = (self.channels, 1)
return self
def fprop(self, inputs, inference=True):
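        # L2-normalise each spatial position across the channel axis, then scale by
        # the learned per-channel weight W (the ParseNet/SSD-style Normalize layer).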
self.bottom_data = inputs.reshape(self.channels, -1)
self.norm_data = self.be.sqrt(self.be.sum(self.be.square(self.bottom_data), axis=0))
self.outputs_view[:] = self.W * self.bottom_data / self.norm_data
return self.outputs
def bprop(self, error, alpha=1.0, beta=0.0):
error_rs = error.reshape(self.channels, -1)
self.dW[:] = self.be.sum(self.outputs_view*error_rs, axis=1)/self.W
self.deltas_view = self.deltas.reshape(self.channels, -1)
        # we may be able to join these back together into 1 assign call
self.deltas_view[:] = -self.outputs_view * self.be.sum(self.bottom_data * error_rs, axis=0)
self.deltas_view[:] = self.deltas_view / self.be.square(self.norm_data)
# this is separate
self.deltas_view[:] += self.W * error_rs / self.norm_data
return self.deltas
class ConcatTranspose(Layer):
"""
Takes a list of inputs, each with a shape CHWN, and transposes
to HWCN, then concatenates along the HWC axis.
"""
def __init__(self, name=None, parallelism='Disabled'):
super(ConcatTranspose, self).__init__(name, parallelism=parallelism)
def configure(self, in_obj):
# we expect a list of layers
assert isinstance(in_obj, list)
self.in_shapes = [l.out_shape for l in in_obj]
self.num_elements = np.sum(np.prod(l.out_shape) for l in in_obj)
self.out_shape = (self.num_elements)
# store the number of channels from the layer shapes
self.channels = [l.out_shape[0] for l in in_obj]
def allocate(self, shared_outputs=None):
self.outputs = self.be.iobuf((self.num_elements))
# create destination delta buffers
self.deltas = [self.be.iobuf(in_shape) for in_shape in self.in_shapes]
def fprop(self, inputs):
start = 0
for (layer, num_channels) in zip(inputs, self.channels):
# reshape (C, HW, N)
rlayer = layer.reshape((num_channels, -1, self.be.bsz))
# transpose to (HW, C, N) and store in buffer
C, HW, N = rlayer.shape
end = start + C * HW
output_view = self.outputs[start:end, :].reshape((HW, C, N))
self.be.copy_transpose(rlayer, output_view, axes=(1, 0, 2))
start = end
return self.outputs
def bprop(self, error):
# error is in (HWC, N)
# need to transpose to (CHW, N) and unstack
start = 0
for (delta, num_channels) in zip(self.deltas, self.channels):
# reshape (C, HW, N)
rdelta = delta.reshape((num_channels, -1, self.be.bsz))
C, HW, N = rdelta.shape
end = start + C * HW
error_view = error[start:end, :].reshape((HW, C, N))
self.be.copy_transpose(error_view, rdelta, axes=(1, 0, 2))
start = end
return self.deltas
class DetectionOutput(Layer):
def __init__(self, num_classes, nms_threshold=0.45,
nms_topk=400, topk=200, threshold=0.01, name=None):
super(DetectionOutput, self).__init__(name)
self.num_classes = num_classes
self.nms_threshold = nms_threshold
self.nms_topk = nms_topk
self.topk = topk
        self.threshold = threshold
self.softmax = Softmax(axis=1)
def configure(self, in_obj):
self.out_shape = (self.topk, 5)
# we expect a list of layers from the SSD model
(leafs, prior_boxes) = in_obj
# store total number of boxes
self.num_boxes = np.sum([prior_box.num_boxes for prior_box in prior_boxes])
def allocate(self, shared_outputs=None):
self.conf = self.be.iobuf((self.num_boxes * self.num_classes))
self.loc = self.be.iobuf((self.num_boxes * 4))
# intermediate buffer for compute
# these are needed to keep compute on the GPU
# 1. proposals for each class and image
# 2. store detections after sort/threshold
# 3. store softmax
self.proposals = self.be.empty((self.num_boxes, 4))
self.detections = self.be.empty((self.nms_topk, 5))
self.scores = self.be.empty((self.num_boxes, self.num_classes))
def fprop(self, inputs, inference=True):
# assumes the inputs are a tuple of (outputs, prior_boxes),
# where outputs is a vector of outputs from the model.
# flatten the nested vector generated by tree-in-tree
# also reorder the list in: 4_3, fc7, conv6, conv7, conv8, conv9
# x = self.reorder(inputs[0])
self.loc = inputs[0][0]
self.conf = inputs[0][1]
prior_boxes = inputs[1]
# reshape loc from (HWC, N) to (HWK, 4, N)
# reshape conf from (HWC, N) to (HWK, 21, N)
conf_view = self.conf.reshape((-1, self.num_classes, self.be.bsz))
loc_view = self.loc.reshape((-1, 4, self.be.bsz))
# for mkl backend optimization
if self.be.is_mkl():
return self.be.detectionOutput_fprop(conf_view, loc_view, self.detections,
prior_boxes, self.proposals,
self.nms_topk, self.topk,
self.threshold, self.nms_threshold)
# convert the prior boxes to bbox predictions by applying
# the loc regression targets
# process each image individually
batch_all_detections = [None] * self.be.bsz
for k in range(self.be.bsz):
self.bbox_transform_inv(prior_boxes, loc_view[:, :, k], self.proposals)
all_detections = np.zeros((0, 6)) # detections for this image
conf = conf_view[:, :, k]
self.scores[:] = self.softmax(conf)
for c in range(self.num_classes):
if (c == 0): # skip processing of background classes
continue
# apply softmax
scores = self.scores[:, c]
# 1. apply threshold, sort, and get the top nms_k
top_N_ind = self.get_top_N_index(scores, self.nms_topk, self.threshold)
# fill the detections
if len(top_N_ind) > 0:
self.detections.fill(0)
self.detections[:len(top_N_ind), :4] = self.proposals[top_N_ind, :]
self.detections[:len(top_N_ind), 4] = scores[top_N_ind]
# 2. apply NMS
keep = self.be.nms(self.detections, self.nms_threshold, normalized=True)
keep = keep[:self.nms_topk]
# 3. store the detections per class
# add an additional dimension for the category label
dets = np.append(self.detections[keep, :].get(),
c * np.ones((len(keep), 1)), axis=1)
all_detections = np.vstack([all_detections, dets])
if all_detections.shape[0] > self.topk:
top_N_ind = self.get_top_N_index(all_detections[:, 4], self.topk, None)
all_detections = all_detections[top_N_ind, :]
batch_all_detections[k] = all_detections
return batch_all_detections
def bprop(self, error, alpha=1.0, beta=0.0):
raise NotImplementedError
def get_top_N_index(self, scores, N, threshold):
# this function handles scores still being device tensors
# move scores to host if needed
if isinstance(scores, np.ndarray):
np_scores = scores.ravel()
else:
np_scores = scores.get().ravel()
# apply threshold if needed
if threshold is None:
count = len(np_scores)
else:
count = len(np.where(np_scores > threshold)[0])
order = np_scores.argsort()[::-1].tolist()
order = order[:count]
if N > 0:
order = order[:N]
return order
def bbox_transform_inv(self, boxes, deltas, output, variance=[0.1, 0.1, 0.2, 0.2]):
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0]
dy = deltas[:, 1]
dw = deltas[:, 2]
dh = deltas[:, 3]
pred_ctr_x = variance[0] * dx * widths + ctr_x
pred_ctr_y = variance[1] * dy * heights + ctr_y
pred_w = self.be.exp(variance[2] * dw) * widths
pred_h = self.be.exp(variance[3] * dh) * heights
# pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
output[:, 0] = pred_ctr_x - 0.5 * pred_w
# y1
output[:, 1] = pred_ctr_y - 0.5 * pred_h
# x2
output[:, 2] = pred_ctr_x + 0.5 * pred_w
# y2
output[:, 3] = pred_ctr_y + 0.5 * pred_h
return output
class PriorBox(Layer):
def __init__(self, min_sizes, max_sizes, step=None, aspect_ratios=[2, 3], img_shape=(300, 300),
flip=True, clip=False, variance=[0.1, 0.1, 0.2, 0.2], offset=0.5, name=None):
super(PriorBox, self).__init__(name)
self.offset = offset
self.variance = variance
self.flip = flip
self.clip = clip
if type(step) in (dict, OrderedDict):
assert set(step.keys()) == set(('step_w', 'step_h'))
self.step_w = step['step_w']
self.step_h = step['step_h']
else:
assert step is not None
self.step_w = step
self.step_h = step
self.prior_boxes = None
self.img_w = img_shape[0]
self.img_h = img_shape[1]
assert isinstance(min_sizes, tuple)
assert isinstance(max_sizes, tuple)
self.min_sizes = min_sizes
self.max_sizes = max_sizes
assert len(self.min_sizes) == len(self.max_sizes)
# compute the number of prior boxes
# with flip, order the aspect ratios in the same was as caffe
self.aspect_ratios = []
for ar in aspect_ratios:
self.aspect_ratios.extend([ar])
if(self.flip):
self.aspect_ratios.extend([1.0 / float(ar)])
        # number of prior boxes per feature map pixel
# there is 1 box with AR=1 for each min and max sizes
self.num_priors_per_pixel = len(self.min_sizes) * 2
# and one box for each aspect ratio at the min size
self.num_priors_per_pixel += len(self.aspect_ratios) * len(self.min_sizes)
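        # Worked example: one (min_size, max_size) pair with aspect_ratios=[2, 3]
        # (flipped to [2, 1/2, 3, 1/3]) gives 1*2 + 4*1 = 6 priors per pixel.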
def configure(self, in_objs):
conv_layer = in_objs[1]
self.in_shape = conv_layer.out_shape
(_, self.layer_height, self.layer_width) = self.in_shape
if self.step_w is None or self.step_h is None:
self.step_w = math.ceil(float(self.img_w) / self.layer_width)
self.step_h = math.ceil(float(self.img_h) / self.layer_height)
self.num_boxes = self.layer_height * self.layer_width * self.num_priors_per_pixel
self.out_shape = (4*self.num_priors_per_pixel, self.layer_height, self.layer_width)
def allocate(self, shared_outputs=None):
self.outputs = self.be.empty((self.num_boxes, 4))
def fprop(self, inputs, inference=True):
# the priors will be of shape layer_width * layer_height * num_priors_per_pixel * 4
        # with 2 chans per element (one for the mean and one for the variance)
# we only need to calculate these once if the image size does not change
# right now we don't support changing image sizes anyways
if self.prior_boxes is not None:
return self.outputs
img_shape = [self.img_w, self.img_h]
self.prior_boxes = []
def gen_box(center, box_size, image_size, variance, clip):
box_ = [None] * 4
box_[0] = (center[0] - box_size[0] * 0.5) / image_size[0] # xmin
box_[1] = (center[1] - box_size[1] * 0.5) / image_size[1] # ymin
box_[2] = (center[0] + box_size[0] * 0.5) / image_size[0] # xmax
box_[3] = (center[1] + box_size[1] * 0.5) / image_size[1] # ymax
if clip:
for ind in range(4):
box_[ind] = min([max([box_[ind], 0.0]), 1.0])
return box_
offset = self.offset
# the output is 2 chans (the 4 prior coordinates, the 4 prior variances) for
# each output feature map pixel so the output array is
# 2 x layer_height x layer_width x num_priors x 4
center = [0, 0]
for h in range(self.layer_height):
center[1] = (h + offset) * self.step_h
for w in range(self.layer_width):
center[0] = (w + offset) * self.step_w
# do the min and max boxes with aspect ratio 1
for (min_size, max_size) in zip(self.min_sizes, self.max_sizes):
# do the min box
box_shape = [min_size, min_size]
self.prior_boxes += (gen_box(center, box_shape, img_shape,
self.variance, self.clip))
# do the max size box
sz_ = math.sqrt(min_size * max_size)
box_shape = [sz_, sz_]
self.prior_boxes += (gen_box(center, box_shape, img_shape,
self.variance, self.clip))
# now do the different aspect ratio boxes
for ar in self.aspect_ratios:
assert np.abs(ar - 1.0) > 1.0e-6
box_width = min_size * math.sqrt(ar)
box_height = min_size / math.sqrt(ar)
box_shape = [box_width, box_height]
self.prior_boxes += (gen_box(center, box_shape, img_shape,
self.variance, self.clip))
self.outputs.set(np.array(self.prior_boxes).reshape(-1, 4))
return self.outputs
def bprop(self, error, alpha=1.0, beta=0.0):
raise NotImplementedError
| apache-2.0 | -5,077,914,769,578,952,000 | 37.341772 | 99 | 0.553516 | false |
fimad/mitmproxy | mitmproxy/controller.py | 1 | 3905 | from __future__ import absolute_import
from six.moves import queue
import threading
class DummyReply:
"""
A reply object that does nothing. Useful when we need an object to seem
like it has a channel, and during testing.
"""
def __init__(self):
self.acked = False
def __call__(self, msg=False):
self.acked = True
class Reply:
"""
Messages sent through a channel are decorated with a "reply" attribute.
This object is used to respond to the message through the return
channel.
"""
def __init__(self, obj):
self.obj = obj
self.q = queue.Queue()
self.acked = False
def __call__(self, msg=None):
if not self.acked:
self.acked = True
if msg is None:
self.q.put(self.obj)
else:
self.q.put(msg)
class Channel:
def __init__(self, q, should_exit):
self.q = q
self.should_exit = should_exit
def ask(self, mtype, m):
"""
            Decorate a message with a reply attribute, send it to the
            master, then wait for a response.
"""
m.reply = Reply(m)
self.q.put((mtype, m))
while not self.should_exit.is_set():
try:
# The timeout is here so we can handle a should_exit event.
g = m.reply.q.get(timeout=0.5)
except queue.Empty: # pragma: no cover
continue
return g
def tell(self, mtype, m):
"""
Decorate a message with a dummy reply attribute, send it to the
master, then return immediately.
"""
m.reply = DummyReply()
self.q.put((mtype, m))
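# Illustrative flow (the "request" message type below is an arbitrary example,
# not something this module defines):
#
#   channel = Channel(master.masterq, master.should_exit)
#   resp = channel.ask("request", flow)   # blocks until the master calls flow.reply()
#   channel.tell("log", entry)            # fire-and-forget, no response expected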
class Slave(threading.Thread):
"""
Slaves get a channel end-point through which they can send messages to
the master.
"""
def __init__(self, channel, server):
self.channel, self.server = channel, server
self.server.set_channel(channel)
threading.Thread.__init__(self)
self.name = "SlaveThread (%s:%s)" % (
self.server.address.host, self.server.address.port)
def run(self):
self.server.serve_forever()
class Master(object):
"""
Masters get and respond to messages from slaves.
"""
def __init__(self, server):
"""
server may be None if no server is needed.
"""
self.server = server
self.masterq = queue.Queue()
self.should_exit = threading.Event()
def tick(self, q, timeout):
changed = False
try:
# This endless loop runs until the 'Queue.Empty'
# exception is thrown. If more than one request is in
# the queue, this speeds up every request by 0.1 seconds,
# because get_input(..) function is not blocking.
while True:
msg = q.get(timeout=timeout)
self.handle(*msg)
q.task_done()
changed = True
except queue.Empty:
pass
return changed
def run(self):
self.should_exit.clear()
self.server.start_slave(Slave, Channel(self.masterq, self.should_exit))
while not self.should_exit.is_set():
# Don't choose a very small timeout in Python 2:
# https://github.com/mitmproxy/mitmproxy/issues/443
# TODO: Lower the timeout value if we move to Python 3.
self.tick(self.masterq, 0.1)
self.shutdown()
def handle(self, mtype, obj):
c = "handle_" + mtype
m = getattr(self, c, None)
if m:
m(obj)
else:
obj.reply()
def shutdown(self):
if not self.should_exit.is_set():
self.should_exit.set()
if self.server:
self.server.shutdown()
| mit | 5,079,605,450,579,444,000 | 26.307692 | 79 | 0.541613 | false |
imathur/HPC-PPE | extract-timing-info.py | 1 | 3102 | import os
import numpy as np
import pandas as pd
# Create dataframe in which dataset will be stored
df = pd.DataFrame(columns=['pre-job input', 'opening input file', 'upto appmgr start', 'initialisation', 'event loop', 'ending'])
# Initialize counting variables
filecount = 0
included = 0
# Loop through all files in directory test1/output/ and search for log.EVNTtoHITS files for each Athena job
for subdir, dirs, files in os.walk('/work/d60/d60/shared/optimisation/benchmark/test1/output'):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith('.EVNTtoHITS'):
filecount = filecount + 1
# Extract lines containing certain strings from the log file and write the lines to a list
linelist = [ line.rstrip('\n') for line in open(filepath) if ('Setting up DBRelease' in line or \
'in ISF_Input' in line or \
'Welcome to ApplicationMgr' in line or \
'Event Counter process created' in line or \
'Statuses of sub-processes' in line) ]
# Extract last line of log file and append it to the list
with open(filepath,'rb') as source:
source.seek(-2, 2)
while source.read(1) != b"\n":
source.seek(-2, 1)
linelist.append(str(source.readline()))
# Create a list 'timelist' of the first word (string containing timestamp) on each line in the temporary file
timelist = [line.split()[0] for line in linelist]
# Convert each timestamp string element in the list to its equivalent value in seconds
ftr = [3600,60,1]
timelist = map(lambda x: sum([a*b for a,b in zip(ftr, [int(i) for i in x.split(":")])]), timelist)
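            # e.g. a timestamp of "0:01:40" becomes 0*3600 + 1*60 + 40 = 100 seconds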
# Create a new list 'timelist2' containing the difference of each consecutive pair of elements from 'timelist'
timelist2 = []
timelist2 = np.diff(timelist)
# If the list 'timelist2' has 6 elements (i.e., if the job finished execution and wasn't stopped prematurely), append the list as a new row to the dataframe
if timelist2.size == 6:
included = included + 1
print (filepath)
df = df.append(pd.Series(timelist2, index=['pre-job input', 'opening input file', 'upto appmgr start', 'initialisation', 'event loop', 'ending']), ignore_index=True)
# Write dataframe back to CSV file and print confirmation of completion of program.
df.to_csv('csvfiles/stageTiming.csv')
print ("\nFinished scanning %d of %d log files. Output: csvfiles/stageTiming.csv\n") % (included, filecount)
| gpl-3.0 | 6,145,991,294,068,977,000 | 46 | 181 | 0.548034 | false |
AmauryOrtega/Python-6.00.1x | Week 3/File5.py | 1 | 4677 | # -*- coding: utf-8 -*-
"""
Created on 25/09/2016
@author: Amaury Ortega <[email protected]>
"""
'''
Things that can be done to strings, range, list or tuple
where seq stands for any variable of the above types
-----OPERATIONS-----
seq[i]
len(seq)
seq1 + seq2 (not range)
n*seq (not range)
seq[start:end]
e in seq
e not in seq
for e in seq
-----PROPERTIES-----
type     elements   mutable
str      char       not mutable
tuple    any        not mutable
range    int        not mutable
list     any        mutable
'''
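# Quick illustration of the operations listed above (added example; the values
# are arbitrary and not part of the original lecture code):
seq = [1, 2, 3]
seq[0]        # -> 1
len(seq)      # -> 3
seq + [4]     # -> [1, 2, 3, 4]
2 * seq       # -> [1, 2, 3, 1, 2, 3]
seq[1:3]      # -> [2, 3]
2 in seq      # -> True
5 not in seq  # -> True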
# Dictionaries
'''
-----PROPERTIES-----
Value
any type (mutable and not mutable)
Key
unique
not mutable type (int, float, string, tuple, bool)
really it needs a hashable type; all immutable types are hashable
'''
grades = {'Ana': 'B', 'John': 'A+', 'Denise': 'A', 'Katy': 'A'}
grades['John']
grades['Sylvan'] = 'A'
'John' in grades
del (grades['Ana'])
grades.keys()
grades.values()
d = {4: {1: 0}, (1, 3): "twelve", 'const': [3.14, 2.7, 8.44]}
# Analyze song lyrics
def lyrics_to_frecuencies(lyrics):
myDict = {}
for word in lyrics:
if word in myDict:
myDict[word] += 1
else:
myDict[word] = 1
return myDict
she_loves_you = ['she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'you', 'think', "you've", 'lost', 'your', 'love',
'well', 'i', 'saw', 'her', 'yesterday-yi-yay',
"it's", 'you', "she's", 'thinking', 'of',
'and', 'she', 'told', 'me', 'what', 'to', 'say-yi-yay',
'she', 'says', 'she', 'loves', 'you',
'and', 'you', 'know', 'that', "can't", 'be', 'bad',
'yes', 'she', 'loves', 'you',
'and', 'you', 'know', 'you', 'should', 'be', 'glad',
'she', 'said', 'you', 'hurt', 'her', 'so',
'she', 'almost', 'lost', 'her', 'mind',
'and', 'now', 'she', 'says', 'she', 'knows',
"you're", 'not', 'the', 'hurting', 'kind',
'she', 'says', 'she', 'loves', 'you',
'and', 'you', 'know', 'that', "can't", 'be', 'bad',
'yes', 'she', 'loves', 'you',
'and', 'you', 'know', 'you', 'should', 'be', 'glad',
'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'with', 'a', 'love', 'like', 'that',
'you', 'know', 'you', 'should', 'be', 'glad',
'you', 'know', "it's", 'up', 'to', 'you',
'i', 'think', "it's", 'only', 'fair',
'pride', 'can', 'hurt', 'you', 'too',
'pologize', 'to', 'her',
'Because', 'she', 'loves', 'you',
'and', 'you', 'know', 'that', "can't", 'be', 'bad',
'Yes', 'she', 'loves', 'you',
'and', 'you', 'know', 'you', 'should', 'be', 'glad',
'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
'with', 'a', 'love', 'like', 'that',
'you', 'know', 'you', 'should', 'be', 'glad',
'with', 'a', 'love', 'like', 'that',
'you', 'know', 'you', 'should', 'be', 'glad',
'with', 'a', 'love', 'like', 'that',
'you', 'know', 'you', 'should', 'be', 'glad',
'yeah', 'yeah', 'yeah',
'yeah', 'yeah', 'yeah', 'yeah'
]
beatles = lyrics_to_frecuencies(she_loves_you)
def most_common_words(freqs):
values = freqs.values()
best = max(values)
words = []
for k in freqs:
if freqs[k] == best:
words.append(k)
return (words, best)
(w, b) = most_common_words(beatles)
def words_often(freqs, minTimes):
result = []
done = False
while not done:
temp = most_common_words(freqs)
if temp[1] >= minTimes:
result.append(temp)
for w in temp[0]:
del (freqs[w])
else:
done = True
return result
print(words_often(beatles, 5))
# Fibonnaci with dictionaries
def fib(x):
if x == 1:
return 1
elif x == 2:
return 2
else:
return fib(x - 1) + fib(x - 2)
def fib_efficient(n, d):
if n in d:
return d[n]
else:
ans = fib_efficient(n - 1, d) + fib_efficient(n - 2, d)
d[n] = ans
return ans
d = {1: 1, 2: 2}
print(fib_efficient(6, d))
| gpl-3.0 | -5,361,887,684,765,019,000 | 27.345455 | 75 | 0.446226 | false |
singlasahil14/char-rnn | utils.py | 1 | 2427 | import numpy as np
import os
from six.moves import cPickle
class TextLoader():
def __init__(self, data_dir='nietzsche', batch_size=128, seq_length=8):
self.data_dir = "data/" + data_dir
self.batch_size = batch_size
self.seq_length = seq_length
self.input_file = os.path.join(self.data_dir, "input.txt")
self.vocab_map_file = os.path.join(self.data_dir, "vocab-map.pkl")
self.tensor_file = os.path.join(self.data_dir, "tensor.npy")
if not(os.path.exists(self.vocab_map_file) and os.path.exists(self.tensor_file)):
self.preprocess()
else:
self.load_preprocessed()
def preprocess(self):
input_file = self.input_file
vocab_map_file = self.vocab_map_file
tensor_file = self.tensor_file
text = open(input_file).read()
chars = list(set(text))
chars.insert(0, "\0")
self.chars = sorted(chars)
self.vocab_size = len(chars)
        # build the lookup tables from the sorted vocabulary so the mapping is deterministic
        self.char2indices = dict((c, i) for i, c in enumerate(self.chars))
        self.indices2char = dict((i, c) for i, c in enumerate(self.chars))
with open(vocab_map_file, 'wb') as f:
cPickle.dump(self.char2indices, f)
self.tensor = np.array(list(map(self.char2indices.get, text)))
np.save(tensor_file, self.tensor)
def load_preprocessed(self):
with open(self.vocab_map_file, 'rb') as f:
self.char2indices = cPickle.load(f)
self.chars = sorted(self.char2indices.keys())
self.vocab_size = len(self.char2indices)
self.tensor = np.load(self.tensor_file)
self.indices2char = {v: k for k, v in self.char2indices.iteritems()}
def data_iterator(self):
tensor = self.tensor
batch_size = self.batch_size
seq_length = self.seq_length
data_len = len(tensor)
batch_len = batch_size * seq_length
data_len = data_len - (data_len%batch_len) - batch_len
size_per_batch = data_len//batch_size
epoch_size = data_len//batch_len
data = np.zeros([batch_size, size_per_batch + 1], dtype=np.int32)
for i in range(batch_size):
data[i] = tensor[size_per_batch * i: size_per_batch * (i + 1) + 1]
for i in range(epoch_size):
x = data[:, i * seq_length:(i + 1) * seq_length]
y = data[:, i * seq_length + 1:(i + 1) * seq_length + 1]
yield(x, y)
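# Illustrative usage (assumes data/nietzsche/input.txt exists on disk, which this
# module expects but does not ship):
#
#   loader = TextLoader('nietzsche', batch_size=128, seq_length=8)
#   for x, y in loader.data_iterator():
#       pass  # x, y are (batch_size, seq_length) int32 arrays; y is x shifted by one step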
| mit | -1,414,082,991,156,815,600 | 36.921875 | 89 | 0.588381 | false |
j00bar/django-widgy | widgy/contrib/page_builder/forms/__init__.py | 2 | 5872 | import os
from django import forms
from django.utils.safestring import mark_safe
from django.contrib.staticfiles.storage import staticfiles_storage
from django.template.loader import render_to_string
from django.conf import settings
import bleach
from django_pyscss import DjangoScssCompiler
PAGEDOWN_EDITOR_TEMPLATE = u'''
<div class="pagedown-buttonbar"></div>
{textarea}
<div class="pagedown-preview"></div>
'''
def scss_compile(scss_filename):
scss = DjangoScssCompiler()
css_content = scss.compile(scss_filename)
return css_content
class MarkdownWidget(forms.Textarea):
class Media:
css = {
'all': ('widgy/js/components/markdown/lib/pagedown.css',),
}
def render(self, *args, **kwargs):
textarea = super(MarkdownWidget, self).render(*args, **kwargs)
return mark_safe(PAGEDOWN_EDITOR_TEMPLATE.format(textarea=textarea))
class MarkdownField(forms.CharField):
widget = MarkdownWidget
class CKEditorWidget(forms.Textarea):
CONFIG = {
'toolbar': [
{'name': 'clipboard', 'groups': ['clipboard', 'undo'], 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert', 'items': ['HorizontalRule', 'SpecialChar']},
{'name': 'justify', 'groups': ['justify'], 'items': ['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock']},
{'name': 'document', 'groups': ['mode', 'document', 'doctools'], 'items': ['Source']},
{'name': 'tools', 'items': ['Maximize']},
'/',
{'name': 'basicstyles', 'groups': ['basicstyles', 'cleanup'], 'items': ['Bold', 'Italic', 'Strike', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph', 'groups': ['list', 'indent', 'blocks', 'align'], 'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote']},
{'name': 'editing', 'groups': ['find', 'selection', 'spellchecker'], 'items': ['Scayt']},
{'name': 'styles', 'items': ['Styles', 'Format']},
],
'stylesSet': [
{'name': 'Big', 'element': 'big'},
{'name': 'Small', 'element': 'small'},
{'name': 'Typewriter', 'element': 'tt'},
{'name': 'Computer Code', 'element': 'code'},
{'name': 'Keyboard Phrase', 'element': 'kbd'},
{'name': 'Sample Text', 'element': 'samp'},
{'name': 'Variable', 'element': 'var'},
{'name': 'Deleted Text', 'element': 'del'},
{'name': 'Inserted Text', 'element': 'ins'},
{'name': 'Cited Work', 'element': 'cite'},
{'name': 'Inline Quotation', 'element': 'q'},
{'name': 'Language: RTL', 'element': 'span', 'attributes': {'dir': 'rtl'}},
{'name': 'Language: LTR', 'element': 'span', 'attributes': {'dir': 'ltr'}},
],
'allowedContent': True,
'removeButtons': '',
'extraPlugins': 'justify',
'justifyClasses': ['align-left', 'align-center', 'align-right', 'align-justify'],
'indentClasses': ['text-indent-%d' % i for i in range(1,6)],
'contentsCss': scss_compile('/widgy/page_builder/html.scss'),
}
def __init__(self, *args, **kwargs):
super(CKEditorWidget, self).__init__(*args, **kwargs)
self.attrs['class'] = 'widgy_ckeditor'
def render(self, name, value, attrs=None):
textarea = super(CKEditorWidget, self).render(name, value, attrs)
return render_to_string('page_builder/ckeditor_widget.html', {
'html_id': attrs['id'],
'textarea': textarea,
'ckeditor_path': staticfiles_storage.url('widgy/js/lib/ckeditor/'),
'config': self.CONFIG,
})
class CKEditorField(forms.CharField):
widget = CKEditorWidget
ALLOWED_ATTRIBUTES = {
'*': ['class', 'dir', 'title'],
'a': ['href', 'target', 'rel', 'name'],
'time': ['datetime', 'pubdate'],
'img': ['src'],
'table': ['border'],
'colgroup': ['span'],
'col': ['span'],
'td': ['colspan', 'rowspan', 'headers'],
'th': ['colspan', 'rowspan', 'headers', 'scope'],
}
ALLOWED_TAGS = [
'a', 'abbr', 'acronym', 'address', 'b', 'big', 'br', 'cite', 'code',
'del', 'dfn', 'div', 'em', 'hr', 'i', 'ins', 'kbd', 'mark', 'p', 'pre',
'q', 'samp', 'small', 'span', 'strong', 'sub', 'sup', 'time', 'u',
'var', 'wbr', 's', 'tt',
'ul', 'ol', 'li',
'dl', 'dt', 'dd',
'blockquote', 'details', 'summary',
'hgroup', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'figure', 'figcaption', 'img',
'caption', 'col', 'colgroup', 'table', 'tbody', 'td', 'tfoot', 'th',
'thead', 'tr',
]
def clean(self, value):
value = super(CKEditorField, self).clean(value)
return bleach.clean(value,
tags=self.ALLOWED_TAGS,
attributes=self.ALLOWED_ATTRIBUTES)
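    # For instance (illustrative, relying on bleach's default strip=False behaviour),
    # clean('<script>x</script><b>hi</b>') escapes the disallowed <script> tag while
    # keeping <b>, since 'b' is listed in ALLOWED_TAGS above.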
class MiniCKEditorWidget(CKEditorWidget):
CONFIG = {
'toolbar': [
{'name': 'basicstyles', 'groups': ['basicstyles', 'cleanup'], 'items': ['Bold', 'Italic', 'Strike', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'undo', 'groups': ['undo'], 'items': ['Undo', 'Redo']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'mode', 'groups': ['mode'], 'items': ['Source']},
{'name': 'editing', 'groups': ['find', 'selection', 'spellchecker'], 'items': ['Scayt']},
],
'contentsCss': scss_compile('/widgy/page_builder/html.scss')
}
class MiniCKEditorField(forms.CharField):
widget = MiniCKEditorWidget
| apache-2.0 | -7,728,175,427,997,485,000 | 37.12987 | 180 | 0.533719 | false |
sio2project/filetracker | filetracker/scripts/migrate_test.py | 1 | 2281 | """Tests for migrate script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Process
import os
import shutil
import tempfile
import time
import unittest
from filetracker.client import Client, FiletrackerError
from filetracker.scripts import migrate
from filetracker.servers.run import main as server_main
_TEST_PORT_NUMBER = 45785
class MigrateScriptTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.temp_dir, 'old_root', 'foo', 'bar'))
os.makedirs(os.path.join(self.temp_dir, 'new_root'))
self.server_process = Process(
target=_start_server, args=(os.path.join(self.temp_dir, 'new_root'),)
)
self.server_process.start()
time.sleep(2)
self.server_url = 'http://127.0.0.1:{}'.format(_TEST_PORT_NUMBER)
self.client = Client(local_store=None, remote_url=self.server_url)
def tearDown(self):
self.server_process.terminate()
shutil.rmtree(self.temp_dir)
def test_should_upload_files_with_correct_relative_root(self):
_touch(os.path.join(self.temp_dir, 'old_root', 'foo', 'a.txt'))
_touch(os.path.join(self.temp_dir, 'old_root', 'foo', 'bar', 'b.txt'))
_touch(os.path.join(self.temp_dir, 'old_root', 'c.txt'))
_touch(os.path.join(self.temp_dir, 'old_root', 'd.txt'))
migrate.main(
[
os.path.join(self.temp_dir, 'old_root', 'foo'),
self.server_url,
'--root',
os.path.join(self.temp_dir, 'old_root'),
'-s',
]
)
self.assertEqual(self.client.get_stream('/foo/a.txt')[0].read(), b'')
self.assertEqual(self.client.get_stream('/foo/bar/b.txt')[0].read(), b'')
with self.assertRaises(FiletrackerError):
self.client.get_stream('/c.txt')
with self.assertRaises(FiletrackerError):
self.client.get_stream('/d.txt')
def _start_server(server_dir):
server_main(
['-p', str(_TEST_PORT_NUMBER), '-d', server_dir, '-D', '--workers', '4']
)
def _touch(path):
with open(path, 'w') as f:
pass
| gpl-3.0 | 8,214,431,620,466,431,000 | 29.824324 | 81 | 0.604559 | false |
aio-libs/aiokafka | tests/record/test_util.py | 1 | 3681 | import struct
import pytest
from aiokafka.record import util
varint_data = [
(b"\x00", 0),
(b"\x01", -1),
(b"\x02", 1),
(b"\x7E", 63),
(b"\x7F", -64),
(b"\x80\x01", 64),
(b"\x81\x01", -65),
(b"\xFE\x7F", 8191),
(b"\xFF\x7F", -8192),
(b"\x80\x80\x01", 8192),
(b"\x81\x80\x01", -8193),
(b"\xFE\xFF\x7F", 1048575),
(b"\xFF\xFF\x7F", -1048576),
(b"\x80\x80\x80\x01", 1048576),
(b"\x81\x80\x80\x01", -1048577),
(b"\xFE\xFF\xFF\x7F", 134217727),
(b"\xFF\xFF\xFF\x7F", -134217728),
(b"\x80\x80\x80\x80\x01", 134217728),
(b"\x81\x80\x80\x80\x01", -134217729),
(b"\xFE\xFF\xFF\xFF\x7F", 17179869183),
(b"\xFF\xFF\xFF\xFF\x7F", -17179869184),
(b"\x80\x80\x80\x80\x80\x01", 17179869184),
(b"\x81\x80\x80\x80\x80\x01", -17179869185),
(b"\xFE\xFF\xFF\xFF\xFF\x7F", 2199023255551),
(b"\xFF\xFF\xFF\xFF\xFF\x7F", -2199023255552),
(b"\x80\x80\x80\x80\x80\x80\x01", 2199023255552),
(b"\x81\x80\x80\x80\x80\x80\x01", -2199023255553),
(b"\xFE\xFF\xFF\xFF\xFF\xFF\x7F", 281474976710655),
(b"\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -281474976710656),
(b"\x80\x80\x80\x80\x80\x80\x80\x01", 281474976710656),
(b"\x81\x80\x80\x80\x80\x80\x80\x01", -281474976710657),
(b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 36028797018963967),
(b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -36028797018963968),
(b"\x80\x80\x80\x80\x80\x80\x80\x80\x01", 36028797018963968),
(b"\x81\x80\x80\x80\x80\x80\x80\x80\x01", -36028797018963969),
(b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 4611686018427387903),
(b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", -4611686018427387904),
(b"\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01", 4611686018427387904),
(b"\x81\x80\x80\x80\x80\x80\x80\x80\x80\x01", -4611686018427387905),
]
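# The pairs above follow the zigzag varint encoding Kafka uses: a signed value n
# is mapped to (n << 1) ^ (n >> 63) before the 7-bits-per-byte packing, which is
# why -1 encodes to 0x01 and 1 encodes to 0x02.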
@pytest.mark.parametrize("encoded, decoded", varint_data)
def test_encode_varint(encoded, decoded):
res = bytearray()
util.encode_varint(decoded, res.append)
assert res == encoded
@pytest.mark.parametrize("encoded, decoded", varint_data)
def test_decode_varint(encoded, decoded):
# We add a bit of bytes around just to check position is calculated
# correctly
value, pos = util.decode_varint(
bytearray(b"\x01\xf0" + encoded + b"\xff\x01"), 2)
assert value == decoded
assert pos - 2 == len(encoded)
@pytest.mark.parametrize("encoded, decoded", varint_data)
def test_size_of_varint(encoded, decoded):
assert util.size_of_varint(decoded) == len(encoded)
def test_crc32c():
def make_crc(data):
crc = util.calc_crc32c(data)
return struct.pack(">I", crc)
assert make_crc(b"") == b"\x00\x00\x00\x00"
assert make_crc(b"a") == b"\xc1\xd0\x43\x30"
# Took from librdkafka testcase
long_text = b"""\
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution."""
assert make_crc(long_text) == b"\x7d\xcd\xe1\x13"
| apache-2.0 | -1,897,615,598,977,457,200 | 37.747368 | 79 | 0.65906 | false |
i2c2-caj/Utilities | Python/file_test.py | 1 | 1763 | import os, time
def make_version(mtime):
# t = ['Text Day', 'Month', 'Num day', 'time', 'year']
t = time.ctime(mtime).split()
year = t[4]
month = int(time.strptime(t[1], '%b').tm_mon)
day = t[2]
if (month < 10):
new_version = 'y{0}m0{1}d{2}'.format(year, month, day)
else:
new_version = 'y{0}m{1}d{2}'.format(year, month, day)
return new_version
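# Example: a file last modified on 25 Sep 2016 yields the tag 'y2016m09d25'
# (single-digit months are zero-padded above; ctime()'s day field is used as-is).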
def update_version(mtime, old_version, lines):
new_version = make_version(mtime)
    # old_version is the version string taken from an existing '# Version:' tag
    if (len(old_version) != 0 and old_version == new_version):
        lines.append('# Version: {0}\n'.format(old_version))
    else:
        lines.append('# Version: {0}\n'.format(new_version))
'''
print '---FOUND VERSION'
print '---old: ', old_version
print '---new: ', new_version
'''
def main():
file_name = 'version_me.txt'
mtime = os.path.getmtime(file_name)
lines = []
file_object = open(file_name, 'r')
updated = False
for line in file_object:
# Check for version tag until it is found
if (updated == False):
check_line = line.strip().split()
if (len(check_line)):
                # Found a version tag ("# Version: <tag>"), update it
                if (len(check_line) >= 3 and check_line[1] == 'Version:'):
                    update_version(mtime, check_line[2], lines)
updated = True
else:
lines.append(line)
else:
lines.append('\n')
else:
lines.append(line)
# No version tag found, insert one
if (updated == False):
lines = ['# Version: {0}\n'.format(make_version(mtime))] + lines
w = open('text.txt', 'w')
w.writelines(lines)
main()
| gpl-2.0 | -6,161,450,614,470,024,000 | 26.123077 | 72 | 0.519569 | false |
vistadataproject/nodeVISTA | setupDocker/pySetup/rasUtilities/OSEHRAHelper.py | 1 | 22017 | #---------------------------------------------------------------------------
# Copyright 2012 The Open Source Electronic Health Record Agent
# Copyright 2017 Sam Habiel. writectrl methods.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
## @package OSEHRAHelper
## OSEHRA test helper
"""
OSEHRAHelper provides classes that establish connections to VistA
and interaction methods such as write() and wait()
@copyright The Open Source Electronic Health Record Agent
@license http://www.apache.org/licenses/LICENSE-2.0
"""
import sys
import os, errno
import telnetlib
import TestHelper
import time
import re
import logging
import csv
"""
# CG: removed pexpect location check as including explicitly AND paramiko which is used for SSH (as
# won't exercise that
filedir = os.path.dirname(os.path.abspath(__file__))
pexpectdir = os.path.normpath(os.path.join(filedir, "../Pexpect"))
import socket
paramikoedir = os.path.normpath(os.path.join(filedir, "../"))
sys.path.append(pexpectdir)
sys.path.append(paramikoedir)
"""
try:
import pexpect
no_pexpect = None
except ImportError, no_pexpect:
pass
try:
import paramiko
no_paramiko = None
except ImportError, no_paramiko:
pass
#---------------------------------------------------------------------------
# Initial Global Variables to use over the course of connecting
# connection=False
# log =False
#---------------------------------------------------------------------------
class PROMPT(object):
"""Wait for a VISTA> prompt in current namespace."""
class ConnectMUMPS(object):
def exitToPrompt(self):
self.write("Quit")
while True:
try:
index2 = self.multiwait(["to continue","Option:",self.prompt, "want to halt","[0-9]+d[0-9]+"])
            except pexpect.TIMEOUT:
continue
if index2 == 1:
self.write("Continue")
self.wait("Do you want to halt")
self.write("Y")
self.wait(self.prompt)
break
if index2 == 2:
break
if index2 == 3:
self.write("Y")
if index2 == 4:
self.write("Q")
self.write("^")
self.MenuLocation=[]
def ZN(self, namespace):
self.wait('>')
self.write('ZN "' + namespace + '"')
self.namespace = namespace
self.prompt = self.namespace + '>'
def login(self, username, password):
self.wait('Username:')
self.write(username)
self.wait('Password')
self.write(password)
def getenv(self, volume):
self.write('D GETENV^%ZOSV W Y')
if sys.platform == 'win32':
match = self.wait_re(volume + ':[0-9A-Za-z-]+', None)
test = match[1].span()
VistAboxvol = ''
for i in range(test[0], test[1]):
VistAboxvol = VistAboxvol + match[2][i]
self.boxvol = VistAboxvol
else:
self.wait_re(volume + ':[0-9A-Za-z-]+', None)
self.boxvol = self.connection.after
def IEN(self, file, objectname):
self.write('S DUZ=1 D Q^DI')
self.wait('OPTION')
self.write('5')
self.wait_re('FILE:')
self.write(file)
self.wait(file + ' NAME')
self.write(objectname + '\r')
self.wait_re('CAPTIONED OUTPUT?')
self.write('N')
self.wait_re('PRINT FIELD')
self.write('NUMBER\r')
self.wait('Heading')
self.write('')
self.wait('DEVICE')
if sys.platform == 'win32':
self.write('\r')
match = self.wait_re('\r\n[0-9]+')
test = match[1].span()
number = ''
for i in range(test[0], test[1]):
number = number + match[2][i]
number = number.lstrip('\r\n')
self.IENumber = number
else:
self.write('')
self.wait_re('\n[0-9]+')
number = self.connection.after
number = number.lstrip('\r\n')
self.IENumber = number
self.write('')
class ConnectWinCache(ConnectMUMPS):
def __init__(self, logfile, instance, namespace, location='127.0.0.1'):
super(ConnectMUMPS, self).__init__()
self.connection = telnetlib.Telnet(location, 23)
if len(namespace) == 0:
namespace = 'VISTA'
self.namespace = namespace
self.prompt = self.namespace + '>'
self.log = file(logfile, 'w')
self.type = 'cache'
path,filename = os.path.split(logfile)
self.MenuLocation=[]
self.lastconnection=""
self.optionParentDict = []
self.optionMenuTextDict = []
def write(self, command):
self.connection.write(command + '\r')
logging.debug('connection.write:' + command)
self.log.flush()
def writectrl(self, command):
self.connection.write(command)
logging.debug('connection.writectrl: ' + command)
def wait(self, command, tout=15):
logging.debug('connection.expect: ' + str(command))
if command is PROMPT:
command = self.namespace + '>'
rbuf = self.connection.read_until(command, tout)
if rbuf.find(command) == -1:
self.log.write('ERROR: expected: ' + command + 'actual: ' + rbuf)
logging.debug('ERROR: expected: ' + command + 'actual: ' + rbuf)
raise TestHelper.TestError('ERROR: expected: ' + command + 'actual: ' + rbuf)
else:
self.log.write(rbuf)
logging.debug(rbuf)
self.lastconnection=rbuf
return 1
def wait_re(self, command, timeout=30):
logging.debug('connection.expect: ' + str(command))
if command is PROMPT:
command = self.prompt
compCommand = re.compile(command,re.I)
output = self.connection.expect([compCommand], timeout)
self.match = output[1]
self.before = output[2]
if output[0] == -1 and output[1] == None:
raise Exception("Timed out")
if output[2]:
self.log.write(output[2])
self.log.flush()
self.lastconnection=output[2]
return output
def multiwait(self, options, tout=15):
logging.debug('connection.expect: ' + str(options))
if isinstance(options, list):
index = self.connection.expect(options, tout)
            if index[0] == -1:
logging.debug('ERROR: expected: ' + str(options))
raise TestHelper.TestError('ERROR: expected: ' + str(options))
self.log.write(index[2])
self.lastconnection=index[2]
return index[0]
else:
raise IndexError('Input to multiwait function is not a list')
def startCoverage(self, routines=['*']):
self.write('D ^%SYS.MONLBL')
rval = self.multiwait(['Stop Monitor', 'Start Monitor'])
if rval == 0:
self.write('1')
self.wait('Start Monitor')
self.write('1')
elif rval == 1:
self.write('1')
else:
raise TestHelper.TestError('ERROR starting monitor, rbuf: ' + rval)
for routine in routines:
self.wait('Routine Name')
self.write(routine)
self.wait('Routine Name', tout=120)
self.write('')
self.wait('choice')
self.write('2')
self.wait('choice')
self.write('1')
self.wait('continue')
self.write('\r')
def stopCoverage(self, path, humanreadable='OFF'):
newpath, filename = os.path.split(path)
self.write('D ^%SYS.MONLBL')
self.wait('choice')
if humanreadable == 'ON':
self.write('5')
self.wait('summary')
self.write('Y')
else:
self.write('6')
self.wait('Routine number')
self.write('*')
self.wait('FileName')
self.write(newpath + '/Coverage/' + filename.replace('.log', '.cmcov').replace('.txt', '.cmcov'))
self.wait('continue')
self.write('')
self.wait('choice')
self.write('1\r')
class ConnectLinuxCache(ConnectMUMPS):
def __init__(self, logfile, instance, namespace, location='127.0.0.1'):
super(ConnectMUMPS, self).__init__()
self.connection = pexpect.spawn('ccontrol session ' + instance + ' -U ' + namespace, timeout=None)
if len(namespace) == 0:
namespace = 'VISTA'
self.namespace = namespace
self.prompt = self.namespace + '>'
self.connection.logfile_read = file(logfile, 'w')
self.type = 'cache'
path,filename = os.path.split(logfile)
self.MenuLocation=[]
self.lastconnection=""
self.optionParentDict = []
self.optionMenuTextDict = []
def write(self, command):
self.connection.send(command + '\r')
logging.debug('connection.write:' + command)
def writectrl(self, command):
self.connection.send(command)
logging.debug('connection.writectrl: ' + command)
def wait(self, command, tout=15):
logging.debug('connection.expect: ' + str(command))
if command is PROMPT:
command = self.namespace + '>'
rbuf = self.connection.expect_exact(command, tout)
if rbuf == -1:
logging.debug('ERROR: expected: ' + command)
raise TestHelper.TestError('ERROR: expected: ' + command)
else:
self.lastconnection=self.connection.before
return 1
def wait_re(self, command, timeout=15):
logging.debug('connection.expect: ' + str(command))
if not timeout: timeout = -1
compCommand = re.compile(command,re.I)
self.connection.expect(compCommand, timeout)
self.lastconnection=self.connection.before
def multiwait(self, options, tout=15):
logging.debug('connection.expect: ' + str(options))
if isinstance(options, list):
index = self.connection.expect(options, tout)
if index == -1:
logging.debug('ERROR: expected: ' + options)
raise TestHelper.TestError('ERROR: expected: ' + options)
self.connection.logfile_read.write(options[index])
self.lastconnection=self.connection.before
return index
else:
raise IndexError('Input to multiwait function is not a list')
def startCoverage(self, routines=['*']):
self.write('D ^%SYS.MONLBL')
rval = self.multiwait(['Stop Monitor', 'Start Monitor'])
if rval == 0:
self.write('1')
self.wait('Start Monitor')
self.write('1')
elif rval == 1:
self.write('1')
else:
raise TestHelper.TestError('ERROR starting monitor, rbuf: ' + rval)
for routine in routines:
self.wait('Routine Name')
self.write(routine)
self.wait('Routine Name', tout=120)
self.write('')
self.wait('choice')
self.write('2')
self.wait('choice')
self.write('1')
self.wait('continue')
self.write('\r')
def stopCoverage(self, path, humanreadable='OFF'):
newpath, filename = os.path.split(path)
self.write('D ^%SYS.MONLBL')
self.wait('choice')
if humanreadable == 'ON':
self.write('5')
self.wait('summary')
self.write('Y')
else:
self.write('6')
self.wait('Routine number')
self.write('*')
self.wait('FileName')
self.write(newpath + '/Coverage/' + filename.replace('.log', '.cmcov').replace('.txt', '.cmcov'))
self.wait('continue')
self.write('')
self.wait('choice')
self.write('1\r')
class ConnectLinuxGTM(ConnectMUMPS):
def __init__(self, logfile, instance, namespace, location='127.0.0.1'):
super(ConnectMUMPS, self).__init__()
gtm_command = os.getenv('gtm_dist')+'/mumps -dir'
self.connection = pexpect.spawn(gtm_command, timeout=None)
# CG: namespace doesn't matter in GTM. Neither does instance
self.prompt = os.getenv("gtm_prompt")
if self.prompt == None:
self.prompt = "GTM>"
if len(namespace) == 0:
self.prompt = os.getenv("gtm_prompt")
if self.prompt == None:
self.prompt = "GTM>"
self.connection.logfile_read = file(logfile, 'w')
self.type = 'GTM'
path,filename = os.path.split(logfile)
self.MenuLocation=[]
self.lastconnection=""
self.optionParentDict = []
self.optionMenuTextDict = []
self.coverageRoutines = ""
def write(self, command):
self.connection.send(command + '\r')
logging.debug('connection.write: ' + command)
def writectrl(self, command):
self.connection.send(command)
logging.debug('connection.writectrl: ' + command)
def wait(self, command, tout=15):
logging.debug('connection.expect: ' + str(command))
if command is PROMPT:
command = self.prompt
rbuf = self.connection.expect_exact(command, tout)
logging.debug('RECEIVED: ' + command)
if rbuf == -1:
logging.debug('ERROR: expected: ' + command)
raise TestHelper.TestError('ERROR: expected: ' + command)
else:
self.lastconnection=self.connection.before
return 1
def wait_re(self, command, timeout=None):
logging.debug('connection.expect: ' + str(command))
if not timeout: timeout = -1
compCommand = re.compile(command,re.I)
self.connection.expect(compCommand, timeout)
self.lastconnection=self.connection.before
def multiwait(self, options, tout=15):
logging.debug('connection.expect: ' + str(options))
if isinstance(options, list):
index = self.connection.expect(options, tout)
if index == -1:
logging.debug('ERROR: expected: ' + str(options))
raise TestHelper.TestError('ERROR: expected: ' + str(options))
self.connection.logfile_read.write(options[index])
self.lastconnection=self.connection.before
return index
else:
raise IndexError('Input to multiwait function is not a list')
def startCoverage(self, routines=['*']):
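        # GT.M line-level tracing: VIEW "TRACE":1 starts collecting execution counts
        # into ^ZZCOVERAGE; stopCoverage() later turns tracing off and exports the global.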
self.write('K ^ZZCOVERAGE VIEW "TRACE":1:"^ZZCOVERAGE"')
self.coverageRoutines = routines
def stopCoverage(self, path, humanreadable='OFF'):
mypath, myfilename = os.path.split(path)
try:
os.makedirs(os.path.join(mypath, 'Coverage'))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.write('VIEW "TRACE":0:"^ZZCOVERAGE"')
self.wait(PROMPT)
self.write('D ^%GO')
self.wait('Global')
self.write('ZZCOVERAGE')
self.wait('Global')
self.write('')
self.wait('Label:')
self.write('')
self.wait('Format')
self.write('ZWR')
self.wait('device')
self.write(mypath + '/Coverage/' + myfilename.replace('.log', '.mcov').replace('.txt', '.mcov'))
self.write('')
self.wait(PROMPT)
if humanreadable == 'ON':
try:
self.write('W $T(GETRTNS^%ut1)')
self.wait(',NMSPS',.01)
except:
print('Human readable coverage requires M-Unit 1.6')
return
self.write('')
self.write('K NMSP')
self.wait(PROMPT)
for routine in self.coverageRoutines:
self.write('S NMSP("' + routine + '")=""')
self.write('K RTNS D GETRTNS^%ut1(.RTNS,.NMSP)')
self.wait(PROMPT)
self.write('K ^ZZCOHORT D RTNANAL^%ut1(.RTNS,"^ZZCOHORT")')
self.wait(PROMPT)
self.write('K ^ZZSURVIVORS M ^ZZSURVIVORS=^ZZCOHORT')
self.wait(PROMPT)
self.write('D COVCOV^%ut1("^ZZSURVIVORS","^ZZCOVERAGE")')
self.wait(PROMPT)
self.write('D COVRPT^%ut1("^ZZCOHORT","^ZZSURVIVORS","^ZZRESULT",-1)')
self.wait(PROMPT)
self.write('D ^%GO')
self.wait('Global')
self.write('ZZRESULT')
self.wait('Global')
self.write('')
self.wait('Label:')
self.write('')
self.wait('Format')
self.write('ZWR')
self.wait('device')
self.write(mypath + '/Coverage/coverageCalc.mcov')
self.wait(PROMPT)
self.write('WRITE ^ZZRESULT," ",@^ZZRESULT')
self.wait(PROMPT)
self.write('K ^ZZCOHORT,^ZZSURVIVORS,^ZZCOVERAGE,^ZZRESULT')
self.wait(PROMPT)
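# --- Illustrative sketch (not part of the original module) ---
# A typical coverage run against a local GT.M instance could be driven like
# this; the log path and routine pattern are placeholders, not values taken
# from this file:
#
#     conn = ConnectToMUMPS('/tmp/gtm_session.log')
#     conn.wait(PROMPT)
#     conn.startCoverage(routines=['XU*'])
#     conn.write('D ^XUP')          # exercise the code under test
#     conn.wait(PROMPT)
#     conn.stopCoverage('/tmp/gtm_session.log', humanreadable='ON')
#
# stopCoverage() dumps the raw ^ZZCOVERAGE global (and, when M-Unit 1.6+ is
# present and humanreadable is 'ON', the computed ^ZZRESULT summary) into a
# Coverage/ directory created next to the log file.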
class ConnectRemoteSSH(ConnectMUMPS):
"""
This will provide a connection to VistA via SSH. This class handles any
remote system (ie: currently there are not multiple versions of it for each
remote OS).
"""
def __init__(self, logfile, instance, namespace, location, remote_conn_details):
super(ConnectMUMPS, self).__init__()
self.type = str.lower(instance)
self.namespace = str.upper(namespace)
self.prompt = self.namespace + '>'
# Create a new SSH client object
client = paramiko.SSHClient()
# Set SSH key parameters to auto accept unknown hosts
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Connect to the host
client.connect(hostname=remote_conn_details.remote_address,
port=remote_conn_details.remote_port,
username=remote_conn_details.username,
password=remote_conn_details.password)
# Create a client interaction class which will interact with the host
from paramikoe import SSHClientInteraction
interact = SSHClientInteraction(client, timeout=10, display=False)
self.connection = interact
self.connection.logfile_read = file(logfile, 'w')
self.client = client # apparently there is a deconstructor which disconnects (probably sends a FYN packet) when client is gone
def write(self, command):
time.sleep(.01)
self.connection.send(command + '\r')
logging.debug('connection.send:' + command)
def writectrl(self, command):
time.sleep(.01)
self.connection.send(command)
logging.debug('connection.writectrl: ' + command)
def wait(self, command, tout=15):
time.sleep(.01)
logging.debug('connection.expect: ' + str(command))
if command is PROMPT:
command = self.namespace + '>'
else:
command = self.escapeSpecialChars(command)
if command == '':
command = '.*' # fix for paramiko expect, it does not work with wait('')
try:
rbuf = self.connection.expect(command, tout)
except socket.timeout:
rbuf = -1
if rbuf == -1:
logging.debug('ERROR: expected: ' + command)
print 'ERROR: expected: ' + command
raise TestHelper.TestError('ERROR: expected: ' + command)
else:
return 1
    # paramikoe already accepts regular expressions as input by default
def wait_re(self, command, timeout=30):
self.wait(command, timeout)
def multiwait(self, options, tout=15):
logging.debug('connection.expect: ' + str(options))
temp_options = []
for command in options:
temp_options.append(self.escapeSpecialChars(command))
options = temp_options
time.sleep(.01)
if isinstance(options, list):
index = self.connection.expect(options, timeout=tout)
if index == -1:
logging.debug('ERROR: expected: ' + str(options))
raise TestHelper.TestError('ERROR: expected: ' + str(options))
return index
else:
raise IndexError('Input to multiwait function is not a list')
def startCoverage(self, routines=['*']):
if self.type == 'cache':
self.write('D ^%SYS.MONLBL')
rval = self.multiwait(['Stop Monitor', 'Start Monitor'])
if rval == 0:
self.write('1')
self.wait('Start Monitor')
self.write('1')
elif rval == 1:
self.write('1')
else:
raise TestHelper.TestError('ERROR starting monitor, rbuf: ' + rval)
for routine in routines:
self.wait('Routine Name')
self.write(routine)
self.wait('Routine Name', tout=120)
self.write('')
self.wait('choice')
self.write('2')
self.wait('choice')
self.write('1')
self.wait('continue')
self.write('\r')
else:
self.write('K ^ZZCOVERAGE VIEW "TRACE":1:"^ZZCOVERAGE"')
def stopCoverage(self, path):
if self.type == 'cache':
newpath, filename = os.path.split(path)
self.write('D ^%SYS.MONLBL')
self.wait('choice')
self.write('5')
self.wait('summary')
self.write('Y')
self.wait('FileName')
self.write(newpath + '/' + filename.replace('.log', '.cmcov'))
self.wait('continue')
self.write('')
self.wait('choice')
self.write('1\r')
else:
path, filename = os.path.split(path)
self.write('VIEW "TRACE":0:"^ZZCOVERAGE"')
self.wait(PROMPT)
self.write('D ^%GO')
self.wait('Global')
self.write('ZZCOVERAGE')
self.wait('Global')
self.write('')
self.wait('Label:')
self.write('')
self.wait('Format')
self.write('ZWR')
self.wait('device')
self.write(path + '/' + filename.replace('.log', '.mcov'))
"""
Added to convert regex's into regular string matching. It replaces special
characters such as '?' into '\?'
"""
def escapeSpecialChars(self, string):
re_chars = '?*.+-|^$\()[]{}'
escaped_str = ''
for c in string:
if c in re_chars:
escaped_str = escaped_str + '\\'
escaped_str += c
return escaped_str
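    # --- Illustrative example (not part of the original module) ---
    # escapeSpecialChars() turns a literal prompt into something safe to hand
    # to paramikoe, which treats its expect patterns as regular expressions:
    #
    #     conn.escapeSpecialChars('Select OPTION NAME:')   # unchanged
    #     conn.escapeSpecialChars('Device?')               # -> 'Device\\?' (the '?' is escaped)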
def ConnectToMUMPS(logfile, instance='CACHE', namespace='VISTA', location='127.0.0.1', remote_conn_details=None):
# self.namespace = namespace
# self.location = location
# print "You are using " + sys.platform
# remote connections
if remote_conn_details is not None:
if no_paramiko:
raise no_paramiko
return ConnectRemoteSSH(logfile, instance, namespace, location, remote_conn_details)
# local connections
if sys.platform == 'win32':
return ConnectWinCache(logfile, instance, namespace, location)
elif sys.platform == 'linux2':
if no_pexpect:
raise no_pexpect
if os.getenv('gtm_dist'):
try:
return ConnectLinuxGTM(logfile, instance, namespace, location)
except pexpect.ExceptionPexpect, no_gtm:
if (no_gtm):
raise "Cannot find a MUMPS instance"
else:
try:
return ConnectLinuxCache(logfile, instance, namespace, location)
except pexpect.ExceptionPexpect, no_cache:
if (no_cache):
raise "Cannot find a MUMPS instance"
| agpl-3.0 | 2,144,244,377,783,868,200 | 31.521418 | 131 | 0.617296 | false |
LabAdvComp/tukey_middleware | setup.py | 1 | 1983 | # Copyright 2013 Open Cloud Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#from distutils.core import setup
from setuptools import setup
import subprocess
name='tukey_middleware'
sub_packages = ['modules', 'api', 'auth', 'cloud_driver',
# tests
'tests', 'tests.services']
sub_modules = ['ids', 'instance_metadata', 'metadata']
modules = ['modules.%s' % s for s in sub_modules]
setup(
name=name,
version='0.4.2.1',
packages=[name] + ['%s.%s' % (name, s) for s in sub_packages + modules],
license='Apache License 2.0"',
dependency_links=[
'https://github.com/LabAdvComp/novacluster/tarball/master#egg=novacluster'],
install_requires=[
'novacluster',
'flask',
'python-glanceclient',
'python-cinderclient',
'python-magic',
'python-memcached',
'dnspython',
'prettytable',
'apache-libcloud==0.14.0-beta3',
'xmldict',
'SQLAlchemy',
'psycopg2',
'couchdb',
'fuse-python',
'requests',
'python-novaclient',
'python-swiftclient',
'psutil',
'python-gnupg',
'M2Crypto',
],
long_description=open('README.rst').read(),
scripts=['bin/osdcfs', 'bin/osdc-upload-metadata', 'bin/osdc-upload-file',
'bin/osdc-register-file'],
)
#VIRTUAL_ENV
#%s/lib/python2.7/site-packages/tukey_middleware/local_settings.py
#subprocess.Popen(", shell=True)
| apache-2.0 | -2,085,793,852,312,047,400 | 29.045455 | 84 | 0.638931 | false |
arvinsahni/ml4 | learn/forall.py | 1 | 2547 |
import pandas as pd
from sklearn.preprocessing import label_binarize
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import r2_score, roc_auc_score
from learn import utils
class Regression():
def __init__(self, time_to_compute=None):
self.time_to_compute = time_to_compute
def fit(self, X, y):
model = RandomForestRegressor(n_estimators=100,
oob_score=True)
model.fit(X, y)
self.model = model
self.oob_predictions = model.oob_prediction_
self.score_type = "R2"
self.score = r2_score(y, self.oob_predictions)
return self
def predict(self, X):
predictions = self.model.predict(X)
return predictions
class Classification():
def __init__(self, time_to_compute=None):
"""
"""
self.time_to_compute = time_to_compute
def fit(self, X, y):
"""
        Currently y must be numeric; wrapping a label
        encoder/vectorizer to support non-numeric targets is a TODO.
"""
y = pd.Series(y)
self.n_classes = len(y.unique())
model = RandomForestClassifier(n_estimators=100,
oob_score=True)
model.fit(X, y)
self.model = model
# Evaluation metrics
if self.n_classes == 2:
self.oob_predictions = model.oob_decision_function_[:, 1]
self.score_type = "AUC"
self.score = roc_auc_score(y, self.oob_predictions)
else:
self.oob_predictions = model.oob_decision_function_
self.score_type = "AUC"
y_bin = label_binarize(y, sorted(pd.Series(y).unique()))
self.score = roc_auc_score(y_bin,
self.oob_predictions)
return self
def predict(self, X):
predictions = self.model.predict(X)
return predictions
class All():
def __init__(self, time_to_compute=None):
self.time_to_compute = time_to_compute
def fit(self, X, y):
self.classification = utils.is_classification_problem(y)
if self.classification:
model = Classification()
else:
model = Regression()
model.fit(X, y)
self.model = model
self.score = model.score
self.score_type = model.score_type
return self
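    # --- Illustrative usage sketch (not part of the original module) ---
    # `All` picks Classification or Regression automatically from the target,
    # so a caller only needs something like the following (X, y and X_new are
    # placeholders for a feature matrix, target vector and new data):
    #
    #     model = All().fit(X, y)
    #     print(model.score_type, model.score)   # "AUC" or "R2", from OOB estimates
    #     preds = model.predict(X_new)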
def predict(self, X):
predictions = self.model.predict(X)
return predictions | mit | -7,940,005,392,520,095,000 | 30.45679 | 74 | 0.556733 | false |
chromium/chromium | third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/vcs.py | 13 | 1926 | import subprocess
from functools import partial
from typing import Callable
from mozlog import get_default_logger
from wptserve.utils import isomorphic_decode
logger = None
def vcs(bin_name: str) -> Callable[..., None]:
def inner(command, *args, **kwargs):
global logger
if logger is None:
logger = get_default_logger("vcs")
repo = kwargs.pop("repo", None)
log_error = kwargs.pop("log_error", True)
stdout = kwargs.pop("stdout", None)
stdin = kwargs.pop("stdin", None)
if kwargs:
raise TypeError(kwargs)
args = list(args)
proc_kwargs = {}
if repo is not None:
# Make sure `cwd` is str type to work in different sub-versions of Python 3.
# Before 3.8, bytes were not accepted on Windows for `cwd`.
proc_kwargs["cwd"] = isomorphic_decode(repo)
if stdout is not None:
proc_kwargs["stdout"] = stdout
if stdin is not None:
proc_kwargs["stdin"] = stdin
command_line = [bin_name, command] + args
logger.debug(" ".join(command_line))
try:
func = subprocess.check_output if not stdout else subprocess.check_call
return func(command_line, stderr=subprocess.STDOUT, **proc_kwargs)
except OSError as e:
if log_error:
logger.error(e)
raise
except subprocess.CalledProcessError as e:
if log_error:
logger.error(e.output)
raise
return inner
git = vcs("git")
hg = vcs("hg")
def bind_to_repo(vcs_func, repo, log_error=True):
return partial(vcs_func, repo=repo, log_error=log_error)
def is_git_root(path, log_error=True):
try:
rv = git("rev-parse", "--show-cdup", repo=path, log_error=log_error)
except subprocess.CalledProcessError:
return False
return rv == b"\n"
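# --- Illustrative usage (not part of the original module) ---
# The vcs() factory returns a thin wrapper around the binary, so callers can
# run, e.g. (the repository path below is a placeholder):
#
#     git("status", "--porcelain", repo="/path/to/checkout")
#     repo_git = bind_to_repo(git, "/path/to/checkout")
#     repo_git("rev-parse", "HEAD")
#
# Without `stdout`, the wrapper returns the command's captured output as
# bytes; failures are logged (unless log_error=False) and re-raised.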
| bsd-3-clause | 6,284,325,737,113,984,000 | 28.630769 | 88 | 0.593458 | false |
hidext/oemedical | oemedical_his/models/oemedical_hospital_unit.py | 1 | 1618 | # -*- coding: utf-8 -*-
##############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>)
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class OeMedicalHospitalUnit(orm.Model):
_name = 'oemedical.hospital.unit'
_columns = {
'code': fields.char(size=8, string='Code'),
'institution': fields.many2one(
'res.partner',
string='Institution',
help='Medical Center'),
'name': fields.char(
string='Name',
size=256,
required=True,
help='Name of the unit, eg Neonatal, Intensive Care, ...'),
'extra_info': fields.text(string='Extra Info'),
}
| agpl-3.0 | 3,963,752,721,680,169,000 | 39.45 | 78 | 0.592089 | false |
justinmeister/The-Stolen-Crown-RPG | data/components/attack.py | 1 | 1379 | """
Sprites for attacks.
"""
import sys
import pygame as pg
from .. import setup, tools
#Python 2/3 compatibility.
if sys.version_info[0] == 2:
range = xrange
class Fire(pg.sprite.Sprite):
"""
Fire animation for attacks.
"""
def __init__(self, x, y):
super(Fire, self).__init__()
self.spritesheet = setup.GFX['explosion']
self.get_image = tools.get_image
self.image_list = self.make_image_list()
self.index = 0
self.image = self.image_list[self.index]
self.rect = self.image.get_rect(left=x, top=y)
self.timer = 0.0
def make_image_list(self):
"""
Make a list of images to cycle through for the
animation.
"""
image_list = []
for row in range(8):
for column in range(8):
posx = column * 128
posy = row * 128
new_image = self.get_image(posx, posy, 128, 128,
self.spritesheet)
image_list.append(new_image)
return image_list
def update(self):
"""
Update fire explosion.
"""
if self.index < (len(self.image_list) - 1):
self.index += 1
self.image = self.image_list[self.index]
elif self.index == (len(self.image_list) - 1):
self.kill()
| mit | 3,362,993,657,341,950,500 | 26.039216 | 64 | 0.518492 | false |
alangenfeld/cloud-nfs | pyCloud/recovery.py | 1 | 1086 | #!/usr/bin/env python
import boto
import os
import tempfile
import pickle
import cloudnfs
#bucketName = cloudnfs
bucketName = "cs699wisc_samanas"
#########################################################################
# Recovery
#########################################################################
# "Load your developer keys from the .boto config file."
config = boto.config
#"Create a URI, but don't specify a bucket or object because you are listing buckets."
uri = boto.storage_uri("", "gs")
#"Get your buckets."
buckets = uri.get_all_buckets()
l = list();
for bucket in buckets:
"Create a URI for a bucket."
uri = boto.storage_uri(bucket.name, "gs")
"Get the objects that are in the bucket."
objs = uri.get_bucket()
for obj in objs :
if (obj.name == 'table.pkl') :
cloudnfs.download(obj.name, obj.name)
else :
cloudnfs.download(obj.name, "/" + obj.name)
#if 'table.pkl' in l :
# download('table.pkl', 'temp_table.pkl')
# table = open(temp_table.pkl)
# table_dict = pickle.load(table)
| lgpl-3.0 | 4,373,986,769,432,987,600 | 23.681818 | 86 | 0.55709 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/eventlog/zglog.py | 1 | 1605 | # -*- coding: utf-8 -*-
#
# Author: Alejandro J. Cura <[email protected]>
#
# Copyright 2010 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Log into the Zeitgeist daemon."""
from twisted.internet.defer import Deferred
from ubuntuone.logger import logging
logger = logging.getLogger('ubuntuone.eventlog.zglog')
class ZeitgeistLogger(object):
"""A class that logs zeitgeist events."""
client = None
def __init__(self):
"""Initialize this instance."""
try:
from zeitgeist.client import ZeitgeistClient
self.client = ZeitgeistClient()
logger.info("Zeitgeist support initialized.")
except Exception:
logger.exception("Zeitgeist support not started:")
def log(self, event):
"""Log a zeitgeist event."""
d = Deferred()
if self.client:
logger.info("Logging Zeitgeist event: %r", event)
self.client.insert_event(event, d.callback, d.errback)
else:
d.callback([])
return d
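    # --- Illustrative usage sketch (not part of the original module) ---
    # The event passed to log() is expected to be a zeitgeist Event; how it is
    # built depends on the zeitgeist.datamodel API and is only sketched here:
    #
    #     zg_logger = ZeitgeistLogger()
    #     d = zg_logger.log(event)        # event built via zeitgeist.datamodel
    #     d.addCallback(lambda ids: ...)  # fires with whatever the client returns,
    #                                     # or [] when zeitgeist support is absent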
| gpl-3.0 | 1,994,564,346,605,307,600 | 32.4375 | 75 | 0.675389 | false |
mikeckennedy/cookiecutter-course | src/ch4_advanced_usage/secondbottle/manage.py | 1 | 1406 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import click
from bottle import static_file, Bottle, run, TEMPLATE_PATH
from beaker.middleware import SessionMiddleware
from secondbottle import settings
from secondbottle.routes import Routes
TEMPLATE_PATH.insert(0, settings.TEMPLATE_PATH)
session_opts = {
'session.type': 'file',
'session.auto': True
}
app = SessionMiddleware(Bottle(), session_opts)
# Bottle Routes
app.wrap_app.merge(Routes)
@app.wrap_app.route('/assets/<path:path>', name='assets')
def assets(path):
yield static_file(path, root=settings.STATIC_PATH)
@click.group()
def cmds():
pass
@cmds.command()
@click.option('--port', default=os.environ.get('PORT', 8080), type=int,
help=u'Set application server port!')
@click.option('--ip', default='0.0.0.0', type=str,
help=u'Set application server ip!')
@click.option('--debug', default=False,
help=u'Set application server debug!')
def runserver(port, ip, debug):
click.echo('Start server at: {}:{}'.format(ip, port))
run(app=app, host=ip, port=port, debug=debug, reloader=debug)
@cmds.command()
def test():
import unittest
loader = unittest.TestLoader()
tests = loader.discover('tests')
testRunner = unittest.runner.TextTestRunner()
testRunner.run(tests)
if __name__ == "__main__":
cmds()
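# --- Illustrative CLI usage (not part of the original file) ---
# With the click group above, the app is typically driven as:
#
#     python manage.py runserver --ip 0.0.0.0 --port 8080
#     python manage.py test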
| gpl-2.0 | 931,982,854,121,050,500 | 23.666667 | 71 | 0.676387 | false |
AndrewWasHere/audiolens | lib/beamformers/beamformer.py | 1 | 2105 | """
Copyright 2015 Andrew Lin.
All rights reserved.
Licensed under the BSD 3-clause License. See LICENSE.txt or
<http://opensource.org/licenses/BSD-3-Clause>.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from lib.albatross import log
_log = log.get_logger(__name__)
class BeamFormerError(Exception):
"""Error while beam forming."""
class BeamFormer(metaclass=ABCMeta):
"""Audio beam former base class."""
def __init__(self, max_channels):
self.max_channels = max_channels
# Public Interfaces. #######################################################
def process(self, audio):
"""Process audio file.
Args:
audio (np.ndarray or list of np.ndarray): multi-channel audio.
Raises:
ValueError: Problem with audio.
BeamFormerError: Problem while processing audio.
"""
_log.debug('%s.process(%s)', self.__class__.__name__, audio)
# Process audio.
if isinstance(audio, np.ndarray):
_, channels = audio.shape
audio = [audio[:, n] for n in range(channels)]
n_channels = len(audio)
if n_channels < 2:
            raise ValueError(
                'Not enough channels in audio to beam form. (found %d)' %
                n_channels
            )
elif self.max_channels and n_channels > self.max_channels:
            raise ValueError(
                'Too many channels in audio. There cannot be more than %d '
                'channels. Found %d.' %
                (self.max_channels, n_channels)
            )
self._process(audio) # Derived class implementation.
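    # --- Illustrative subclass sketch (not part of the original module) ---
    # A concrete beam former only has to implement _process(); a trivial one
    # that just averages the channels (averaging is a stand-in here, not a
    # real beam-forming algorithm) might look like:
    #
    #     class AverageBeamFormer(BeamFormer):
    #         def _process(self, audio):
    #             # audio is a list of per-channel numpy arrays
    #             self.output = sum(audio) / len(audio)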
# Private methods. #########################################################
@abstractmethod
def _process(self, audio):
"""Process audio.
This function is implemented in derived classes.
Args:
audio (list of np.ndarray): multi-channel audio.
Raises:
BeamFormerException (or a derivation thereof): Problem while
processing audio.
"""
| bsd-3-clause | 896,673,129,523,582,500 | 27.445946 | 80 | 0.549644 | false |
sportorg/pysport | sportorg/libs/iof/parser.py | 1 | 10856 | import xml.etree.ElementTree as ET
class IOFParseResult(object):
def __init__(self, name, data):
self.name = name
self.data = data
def parse(file):
ns = {
'iof': 'http://www.orienteering.org/datastandard/3.0',
'orgeo': 'http://orgeo.ru/iof-xml-extensions/3.0',
}
tree = ET.parse(file)
results = [
IOFParseResult('EntryList', entry_list(tree, ns)),
IOFParseResult('CourseData', course_data(tree, ns)),
IOFParseResult('ResultList', result_list(tree, ns)),
IOFParseResult('Event', event(tree, ns)),
]
return [result for result in results if result.data is not None]
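# --- Illustrative usage (not part of the original module) ---
# parse() returns one IOFParseResult per recognised section of the document,
# so a caller might do (the file name is a placeholder):
#
#     for result in parse('entries.xml'):
#         if result.name == 'EntryList':
#             for entry in result.data:
#                 print(entry['person']['family'], entry['group']['name'])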
def course_data(tree, ns):
root = tree.getroot()
if 'CourseData' not in root.tag:
return
courses = []
version = '0'
if 'iofVersion' in root.attrib:
version = root.attrib['iofVersion'][0]
elif root.find('IOFVersion') is not None:
version = root.find('IOFVersion').attrib['version'][0]
if version == '3':
for course_el in root.find('iof:RaceCourseData', ns).findall('iof:Course', ns):
course = {
'name': course_el.find('iof:Name', ns).text,
'length': int(course_el.find('iof:Length', ns).text),
'climb': int(course_el.find('iof:Climb', ns).text),
'controls': [],
}
for course_control_el in course_el.findall('iof:CourseControl', ns):
leg_length = 0
if course_control_el.find('iof:LegLength', ns) is not None:
leg_length = int(course_control_el.find('iof:LegLength', ns).text)
course['controls'].append(
{
'type': course_control_el.attrib['type'], # Start, Control, Finish
'control': course_control_el.find('iof:Control', ns).text,
'leg_length': leg_length,
}
)
courses.append(course)
elif version == '2':
for course_el in root.findall('Course'):
course_variation_el = course_el.find('CourseVariation')
course = {
'name': course_el.find('CourseName').text.strip(),
'length': int(course_variation_el.find('CourseLength').text),
'climb': int(course_variation_el.find('CourseClimb').text.strip()) if course_variation_el.find(
'CourseClimb').text.strip().isdigit() else 0,
'controls': [],
}
for course_control_el in course_variation_el.findall('CourseControl'):
leg_length = 0
if course_control_el.find('LegLength') is not None:
leg_length = int(course_control_el.find('LegLength').text)
course['controls'].append(
{
'type': 'Control',
'control': course_control_el.find('ControlCode').text.strip(),
'leg_length': leg_length,
}
)
courses.append(course)
return courses
def entry_list(tree, ns):
root = tree.getroot()
if 'EntryList' not in root.tag:
return
groups = {}
for group_el in root.findall('iof:Class', ns):
group_id = group_el.find('iof:Id', ns).text
groups[group_id] = {
'id': group_id,
'name': group_el.find('iof:Name', ns).text,
'short_name': group_el.find('iof:ShortName', ns).text,
}
person_entries = []
for person_entry_el in root.findall('iof:PersonEntry', ns):
person_el = person_entry_el.find('iof:Person', ns)
birth_date_el = person_el.find('iof:BirthDate', ns)
id_el = person_el.find('iof:Id', ns)
person = {
'family': person_el.find('iof:Name', ns).find('iof:Family', ns).text,
'given': person_el.find('iof:Name', ns).find('iof:Given', ns).text,
'extensions': {},
}
if birth_date_el is not None:
person['birth_date'] = birth_date_el.text
if id_el is not None:
person['id'] = id_el.text
extensions_el = person_el.find('iof:Extensions', ns)
if extensions_el:
qual_el = extensions_el.find('orgeo:Qual', ns)
if qual_el is not None:
person['extensions']['qual'] = qual_el.text
bib_el = extensions_el.find('orgeo:BibNumber', ns)
if bib_el is not None:
person['extensions']['bib'] = bib_el.text
org_el = person_entry_el.find('iof:Organisation', ns)
organization = None
if org_el:
organization = {
'id': org_el.find('iof:Id', ns).text,
'name': org_el.find('iof:Name', ns).text
}
role = org_el.find('iof:Role', ns)
if role:
role_person = role.find('iof:Person', ns)
organization['role_person'] = '{} {}'.format(
role_person.find('iof:Name', ns).find('iof:Family', ns).text,
role_person.find('iof:Name', ns).find('iof:Given', ns).text
)
group_el = person_entry_el.find('iof:Class', ns)
if group_el:
group = {
'id': group_el.find('iof:Id', ns).text,
'name': group_el.find('iof:Name', ns).text
}
groups[group['id']] = {
'id': group['id'],
'name': group['name']
}
control_card_el = person_entry_el.find('iof:ControlCard', ns)
control_card = ''
if control_card_el is not None:
control_card = control_card_el.text
race_numbers = []
for race_num_el in person_entry_el.findall('iof:RaceNumber', ns):
race_numbers.append(race_num_el.text)
person_entries.append(
{
'person': person,
'organization': organization,
'group': groups[group['id']] if group['id'] in groups else group,
'control_card': control_card,
'race_numbers': race_numbers,
}
)
return person_entries
def result_list(tree, ns):
root = tree.getroot()
if 'ResultList' not in root.tag:
return
groups = {}
person_results = []
for class_result in root.findall('iof:ClassResult', ns):
"""Group of results for class"""
group_el = class_result.find('iof:Class', ns)
group_id = group_el.find('iof:Id', ns).text
groups[group_id] = {
'id': group_id,
'name': group_el.find('iof:Name', ns).text,
            'short_name': group_el.find('iof:ShortName', ns).text if group_el.find('iof:ShortName', ns) is not None else ''
}
for person_result_el in class_result.findall('iof:PersonResult', ns):
person_el = person_result_el.find('iof:Person', ns)
birth_date_el = person_el.find('iof:BirthDate', ns)
id_el = person_el.find('iof:Id', ns)
person = {
'family': person_el.find('iof:Name', ns).find('iof:Family', ns).text,
'given': person_el.find('iof:Name', ns).find('iof:Given', ns).text,
'extensions': {}
}
if birth_date_el is not None:
person['birth_date'] = birth_date_el.text
if id_el is not None:
person['id'] = id_el.text
org_el = person_result_el.find('iof:Organisation', ns)
organization = None
if org_el:
organization = {
'id': org_el.find('iof:Id', ns).text,
'name': org_el.find('iof:Name', ns).text
}
role = org_el.find('iof:Role', ns)
if role:
role_person = role.find('iof:Person', ns)
organization['role_person'] = '{} {}'.format(
role_person.find('iof:Name', ns).find('iof:Family', ns).text,
role_person.find('iof:Name', ns).find('iof:Given', ns).text
)
result_el = person_result_el.find('iof:Result', ns)
bib_el = result_el.find('iof:BibNumber', ns)
control_card_el = result_el.find('iof:ControlCard', ns)
finish_time_el = result_el.find('iof:FinishTime', ns)
splits = []
            for split in result_el.findall('iof:SplitTime', ns):
split_time_el = split.find('iof:Time', ns)
if split_time_el is not None:
control_code = split.find('iof:ControlCode', ns)
split_obj = {
'control_code': control_code.text,
'time': split_time_el.text
}
splits.append(split_obj)
result = {
'bib': result_el.find('iof:BibNumber', ns).text if bib_el is not None else '',
'start_time': result_el.find('iof:StartTime', ns).text,
'finish_time': finish_time_el.text if finish_time_el is not None else '',
'status': result_el.find('iof:Status', ns).text,
'control_card': control_card_el.text if control_card_el is not None else '',
'splits': splits
}
person_results.append({
'person': person,
'organization': organization,
'group': groups[group_id],
'result': result,
})
return person_results
def event(tree, ns):
root = tree.getroot()
event_obj = {'races': []}
event_el = root.find('iof:Event', ns)
if event_el is None:
return
if event_el.find('iof:Name', ns) is not None:
event_obj['name'] = event_el.find('iof:Name', ns).text
if event_el.find('iof:StartTime', ns) is not None:
event_obj['start_time'] = event_el.find('iof:StartTime', ns).text
if event_el.find('iof:URL', ns) is not None:
event_obj['url'] = event_el.find('iof:URL', ns).text
if event_el is not None:
for race_el in event_el.findall('iof:Race', ns):
race_obj = {'name': race_el.find('iof:Name', ns).text if race_el.find('iof:Name', ns) is not None else ''}
start_time_el = race_el.find('iof:StartTime', ns)
if start_time_el:
if start_time_el.find('iof:Date', ns) is not None:
race_obj['date'] = start_time_el.find('iof:Date', ns).text
if start_time_el.find('iof:Time', ns) is not None:
race_obj['time'] = start_time_el.find('iof:Time', ns).text
event_obj['races'].append(race_obj)
return event_obj
| gpl-3.0 | -5,870,004,246,323,201,000 | 37.496454 | 118 | 0.510041 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/tests/function/test_rule_014.py | 1 | 2051 |
import os
import unittest
from vsg.rules import function
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_014_test_input.vhd'))
lExpected_lower = []
lExpected_lower.append('')
utils.read_file(os.path.join(sTestDir, 'rule_014_test_input.fixed_lower.vhd'), lExpected_lower)
lExpected_upper = []
lExpected_upper.append('')
utils.read_file(os.path.join(sTestDir, 'rule_014_test_input.fixed_upper.vhd'), lExpected_upper)
class test_function_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_014_lower(self):
oRule = function.rule_014()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'function')
self.assertEqual(oRule.identifier, '014')
lExpected = [6, 8]
oRule.analyze(self.oFile)
self.assertEqual(utils.extract_violation_lines_from_violation_object(oRule.violations), lExpected)
def test_rule_014_upper(self):
oRule = function.rule_014()
oRule.case = 'upper'
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'function')
self.assertEqual(oRule.identifier, '014')
lExpected = [4, 8]
oRule.analyze(self.oFile)
self.assertEqual(utils.extract_violation_lines_from_violation_object(oRule.violations), lExpected)
def test_fix_rule_014_lower(self):
oRule = function.rule_014()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_lower, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
def test_fix_rule_014_upper(self):
oRule = function.rule_014()
oRule.case = 'upper'
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_upper, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| gpl-3.0 | 2,142,061,807,629,623,600 | 27.09589 | 106 | 0.667479 | false |
endlessm/chromium-browser | third_party/chromite/third_party/infra_libs/ts_mon/common/test/http_metrics_test.py | 3 | 1887 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from infra_libs.ts_mon.common import http_metrics
from infra_libs.ts_mon.common import interface
from infra_libs.ts_mon.common import targets
class TestHttpMetrics(unittest.TestCase):
def setUp(self):
super(TestHttpMetrics, self).setUp()
target = targets.TaskTarget('test_service', 'test_job',
'test_region', 'test_host')
self.mock_state = interface.State(target=target)
self.state_patcher = mock.patch('infra_libs.ts_mon.common.interface.state',
new=self.mock_state)
self.state_patcher.start()
def tearDown(self):
self.state_patcher.stop()
super(TestHttpMetrics, self).tearDown()
def test_update_http_server_metrics(self):
http_metrics.update_http_server_metrics(
'/', 200, 125.4,
request_size=100, response_size=200, user_agent='Chrome')
fields = {'status': 200, 'name': '/', 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
self.assertEqual(125.4, http_metrics.server_durations.get(fields).sum)
self.assertEqual(100, http_metrics.server_request_bytes.get(fields).sum)
self.assertEqual(200, http_metrics.server_response_bytes.get(fields).sum)
def test_update_http_server_metrics_no_sizes(self):
http_metrics.update_http_server_metrics('/', 200, 125.4)
fields = {'status': 200, 'name': '/', 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
self.assertEqual(125.4, http_metrics.server_durations.get(fields).sum)
self.assertIsNone(http_metrics.server_request_bytes.get(fields))
self.assertIsNone(http_metrics.server_response_bytes.get(fields))
| bsd-3-clause | -8,013,579,235,826,577,000 | 41.886364 | 79 | 0.700053 | false |
mindw/pip | pip/index.py | 1 | 37607 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS,
)
from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
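    # --- Worked example (added for clarity, not part of the original module) ---
    # The key is (version, pri), where pri is the negated wheel support index
    # (or -len(supported_tags) for sdists), so max() over candidates prefers,
    # within one version, the most specific supported wheel over generic
    # wheels, and any wheel over an sdist.  With tags favouring cp35 over
    # py2.py3 (package names below are placeholders):
    #
    #     pkg-1.0-cp35-cp35m-linux_x86_64.whl  >  pkg-1.0-py2.py3-none-any.whl
    #     pkg-1.0-py2.py3-none-any.whl         >  pkg-1.0.tar.gz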
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (origin[1] and
origin[1].lower() != secure_origin[1].lower() and
secure_origin[1] != "*"):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port patches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(
url,
urllib_parse.quote(canonicalize_name(project_name)))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def find_all_candidates(self, project_name):
"""Find all available InstallationCandidate for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self.find_all_candidates(req.name)
# Filter out anything which doesn't match our specifier
compatible_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in compatible_versions
]
if applicable_candidates:
best_candidate = max(applicable_candidates,
key=self._candidate_sort_key)
else:
best_candidate = None
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(sorted(compatible_versions, key=parse_version)) or
"none",
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
', '.join(sorted(compatible_versions, key=parse_version))
)
return best_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
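# --- Illustrative examples (not part of the original module) ---
# egg_info_matches() pulls the version tail out of a "name-version" fragment;
# `link` below stands for any Link object and is only used for logging:
#
#     egg_info_matches('pip-8.1.2', 'pip', link)      # -> '8.1.2'
#     egg_info_matches('pip-8.1.2', None, link)       # -> '-8.1.2'
#     egg_info_matches('pip-8.1.2', 'other', link)    # -> None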
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
except requests.TooManyRedirects as exc:
cls._handle_fail(
link,
"Error: %s" % exc,
url
)
except Exception as e:
reason = ("There was an unknown error: %s" % e)
cls._handle_fail(
link,
reason,
url
)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
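    # --- Illustrative example (not part of the original module) ---
    # clean_link() percent-encodes characters that commonly leak into hrefs
    # while leaving '%' (and therefore already-encoded sequences) alone; for
    # an HTMLPage instance `page`:
    #
    #     page.clean_link('https://host/path/some file.tar.gz')
    #     # -> 'https://host/path/some%20file.tar.gz'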
class Link(object):
def __init__(self, url, comes_from=None):
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
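# --- Illustrative example (not part of the original module) ---
# FormatControl and fmt_ctl_formats() combine as follows (the package names
# are placeholders):
#
#     fmt_ctl = FormatControl(no_binary={':all:'}, only_binary=set())
#     fmt_ctl_formats(fmt_ctl, 'simplejson')   # -> frozenset({'source'})
#
#     fmt_ctl = FormatControl(no_binary=set(), only_binary={'numpy'})
#     fmt_ctl_formats(fmt_ctl, 'numpy')        # -> frozenset({'binary'})
#     fmt_ctl_formats(fmt_ctl, 'requests')     # -> frozenset({'binary', 'source'})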
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', RemovedInPip10Warning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| mit | 5,830,695,384,401,401,000 | 34.411488 | 79 | 0.551573 | false |