repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
firebitsbr/pwn_plug_sources | src/metagoofil/hachoir_metadata/misc.py | 41 | 8940 | from hachoir_metadata.metadata import RootMetadata, registerExtractor
from hachoir_metadata.safe import fault_tolerant
from hachoir_parser.container import SwfFile
from hachoir_parser.misc import TorrentFile, TrueTypeFontFile, OLE2_File, PcfFile
from hachoir_core.field import isString
from hachoir_core.error import warning
from hachoir_parser import guessParser
from hachoir_metadata.setter import normalizeString
class TorrentMetadata(RootMetadata):
KEY_TO_ATTR = {
u"announce": "url",
u"comment": "comment",
u"creation_date": "creation_date",
}
INFO_TO_ATTR = {
u"length": "file_size",
u"name": "filename",
}
def extract(self, torrent):
for field in torrent[0]:
self.processRoot(field)
@fault_tolerant
def processRoot(self, field):
if field.name in self.KEY_TO_ATTR:
key = self.KEY_TO_ATTR[field.name]
value = field.value
setattr(self, key, value)
elif field.name == "info" and "value" in field:
for field in field["value"]:
self.processInfo(field)
@fault_tolerant
def processInfo(self, field):
if field.name in self.INFO_TO_ATTR:
key = self.INFO_TO_ATTR[field.name]
value = field.value
setattr(self, key, value)
elif field.name == "piece_length":
self.comment = "Piece length: %s" % field.display
class TTF_Metadata(RootMetadata):
NAMEID_TO_ATTR = {
0: "copyright", # Copyright notice
3: "title", # Unique font identifier
5: "version", # Version string
8: "author", # Manufacturer name
11: "url", # URL Vendor
14: "copyright", # License info URL
}
def extract(self, ttf):
if "header" in ttf:
self.extractHeader(ttf["header"])
if "names" in ttf:
self.extractNames(ttf["names"])
@fault_tolerant
def extractHeader(self, header):
self.creation_date = header["created"].value
self.last_modification = header["modified"].value
self.comment = u"Smallest readable size in pixels: %s pixels" % header["lowest"].value
self.comment = u"Font direction: %s" % header["font_dir"].display
@fault_tolerant
def extractNames(self, names):
offset = names["offset"].value
for header in names.array("header"):
key = header["nameID"].value
foffset = offset + header["offset"].value
field = names.getFieldByAddress(foffset*8)
if not field or not isString(field):
continue
value = field.value
if key not in self.NAMEID_TO_ATTR:
continue
key = self.NAMEID_TO_ATTR[key]
if key == "version" and value.startswith(u"Version "):
# "Version 1.2" => "1.2"
value = value[8:]
setattr(self, key, value)
class OLE2_Metadata(RootMetadata):
SUMMARY_ID_TO_ATTR = {
2: "title", # Title
4: "author",
6: "comment",
8: "author", # Last saved by
12: "creation_date",
13: "last_modification",
14: "nb_page",
18: "producer",
}
IGNORE_SUMMARY = set((
1, # Code page
))
DOC_SUMMARY_ID_TO_ATTR = {
3: "title", # Subject
14: "author", # Manager
}
IGNORE_DOC_SUMMARY = set((
1, # Code page
))
def extract(self, ole2):
self._extract(ole2)
def _extract(self, fieldset, main_document=True):
if main_document:
# _feedAll() is needed to make sure that we get all root[*] fragments
fieldset._feedAll()
if "root[0]" in fieldset:
self.useRoot(fieldset["root[0]"])
doc_summary = self.getField(fieldset, main_document, "doc_summary[0]")
if doc_summary:
self.useSummary(doc_summary, True)
word_doc = self.getField(fieldset, main_document, "word_doc[0]")
if word_doc:
self.useWordDocument(word_doc)
summary = self.getField(fieldset, main_document, "summary[0]")
if summary:
self.useSummary(summary, False)
revision = self.getField(fieldset, main_document, "table1[0]")
@fault_tolerant
def useRoot(self, root):
stream = root.getSubIStream()
ministream = guessParser(stream)
if not ministream:
warning("Unable to create the OLE2 mini stream parser!")
return
self._extract(ministream, main_document=False)
def getField(self, fieldset, main_document, name):
if name not in fieldset:
return None
# _feedAll() is needed to make sure that we get all fragments
# eg. summary[0], summary[1], ..., summary[n]
fieldset._feedAll()
field = fieldset[name]
if main_document:
stream = field.getSubIStream()
field = guessParser(stream)
if not field:
warning("Unable to create the OLE2 parser for %s!" % name)
return None
return field
@fault_tolerant
def useSummary(self, summary, is_doc_summary):
if "os" in summary:
self.os = summary["os"].display
if "section[0]" not in summary:
return
summary = summary["section[0]"]
for property in summary.array("property_index"):
self.useProperty(summary, property, is_doc_summary)
@fault_tolerant
def useWordDocument(self, doc):
self.comment = "Encrypted: %s" % doc["fEncrypted"].value
@fault_tolerant
def useProperty(self, summary, property, is_doc_summary):
field = summary.getFieldByAddress(property["offset"].value*8)
if not field \
or "value" not in field:
return
field = field["value"]
if not field.hasValue():
return
# Get value
value = field.value
if isinstance(value, (str, unicode)):
value = normalizeString(value)
if not value:
return
# Get property identifier
prop_id = property["id"].value
if is_doc_summary:
id_to_attr = self.DOC_SUMMARY_ID_TO_ATTR
ignore = self.IGNORE_DOC_SUMMARY
else:
id_to_attr = self.SUMMARY_ID_TO_ATTR
ignore = self.IGNORE_SUMMARY
if prop_id in ignore:
return
# Get Hachoir metadata key
try:
key = id_to_attr[prop_id]
use_prefix = False
except LookupError:
key = "comment"
use_prefix = True
if use_prefix:
prefix = property["id"].display
if (prefix in ("TotalEditingTime", "LastPrinted")) \
and (not field):
# Ignore null time delta
return
value = "%s: %s" % (prefix, value)
else:
if (key == "last_modification") and (not field):
# Ignore null timestamp
return
setattr(self, key, value)
class PcfMetadata(RootMetadata):
PROP_TO_KEY = {
'CHARSET_REGISTRY': 'charset',
'COPYRIGHT': 'copyright',
'WEIGHT_NAME': 'font_weight',
'FOUNDRY': 'author',
'FONT': 'title',
'_XMBDFED_INFO': 'producer',
}
def extract(self, pcf):
if "properties" in pcf:
self.useProperties(pcf["properties"])
def useProperties(self, properties):
last = properties["total_str_length"]
offset0 = last.address + last.size
for index in properties.array("property"):
# Search name and value
value = properties.getFieldByAddress(offset0+index["value_offset"].value*8)
if not value:
continue
value = value.value
if not value:
continue
name = properties.getFieldByAddress(offset0+index["name_offset"].value*8)
if not name:
continue
name = name.value
if name not in self.PROP_TO_KEY:
warning("Skip %s=%r" % (name, value))
continue
key = self.PROP_TO_KEY[name]
setattr(self, key, value)
class SwfMetadata(RootMetadata):
def extract(self, swf):
self.height = swf["rect/ymax"].value # twips
self.width = swf["rect/xmax"].value # twips
self.format_version = "flash version %s" % swf["version"].value
self.frame_rate = swf["frame_rate"].value
self.comment = "Frame count: %s" % swf["frame_count"].value
registerExtractor(TorrentFile, TorrentMetadata)
registerExtractor(TrueTypeFontFile, TTF_Metadata)
registerExtractor(OLE2_File, OLE2_Metadata)
registerExtractor(PcfFile, PcfMetadata)
registerExtractor(SwfFile, SwfMetadata)
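# Usage sketch (illustrative only): these extractors are normally driven through
# hachoir's generic entry points rather than instantiated directly. The
# createParser/extractMetadata names below come from the hachoir 1.x API implied
# by the imports above; the filename is a made-up example.
#
#   from hachoir_parser import createParser
#   from hachoir_metadata import extractMetadata
#
#   parser = createParser(u"example.torrent")
#   if parser:
#       metadata = extractMetadata(parser)   # dispatches to TorrentMetadata
#       if metadata:
#           for line in metadata.exportPlaintext():
#               print line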
| gpl-3.0 | 4,551,887,132,164,519,400 | 33.122137 | 94 | 0.568792 | false |
cppisfun/GameEngine | foreign/boost/tools/build/v2/util/utility.py | 9 | 4754 | # (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
""" Utility functions to add/remove/get grists.
Grists are strings enclosed in angle brackets (<>) that are used as prefixes. See Jam for more information.
"""
import re
import os
import bjam
from b2.exceptions import *
__re_grist_and_value = re.compile (r'(<[^>]*>)(.*)')
__re_grist_content = re.compile ('^<(.*)>$')
__re_backslash = re.compile (r'\\')
def to_seq (value):
""" If value is a sequence, returns it.
If it is a string, returns a sequence with value as its sole element.
"""
if not value:
return []
if isinstance (value, str):
return [value]
else:
return value
def replace_references_by_objects (manager, refs):
objs = []
for r in refs:
objs.append (manager.get_object (r))
return objs
def add_grist (features):
""" Transform a string by bracketing it with "<>". If already bracketed, does nothing.
features: one string or a sequence of strings
return: the gristed string, if features is a string, or a sequence of gristed strings, if features is a sequence
"""
def grist_one (feature):
if feature [0] != '<' and feature [len (feature) - 1] != '>':
return '<' + feature + '>'
else:
return feature
if isinstance (features, str):
return grist_one (features)
else:
return [ grist_one (feature) for feature in features ]
def replace_grist (features, new_grist):
""" Replaces the grist of a string by a new one.
Returns the string with the new grist.
"""
def replace_grist_one (name, new_grist):
split = __re_grist_and_value.match (name)
if not split:
return new_grist + name
else:
return new_grist + split.group (2)
if isinstance (features, str):
return replace_grist_one (features, new_grist)
else:
return [ replace_grist_one (feature, new_grist) for feature in features ]
def get_value (property):
""" Gets the value of a property, that is, the part following the grist, if any.
"""
return replace_grist (property, '')
def get_grist (value):
""" Returns the grist of a string.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
def get_grist_one (name):
split = __re_grist_and_value.match (name)
if not split:
return ''
else:
return split.group (1)
if isinstance (value, str):
return get_grist_one (value)
else:
return [ get_grist_one (v) for v in value ]
def ungrist (value):
""" Returns the value without grist.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
def ungrist_one (value):
stripped = __re_grist_content.match (value)
if not stripped:
raise BaseException ("in ungrist: '%s' is not of the form <.*>" % value)
return stripped.group (1)
if isinstance (value, str):
return ungrist_one (value)
else:
return [ ungrist_one (v) for v in value ]
def replace_suffix (name, new_suffix):
""" Replaces the suffix of name by new_suffix.
If no suffix exists, the new one is added.
"""
split = os.path.splitext (name)
return split [0] + new_suffix
def forward_slashes (s):
""" Converts all backslashes to forward slashes.
"""
return __re_backslash.sub ('/', s)
def split_action_id (id):
""" Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++')
"""
split = id.split ('.', 1)
toolset = split [0]
name = ''
if len (split) > 1:
name = split [1]
return (toolset, name)
def os_name ():
result = bjam.variable("OS")
assert(len(result) == 1)
return result[0]
def platform ():
return bjam.variable("OSPLAT")
def os_version ():
return bjam.variable("OSVER")
def on_windows ():
""" Returns true if running on windows, whether in cygwin or not.
"""
if bjam.variable("NT"):
return True
elif bjam.variable("UNIX"):
uname = bjam.variable("JAMUNAME")
if uname and uname[0].startswith("CYGWIN"):
return True
return False
| gpl-3.0 | -3,124,740,749,742,810,600 | 28.670968 | 120 | 0.588557 | false |
mheap/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_scaleset.py | 1 | 35967 | #!/usr/bin/python
#
# Copyright (c) 2016 Sertac Ozercan, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine_scaleset
version_added: "2.4"
short_description: Manage Azure virtual machine scale sets.
description:
- Create and update a virtual machine scale set.
options:
resource_group:
description:
- Name of the resource group containing the virtual machine scale set.
required: true
name:
description:
- Name of the virtual machine scale set.
required: true
state:
description:
- Assert the state of the virtual machine scale set.
- State 'present' will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated.
- State 'absent' will remove the virtual machine scale set.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
- Short host name
version_added: 2.6
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices.
required: true
capacity:
description:
- Capacity of VMSS.
required: true
default: 1
tier:
description:
- SKU Tier.
choices:
- Basic
- Standard
upgrade_policy:
description:
- Upgrade policy.
choices:
- Manual
- Automatic
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
is disabled by setting ssh_password_enabled to false.
ssh_password_enabled:
description:
- When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
type: bool
default: true
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
image:
description:
- Specifies the image used to build the VM.
- If a string, the image is sourced from a custom image based on the
name.
- 'If a dict with the keys C(publisher), C(offer), C(sku), and
C(version), the image is sourced from a Marketplace image. NOTE:
set image.version to C(latest) to get the most recent version of a
given image.'
- 'If a dict with the keys C(name) and C(resource_group), the image
is sourced from a custom image based on the C(name) and
C(resource_group) set. NOTE: the key C(resource_group) is optional
and if omitted, all images in the subscription will be searched for
by C(name).'
- Custom image support was added in Ansible 2.5
required: true
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default: Linux
managed_disk_type:
description:
- Managed disk type.
choices:
- Standard_LRS
- Premium_LRS
data_disks:
description:
- Describes list of data disks.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk.
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks.
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type.
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
virtual_network_resource_group:
description:
- When creating a virtual machine, if a specific virtual network from another resource group should be
used, use this parameter to specify the resource group to use.
version_added: "2.5"
virtual_network_name:
description:
- Virtual Network name.
aliases:
- virtual_network
subnet_name:
description:
- Subnet name.
aliases:
- subnet
load_balancer:
description:
- Load balancer name.
version_added: "2.5"
remove_on_absent:
description:
- When removing a VM using state 'absent', also remove associated resources.
- "It can be 'all' or a list with any of the following: ['network_interfaces', 'virtual_storage', 'public_ips']."
- Any other input will be ignored.
default: ['all']
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Sertac Ozercan (@sozercan)"
'''
EXAMPLES = '''
- name: Create VMSS
azure_rm_virtualmachine_scaleset:
resource_group: Testing
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
subnet_name: testsubnet
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
key_data: < insert your ssh public key here... >
managed_disk_type: Standard_LRS
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
caching: ReadWrite
managed_disk_type: Standard_LRS
- name: Create a VMSS with a custom image
azure_rm_virtualmachine_scaleset:
resource_group: Testing
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image: customimage001
- name: Create a VMSS with a custom image from a particular resource group
azure_rm_virtualmachine_scaleset:
resource_group: Testing
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image:
name: customimage001
resource_group: Testing
'''
RETURN = '''
azure_vmss:
description: Facts about the current state of the object. Note that facts are not part of the registered output but available directly.
returned: always
type: complex
contains: {
"properties": {
"overprovision": true,
"singlePlacementGroup": true,
"upgradePolicy": {
"mode": "Manual"
},
"virtualMachineProfile": {
"networkProfile": {
"networkInterfaceConfigurations": [
{
"name": "testvmss",
"properties": {
"dnsSettings": {
"dnsServers": []
},
"enableAcceleratedNetworking": false,
"ipConfigurations": [
{
"name": "default",
"properties": {
"privateIPAddressVersion": "IPv4",
"subnet": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet"
}
}
}
],
"primary": true
}
}
]
},
"osProfile": {
"adminUsername": "testuser",
"computerNamePrefix": "testvmss",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"keyData": "",
"path": "/home/testuser/.ssh/authorized_keys"
}
]
}
},
"secrets": []
},
"storageProfile": {
"dataDisks": [
{
"caching": "ReadWrite",
"createOption": "empty",
"diskSizeGB": 64,
"lun": 0,
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
],
"imageReference": {
"offer": "CoreOS",
"publisher": "CoreOS",
"sku": "Stable",
"version": "899.17.0"
},
"osDisk": {
"caching": "ReadWrite",
"createOption": "fromImage",
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
}
}
},
"sku": {
"capacity": 2,
"name": "Standard_DS1_v2",
"tier": "Standard"
},
"tags": null,
"type": "Microsoft.Compute/virtualMachineScaleSets"
}
''' # NOQA
import random
import re
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineScaleSet(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present', type='str'),
location=dict(type='str'),
short_hostname=dict(type='str'),
vm_size=dict(type='str', required=True),
tier=dict(type='str', choices=['Basic', 'Standard']),
capacity=dict(type='int', default=1),
upgrade_policy=dict(type='str', choices=['Automatic', 'Manual']),
admin_username=dict(type='str'),
admin_password=dict(type='str', no_log=True),
ssh_password_enabled=dict(type='bool', default=True),
ssh_public_keys=dict(type='list'),
image=dict(type='raw'),
os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
default='ReadOnly'),
os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS']),
data_disks=dict(type='list'),
subnet_name=dict(type='str', aliases=['subnet']),
load_balancer=dict(type='str'),
virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
remove_on_absent=dict(type='list', default=['all']),
)
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.short_hostname = None
self.vm_size = None
self.capacity = None
self.tier = None
self.upgrade_policy = None
self.admin_username = None
self.admin_password = None
self.ssh_password_enabled = None
self.ssh_public_keys = None
self.image = None
self.os_disk_caching = None
self.managed_disk_type = None
self.data_disks = None
self.os_type = None
self.subnet_name = None
self.virtual_network_resource_group = None
self.virtual_network_name = None
self.tags = None
self.differences = None
self.load_balancer = None
self.results = dict(
changed=False,
actions=[],
ansible_facts=dict(azure_vmss=None)
)
super(AzureRMVirtualMachineScaleSet, self).__init__(
derived_arg_spec=self.module_arg_spec,
supports_check_mode=True
)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
# make sure options are lower case
self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
# default virtual_network_resource_group to resource_group
if not self.virtual_network_resource_group:
self.virtual_network_resource_group = self.resource_group
changed = False
results = dict()
vmss = None
disable_ssh_password = None
vmss_dict = None
virtual_network = None
subnet = None
image_reference = None
custom_image = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present':
# Verify parameters and resolve any defaults
if self.vm_size and not self.vm_size_is_valid():
self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
self.vm_size
))
# if self.virtual_network_name:
# virtual_network = self.get_virtual_network(self.virtual_network_name)
if self.ssh_public_keys:
msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
"each dict contains keys: path, key_data."
for key in self.ssh_public_keys:
if not isinstance(key, dict):
self.fail(msg)
if not key.get('path') or not key.get('key_data'):
self.fail(msg)
if self.image and isinstance(self.image, dict):
if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
marketplace_image = self.get_marketplace_image_version()
if self.image['version'] == 'latest':
self.image['version'] = marketplace_image.name
self.log("Using image version {0}".format(self.image['version']))
image_reference = self.compute_models.ImageReference(
publisher=self.image['publisher'],
offer=self.image['offer'],
sku=self.image['sku'],
version=self.image['version']
)
elif self.image.get('name'):
custom_image = True
image_reference = self.get_custom_image_reference(
self.image.get('name'),
self.image.get('resource_group'))
else:
self.fail("parameter error: expecting image to contain [publisher, offer, sku, version] or [name, resource_group]")
elif self.image and isinstance(self.image, str):
custom_image = True
image_reference = self.get_custom_image_reference(self.image)
elif self.image:
self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
disable_ssh_password = not self.ssh_password_enabled
try:
self.log("Fetching virtual machine scale set {0}".format(self.name))
vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
self.check_provisioning_state(vmss, self.state)
vmss_dict = self.serialize_vmss(vmss)
if self.state == 'present':
differences = []
results = vmss_dict
if self.os_disk_caching and \
self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']:
self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name))
differences.append('OS Disk caching')
changed = True
vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
if self.capacity and \
self.capacity != vmss_dict['sku']['capacity']:
self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name))
differences.append('Capacity')
changed = True
vmss_dict['sku']['capacity'] = self.capacity
if self.data_disks and \
len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])):
self.log('CHANGED: virtual machine scale set {0} - Data Disks'.format(self.name))
differences.append('Data Disks')
changed = True
update_tags, vmss_dict['tags'] = self.update_tags(vmss_dict.get('tags', dict()))
if update_tags:
differences.append('Tags')
changed = True
self.differences = differences
elif self.state == 'absent':
self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name))
results = dict()
changed = True
except CloudError:
self.log('Virtual machine scale set {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name))
changed = True
self.results['changed'] = changed
self.results['ansible_facts']['azure_vmss'] = results
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not vmss:
# Create the VMSS
self.log("Create virtual machine scale set {0}".format(self.name))
self.results['actions'].append('Created VMSS {0}'.format(self.name))
# Validate parameters
if not self.admin_username:
self.fail("Parameter error: admin_username required when creating a virtual machine scale set.")
if self.os_type == 'Linux':
if disable_ssh_password and not self.ssh_public_keys:
self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
if not self.virtual_network_name:
default_vnet = self.create_default_vnet()
virtual_network = default_vnet.id
self.virtual_network_name = default_vnet.name
if self.subnet_name:
subnet = self.get_subnet(self.virtual_network_name, self.subnet_name)
load_balancer_backend_address_pools = None
load_balancer_inbound_nat_pools = None
if self.load_balancer:
load_balancer = self.get_load_balancer(self.load_balancer)
load_balancer_backend_address_pools = ([self.compute_models.SubResource(resource.id)
for resource in load_balancer.backend_address_pools]
if load_balancer.backend_address_pools else None)
load_balancer_inbound_nat_pools = ([self.compute_models.SubResource(resource.id)
for resource in load_balancer.inbound_nat_pools]
if load_balancer.inbound_nat_pools else None)
if not self.short_hostname:
self.short_hostname = self.name
if not image_reference:
self.fail("Parameter error: an image is required when creating a virtual machine.")
managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type)
vmss_resource = self.compute_models.VirtualMachineScaleSet(
self.location,
tags=self.tags,
upgrade_policy=self.compute_models.UpgradePolicy(
mode=self.upgrade_policy
),
sku=self.compute_models.Sku(
name=self.vm_size,
capacity=self.capacity,
tier=self.tier,
),
virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile(
os_profile=self.compute_models.VirtualMachineScaleSetOSProfile(
admin_username=self.admin_username,
computer_name_prefix=self.short_hostname,
),
storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile(
os_disk=self.compute_models.VirtualMachineScaleSetOSDisk(
managed_disk=managed_disk,
create_option=self.compute_models.DiskCreateOptionTypes.from_image,
caching=self.os_disk_caching,
),
image_reference=image_reference,
),
network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile(
network_interface_configurations=[
self.compute_models.VirtualMachineScaleSetNetworkConfiguration(
name=self.name,
primary=True,
ip_configurations=[
self.compute_models.VirtualMachineScaleSetIPConfiguration(
name='default',
subnet=self.compute_models.ApiEntityReference(
id=subnet.id
),
primary=True,
load_balancer_backend_address_pools=load_balancer_backend_address_pools,
load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools
)
]
)
]
)
)
)
if self.admin_password:
vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password
if self.os_type == 'Linux':
vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
disable_password_authentication=disable_ssh_password
)
if self.ssh_public_keys:
ssh_config = self.compute_models.SshConfiguration()
ssh_config.public_keys = \
[self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config
if self.data_disks:
data_disks = []
for data_disk in self.data_disks:
data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
storage_account_type=data_disk['managed_disk_type']
)
data_disk['caching'] = data_disk.get(
'caching',
self.compute_models.CachingTypes.read_only
)
data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
lun=data_disk['lun'],
caching=data_disk['caching'],
create_option=self.compute_models.DiskCreateOptionTypes.empty,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=data_disk_managed_disk,
))
vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
self.log("Create virtual machine with parameters:")
self.create_or_update_vmss(vmss_resource)
elif self.differences and len(self.differences) > 0:
self.log("Update virtual machine scale set {0}".format(self.name))
self.results['actions'].append('Updated VMSS {0}'.format(self.name))
vmss_resource = self.get_vmss()
vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching
vmss_resource.sku.capacity = self.capacity
data_disks = []
for data_disk in self.data_disks:
data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
lun=data_disk['lun'],
caching=data_disk['caching'],
create_option=self.compute_models.DiskCreateOptionTypes.empty,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
storage_account_type=data_disk['managed_disk_type']
),
))
vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
self.log("Update virtual machine with parameters:")
self.create_or_update_vmss(vmss_resource)
self.results['ansible_facts']['azure_vmss'] = self.serialize_vmss(self.get_vmss())
elif self.state == 'absent':
# delete the VM
self.log("Delete virtual machine scale set {0}".format(self.name))
self.results['ansible_facts']['azure_vmss'] = None
self.delete_vmss(vmss)
# until we sort out how we want to do this globally
del self.results['actions']
return self.results
def get_vmss(self):
'''
Get the VMSS
:return: VirtualMachineScaleSet object
'''
try:
vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
return vmss
except CloudError as exc:
self.fail("Error getting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
def get_virtual_network(self, name):
try:
vnet = self.network_client.virtual_networks.get(self.virtual_network_resource_group, name)
return vnet
except CloudError as exc:
self.fail("Error fetching virtual network {0} - {1}".format(name, str(exc)))
def get_subnet(self, vnet_name, subnet_name):
self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name))
try:
subnet = self.network_client.subnets.get(self.virtual_network_resource_group, vnet_name, subnet_name)
except CloudError as exc:
self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(
subnet_name,
vnet_name,
str(exc)))
return subnet
def get_load_balancer(self, id):
id_dict = parse_resource_id(id)
try:
return self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name'))
except CloudError as exc:
self.fail("Error fetching load balancer {0} - {1}".format(id, str(exc)))
def serialize_vmss(self, vmss):
'''
Convert a VirtualMachineScaleSet object to dict.
:param vm: VirtualMachineScaleSet object
:return: dict
'''
result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
result['id'] = vmss.id
result['name'] = vmss.name
result['type'] = vmss.type
result['location'] = vmss.location
result['tags'] = vmss.tags
return result
def delete_vmss(self, vmss):
self.log("Deleting virtual machine scale set {0}".format(self.name))
self.results['actions'].append("Deleted virtual machine scale set {0}".format(self.name))
try:
poller = self.compute_client.virtual_machine_scale_sets.delete(self.resource_group, self.name)
# wait for the poller to finish
self.get_poller_result(poller)
except CloudError as exc:
self.fail("Error deleting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
return True
def get_marketplace_image_version(self):
try:
versions = self.compute_client.virtual_machine_images.list(self.location,
self.image['publisher'],
self.image['offer'],
self.image['sku'])
except CloudError as exc:
self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
str(exc)))
if versions and len(versions) > 0:
if self.image['version'] == 'latest':
return versions[len(versions) - 1]
for version in versions:
if version.name == self.image['version']:
return version
self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
self.image['version']))
def get_custom_image_reference(self, name, resource_group=None):
try:
if resource_group:
vm_images = self.compute_client.images.list_by_resource_group(resource_group)
else:
vm_images = self.compute_client.images.list()
except Exception as exc:
self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
for vm_image in vm_images:
if vm_image.name == name:
self.log("Using custom image id {0}".format(vm_image.id))
return self.compute_models.ImageReference(id=vm_image.id)
self.fail("Error could not find image with name {0}".format(name))
def create_or_update_vmss(self, params):
try:
poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, params)
self.get_poller_result(poller)
except CloudError as exc:
self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
def vm_size_is_valid(self):
'''
Validate self.vm_size against the list of virtual machine sizes available for the account and location.
:return: boolean
'''
try:
sizes = self.compute_client.virtual_machine_sizes.list(self.location)
except CloudError as exc:
self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
for size in sizes:
if size.name == self.vm_size:
return True
return False
def main():
AzureRMVirtualMachineScaleSet()
if __name__ == '__main__':
main()
| gpl-3.0 | -6,842,941,546,625,744,000 | 41.017523 | 199 | 0.513832 | false |
DarthMaulware/EquationGroupLeaks | Leak #1 - Equation Group Cyber Weapons Auction - Invitation/EQGRP-Free-File/Firewall/EXPLOITS/ELCA/fosho/requests/packages/urllib3/exceptions.py | 2 | 1156 | # urllib3/exceptions.py
class HTTPError(Exception):
'''Base exception used by this module.'''
pass
class PoolError(HTTPError):
'''Base exception for errors caused within a pool.'''
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
class SSLError(HTTPError):
'''Raised when an SSL error occurs on an HTTPS connection.'''
pass
class MaxRetryError(PoolError):
'''Raised when the maximum number of retries is exceeded for a URL.'''
def __init__(self, pool, url):
message = "Max retries exceeded with url: %s" % url
PoolError.__init__(self, pool, message)
self.url = url
class HostChangedError(PoolError):
'''Raised when an existing pool gets a request for a foreign host.'''
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
PoolError.__init__(self, pool, message)
self.url = url
self.retries = retries
class TimeoutError(PoolError):
'''Raised when a request times out.'''
pass
class EmptyPoolError(PoolError):
'''Raised when a pool runs out of connections and no more are allowed.'''
pass
class LocationParseError(ValueError, HTTPError):
'''Raised when the URL location could not be parsed.'''
def __init__(self, location):
message = "Failed to parse: %s" % location
super(LocationParseError, self).__init__(self, message)
self.location = location
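# Illustrative handling of these exceptions by callers (sketch only; the pool
# object comes from urllib3's connectionpool module, not from this file):
#
#   try:
#       pool.urlopen('GET', '/', retries=2)
#   except MaxRetryError as e:
#       print("gave up fetching %s" % e.url)
#   except TimeoutError:
#       print("request timed out")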
| unlicense | 6,331,205,613,518,147,000 | 13.271605 | 67 | 0.557958 | false |
mitodl/sga-lti | sga/backend/constants.py | 1 | 1170 | """
Constant definitions
"""
# Datetime formats
SGA_DATETIME_FORMAT = "l, F j, Y, g:iA e"
EPOCH_FORMAT = "U"
# Messages
GRADER_TO_STUDENT_CONFIRM = ("Are you sure you want to change this grader to a student? " +
"Students currently assigned to this grader will no longer " +
"be assigned to any grader.")
STUDENT_TO_GRADER_CONFIRM = "Are you sure you want to change this student into a grader?"
UNASSIGN_GRADER_CONFIRM = ("Are you sure you want to unassign the grader from this student? " +
"(You can reassign the same grader or a new grader after this action.)")
UNASSIGN_STUDENT_CONFIRM = ("Are you sure you want to unassign this student from this grader? " +
"(You can reassign the same grader or a new grader after this action.)")
UNSUBMIT_CONFIRM = "Are you sure you want to mark this submission as not submitted?"
INVALID_S3_CHARACTERS_REGEX = r"[^a-zA-Z0-9!\-_.*'()/]"
STUDIO_USER_USERNAME = "cuid:student"
class Roles():
"""
Role definitions
"""
student = "student"
grader = "grader"
admin = "admin"
none = "none"
| bsd-3-clause | 1,113,038,588,414,616,300 | 34.454545 | 100 | 0.62735 | false |
akbarpn136/perek-dj | utama/views.py | 1 | 16574 | from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from django.http import JsonResponse
from django.contrib import messages
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.text import slugify
from .models import Kategori, Kegiatan, Format, Personil
from .forms import FormKategori, FormKegiatan, FormFormat, FormPersonil, FormMasuk
# Create your views here.
def bantu_kegiatan(request):
data_raw = [{'title': o.nama, 'description': o.referensi, 'slug': o.slug} for o in Kegiatan.objects.all()]
return JsonResponse(data_raw, safe=False)
def index(request):
data_kegiatan = Kegiatan.objects.all()
paginator = Paginator(data_kegiatan, 15, 1)
page = request.GET.get('halaman')
try:
kegiatan = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
kegiatan = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
kegiatan = paginator.page(paginator.num_pages)
maks = len(paginator.page_range)
start_number = kegiatan.number - 3 if kegiatan.number >= 4 else 0
end_number = kegiatan.number + 2 if kegiatan.number <= maks else maks
page_range = paginator.page_range[start_number:end_number]
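# Worked example of the sliding window above: with 10 pages and the user on
# page 5, start_number == 2 and end_number == 7, so page_range[2:7] yields the
# links for pages 3 to 7, i.e. at most five links centred on the current page;
# the window is clamped to begin at page 1 when the current page is 1, 2 or 3.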
data = {
'kegiatan': kegiatan,
'page_range': page_range,
}
return render(request, 'utama/halaman_utama.html', data)
@login_required
def tambah_kegiatan(request):
if request.method == 'POST':
slug = slugify(request.POST.get('nama'))
a = Kegiatan(slug=slug)
formulir = FormKegiatan(request.POST, instance=a)
if formulir.is_valid():
if request.user.is_superuser:
messages.success(request, 'Kegiatan berhasil disimpan.')
formulir.save()
else:
messages.warning(request, 'Hanya admin yang boleh menambahkan kegiatan!')
return redirect('halaman_utama')
else:
formulir = FormKegiatan()
data = {
'form_kegiatan': formulir
}
if request.user.is_superuser:
return render(request, 'utama/halaman_modif_kegiatan.html', data)
else:
messages.warning(request, 'Halaman khusus admin')
return redirect('halaman_utama')
@login_required
def ubah_kegiatan(request, slug, pk):
kegiatan_ubah = get_object_or_404(Kegiatan, pk=pk)
if slug is None:
pass
if request.method == 'POST':
slg = slugify(request.POST.get('nama'))
kegiatan_ubah.slug = slg
formulir = FormKegiatan(request.POST, instance=kegiatan_ubah)
if formulir.is_valid():
if request.user.is_superuser:
messages.success(request, 'Kegiatan berhasil disimpan.')
formulir.save()
else:
messages.warning(request, 'Hanya admin yang boleh mengubah kegiatan!')
return redirect('halaman_utama')
else:
formulir = FormKegiatan(instance=kegiatan_ubah)
data = {
'form_kegiatan': formulir,
'kegiatan': kegiatan_ubah
}
if request.user.is_superuser:
return render(request, 'utama/halaman_modif_kegiatan.html', data)
else:
messages.warning(request, 'Halaman khusus admin')
return redirect('halaman_utama')
@login_required
def hapus_kegiatan(request, slug, pk):
if slug is None:
pass
kegiatan_ubah = get_object_or_404(Kegiatan, pk=pk)
if request.user.is_superuser:
if kegiatan_ubah.delete():
html = '''<div class="ui green message">
<div class="header">
Info
</div>
<p>
Kegiatan berhasil dihapus.
</p>
</div>'''
return HttpResponse(html)
else:
html = '''<div class="ui red message">
<div class="header">
Info
</div>
<p>
Kegiatan hanya boleh dihapus oleh admin.
</p>
</div>'''
return HttpResponse(html)
def kegiatan_berdasarkan_kategori(request, slug, pk):
kegiatan_kategori = Kegiatan.objects.filter(kategori_kegiatan__slug=slug, kategori_kegiatan__pk=pk)
data = {
'kegiatan': kegiatan_kategori
}
return render(request, 'utama/halaman_utama.html', data)
def cari_kegiatan(request, slug):
teks = slug.replace('-', ' ')
temu_kegiatan = Kegiatan.objects.filter(nama__contains=teks)
data = {
'kegiatan': temu_kegiatan
}
return render(request, 'utama/halaman_utama.html', data)
def user_kegiatan(request):
data_keg_user = Kegiatan.objects.filter(personil__orang__pk=request.user.pk)
data = {
'kegiatan': data_keg_user
}
return render(request, 'utama/halaman_utama.html', data)
def lihat_kategori(request):
data_kategori = Kategori.objects.all()
paginator = Paginator(data_kategori, 50, 1)
page = request.GET.get('halaman')
try:
kategori = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
kategori = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
kategori = paginator.page(paginator.num_pages)
maks = len(paginator.page_range)
start_number = kategori.number - 3 if kategori.number >= 4 else 0
end_number = kategori.number + 2 if kategori.number <= maks else maks
page_range = paginator.page_range[start_number:end_number]
data = {
'kategori': kategori,
'page_range': page_range,
}
return render(request, 'utama/halaman_kategori.html', data)
@login_required
def tambah_kategori(request):
if request.method == 'POST':
slug = slugify(request.POST.get('nama'))
a = Kategori(slug=slug)
formulir = FormKategori(request.POST, instance=a)
if formulir.is_valid():
messages.success(request, 'Kategori berhasil disimpan.')
formulir.save()
return redirect('halaman_kategori')
else:
formulir = FormKategori()
data = {
'form_kategori': formulir
}
return render(request, 'utama/halaman_modif_kategori.html', data)
@login_required
def ubah_kategori(request, slug, pk):
kategori_ubah = get_object_or_404(Kategori, pk=pk)
if slug is None:
pass
if request.method == 'POST':
slg = slugify(request.POST.get('nama'))
kategori_ubah.slug = slg
formulir = FormKategori(request.POST, instance=kategori_ubah)
if formulir.is_valid():
messages.success(request, 'Kategori berhasil disimpan.')
formulir.save()
return redirect('halaman_kategori')
else:
formulir = FormKategori(instance=kategori_ubah)
data = {
'form_kategori': formulir,
'kategori': kategori_ubah
}
return render(request, 'utama/halaman_modif_kategori.html', data)
@login_required
def hapus_kategori(request, slug, pk):
if slug is None:
pass
kategori_ubah = get_object_or_404(Kategori, pk=pk)
if kategori_ubah.delete():
html = '''<div class="ui green message">
<div class="header">
Info
</div>
<p>
Kategori berhasil dihapus.
</p>
</div>'''
return HttpResponse(html)
@login_required
def lihat_format(request, pk):
data_format = Format.objects.all()
data_keg = get_object_or_404(Kegiatan, pk=pk)
data = {
'format': data_format,
'keg': data_keg
}
return render(request, 'utama/halaman_format.html', data)
@login_required
def tambah_format(request, pk):
kegiatan = get_object_or_404(Kegiatan, pk=pk)
if request.method == 'POST':
a = Format(format_kegiatan=kegiatan)
formulir = FormFormat(request.POST, instance=a)
if formulir.is_valid():
if request.user.is_superuser:
messages.success(request, 'Format berhasil disimpan.')
formulir.save()
else:
messages.warning(request, 'hanya dapat dilakukan oleh admin.')
return redirect('halaman_format', pk=pk)
else:
formulir = FormFormat()
data = {
'form_format': formulir,
'index': kegiatan
}
if request.user.is_superuser:
return render(request, 'utama/halaman_modif_format.html', data)
else:
messages.warning(request, 'hanya dapat dilakukan oleh admin.')
return redirect('halaman_format', pk=pk)
@login_required
def ubah_format(request, pk, keg_id):
frmt = get_object_or_404(Format, pk=pk)
kegiatan = get_object_or_404(Kegiatan, pk=keg_id)
if request.method == 'POST':
formulir = FormFormat(request.POST, instance=frmt)
if formulir.is_valid():
if request.user.is_superuser:
messages.success(request, 'Format berhasil disimpan.')
formulir.save()
else:
messages.warning(request, 'hanya dapat dilakukan oleh admin.')
return redirect('halaman_format', pk=keg_id)
else:
formulir = FormFormat(instance=frmt)
data = {
'form_format': formulir,
'index': kegiatan,
'format': frmt
}
if request.user.is_superuser:
return render(request, 'utama/halaman_modif_format.html', data)
else:
messages.warning(request, 'hanya dapat dilakukan oleh admin.')
return redirect('halaman_format', pk=keg_id)
@login_required
def hapus_format(request, pk):
format_ubah = get_object_or_404(Format, pk=pk)
if request.user.is_superuser:
if format_ubah.delete():
html = '''<div class="ui green message">
<div class="header">
Info
</div>
<p>
Format berhasil dihapus.
</p>
</div>'''
return HttpResponse(html)
else:
html = '''<div class="ui red message">
<div class="header">
Info
</div>
<p>
Format hanya boleh dihapus oleh admin.
</p>
</div>'''
return HttpResponse(html)
def lihat_personil(request, pk):
data_personil = Personil.objects.filter(personil_kegiatan=pk)
kegiatan_tertentu = get_object_or_404(Kegiatan, pk=pk)
data = {
'personil': data_personil,
'kegiatan': kegiatan_tertentu,
}
return render(request, 'utama/halaman_personil.html', data)
@login_required
def tambah_personil(request, pk):
kegiatan_tertentu = get_object_or_404(Kegiatan, pk=pk)
if request.method == 'POST':
a = Personil(personil_kegiatan=kegiatan_tertentu)
formulir = FormPersonil(request.POST, instance=a)
org = request.POST.get('orang')
banyak_peran = Personil.objects.filter(orang=org, peran_utama=True).count()
if formulir.is_valid():
if request.user.is_superuser:
if banyak_peran == 0:
messages.success(request, 'Data personil berhasil disimpan')
formulir.save()
elif banyak_peran == 1:
personil_terkait = get_object_or_404(Personil, orang=org, peran_utama=True)
personil_terkait.peran_utama = False
personil_terkait.save()
messages.success(request, 'Data personil berhasil disimpan')
formulir.save()
else:
messages.warning(request, 'Jumlah peran utama tidak boleh lebih dari 1 (satu) untuk tiap kegiatan, '
'silahkan pilih salah satu')
else:
messages.warning(request, 'Simpan data personil hanya dapat dilakukan oleh admin.')
return redirect('halaman_personil', pk=kegiatan_tertentu.pk)
else:
formulir = FormPersonil()
data = {
'formulir': formulir,
'kegiatan': kegiatan_tertentu
}
if request.user.is_superuser:
return render(request, 'utama/halaman_modif_personil.html', data)
else:
messages.warning(request, 'Hanya dapat dilakukan oleh admin.')
return redirect('halaman_utama')
@login_required
def ubah_personil(request, pk, pk_personil):
kegiatan_tertentu = get_object_or_404(Kegiatan, pk=pk)
personil_tertentu = get_object_or_404(Personil, pk=pk_personil)
if request.method == 'POST':
formulir = FormPersonil(request.POST, instance=personil_tertentu)
org = request.POST.get('orang')
banyak_peran = Personil.objects.filter(orang=org, peran_utama=True).count()
if formulir.is_valid():
if request.user.is_superuser:
if banyak_peran == 0:
messages.success(request, 'Data personil berhasil disimpan')
formulir.save()
elif banyak_peran == 1:
personil_terkait = get_object_or_404(Personil, orang=org, peran_utama=True)
personil_terkait.peran_utama = False
personil_terkait.save()
messages.success(request, 'Data personil berhasil disimpan')
formulir.save()
else:
messages.warning(request, 'Jumlah peran utama tidak boleh lebih dari 1 (satu) untuk tiap kegiatan, '
'silahkan pilih salah satu')
else:
messages.warning(request, 'Simpan data personil hanya dapat dilakukan oleh admin.')
return redirect('halaman_personil', pk=kegiatan_tertentu.pk)
else:
formulir = FormPersonil(instance=personil_tertentu)
data = {
'formulir': formulir,
'kegiatan': kegiatan_tertentu,
'personil': personil_tertentu
}
if request.user.is_superuser:
return render(request, 'utama/halaman_modif_personil.html', data)
else:
messages.warning(request, 'Hanya dapat dilakukan oleh admin.')
return redirect('halaman_personil', pk=kegiatan_tertentu.pk)
@login_required
def hapus_personil(request, pk):
personil_ubah = get_object_or_404(Personil, pk=pk)
if request.user.is_superuser:
if personil_ubah.delete():
html = '''<div class="ui green message">
<div class="header">
Info
</div>
<p>
Personil berhasil dihapus.
</p>
</div>'''
return HttpResponse(html)
else:
html = '''<div class="ui red message">
<div class="header">
Info
</div>
<p>
Personil hanya boleh dihapus oleh admin.
</p>
</div>'''
return HttpResponse(html)
def masuk(request):
if request.user.is_authenticated():
messages.warning(request, 'Sudah terotentikasi...')
return redirect('halaman_utama')
else:
if request.method == 'POST':
a = request.POST.get('username')
b = request.POST.get('password')
lanjut = request.GET.get('next')
formulir = FormMasuk(request.POST)
if formulir.is_valid():
user = authenticate(username=a, password=b)
if user is None:
messages.warning(request, 'username atau password salah.')
return redirect('halaman_login')
else:
login(request, user)
messages.success(request, 'Selamat datang, ' + request.user.username)
if lanjut is None:
return redirect('halaman_utama')
else:
return redirect(lanjut)
else:
formulir = FormMasuk()
data = {
'formulir': formulir
}
return render(request, 'registration/halaman_masuk.html', data)
def keluar(request):
messages.info(request, 'Berhasil logout.')
logout(request)
return redirect('halaman_utama')
| cc0-1.0 | -1,456,190,791,158,790,000 | 29.692593 | 120 | 0.587426 | false |
163gal/Time-Line | libs_arm/wx/lib/pubsub/core/kwargs/publishermixin.py | 5 | 2311 | """
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
class PublisherMixin:
"""
Mixin for publishing messages to a topic's listeners. This will be
mixed into topicobj.Topic so that a user can use a Topic object to
send a message to the topic's listeners via a publish() method.
Note that it is important that the PublisherMixin NOT modify any
state data during message sending, because in principle it could
happen that a listener causes another message of same topic to be
sent (presumably, the listener has a way of preventing infinite
loop).
"""
def __init__(self):
pass
def publish(self, **msgKwargs):
self._publish(msgKwargs)
############## IMPLEMENTATION ###############
class IterState:
def __init__(self, msgKwargs):
self.filteredArgs = msgKwargs
self.argsChecked = False
def checkMsgArgs(self, spec):
spec.check(self.filteredArgs)
self.argsChecked = True
def filterMsgArgs(self, topicObj):
if self.argsChecked:
self.filteredArgs = topicObj.filterMsgArgs(self.filteredArgs)
else:
self.filteredArgs = topicObj.filterMsgArgs(self.filteredArgs, True)
self.argsChecked = True
def _mix_prePublish(self, msgKwargs, topicObj=None, iterState=None):
if iterState is None:
# do a first check that all args are there, costly so only do once
iterState = self.IterState(msgKwargs)
if self.hasMDS():
iterState.checkMsgArgs( self._getListenerSpec() )
else:
assert not self.hasListeners()
else:
iterState.filterMsgArgs(topicObj)
assert iterState is not None
return iterState
def _mix_callListener(self, listener, msgKwargs, iterState):
"""Send the message for given topic with data in msgKwargs.
This sends message to listeners of parent topics as well.
Note that at each level, msgKwargs is filtered so only those
args that are defined for the topic are sent to listeners. """
listener(iterState.filteredArgs, self, msgKwargs)
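# Usage sketch of what this mixin enables at the call site, via the public
# pubsub kwargs API (topic and argument names are made up for illustration):
#
#   from pubsub import pub
#
#   def on_progress(percent, filename):
#       print(percent, filename)
#
#   pub.subscribe(on_progress, 'io.progress')
#   pub.sendMessage('io.progress', percent=42, filename='data.bin')
#
# sendMessage() looks up the 'io.progress' Topic object and invokes the
# publish(**msgKwargs) method mixed in above.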
| gpl-3.0 | -4,813,082,928,440,752,000 | 34.553846 | 83 | 0.63955 | false |
cemoody/chainer | examples/sentiment/train_sentiment.py | 4 | 6986 | #!/usr/bin/env python
"""Sample script of recursive neural networks for sentiment analysis.
This is Socher's simple recursive model, not RTNN:
R. Socher, C. Lin, A. Y. Ng, and C.D. Manning.
Parsing Natural Scenes and Natural Language with Recursive Neural Networks.
in ICML2011.
"""
import argparse
import codecs
import collections
import random
import re
import time
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import optimizers
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', default=400, type=int,
help='number of epochs to learn')
parser.add_argument('--unit', '-u', default=30, type=int,
help='number of units')
parser.add_argument('--batchsize', '-b', type=int, default=25,
help='learning minibatch size')
parser.add_argument('--label', '-l', type=int, default=5,
help='number of labels')
parser.add_argument('--epocheval', '-p', type=int, default=5,
help='number of epochs per evaluation')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args()
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
n_epoch = args.epoch # number of epochs
n_units = args.unit # number of units per layer
batchsize = args.batchsize # minibatch size
n_label = args.label # number of labels
epoch_per_eval = args.epocheval # number of epochs per evaluation
class SexpParser(object):
def __init__(self, line):
self.tokens = re.findall(r'\(|\)|[^\(\) ]+', line)
self.pos = 0
def parse(self):
assert self.pos < len(self.tokens)
token = self.tokens[self.pos]
assert token != ')'
self.pos += 1
if token == '(':
children = []
while True:
assert self.pos < len(self.tokens)
if self.tokens[self.pos] == ')':
self.pos += 1
break
else:
children.append(self.parse())
return children
else:
return token
def convert_tree(vocab, exp):
assert isinstance(exp, list) and (len(exp) == 2 or len(exp) == 3)
if len(exp) == 2:
label, leaf = exp
if leaf not in vocab:
vocab[leaf] = len(vocab)
return {'label': int(label), 'node': vocab[leaf]}
elif len(exp) == 3:
label, left, right = exp
node = (convert_tree(vocab, left), convert_tree(vocab, right))
return {'label': int(label), 'node': node}
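# Illustrative input/output for the two helpers above, derived from the parsing
# code (the treebank line itself is a made-up example):
#
#   line = "(3 (2 good) (2 .))"
#   SexpParser(line).parse()      # -> ['3', ['2', 'good'], ['2', '.']]
#   convert_tree(vocab, parsed)   # -> {'label': 3,
#                                 #     'node': ({'label': 2, 'node': vocab['good']},
#                                 #              {'label': 2, 'node': vocab['.']})}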
def read_corpus(path, vocab, max_size):
with codecs.open(path, encoding='utf-8') as f:
trees = []
for line in f:
line = line.strip()
tree = SexpParser(line).parse()
trees.append(convert_tree(vocab, tree))
if max_size and len(trees) >= max_size:
break
return trees
class RecursiveNet(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RecursiveNet, self).__init__(
embed=L.EmbedID(n_vocab, n_units),
l=L.Linear(n_units * 2, n_units),
w=L.Linear(n_units, n_label))
def leaf(self, x):
return self.embed(x)
def node(self, left, right):
return F.tanh(self.l(F.concat((left, right))))
def label(self, v):
return self.w(v)
def traverse(model, node, train=True, evaluate=None, root=True):
if isinstance(node['node'], int):
# leaf node
word = xp.array([node['node']], np.int32)
loss = 0
x = chainer.Variable(word, volatile=not train)
v = model.leaf(x)
else:
# internal node
left_node, right_node = node['node']
left_loss, left = traverse(
model, left_node, train=train, evaluate=evaluate, root=False)
right_loss, right = traverse(
model, right_node, train=train, evaluate=evaluate, root=False)
v = model.node(left, right)
loss = left_loss + right_loss
y = model.label(v)
if train:
label = xp.array([node['label']], np.int32)
t = chainer.Variable(label, volatile=not train)
loss += F.softmax_cross_entropy(y, t)
if evaluate is not None:
predict = cuda.to_cpu(y.data.argmax(1))
if predict[0] == node['label']:
evaluate['correct_node'] += 1
evaluate['total_node'] += 1
if root:
if predict[0] == node['label']:
evaluate['correct_root'] += 1
evaluate['total_root'] += 1
return loss, v
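# Note on the recursion: traverse() returns the softmax cross-entropy loss
# accumulated over the whole subtree together with the subtree's root vector,
# so a single call on a full tree yields one loss whose backward pass reaches
# every internal node and leaf embedding.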
def evaluate(model, test_trees):
m = model.copy()
m.volatile = True
result = collections.defaultdict(lambda: 0)
for tree in test_trees:
traverse(m, tree, train=False, evaluate=result)
acc_node = 100.0 * result['correct_node'] / result['total_node']
acc_root = 100.0 * result['correct_root'] / result['total_root']
    print(' Node accuracy: {0:.2f} % ({1:,d}/{2:,d})'.format(
        acc_node, result['correct_node'], result['total_node']))
    print(' Root accuracy: {0:.2f} % ({1:,d}/{2:,d})'.format(
        acc_root, result['correct_root'], result['total_root']))
vocab = {}
if args.test:
max_size = 10
else:
max_size = None
train_trees = read_corpus('trees/train.txt', vocab, max_size)
test_trees = read_corpus('trees/test.txt', vocab, max_size)
develop_trees = read_corpus('trees/dev.txt', vocab, max_size)
model = RecursiveNet(len(vocab), n_units)
if args.gpu >= 0:
model.to_gpu()
# Setup optimizer
optimizer = optimizers.AdaGrad(lr=0.1)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
accum_loss = 0
count = 0
start_at = time.time()
cur_at = start_at
for epoch in range(n_epoch):
print('Epoch: {0:d}'.format(epoch))
total_loss = 0
cur_at = time.time()
random.shuffle(train_trees)
for tree in train_trees:
loss, v = traverse(model, tree, train=True)
accum_loss += loss
count += 1
if count >= batchsize:
model.zerograds()
accum_loss.backward()
optimizer.update()
total_loss += float(accum_loss.data)
accum_loss = 0
count = 0
print('loss: {:.2f}'.format(total_loss))
now = time.time()
    throughput = float(len(train_trees)) / (now - cur_at)
    print('{:.2f} iters/sec, {:.2f} sec'.format(throughput, now - cur_at))
print()
if (epoch + 1) % epoch_per_eval == 0:
print('Train data evaluation:')
evaluate(model, train_trees)
print('Develop data evaluation:')
evaluate(model, develop_trees)
print('')
print('Test evaluation:')
evaluate(model, test_trees)
| mit | -2,984,529,657,359,926,000 | 28.854701 | 77 | 0.583166 | false |
unrza72/qplotutils | qplotutils/wireframe/items.py | 1 | 23332 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Items for the wireframe view
"""
import logging
import numpy as np
from OpenGL import GL
from OpenGL.GL import (
GL_BLEND,
GL_SRC_ALPHA,
GL_ONE_MINUS_SRC_ALPHA,
glEnable,
glDisable,
GL_LINE_SMOOTH,
GL_POLYGON_SMOOTH,
GL_LINE_SMOOTH_HINT,
GL_NICEST,
glHint,
GL_POLYGON_SMOOTH_HINT,
glBlendFunc,
glLineWidth,
glBegin,
GL_LINES,
glColor4f,
glVertex3f,
glEnd,
glVertexPointerf,
glEnableClientState,
glNormalPointerf,
glDrawArrays,
GL_TRIANGLES,
glDisableClientState,
GL_NORMAL_ARRAY,
GL_VERTEX_ARRAY,
GL_COLOR_ARRAY,
glDrawElements,
GL_UNSIGNED_INT,
)
from qtpy.QtCore import QObject
from qtpy.QtGui import QMatrix4x4
from qplotutils.wireframe.base_types import DefaultGlOptions
from qplotutils.wireframe.shader import ShaderRegistry
_log = logging.getLogger(__name__)
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "[email protected]"
__status__ = "Development"
DEBUG = True
class GLGraphicsItem(QObject):
_nextId = 0
def __init__(
self, *args, glOptions=DefaultGlOptions.OPAQUE, parentItem=None, **kwargs
):
super(GLGraphicsItem, self).__init__()
self._id = GLGraphicsItem._nextId
GLGraphicsItem._nextId += 1
self.__parent = None
self.__view = None
self.__children = set()
self.__transform = QMatrix4x4()
self.__visible = True
self.setParentItem(parentItem)
# self.setDepthValue(0)
self.__glOpts = glOptions
def setParentItem(self, item):
"""Set this item's parent in the scenegraph hierarchy."""
if self.__parent is not None:
self.__parent.__children.remove(self)
if item is not None:
item.__children.add(self)
self.__parent = item
        if self.__parent is not None and self.view is not self.__parent.view:
            if self.view is not None:
                self.view.removeItem(self)
            self.__parent.view.addItem(self)
def setGLOptions(self, opts):
self.__glOpts = opts.copy()
self.update()
def updateGLOptions(self, opts):
"""
Modify the OpenGL state options to use immediately before drawing this item.
*opts* must be a dictionary as specified by setGLOptions.
Values may also be None, in which case the key will be ignored.
"""
self.__glOpts.update(opts)
def parentItem(self):
"""Return a this item's parent in the scenegraph hierarchy."""
return self.__parent
def childItems(self):
"""Return a list of this item's children in the scenegraph hierarchy."""
return list(self.__children)
# def _setView(self, v):
# self.__view = v
#
# def view(self):
# return self.__view
@property
def view(self):
return self.__view
@view.setter
def view(self, value):
self.__view = value
# def setDepthValue(self, value):
# """
# Sets the depth value of this item. Default is 0.
# This controls the order in which items are drawn--those with a greater depth value will be drawn later.
# Items with negative depth values are drawn before their parent.
# (This is analogous to QGraphicsItem.zValue)
# The depthValue does NOT affect the position of the item or the values it imparts to the GL depth buffer.
# """
# self.__depthValue = value
#
# def depthValue(self):
# """Return the depth value of this item. See setDepthValue for more information."""
# return self.__depthValue
def setTransform(self, tr):
"""Set the local transform for this object.
Must be a :class:`Transform3D <pyqtgraph.Transform3D>` instance. This transform
determines how the local coordinate system of the item is mapped to the coordinate
system of its parent."""
self.__transform = tr # Transform3D(tr)
self.update()
def resetTransform(self):
"""Reset this item's transform to an identity transformation."""
self.__transform.setToIdentity()
self.update()
def applyTransform(self, tr, local):
"""
Multiply this object's transform by *tr*.
If local is True, then *tr* is multiplied on the right of the current transform::
newTransform = transform * tr
If local is False, then *tr* is instead multiplied on the left::
newTransform = tr * transform
"""
if local:
self.setTransform(self.transform() * tr)
else:
self.setTransform(tr * self.transform())
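    # Worked note on the two orders (illustrative, with R a rotation matrix):
    # with local=True the result maps points as T_old * (R * p), i.e. R acts in
    # the item's own frame and the item rotates about its local origin; with
    # local=False the result is R * (T_old * p), so R acts in the parent's
    # frame and the item swings around the parent's origin instead.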
def transform(self):
"""Return this item's transform object."""
return self.__transform
def viewTransform(self):
"""Return the transform mapping this item's local coordinate system to the
view coordinate system."""
tr = self.__transform
p = self
while True:
p = p.parentItem()
if p is None:
break
tr = p.transform() * tr
return tr
def translate(self, dx, dy, dz, local=False):
"""
Translate the object by (*dx*, *dy*, *dz*) in its parent's coordinate system.
If *local* is True, then translation takes place in local coordinates.
"""
tr = QMatrix4x4() # Transform3D()
tr.translate(dx, dy, dz)
self.applyTransform(tr, local=local)
def rotate(self, angle, x, y, z, local=False):
"""
Rotate the object around the axis specified by (x,y,z).
*angle* is in degrees.
"""
tr = QMatrix4x4()
tr.rotate(angle, x, y, z)
self.applyTransform(tr, local=local)
def scale(self, x, y, z, local=True):
"""
Scale the object by (*dx*, *dy*, *dz*) in its local coordinate system.
If *local* is False, then scale takes place in the parent's coordinates.
"""
tr = QMatrix4x4()
tr.scale(x, y, z)
self.applyTransform(tr, local=local)
def hide(self):
"""Hide this item.
This is equivalent to setVisible(False)."""
self.setVisible(False)
def show(self):
"""Make this item visible if it was previously hidden.
This is equivalent to setVisible(True)."""
self.setVisible(True)
def setVisible(self, vis):
"""Set the visibility of this item."""
self.__visible = vis
self.update()
def visible(self):
"""Return True if the item is currently set to be visible.
Note that this does not guarantee that the item actually appears in the
view, as it may be obscured or outside of the current view area."""
return self.__visible
def initializeGL(self):
"""
Called after an item is added to a GLViewWidget.
The widget's GL context is made current before this method is called.
(So this would be an appropriate time to generate lists, upload textures, etc.)
"""
pass
def _applyGLOptions(self):
"""
This method is responsible for preparing the GL state options needed to render
this item (blending, depth testing, etc). The method is called immediately before painting the item.
"""
for k, v in self.__glOpts.items():
if v is None:
continue
if isinstance(k, str):
func = getattr(GL, k)
func(*v)
else:
if v is True:
glEnable(k)
else:
glDisable(k)
def paint(self):
"""
Called by the GLViewWidget to draw this item.
It is the responsibility of the item to set up its own modelview matrix,
but the caller will take care of pushing/popping.
"""
self._applyGLOptions()
def update(self):
"""
Indicates that this item needs to be redrawn, and schedules an update
with the view it is displayed in.
"""
v = self.view
if v is None:
return
v.update()
def mapToParent(self, point):
tr = self.transform()
if tr is None:
return point
return tr.map(point)
def mapFromParent(self, point):
tr = self.transform()
if tr is None:
return point
return tr.inverted()[0].map(point)
def mapToView(self, point):
tr = self.viewTransform()
if tr is None:
return point
return tr.map(point)
def mapFromView(self, point):
tr = self.viewTransform()
if tr is None:
return point
return tr.inverted()[0].map(point)
class Grid(GLGraphicsItem):
def __init__(
self, x=10, y=10, xs=1.0, ys=1.0, edge_color=(0.7, 0.7, 0.7, 1), parentItem=None
):
        super(Grid, self).__init__(parentItem=parentItem)
self.edge_color = edge_color
self.x, self.y = x, y
self.xs, self.ys = xs, ys
def paint(self):
super(Grid, self).paint()
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POLYGON_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glLineWidth(1.3)
glBegin(GL_LINES) # lgtm [py/call/wrong-arguments]
        xvals = np.linspace(-self.x / 2.0, self.x / 2.0, int(self.x / self.xs) + 1)
        yvals = np.linspace(-self.y / 2.0, self.y / 2.0, int(self.y / self.ys) + 1)
glColor4f(*self.edge_color)
for x in xvals:
glVertex3f(x, yvals[0], 0)
glVertex3f(x, yvals[-1], 0)
for y in yvals:
glVertex3f(xvals[0], y, 0)
glVertex3f(xvals[-1], y, 0)
glEnd() # lgtm [py/call/wrong-arguments]
class CoordinateCross(GLGraphicsItem):
def __init__(self, parentItem=None):
super(CoordinateCross, self).__init__(parentItem=parentItem)
def paint(self):
super(CoordinateCross, self).paint()
glLineWidth(20.0)
glBegin(GL_LINES)
# X
glColor4f(1, 0, 0, 0.3)
glVertex3f(0, 0, 0)
glVertex3f(1, 0, 0)
glColor4f(0, 1, 0, 0.3)
glVertex3f(0, 0, 0)
glVertex3f(0, 1, 0)
glColor4f(0, 0, 1, 0.3)
glVertex3f(0, 0, 0)
glVertex3f(0, 0, 1)
glEnd()
class Box(GLGraphicsItem):
def __init__(self, parentItem=None):
        super(Box, self).__init__(parentItem=parentItem)
self.length = 4 # in x from point of origin
self.width = 2 # in y central to point of origin
self.height = 1.2 # in z from point of origin
def paint(self):
super(Box, self).paint()
l = self.length
wh = self.width / 2.0
# h = self.height
p = [
[0, wh, 0],
[l, wh, 0],
[l, -wh, 0],
[0, -wh, 0],
[0, wh, 1],
[l, wh, 1],
[l, -wh, 1],
[0, -wh, 1],
]
m = [
[0, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
]
glLineWidth(2.0)
glBegin(GL_LINES) # lgtm [py/call/wrong-arguments]
glColor4f(1, 0, 0, 1)
for k in range(8):
for j in range(k + 1, 8):
if m[k][j] == 1:
glVertex3f(*p[k])
glVertex3f(*p[j])
glEnd() # lgtm [py/call/wrong-arguments]
class Mesh(object):
def __init__(self, hasWireframe=False):
self.has_wireframe = hasWireframe
        # Array (N, 3, 3) of all faces of the mesh, where N is the number of triangles, each defined by 3 vertices in R^3.
self.face_vertices = None
        # Array (N, 3, 3) of the face normal vectors; same shape as face_vertices.
# self.face_normal_vectors = None
self._vertix_normalized_face_normal_vectors = None
self._face_normalized_face_normal_vectors = None
# self.debug_face_normals_vertices = None
# self.debug_face_normals_edges = None
self.face_edges = None
self.smooth = False
        # Wireframe vertex array and edge mapping
self.wireframe_vertices = None
self.wireframe_edges = None
@property
def face_normal_vectors(self):
if self.smooth:
return self._vertix_normalized_face_normal_vectors
else:
return self._face_normalized_face_normal_vectors
@staticmethod
def sphere(stacks=8, sectors=8, radius=1):
vertices = np.zeros(shape=((stacks - 1) * sectors + 2, 3), dtype=np.float)
sh = np.pi / (1.0 * stacks)
thetas = np.linspace(
0 + sh, np.pi - sh, stacks - 1, endpoint=True, dtype=np.float
)
phis = np.linspace(0, 2 * np.pi, sectors, endpoint=False, dtype=np.float)
for k, theta in enumerate(thetas):
z = radius * np.cos(theta)
xy = radius * np.sin(theta)
for j, phi in enumerate(phis):
x = xy * np.sin(phi)
y = xy * np.cos(phi)
vertices[k * sectors + j] = [x, y, z]
vertices[-2] = [0, 0, -1 * radius]
vertices[-1] = [0, 0, 1 * radius]
faces_top = np.zeros((sectors, 3), np.int)
faces_bottom = np.zeros((sectors, 3), np.int)
faces1 = np.zeros((sectors * (stacks - 2), 3), np.int)
faces2 = np.zeros((sectors * (stacks - 2), 3), np.int)
# top
for k in range(sectors):
f0 = len(vertices) - 1
f1 = k
if k + 1 == sectors:
f2 = 0
else:
f2 = k + 1
faces_top[k] = [f0, f2, f1]
# bottom
for k in range(sectors):
f0 = len(vertices) - 2
f1 = (stacks - 2) * sectors + k
if k + 1 == sectors:
f2 = (stacks - 2) * sectors
else:
f2 = (stacks - 2) * sectors + k + 1
faces_bottom[k] = [f1, f2, f0]
for k in range(stacks - 2):
for l in range(sectors):
f0 = k * sectors + l
if l + 1 == sectors:
f1 = k * sectors
else:
f1 = k * sectors + l + 1
f2 = (k + 1) * (sectors) + l
if l == 0:
f3 = (k + 1) * (sectors) + sectors - 1
else:
f3 = (k + 1) * (sectors) + (l - 1)
faces1[f0] = [f0, f1, f2]
faces2[f0] = [f0, f2, f3]
faces = np.concatenate((faces_top, faces_bottom, faces1, faces2), axis=0)
tt = Mesh.compute_face_arrays(vertices, faces)
return tt
@staticmethod
def cone(n_faces=4, radius=1, height=1):
""" Return a mesh for a cone with the defined number of faces
"""
vertices = np.zeros((n_faces + 2, 3), np.float)
for k in range(n_faces):
x = np.sin(2 * np.pi * k / n_faces) * radius
y = np.cos(2 * np.pi * k / n_faces) * radius
vertices[k] = [x, y, 0]
vertices[n_faces] = [0, 0, height]
a = np.arange(0, n_faces, 1, dtype=np.uint8)
b = np.roll(a, 1)
t_faces = np.array([a, b, np.ones(n_faces) * n_faces], dtype=np.uint8).T
b_faces = np.array([b, a, np.ones(n_faces) * (n_faces + 1)], dtype=np.uint8).T
faces = np.append(t_faces, b_faces, axis=0)
tt = Mesh.compute_face_arrays(vertices, faces)
return tt
@staticmethod
def compute_face_arrays(vertices, faces, wireframe_edges=None):
# n_faces = faces.shape[0]
n_vertices = vertices.shape[0]
# compute face vertices
v = vertices[faces]
# face normals
nv = np.cross(v[:, 1] - v[:, 0], v[:, 2] - v[:, 0])
nvl = np.linalg.norm(nv, axis=1)
nv = nv / nvl.reshape(-1, 1)
# non-smoothed face normals
norms = np.zeros((nv.shape[0], 3, 3))
norms[:] = nv[:, np.newaxis, :]
        # smoothed normals:
        # build a face-to-vertex mask so the normals of all faces sharing a
        # vertex can be averaged per vertex
vertices_norms_mask = np.zeros(
(faces.shape[0], vertices.shape[0]), dtype=np.uint8
)
for k, f in enumerate(faces):
vertices_norms_mask[k, f[0]] = 1
vertices_norms_mask[k, f[1]] = 1
vertices_norms_mask[k, f[2]] = 1
vertice_norms = np.zeros(vertices.shape, np.float)
for v_idx in range(n_vertices):
f_idx, = np.where(vertices_norms_mask[:, v_idx] == 1)
if f_idx is None or len(f_idx) == 0:
continue
f_idx_u = np.unique(f_idx)
nn = nv[f_idx_u]
vertice_norms[v_idx] = np.sum(nn, axis=0) / len(f_idx_u)
norms2 = vertice_norms[faces]
        # edge computation for the mesh grid visualization
face_edges = np.zeros((faces.shape[0] * 3, 2), np.int)
for k, f in enumerate(faces):
a = np.sort(f)
face_edges[k] = [a[0], a[1]]
face_edges[faces.shape[0] + k] = [a[1], a[2]]
face_edges[faces.shape[0] * 2 + k] = [a[0], a[2]]
face_edges = np.unique(face_edges, axis=0)
md = Mesh()
if wireframe_edges is not None:
md.wireframe_edges = wireframe_edges
md.has_wireframe = True
md.wireframe_vertices = vertices
md.face_vertices = v
md._face_normalized_face_normal_vectors = norms
md._vertix_normalized_face_normal_vectors = norms2
md.face_edges = face_edges
return md
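    # Shading note: for a cube, the face-normalized array gives every triangle
    # one constant normal (hard edges under lighting), while the vertex-
    # normalized array averages the normals of all faces sharing a vertex
    # (rounded-looking corners). Mesh.smooth selects which of the two arrays
    # face_normal_vectors returns.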
@staticmethod
def cube(edge_length=1.0):
vertices = (
np.array(
[
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
]
)
* edge_length
- edge_length / 2.0
)
# Every cube side is constructed of two triangles
# be careful with culling
faces = np.array(
[
[0, 2, 1], # ok
[2, 3, 1],
[0, 1, 4], # ok
[1, 5, 4],
[1, 3, 5], # ok
[3, 7, 5],
[2, 7, 3], # ok
[2, 6, 7],
[0, 6, 2], # ok
[0, 4, 6],
[4, 5, 6], # ok
[5, 7, 6],
]
)
# Hand constructed:
wireframe_edges = np.array(
[
[0, 1],
[1, 3],
[3, 2],
[2, 0],
[4, 5],
[5, 7],
[7, 6],
[6, 4],
[0, 4],
[1, 5],
[2, 6],
[3, 7],
],
np.int8,
)
return Mesh.compute_face_arrays(vertices, faces, wireframe_edges)
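# Minimal usage sketch (hedged: the shader key below is a placeholder, the
# available names are defined by qplotutils.wireframe.shader.ShaderRegistry):
#
#   mesh = Mesh.cube(edge_length=2.0)            # or Mesh.sphere() / Mesh.cone()
#   item = MeshItem(mesh, shader="<shader-name>", smooth=True)
#   item.translate(0.0, 0.0, 1.0)
#   view.addItem(item)                           # view: the wireframe GL view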
class MeshItem(GLGraphicsItem):
def __init__(
self,
meshData,
parentItem=None,
shader=None,
faceColor=(0.6, 0.6, 0.6, 1.0),
edgeColor=(1.0, 0.5, 0.5, 1.0),
smooth=False,
glOptions=None,
):
super(MeshItem, self).__init__(parentItem=parentItem)
self.draw_faces = True
self.draw_wireframe = False
self.debug_face_normals = False
self.debug_face_edges = False
self.__antialiasing = True
self.edge_color = edgeColor
self.face_color = faceColor
self.shader_registry = ShaderRegistry()
self.shader_registry.add(shader)
self.shader_program = self.shader_registry[shader]
if glOptions is not None:
self.setGLOptions(glOptions)
else:
self.setGLOptions(self.shader_program.glOptions)
self.mesh = meshData
self.mesh.smooth = smooth
def initializeGL(self):
_log.debug("InitializeGL")
# self.mesh = Mesh.cone() # cube()
def paint(self):
self._applyGLOptions()
if self.__antialiasing:
glEnable(GL_LINE_SMOOTH)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glLineWidth(1.5)
if self.draw_faces:
# need face
with self.shader_program:
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointerf(self.mesh.face_vertices)
glColor4f(*self.face_color)
glEnableClientState(GL_NORMAL_ARRAY)
glNormalPointerf(self.mesh.face_normal_vectors)
glDrawArrays(
GL_TRIANGLES, 0, np.product(self.mesh.face_vertices.shape[:-1])
)
glDisableClientState(GL_NORMAL_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
if self.debug_face_normals:
# Visualize the face normal vectors
glEnableClientState(GL_VERTEX_ARRAY)
N = self.mesh.face_vertices.shape[0] * 3
v = np.concatenate(
[
self.mesh.face_vertices,
self.mesh.face_vertices + self.mesh.face_normal_vectors,
]
)
e = np.array([np.arange(N), np.arange(N) + N]).T.flatten()
glColor4f(1.0, 1.0, 0.0, 1.0)
glVertexPointerf(v)
glDrawElements(GL_LINES, e.shape[0], GL_UNSIGNED_INT, e)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
if self.debug_face_edges:
# visualize all face edges
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointerf(self.mesh.wireframe_vertices)
glColor4f(1.0, 1.0, 0.0, 1.0)
edges = self.mesh.face_edges.flatten()
glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT, edges)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
if self.draw_wireframe and self.mesh.has_wireframe:
# draw a mesh wireframe which may or may not be identical to the face edges, depending on the mesh
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointerf(self.mesh.wireframe_vertices)
glColor4f(0, 1, 0, 1)
edges = self.mesh.wireframe_edges.flatten()
glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT, edges)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
| mit | -3,036,442,289,709,507,600 | 29.261997 | 124 | 0.526016 | false |
googleapis/python-talent | tests/unit/gapic/talent_v4beta1/test_tenant_service.py | 1 | 87731 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.talent_v4beta1.services.tenant_service import TenantServiceAsyncClient
from google.cloud.talent_v4beta1.services.tenant_service import TenantServiceClient
from google.cloud.talent_v4beta1.services.tenant_service import pagers
from google.cloud.talent_v4beta1.services.tenant_service import transports
from google.cloud.talent_v4beta1.services.tenant_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.talent_v4beta1.types import tenant
from google.cloud.talent_v4beta1.types import tenant as gct_tenant
from google.cloud.talent_v4beta1.types import tenant_service
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TenantServiceClient._get_default_mtls_endpoint(None) is None
assert (
TenantServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [TenantServiceClient, TenantServiceAsyncClient,]
)
def test_tenant_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "jobs.googleapis.com:443"
@pytest.mark.parametrize(
"client_class", [TenantServiceClient, TenantServiceAsyncClient,]
)
def test_tenant_service_client_service_account_always_use_jwt(client_class):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
client = client_class(credentials=creds)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TenantServiceGrpcTransport, "grpc"),
(transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_tenant_service_client_service_account_always_use_jwt_true(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize(
"client_class", [TenantServiceClient, TenantServiceAsyncClient,]
)
def test_tenant_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "jobs.googleapis.com:443"
def test_tenant_service_client_get_transport_class():
transport = TenantServiceClient.get_transport_class()
available_transports = [
transports.TenantServiceGrpcTransport,
]
assert transport in available_transports
transport = TenantServiceClient.get_transport_class("grpc")
assert transport == transports.TenantServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TenantServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceClient),
)
@mock.patch.object(
TenantServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceAsyncClient),
)
def test_tenant_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TenantServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TenantServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc", "true"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc", "false"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TenantServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceClient),
)
@mock.patch.object(
TenantServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_tenant_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_tenant_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_tenant_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_tenant_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TenantServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_tenant(
transport: str = "grpc", request_type=tenant_service.CreateTenantRequest
):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant(
name="name_value",
external_id="external_id_value",
usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
keyword_searchable_profile_custom_attributes=[
"keyword_searchable_profile_custom_attributes_value"
],
)
response = client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.CreateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
assert response.keyword_searchable_profile_custom_attributes == [
"keyword_searchable_profile_custom_attributes_value"
]
def test_create_tenant_from_dict():
test_create_tenant(request_type=dict)
def test_create_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
client.create_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.CreateTenantRequest()
@pytest.mark.asyncio
async def test_create_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.CreateTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_tenant.Tenant(
name="name_value",
external_id="external_id_value",
usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
keyword_searchable_profile_custom_attributes=[
"keyword_searchable_profile_custom_attributes_value"
],
)
)
response = await client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.CreateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
assert response.keyword_searchable_profile_custom_attributes == [
"keyword_searchable_profile_custom_attributes_value"
]
@pytest.mark.asyncio
async def test_create_tenant_async_from_dict():
await test_create_tenant_async(request_type=dict)
def test_create_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.CreateTenantRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
call.return_value = gct_tenant.Tenant()
client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.CreateTenantRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
await client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_tenant(
parent="parent_value", tenant=gct_tenant.Tenant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].tenant == gct_tenant.Tenant(name="name_value")
def test_create_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_tenant(
tenant_service.CreateTenantRequest(),
parent="parent_value",
tenant=gct_tenant.Tenant(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_tenant(
parent="parent_value", tenant=gct_tenant.Tenant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].tenant == gct_tenant.Tenant(name="name_value")
@pytest.mark.asyncio
async def test_create_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tenant(
tenant_service.CreateTenantRequest(),
parent="parent_value",
tenant=gct_tenant.Tenant(name="name_value"),
)
def test_get_tenant(
transport: str = "grpc", request_type=tenant_service.GetTenantRequest
):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant.Tenant(
name="name_value",
external_id="external_id_value",
usage_type=tenant.Tenant.DataUsageType.AGGREGATED,
keyword_searchable_profile_custom_attributes=[
"keyword_searchable_profile_custom_attributes_value"
],
)
response = client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.GetTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
assert response.usage_type == tenant.Tenant.DataUsageType.AGGREGATED
assert response.keyword_searchable_profile_custom_attributes == [
"keyword_searchable_profile_custom_attributes_value"
]
def test_get_tenant_from_dict():
test_get_tenant(request_type=dict)
def test_get_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
client.get_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.GetTenantRequest()
@pytest.mark.asyncio
async def test_get_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.GetTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tenant.Tenant(
name="name_value",
external_id="external_id_value",
usage_type=tenant.Tenant.DataUsageType.AGGREGATED,
keyword_searchable_profile_custom_attributes=[
"keyword_searchable_profile_custom_attributes_value"
],
)
)
response = await client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.GetTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
assert response.usage_type == tenant.Tenant.DataUsageType.AGGREGATED
assert response.keyword_searchable_profile_custom_attributes == [
"keyword_searchable_profile_custom_attributes_value"
]
@pytest.mark.asyncio
async def test_get_tenant_async_from_dict():
await test_get_tenant_async(request_type=dict)
def test_get_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.GetTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
call.return_value = tenant.Tenant()
client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.GetTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant.Tenant())
await client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant.Tenant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_tenant(
tenant_service.GetTenantRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant.Tenant()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant.Tenant())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_tenant(
tenant_service.GetTenantRequest(), name="name_value",
)
def test_update_tenant(
transport: str = "grpc", request_type=tenant_service.UpdateTenantRequest
):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant(
name="name_value",
external_id="external_id_value",
usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
keyword_searchable_profile_custom_attributes=[
"keyword_searchable_profile_custom_attributes_value"
],
)
response = client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.UpdateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
assert response.keyword_searchable_profile_custom_attributes == [
"keyword_searchable_profile_custom_attributes_value"
]
def test_update_tenant_from_dict():
test_update_tenant(request_type=dict)
def test_update_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
client.update_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.UpdateTenantRequest()
@pytest.mark.asyncio
async def test_update_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.UpdateTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_tenant.Tenant(
name="name_value",
external_id="external_id_value",
usage_type=gct_tenant.Tenant.DataUsageType.AGGREGATED,
keyword_searchable_profile_custom_attributes=[
"keyword_searchable_profile_custom_attributes_value"
],
)
)
response = await client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.UpdateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
assert response.usage_type == gct_tenant.Tenant.DataUsageType.AGGREGATED
assert response.keyword_searchable_profile_custom_attributes == [
"keyword_searchable_profile_custom_attributes_value"
]
@pytest.mark.asyncio
async def test_update_tenant_async_from_dict():
await test_update_tenant_async(request_type=dict)
def test_update_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.UpdateTenantRequest()
request.tenant.name = "tenant.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
call.return_value = gct_tenant.Tenant()
client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tenant.name=tenant.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.UpdateTenantRequest()
request.tenant.name = "tenant.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
await client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tenant.name=tenant.name/value",) in kw["metadata"]
def test_update_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_tenant(tenant=gct_tenant.Tenant(name="name_value"),)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].tenant == gct_tenant.Tenant(name="name_value")
def test_update_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tenant(
tenant_service.UpdateTenantRequest(),
tenant=gct_tenant.Tenant(name="name_value"),
)
@pytest.mark.asyncio
async def test_update_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_tenant(
tenant=gct_tenant.Tenant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].tenant == gct_tenant.Tenant(name="name_value")
@pytest.mark.asyncio
async def test_update_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tenant(
tenant_service.UpdateTenantRequest(),
tenant=gct_tenant.Tenant(name="name_value"),
)
def test_delete_tenant(
transport: str = "grpc", request_type=tenant_service.DeleteTenantRequest
):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.DeleteTenantRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_tenant_from_dict():
test_delete_tenant(request_type=dict)
def test_delete_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
client.delete_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.DeleteTenantRequest()
@pytest.mark.asyncio
async def test_delete_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.DeleteTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.DeleteTenantRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_tenant_async_from_dict():
await test_delete_tenant_async(request_type=dict)
def test_delete_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.DeleteTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
call.return_value = None
client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.DeleteTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_tenant(
tenant_service.DeleteTenantRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_tenant(
tenant_service.DeleteTenantRequest(), name="name_value",
)
def test_list_tenants(
transport: str = "grpc", request_type=tenant_service.ListTenantsRequest
):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant_service.ListTenantsResponse(
next_page_token="next_page_token_value",
)
response = client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.ListTenantsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTenantsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tenants_from_dict():
test_list_tenants(request_type=dict)
def test_list_tenants_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
client.list_tenants()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.ListTenantsRequest()
@pytest.mark.asyncio
async def test_list_tenants_async(
transport: str = "grpc_asyncio", request_type=tenant_service.ListTenantsRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tenant_service.ListTenantsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.ListTenantsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTenantsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tenants_async_from_dict():
await test_list_tenants_async(request_type=dict)
def test_list_tenants_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.ListTenantsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
call.return_value = tenant_service.ListTenantsResponse()
client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tenants_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.ListTenantsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tenant_service.ListTenantsResponse()
)
await client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tenants_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant_service.ListTenantsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tenants(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_tenants_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tenants(
tenant_service.ListTenantsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tenants_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tenant_service.ListTenantsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tenants(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_tenants_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tenants(
tenant_service.ListTenantsRequest(), parent="parent_value",
)
def test_list_tenants_pager():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_tenants(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, tenant.Tenant) for i in results)
def test_list_tenants_pages():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
pages = list(client.list_tenants(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tenants_async_pager():
client = TenantServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
async_pager = await client.list_tenants(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, tenant.Tenant) for i in responses)
@pytest.mark.asyncio
async def test_list_tenants_async_pages():
client = TenantServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tenants(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TenantServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TenantServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.TenantServiceGrpcTransport,)
def test_tenant_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TenantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_tenant_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TenantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_tenant",
"get_tenant",
"update_tenant",
"delete_tenant",
"list_tenants",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_tenant_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_tenant_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
def test_tenant_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.talent_v4beta1.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_tenant_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TenantServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_tenant_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TenantServiceClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_tenant_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_tenant_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TenantServiceGrpcTransport, grpc_helpers),
(transports.TenantServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_tenant_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"jobs.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
scopes=["1", "2"],
default_host="jobs.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_tenant_service_host_no_port():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint="jobs.googleapis.com"),
)
assert client.transport._host == "jobs.googleapis.com:443"
def test_tenant_service_host_with_port():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="jobs.googleapis.com:8000"
),
)
assert client.transport._host == "jobs.googleapis.com:8000"
def test_tenant_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TenantServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_tenant_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TenantServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_tenant_path():
project = "squid"
tenant = "clam"
expected = "projects/{project}/tenants/{tenant}".format(
project=project, tenant=tenant,
)
actual = TenantServiceClient.tenant_path(project, tenant)
assert expected == actual
def test_parse_tenant_path():
expected = {
"project": "whelk",
"tenant": "octopus",
}
path = TenantServiceClient.tenant_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_tenant_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TenantServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = TenantServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = TenantServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = TenantServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = TenantServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = TenantServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = TenantServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = TenantServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TenantServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = TenantServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TenantServiceTransport, "_prep_wrapped_messages"
) as prep:
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TenantServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TenantServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | 2,514,338,566,656,052,700 | 37.177111 | 118 | 0.659847 | false |
edx/course-discovery | course_discovery/apps/course_metadata/tests/test_query.py | 1 | 6132 | # pylint: disable=no-member
import datetime
import ddt
import pytest
import pytz
from django.test import TestCase
from course_discovery.apps.course_metadata.choices import CourseRunStatus, ProgramStatus
from course_discovery.apps.course_metadata.models import Course, CourseRun, Program
from course_discovery.apps.course_metadata.tests.factories import CourseRunFactory, ProgramFactory, SeatFactory
@pytest.mark.usefixtures('course_run_states')
class CourseQuerySetTests(TestCase):
def test_available(self):
"""
Verify the method filters Courses to those which contain at least one
CourseRun that can be enrolled in immediately, is ongoing or yet to start,
and appears on the marketing site.
"""
for state in self.states():
Course.objects.all().delete()
course_run = CourseRunFactory()
for function in state:
function(course_run)
course_run.save()
if state in self.available_states:
course = course_run.course
# This course is available, so should be returned by the
# available() query.
assert list(Course.objects.available()) == [course]
# This run has no seats, but we still expect its parent course
# to be included.
CourseRunFactory(course=course)
assert list(Course.objects.available()) == [course]
# Generate another course run with available seats.
# Only one instance of the course should be included in the result.
other_course_run = CourseRunFactory(course=course)
for function in state:
function(other_course_run)
other_course_run.save()
assert list(Course.objects.available()) == [course]
else:
assert list(Course.objects.available()) == []
@ddt.ddt
class CourseRunQuerySetTests(TestCase):
def test_active(self):
""" Verify the method returns only course runs currently open for enrollment or opening in the future. """
now = datetime.datetime.now(pytz.UTC)
active_course_end = now + datetime.timedelta(days=60)
inactive_course_end = now - datetime.timedelta(days=15)
open_enrollment_end = now + datetime.timedelta(days=30)
closed_enrollment_end = now - datetime.timedelta(days=30)
# Create course with end date in future and enrollment_end in past.
CourseRunFactory(end=active_course_end, enrollment_end=closed_enrollment_end)
# Create course with end date in past and no enrollment_end.
CourseRunFactory(end=inactive_course_end, enrollment_end=None)
assert CourseRun.objects.active().count() == 0
# Create course with end date in future and enrollment_end in future.
active_enrollment_end = CourseRunFactory(end=active_course_end, enrollment_end=open_enrollment_end)
# Create course with end date in future and no enrollment_end.
active_no_enrollment_end = CourseRunFactory(end=active_course_end, enrollment_end=None)
# Create course with no end date and enrollment date in future.
active_no_end_date = CourseRunFactory(end=None, enrollment_end=open_enrollment_end)
assert set(CourseRun.objects.active()) == {active_enrollment_end, active_no_enrollment_end, active_no_end_date}
def test_enrollable(self):
""" Verify the method returns only course runs currently open for enrollment. """
past = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=2)
future = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
enrollable = CourseRunFactory(enrollment_start=past, enrollment_end=future)
enrollable_no_enrollment_end = CourseRunFactory(enrollment_start=past, enrollment_end=None)
enrollable_no_enrollment_start = CourseRunFactory(enrollment_start=None, enrollment_end=future)
CourseRunFactory(enrollment_start=future)
CourseRunFactory(enrollment_end=past)
# order doesn't matter
assert list(CourseRun.objects.enrollable().order_by('id')) == sorted([
enrollable, enrollable_no_enrollment_end, enrollable_no_enrollment_start
], key=lambda x: x.id)
def test_marketable(self):
""" Verify the method filters CourseRuns to those with slugs. """
course_run = CourseRunFactory()
SeatFactory(course_run=course_run)
assert list(CourseRun.objects.marketable()) == [course_run]
@ddt.data(True, False)
def test_marketable_seats_exclusions(self, has_seats):
""" Verify that the method excludes CourseRuns without seats. """
course_run = CourseRunFactory()
if has_seats:
SeatFactory(course_run=course_run)
assert CourseRun.objects.marketable().exists() == has_seats
@ddt.data(True, False)
def test_marketable_unpublished_exclusions(self, is_published):
""" Verify the method excludes CourseRuns with Unpublished status. """
course_run = CourseRunFactory(status=CourseRunStatus.Unpublished)
SeatFactory(course_run=course_run)
if is_published:
course_run.status = CourseRunStatus.Published
course_run.save()
assert CourseRun.objects.marketable().exists() == is_published
@ddt.ddt
class ProgramQuerySetTests(TestCase):
@ddt.data(
(ProgramStatus.Unpublished, False),
(ProgramStatus.Active, True),
)
@ddt.unpack
def test_marketable(self, status, is_marketable):
""" Verify the method filters Programs to those which are active and have marketing slugs. """
program = ProgramFactory(status=status)
expected = [program] if is_marketable else []
assert list(Program.objects.marketable()) == expected
def test_marketable_exclusions(self):
""" Verify the method excludes Programs without a marketing slug. """
ProgramFactory(marketing_slug='')
assert Program.objects.marketable().count() == 0
| agpl-3.0 | 1,480,001,325,813,953,500 | 41.583333 | 119 | 0.667482 | false |
nadeemsyed/swift | swift/common/direct_client.py | 3 | 21930 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""
import json
import os
import socket
from eventlet import sleep, Timeout
import six
import six.moves.cPickle as pickle
from six.moves.http_client import HTTPException
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ClientException
from swift.common.utils import Timestamp, FileLikeIter
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import quote
class DirectClientException(ClientException):
def __init__(self, stype, method, node, part, path, resp, host=None):
# host can be used to override the node ip and port reported in
# the exception
host = host if host is not None else node
if not isinstance(path, six.text_type):
path = path.decode("utf-8")
full_path = quote('/%s/%s%s' % (node['device'], part, path))
msg = '%s server %s:%s direct %s %r gave status %s' % (
stype, host['ip'], host['port'], method, full_path, resp.status)
headers = HeaderKeyDict(resp.getheaders())
super(DirectClientException, self).__init__(
msg, http_host=host['ip'], http_port=host['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason, http_headers=headers)
def _make_req(node, part, method, path, _headers, stype,
conn_timeout=5, response_timeout=15):
"""
Make request to backend storage node.
    :param node: a node dict from a ring
    :param part: an integer, the partition number
    :param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
    :param path: a string, the request path
    :param headers: a dict, header name => value
    :param stype: a string describing the type of service
        (i.e. 'Account', 'Container', 'Object')
:returns: an HTTPResponse object
"""
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
method, path, headers=_headers)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise DirectClientException(stype, method, node, part, path, resp)
return resp
def _get_direct_account_container(path, stype, node, part,
marker=None, limit=None,
prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15,
end_marker=None, reverse=None):
"""Base class for get direct account and container.
Do not use directly use the get_direct_account or
get_direct_container instead.
"""
params = ['format=json']
if marker:
params.append('marker=%s' % quote(marker))
if limit:
params.append('limit=%d' % limit)
if prefix:
params.append('prefix=%s' % quote(prefix))
if delimiter:
params.append('delimiter=%s' % quote(delimiter))
if end_marker:
params.append('end_marker=%s' % quote(end_marker))
if reverse:
params.append('reverse=%s' % quote(reverse))
qs = '&'.join(params)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs,
headers=gen_headers())
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise DirectClientException(stype, 'GET', node, part, path, resp)
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json.loads(resp.read())
def gen_headers(hdrs_in=None, add_ts=False):
hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict()
if add_ts:
hdrs_out['X-Timestamp'] = Timestamp.now().internal
hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid()
return hdrs_out
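# Illustrative sketch (not part of the original module): the public helpers
# below reduce to a single backend request built from gen_headers() and
# _make_req(). The node and part values are hypothetical placeholders a
# caller would normally obtain from the account ring.
def _example_raw_account_delete(node, part, account):
    path = '/%s' % account
    return _make_req(node, part, 'DELETE', path, gen_headers(add_ts=True),
                     'Account')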
def direct_get_account(node, part, account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15, end_marker=None, reverse=None):
"""
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param end_marker: end_marker query
:param reverse: reverse the returned listing
:returns: a tuple of (response headers, a list of containers) The response
              headers will be a HeaderKeyDict.
"""
path = '/' + account
return _get_direct_account_container(path, "Account", node, part,
marker=marker,
limit=limit, prefix=prefix,
delimiter=delimiter,
end_marker=end_marker,
reverse=reverse,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
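# Illustrative usage sketch (not part of the original module): list the
# containers in an account by asking each primary account server in turn.
# ``account_ring`` and ``account`` are hypothetical caller-supplied values.
def _example_direct_account_listing(account_ring, account):
    part, nodes = account_ring.get_nodes(account)
    for node in nodes:
        try:
            return direct_get_account(node, part, account, limit=100)
        except (ClientException, Timeout):
            continue
    raise ClientException('All account servers failed for %s' % account)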
def direct_delete_account(node, part, account, conn_timeout=5,
response_timeout=15, headers=None):
if headers is None:
headers = {}
path = '/%s' % account
_make_req(node, part, 'DELETE', path, gen_headers(headers, True),
'Account', conn_timeout, response_timeout)
def direct_head_container(node, part, account, container, conn_timeout=5,
response_timeout=15):
"""
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers in a HeaderKeyDict
:raises ClientException: HTTP HEAD request failed
"""
path = '/%s/%s' % (account, container)
resp = _make_req(node, part, 'HEAD', path, gen_headers(),
'Container', conn_timeout, response_timeout)
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
return resp_headers
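# Illustrative sketch: pull the object count, bytes used and user metadata
# out of a container HEAD response. The header names are the standard
# container server response headers; node/part/account/container are
# assumed to be supplied by the caller.
def _example_container_stats(node, part, account, container):
    headers = direct_head_container(node, part, account, container)
    object_count = int(headers.get('x-container-object-count', 0))
    bytes_used = int(headers.get('x-container-bytes-used', 0))
    meta = dict((key[len('x-container-meta-'):], value)
                for key, value in headers.items()
                if key.lower().startswith('x-container-meta-'))
    return object_count, bytes_used, meta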
def direct_get_container(node, part, account, container, marker=None,
limit=None, prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15, end_marker=None,
reverse=None):
"""
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
:param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param end_marker: end_marker query
:param reverse: reverse the returned listing
:returns: a tuple of (response headers, a list of objects) The response
headers will be a HeaderKeyDict.
"""
path = '/%s/%s' % (account, container)
return _get_direct_account_container(path, "Container", node,
part, marker=marker,
limit=limit, prefix=prefix,
delimiter=delimiter,
end_marker=end_marker,
reverse=reverse,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
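# Illustrative sketch: page through a full container listing with the
# marker parameter. Assumes the caller already resolved the container's
# partition and a responsive primary node (e.g. via a container ring).
def _example_full_container_listing(node, part, account, container):
    marker = ''
    while True:
        _headers, objs = direct_get_container(
            node, part, account, container, marker=marker, limit=10000)
        if not objs:
            break
        for obj in objs:
            yield obj
        marker = objs[-1]['name']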
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None):
"""
Delete container directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:raises ClientException: HTTP DELETE request failed
"""
if headers is None:
headers = {}
path = '/%s/%s' % (account, container)
add_timestamp = 'x-timestamp' not in (k.lower() for k in headers)
_make_req(node, part, 'DELETE', path, gen_headers(headers, add_timestamp),
'Container', conn_timeout, response_timeout)
def direct_put_container_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15,
headers=None):
if headers is None:
headers = {}
have_x_timestamp = 'x-timestamp' in (k.lower() for k in headers)
path = '/%s/%s/%s' % (account, container, obj)
_make_req(node, part, 'PUT', path,
gen_headers(headers, add_ts=(not have_x_timestamp)),
'Container', conn_timeout, response_timeout)
def direct_delete_container_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15,
headers=None):
if headers is None:
headers = {}
headers = gen_headers(headers, add_ts='x-timestamp' not in (
k.lower() for k in headers))
path = '/%s/%s/%s' % (account, container, obj)
_make_req(node, part, 'DELETE', path, headers,
'Container', conn_timeout, response_timeout)
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, headers=None):
"""
Request object information directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: a dict containing the response's headers in a HeaderKeyDict
:raises ClientException: HTTP HEAD request failed
"""
if headers is None:
headers = {}
headers = gen_headers(headers)
path = '/%s/%s/%s' % (account, container, obj)
resp = _make_req(node, part, 'HEAD', path, headers,
'Object', conn_timeout, response_timeout)
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
return resp_headers
def direct_get_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, resp_chunk_size=None, headers=None):
"""
Get object directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to read.
:param headers: dict to be passed into HTTPConnection headers
:returns: a tuple of (response headers, the object's contents) The response
headers will be a HeaderKeyDict.
:raises ClientException: HTTP GET request failed
"""
if headers is None:
headers = {}
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, headers=gen_headers(headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise DirectClientException('Object', 'GET', node, part, path, resp)
if resp_chunk_size:
def _object_body():
buf = resp.read(resp_chunk_size)
while buf:
yield buf
buf = resp.read(resp_chunk_size)
object_body = _object_body()
else:
object_body = resp.read()
resp_headers = HeaderKeyDict()
for header, value in resp.getheaders():
resp_headers[header] = value
return resp_headers, object_body
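# Illustrative sketch: stream a large object to a local file using
# resp_chunk_size so the body is never held in memory at once. The
# destination path and chunk size are arbitrary example values.
def _example_stream_object_to_file(node, part, account, container, obj,
                                   dest_path):
    headers, body_iter = direct_get_object(
        node, part, account, container, obj, resp_chunk_size=65536)
    with open(dest_path, 'wb') as dest:
        for chunk in body_iter:
            dest.write(chunk)
    return headers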
def direct_put_object(node, part, account, container, name, contents,
content_length=None, etag=None, content_type=None,
headers=None, conn_timeout=5, response_timeout=15,
chunk_size=65535):
"""
Put object directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param name: object name
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param etag: etag of contents
:param content_type: value to send as content-type header
:param headers: additional headers to include in the request
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
:raises ClientException: HTTP PUT request failed
"""
path = '/%s/%s/%s' % (account, container, name)
if headers is None:
headers = {}
if etag:
headers['ETag'] = etag.strip('"')
if content_length is not None:
headers['Content-Length'] = str(content_length)
else:
for n, v in headers.items():
if n.lower() == 'content-length':
content_length = int(v)
if content_type is not None:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
if not contents:
headers['Content-Length'] = '0'
if isinstance(contents, six.string_types):
contents = [contents]
    # In case the caller wants to insert an object with a specific timestamp/age
add_ts = 'X-Timestamp' not in headers
if content_length is None:
headers['Transfer-Encoding'] = 'chunked'
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'PUT', path, headers=gen_headers(headers, add_ts))
contents_f = FileLikeIter(contents)
if content_length is None:
chunk = contents_f.read(chunk_size)
while chunk:
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = contents_f.read(chunk_size)
conn.send('0\r\n\r\n')
else:
left = content_length
while left > 0:
size = chunk_size
if size > left:
size = left
chunk = contents_f.read(size)
if not chunk:
break
conn.send(chunk)
left -= len(chunk)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise DirectClientException('Object', 'PUT',
node, part, path, resp)
return resp.getheader('etag').strip('"')
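# Hedged usage sketch (not part of the original module): uploads an in-memory
# payload with direct_put_object() above and returns the etag reported by the
# object server. The node dict and storage names are made-up example values.
def _example_upload_object(data):
    example_node = {'ip': '127.0.0.1', 'port': 6200, 'device': 'sdb1'}
    return direct_put_object(
        example_node, 0, 'AUTH_test', 'photos', 'cat.jpg', data,
        content_length=len(data), content_type='image/jpeg')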
def direct_post_object(node, part, account, container, name, headers,
conn_timeout=5, response_timeout=15):
"""
Direct update to object metadata on object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param name: object name
:param headers: headers to store as metadata
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP POST request failed
"""
path = '/%s/%s/%s' % (account, container, name)
_make_req(node, part, 'POST', path, gen_headers(headers, True),
'Object', conn_timeout, response_timeout)
def direct_delete_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15, headers=None):
"""
Delete object directly from the object server.
:param node: node dictionary from the ring
    :param part: partition the object is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP DELETE request failed
"""
if headers is None:
headers = {}
headers = gen_headers(headers, add_ts='x-timestamp' not in (
k.lower() for k in headers))
path = '/%s/%s/%s' % (account, container, obj)
_make_req(node, part, 'DELETE', path, headers,
'Object', conn_timeout, response_timeout)
def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5,
response_timeout=15, headers=None):
"""
Get suffix hashes directly from the object server.
:param node: node dictionary from the ring
    :param part: partition to get hashes for
    :param suffixes: list of suffixes within the partition to get hashes for
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: dict of suffix hashes
:raises ClientException: HTTP REPLICATE request failed
"""
if headers is None:
headers = {}
path = '/%s' % '-'.join(suffixes)
with Timeout(conn_timeout):
conn = http_connect(node['replication_ip'], node['replication_port'],
node['device'], part, 'REPLICATE', path,
headers=gen_headers(headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
raise DirectClientException('Object', 'REPLICATE',
node, part, path, resp,
host={'ip': node['replication_ip'],
'port': node['replication_port']}
)
return pickle.loads(resp.read())
def retry(func, *args, **kwargs):
"""
Helper function to retry a given function a number of times.
:param func: callable to be called
:param retries: number of retries
:param error_log: logger for errors
:param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
error_log are sent, they will be deleted from kwargs
before sending on to func)
:returns: result of func
:raises ClientException: all retries failed
"""
retries = kwargs.pop('retries', 5)
error_log = kwargs.pop('error_log', None)
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
return attempts, func(*args, **kwargs)
except (socket.error, HTTPException, Timeout) as err:
if error_log:
error_log(err)
if attempts > retries:
raise
except ClientException as err:
if error_log:
error_log(err)
if attempts > retries or not is_server_error(err.http_status) or \
err.http_status == HTTP_INSUFFICIENT_STORAGE:
raise
sleep(backoff)
backoff *= 2
# Shouldn't actually get down here, but just in case.
if args and 'ip' in args[0]:
raise ClientException('Raise too many retries',
http_host=args[0]['ip'],
http_port=args[0]['port'],
http_device=args[0]['device'])
else:
raise ClientException('Raise too many retries')
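# Hedged usage sketch (not part of the original module): wraps one of the
# direct_* calls above in retry() with a bounded number of attempts. The
# account/container/object names are made-up example values.
def _example_delete_with_retries(node, part, logger=None):
    attempts, _ = retry(direct_delete_object, node, part,
                        'AUTH_test', 'photos', 'cat.jpg',
                        retries=3, error_log=logger)
    return attempts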
| apache-2.0 | 1,070,365,014,901,260,800 | 38.160714 | 79 | 0.614318 | false |
tarmstrong/nbdiff | scripts/gen_benchmark_notebook.py | 1 | 1306 | # generate two notebook files that are large enough for benchmarking.
import IPython.nbformat.current as nbformat
import random
def new_code_cell():
nlines = random.randint(0, 30)
input = [
str(random.random())
for i in range(nlines)
]
code_cell = nbformat.new_code_cell(input=input)
return code_cell
cells = [
new_code_cell()
for i in range(100)
]
worksheet = nbformat.new_worksheet(cells=cells)
nb = nbformat.new_notebook(name='Test Notebook')
nb['worksheets'].append(worksheet)
with open('nb1.ipynb', 'w') as out:
nbformat.write(nb, out, 'ipynb')
cells = nb['worksheets'][0]['cells']
# Take original notebook and make changes to it
ncells = len(cells)
to_change = [random.choice(list(range(ncells))) for i in range(10)]
for tc in to_change:
input = cells[tc]['input']
ninput = len(input)
to_delete = [random.choice(list(range(ninput))) for i in range(10)]
for td in to_delete:
if td < len(input):
del input[td]
cells[tc]['input'] = input
ncells = len(cells)
removed = [random.choice(list(range(ncells))) for i in range(10)]
for r in removed:
if r < len(cells):
del cells[r]
nb['worksheets'][0]['cells'] = cells
with open('nb2.ipynb', 'w') as out:
nbformat.write(nb, out, 'ipynb')
| mit | 5,158,438,282,222,395,000 | 21.135593 | 71 | 0.644717 | false |
chrisbarnettster/cfg-analysis-on-heroku-jupyter | .jupyter/jupyter_notebook_config.py | 2 | 2062 | try:
import os
import json
import traceback
import IPython.lib
import pgcontents
c = get_config()
### Password protection ###
# http://jupyter-notebook.readthedocs.io/en/latest/security.html
if os.environ.get('JUPYTER_NOTEBOOK_PASSWORD_DISABLED') != 'DangerZone!':
passwd = os.environ['JUPYTER_NOTEBOOK_PASSWORD']
c.NotebookApp.password = IPython.lib.passwd(passwd)
else:
c.NotebookApp.token = ''
c.NotebookApp.password = ''
### PostresContentsManager ###
database_url = os.getenv('DATABASE_URL', None)
if database_url:
# Tell IPython to use PostgresContentsManager for all storage.
c.NotebookApp.contents_manager_class = pgcontents.PostgresContentsManager
# Set the url for the database used to store files. See
# http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html#postgresql
# for more info on db url formatting.
c.PostgresContentsManager.db_url = database_url
# PGContents associates each running notebook server with a user, allowing
# multiple users to connect to the same database without trampling each other's
        # notebooks. By default, we use the result of getpass.getuser(), but
# a username can be specified manually like so:
c.PostgresContentsManager.user_id = 'heroku'
# Set a maximum file size, if desired.
#c.PostgresContentsManager.max_file_size_bytes = 1000000 # 1MB File cap
### CloudFoundry specific settings
vcap_application_json = os.getenv('VCAP_APPLICATION', None)
if vcap_application_json:
vcap_application = json.loads(vcap_application_json)
uri = vcap_application['uris'][0]
c.NotebookApp.allow_origin = 'https://{}'.format(uri)
c.NotebookApp.websocket_url = 'wss://{}:4443'.format(uri)
except Exception:
traceback.print_exc()
    # if an exception occurs, the notebook would normally get started
# without password set. For security reasons, execution is stopped.
exit(-1)
| mit | 2,645,788,274,607,554,000 | 39.431373 | 87 | 0.677013 | false |
spacy-io/thinc | thinc/extra/datasets.py | 1 | 8348 | # coding: utf8
from __future__ import unicode_literals
import random # pragma: no cover
import io # pragma: no cover
from collections import Counter # pragma: no cover
import os.path # pragma: no cover
import csv # pragma: no cover
import numpy
import json
import sys
from srsly import cloudpickle as pickle
from pathlib import Path
from ._vendorized.keras_data_utils import get_file # pragma: no cover
from ..neural.util import partition
from ..compat import basestring
try:
unicode
except NameError:
unicode = str
GITHUB = "https://github.com/UniversalDependencies/" # pragma: no cover
ANCORA_1_4_ZIP = "{github}/{ancora}/archive/r1.4.zip".format(
github=GITHUB, ancora="UD_Spanish-AnCora"
) # pragma: no cover
EWTB_1_4_ZIP = "{github}/{ewtb}/archive/r1.4.zip".format(
github=GITHUB, ewtb="UD_English"
) # pragma: no cover
SNLI_URL = "http://nlp.stanford.edu/projects/snli/snli_1.0.zip"
QUORA_QUESTIONS_URL = "http://qim.ec.quoracdn.net/quora_duplicate_questions.tsv"
IMDB_URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
def ancora_pos_tags(encode_words=False): # pragma: no cover
data_dir = get_file("UD_Spanish-AnCora-r1.4", ANCORA_1_4_ZIP, unzip=True)
train_loc = os.path.join(data_dir, "es_ancora-ud-train.conllu")
dev_loc = os.path.join(data_dir, "es_ancora-ud-dev.conllu")
return ud_pos_tags(train_loc, dev_loc, encode_words=encode_words)
def ewtb_pos_tags(encode_tags=False, encode_words=False): # pragma: no cover
data_dir = get_file("UD_English-r1.4", EWTB_1_4_ZIP, unzip=True)
train_loc = os.path.join(data_dir, "en-ud-train.conllu")
dev_loc = os.path.join(data_dir, "en-ud-dev.conllu")
return ud_pos_tags(
train_loc, dev_loc, encode_tags=encode_tags, encode_words=encode_words
)
def ud_pos_tags(
train_loc, dev_loc, encode_tags=True, encode_words=True
): # pragma: no cover
train_sents = list(read_conll(train_loc))
dev_sents = list(read_conll(dev_loc))
tagmap = {}
freqs = Counter()
for words, tags in train_sents:
for tag in tags:
tagmap.setdefault(tag, len(tagmap))
for word in words:
freqs[word] += 1
vocab = {
word: i for i, (word, freq) in enumerate(freqs.most_common()) if (freq >= 5)
}
def _encode(sents):
X = []
y = []
for words, tags in sents:
if encode_words:
X.append(
numpy.asarray(
[vocab.get(word, len(vocab)) for word in words], dtype="uint64"
)
)
else:
X.append(words)
if encode_tags:
y.append(numpy.asarray([tagmap[tag] for tag in tags], dtype="int32"))
else:
y.append(tags)
return zip(X, y)
return _encode(train_sents), _encode(dev_sents), len(tagmap)
def imdb(loc=None, limit=0):
if loc is None:
loc = get_file("aclImdb", IMDB_URL, untar=True, unzip=True)
train_loc = Path(loc) / "train"
test_loc = Path(loc) / "test"
return read_imdb(train_loc, limit=limit), read_imdb(test_loc, limit=limit)
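# Hedged usage sketch (not part of the original module): loads the IMDB
# sentiment data via imdb() above and inspects one (text, label) pair. The
# limit is an arbitrary example value.
def _example_load_imdb():
    train_data, test_data = imdb(limit=2000)
    text, label = train_data[0]
    return len(train_data), len(test_data), label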
def read_wikiner(file_, tagmap=None):
Xs = []
ys = []
for line in file_:
if not line.strip():
continue
tokens = [t.rsplit("|", 2) for t in line.split()]
words, _, tags = zip(*tokens)
if tagmap is not None:
tags = [tagmap.setdefault(tag, len(tagmap)) for tag in tags]
Xs.append(words)
ys.append(tags)
return zip(Xs, ys)
def read_imdb(data_dir, limit=0):
examples = []
for subdir, label in (("pos", 1), ("neg", 0)):
for filename in (data_dir / subdir).iterdir():
with filename.open("r", encoding="utf8") as file_:
text = file_.read()
text = text.replace("<br />", "\n\n")
if text.strip():
examples.append((text, label))
random.shuffle(examples)
if limit >= 1:
examples = examples[:limit]
return examples
def read_conll(loc): # pragma: no cover
with io.open(loc, encoding="utf8") as file_:
sent_strs = file_.read().strip().split("\n\n")
for sent_str in sent_strs:
lines = [
line.split() for line in sent_str.split("\n") if not line.startswith("#")
]
words = []
tags = []
for i, pieces in enumerate(lines):
if len(pieces) == 4:
word, pos, head, label = pieces
else:
idx, word, lemma, pos1, pos, morph, head, label, _, _2 = pieces
if "-" in idx:
continue
words.append(word)
tags.append(pos)
yield words, tags
def read_csv(csv_loc, label_col=0, text_col=-1):
with csv_loc.open() as file_:
for row in csv.reader(file_):
label_str = row[label_col]
text = row[text_col]
yield text, label_str
def mnist(): # pragma: no cover
from ._vendorized.keras_datasets import load_mnist
    # the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255.0
X_test /= 255.0
train_data = list(zip(X_train, y_train))
nr_train = X_train.shape[0]
random.shuffle(train_data)
heldout_data = train_data[: int(nr_train * 0.1)]
train_data = train_data[len(heldout_data) :]
test_data = list(zip(X_test, y_test))
return train_data, heldout_data, test_data
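# Hedged usage sketch (not part of the original module): mnist() above returns
# three lists of (pixels, label) pairs; this just reports their sizes.
def _example_mnist_sizes():  # pragma: no cover
    train_data, heldout_data, test_data = mnist()
    pixels, label = train_data[0]
    return pixels.shape, len(train_data), len(heldout_data), len(test_data)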
def reuters(): # pragma: no cover
from ._vendorized.keras_datasets import load_reuters
(X_train, y_train), (X_test, y_test) = load_reuters()
return (X_train, y_train), (X_test, y_test)
def quora_questions(loc=None):
if loc is None:
loc = get_file("quora_similarity.tsv", QUORA_QUESTIONS_URL)
if isinstance(loc, basestring):
loc = Path(loc)
is_header = True
lines = []
with loc.open("r", encoding="utf8") as file_:
for row in csv.reader(file_, delimiter="\t"):
if is_header:
is_header = False
continue
id_, qid1, qid2, sent1, sent2, is_duplicate = row
if not isinstance(sent1, unicode):
sent1 = sent1.decode("utf8").strip()
if not isinstance(sent2, unicode):
sent2 = sent2.decode("utf8").strip()
if sent1 and sent2:
lines.append(((sent1, sent2), int(is_duplicate)))
train, dev = partition(lines, 0.9)
return train, dev
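# Hedged usage sketch (not part of the original module): quora_questions()
# above yields ((sentence1, sentence2), is_duplicate) pairs already split into
# train and dev portions.
def _example_quora_pair(loc=None):
    train, dev = quora_questions(loc=loc)
    (sent1, sent2), is_duplicate = next(iter(train))
    return sent1, sent2, is_duplicate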
THREE_LABELS = {"entailment": 2, "contradiction": 1, "neutral": 0}
TWO_LABELS = {"entailment": 1, "contradiction": 0, "neutral": 0}
def snli(loc=None, ternary=False):
label_scheme = THREE_LABELS if ternary else TWO_LABELS
if loc is None:
loc = get_file("snli_1.0", SNLI_URL, unzip=True)
if isinstance(loc, basestring):
loc = Path(loc)
train = read_snli(Path(loc) / "snli_1.0_train.jsonl", label_scheme)
dev = read_snli(Path(loc) / "snli_1.0_dev.jsonl", label_scheme)
return train, dev
def stack_exchange(loc=None):
if loc is None:
raise ValueError("No default path for Stack Exchange yet")
rows = []
with loc.open("r", encoding="utf8") as file_:
for line in file_:
eg = json.loads(line)
rows.append(((eg["text1"], eg["text2"]), int(eg["label"])))
train, dev = partition(rows, 0.7)
return train, dev
def read_snli(loc, label_scheme):
rows = []
with loc.open("r", encoding="utf8") as file_:
for line in file_:
eg = json.loads(line)
label = eg["gold_label"]
if label == "-":
continue
rows.append(((eg["sentence1"], eg["sentence2"]), label_scheme[label]))
return rows
def get_word_index(path="reuters_word_index.pkl"): # pragma: no cover
path = get_file(
path, origin="https://s3.amazonaws.com/text-datasets/reuters_word_index.pkl"
)
f = open(path, "rb")
if sys.version_info < (3,):
data = pickle.load(f)
else:
data = pickle.load(f, encoding="latin1")
f.close()
return data
| mit | 3,532,642,709,093,280,000 | 30.862595 | 87 | 0.584571 | false |
larsbergstrom/servo | tests/wpt/web-platform-tests/mathml/tools/mathvariant-transforms.py | 7 | 7477 | #!/usr/bin/python
from __future__ import print_function
from lxml import etree
from utils.misc import downloadWithProgressBar, UnicodeXMLURL
from utils import mathfont
# Retrieve the unicode.xml file if necessary.
unicodeXML = downloadWithProgressBar(UnicodeXMLURL)
# Extract the mathvariants transformation.
xsltTransform = etree.XSLT(etree.XML('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:strip-space elements="*"/>
<xsl:template match="charlist">
<root><xsl:apply-templates select="character"/></root>
</xsl:template>
<xsl:template match="character">
<xsl:if test="surrogate">
<entry>
<xsl:attribute name="mathvariant">
<xsl:value-of select="surrogate/@mathvariant"/>
</xsl:attribute>
<xsl:attribute name="baseChar">
<xsl:value-of select="surrogate/@ref"/>
</xsl:attribute>
<xsl:attribute name="transformedChar">
<xsl:choose>
<xsl:when test="bmp">
<xsl:value-of select="bmp/@ref"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="@id"/>
</xsl:otherwise>
</xsl:choose>
</xsl:attribute>
</entry>
</xsl:if>
</xsl:template>
</xsl:stylesheet>'''))
# Put the mathvariant transforms into a Python structure.
mathvariantTransforms = {}
root = xsltTransform(etree.parse(unicodeXML)).getroot()
def parseCodePoint(aHexaString):
return int("0x%s" % aHexaString[1:], 16)
for entry in root:
mathvariant = entry.get("mathvariant")
baseChar = parseCodePoint(entry.get("baseChar"))
transformedChar = parseCodePoint(entry.get("transformedChar"))
if mathvariant not in mathvariantTransforms:
mathvariantTransforms[mathvariant] = {}
mathvariantTransforms[mathvariant][baseChar] = transformedChar
# There is no "isolated" mathvariant.
del mathvariantTransforms["isolated"]
# Automatic mathvariant uses the same transform as italic.
# It is handled specially (see below).
mathvariantTransforms["auto"] = mathvariantTransforms["italic"]
# Create a WOFF font for each mathvariant.
for mathvariant in mathvariantTransforms:
if mathvariant == "auto":
continue
font = mathfont.create("mathvariant-%s" % mathvariant)
for baseChar in mathvariantTransforms[mathvariant]:
if baseChar not in font:
mathfont.createGlyphFromValue(font, baseChar)
transformedChar = mathvariantTransforms[mathvariant][baseChar]
mathfont.createGlyphFromValue(font, transformedChar)
mathfont.save(font)
# Create a MathML and CSS test for each mathvariant.
for mathvariant in mathvariantTransforms:
print("Generating tests for %s..." % mathvariant, end="")
reftest = open("../relations/css-styling/mathvariant-%s.html" % mathvariant, "w")
reftestReference = open("../relations/css-styling/mathvariant-%s-ref.html" % mathvariant, "w")
CSSreftest = open("../../css/css-text/text-transform/math/text-transform-math-%s-001.tentative.html" % mathvariant, "w")
CSSreftestReference = open("../../css/css-text/text-transform/math/text-transform-math-%s-001.tentative-ref.html" % mathvariant, "w")
source = '\
<!DOCTYPE html>\n\
<html>\n\
<head>\n\
<meta charset="utf-8"/>\n\
<title>%s</title>\n'
reftest.write(source % ("mathvariant %s" % mathvariant))
reftestReference.write(source % ("mathvariant %s (reference)" % mathvariant))
CSSreftest.write(source % ("text-transform math-%s" % mathvariant))
CSSreftestReference.write(source % ("text-transform math-%s (reference)" % mathvariant))
if mathvariant == "auto":
mathAssert = "Verify that a single-char <mi> is equivalent to an <mi> with the transformed italic unicode character."
mapping = "italic"
else:
mathAssert = "Verify that a single-char <mtext> with a %s mathvariant is equivalent to an <mtext> with the transformed unicode character." % mathvariant
mapping = mathvariant
source ='\
<link rel="help" href="https://mathml-refresh.github.io/mathml-core/#css-styling">\n\
<link rel="help" href="https://mathml-refresh.github.io/mathml-core/#the-mathvariant-attribute">\n\
<link rel="help" href="https://mathml-refresh.github.io/mathml-core/#new-text-transform-values">\n\
<link rel="help" href="https://mathml-refresh.github.io/mathml-core/#%s-mappings">\n\
<link rel="match" href="mathvariant-%s-ref.html"/>\n\
<meta name="assert" content="%s">\n'
reftest.write(source % (mapping, mathvariant, mathAssert))
source = '\
<link rel="help" href="https://github.com/w3c/csswg-drafts/issues/3745"/>\n\
<link rel="help" href="https://mathml-refresh.github.io/mathml-core/#new-text-transform-values">\n\
<link rel="help" href="https://mathml-refresh.github.io/mathml-core/#%s-mappings">\n\
<link rel="match" href="text-transform-math-%s-001.tentative-ref.html"/>\n\
<meta name="assert" content="Verify that a character with \'text-transform: math-%s\' renders the same as the transformed unicode character.">\n'
CSSreftest.write(source % (mapping, mathvariant, mathvariant))
WOFFfont = "mathvariant-%s.woff" % mapping
source = '\
<style>\n\
@font-face {\n\
font-family: TestFont;\n\
src: url("/fonts/math/%s");\n\
}\n\
body > span {\n\
padding: 10px;\n\
}\n\
span > span {\n\
font-family: monospace;\n\
font-size: 10px;\n\
}\n\
.testfont {\n\
font-family: TestFont;\n\
font-size: 10px;\n\
}\n\
</style>\n\
<body>\n\
<!-- Generated by mathml/tools/mathvariant.py; DO NOT EDIT. -->\n\
<p>Test passes if all the equalities below are true.</p>\n' % WOFFfont
reftest.write(source)
reftestReference.write(source)
CSSreftest.write(source)
CSSreftestReference.write(source)
charIndex = 0
for baseChar in mathvariantTransforms[mathvariant]:
transformedChar = mathvariantTransforms[mathvariant][baseChar]
if mathvariant == "auto":
tokenTag = '<mi>&#x%0X;</mi>' % baseChar
tokenTagRef = '<mi>&#x%0X;</mi>' % transformedChar
else:
tokenTag = '<mtext mathvariant="%s">&#x%0X;</mtext>' % (mathvariant, baseChar)
tokenTagRef = '<mtext>&#x%0X;</mtext>' % transformedChar
reftest.write(' <span><math class="testfont">%s</math>=<span>%05X</span></span>' % (tokenTag, transformedChar))
reftestReference.write(' <span><math class="testfont">%s</math>=<span>%05X</span></span>' % (tokenTagRef, transformedChar))
CSSreftest.write(' <span><span class="testfont" style="text-transform: math-%s">&#x%0X;</span>=<span>%05X</span></span>' % (mathvariant, baseChar, transformedChar))
CSSreftestReference.write(' <span><span class="testfont">&#x%0X;</span>=<span>%05X</span></span>' % (transformedChar, transformedChar))
charIndex += 1
if charIndex % 10 == 0:
reftest.write('<br/>')
reftestReference.write('<br/>')
CSSreftest.write('<br/>')
CSSreftestReference.write('<br/>')
reftest.write('\n')
reftestReference.write('\n')
CSSreftest.write('\n')
CSSreftestReference.write('\n')
source = '</body>\n</html>\n'
reftest.write(source)
reftestReference.write(source)
CSSreftest.write(source)
CSSreftestReference.write(source)
reftest.close()
reftestReference.close()
CSSreftest.close()
CSSreftestReference.close()
print(" done.")
| mpl-2.0 | -8,020,646,585,455,962,000 | 42.725146 | 173 | 0.663769 | false |
DrMeers/django | django/contrib/formtools/wizard/views.py | 4 | 28545 | from collections import OrderedDict
import re
from django import forms
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.forms import formsets, ValidationError
from django.views.generic import TemplateView
from django.utils.decorators import classonlymethod
from django.utils.translation import ugettext as _
from django.utils import six
from django.contrib.formtools.wizard.storage import get_storage
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
from django.contrib.formtools.wizard.forms import ManagementForm
def normalize_name(name):
"""
Converts camel-case style names into underscore separated words. Example::
>>> normalize_name('oneTwoThree')
'one_two_three'
>>> normalize_name('FourFiveSix')
'four_five_six'
"""
new = re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', name)
return new.lower().strip('_')
class StepsHelper(object):
def __init__(self, wizard):
self._wizard = wizard
def __dir__(self):
return self.all
def __len__(self):
return self.count
def __repr__(self):
return '<StepsHelper for %s (steps: %s)>' % (self._wizard, self.all)
@property
def all(self):
"Returns the names of all steps/forms."
return list(self._wizard.get_form_list())
@property
def count(self):
"Returns the total number of steps/forms in this the wizard."
return len(self.all)
@property
def current(self):
"""
Returns the current step. If no current step is stored in the
storage backend, the first step will be returned.
"""
return self._wizard.storage.current_step or self.first
@property
def first(self):
"Returns the name of the first step."
return self.all[0]
@property
def last(self):
"Returns the name of the last step."
return self.all[-1]
@property
def next(self):
"Returns the next step."
return self._wizard.get_next_step()
@property
def prev(self):
"Returns the previous step."
return self._wizard.get_prev_step()
@property
def index(self):
"Returns the index for the current step."
return self._wizard.get_step_index()
@property
def step0(self):
return int(self.index)
@property
def step1(self):
return int(self.index) + 1
class WizardView(TemplateView):
"""
The WizardView is used to create multi-page forms and handles all the
storage and validation stuff. The wizard is based on Django's generic
class based views.
"""
storage_name = None
form_list = None
initial_dict = None
instance_dict = None
condition_dict = None
template_name = 'formtools/wizard/wizard_form.html'
def __repr__(self):
return '<%s: forms: %s>' % (self.__class__.__name__, self.form_list)
@classonlymethod
def as_view(cls, *args, **kwargs):
"""
This method is used within urls.py to create unique wizardview
instances for every request. We need to override this method because
we add some kwargs which are needed to make the wizardview usable.
"""
initkwargs = cls.get_initkwargs(*args, **kwargs)
return super(WizardView, cls).as_view(**initkwargs)
@classmethod
def get_initkwargs(cls, form_list=None, initial_dict=None,
instance_dict=None, condition_dict=None, *args, **kwargs):
"""
Creates a dict with all needed parameters for the form wizard instances.
* `form_list` - is a list of forms. The list entries can be single form
classes or tuples of (`step_name`, `form_class`). If you pass a list
of forms, the wizardview will convert the class list to
(`zero_based_counter`, `form_class`). This is needed to access the
form for a specific step.
* `initial_dict` - contains a dictionary of initial data dictionaries.
The key should be equal to the `step_name` in the `form_list` (or
the str of the zero based counter - if no step_names added in the
`form_list`)
* `instance_dict` - contains a dictionary whose values are model
instances if the step is based on a ``ModelForm`` and querysets if
the step is based on a ``ModelFormSet``. The key should be equal to
the `step_name` in the `form_list`. Same rules as for `initial_dict`
apply.
* `condition_dict` - contains a dictionary of boolean values or
callables. If the value of for a specific `step_name` is callable it
will be called with the wizardview instance as the only argument.
If the return value is true, the step's form will be used.
"""
kwargs.update({
'initial_dict': initial_dict or kwargs.pop('initial_dict',
getattr(cls, 'initial_dict', None)) or {},
'instance_dict': instance_dict or kwargs.pop('instance_dict',
getattr(cls, 'instance_dict', None)) or {},
'condition_dict': condition_dict or kwargs.pop('condition_dict',
getattr(cls, 'condition_dict', None)) or {}
})
form_list = form_list or kwargs.pop('form_list',
getattr(cls, 'form_list', None)) or []
computed_form_list = OrderedDict()
assert len(form_list) > 0, 'at least one form is needed'
# walk through the passed form list
for i, form in enumerate(form_list):
if isinstance(form, (list, tuple)):
# if the element is a tuple, add the tuple to the new created
# sorted dictionary.
computed_form_list[six.text_type(form[0])] = form[1]
else:
# if not, add the form with a zero based counter as unicode
computed_form_list[six.text_type(i)] = form
# walk through the new created list of forms
for form in six.itervalues(computed_form_list):
if issubclass(form, formsets.BaseFormSet):
# if the element is based on BaseFormSet (FormSet/ModelFormSet)
# we need to override the form variable.
form = form.form
# check if any form contains a FileField, if yes, we need a
# file_storage added to the wizardview (by subclassing).
for field in six.itervalues(form.base_fields):
if (isinstance(field, forms.FileField) and
not hasattr(cls, 'file_storage')):
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
# build the kwargs for the wizardview instances
kwargs['form_list'] = computed_form_list
return kwargs
def get_prefix(self, *args, **kwargs):
# TODO: Add some kind of unique id to prefix
return normalize_name(self.__class__.__name__)
def get_form_list(self):
"""
This method returns a form_list based on the initial form list but
checks if there is a condition method/value in the condition_list.
If an entry exists in the condition list, it will call/read the value
and respect the result. (True means add the form, False means ignore
the form)
The form_list is always generated on the fly because condition methods
        could use data from other (maybe previous) forms.
"""
form_list = OrderedDict()
for form_key, form_class in six.iteritems(self.form_list):
# try to fetch the value from condition list, by default, the form
# gets passed to the new list.
condition = self.condition_dict.get(form_key, True)
if callable(condition):
# call the value if needed, passes the current instance.
condition = condition(self)
if condition:
form_list[form_key] = form_class
return form_list
def dispatch(self, request, *args, **kwargs):
"""
This method gets called by the routing engine. The first argument is
`request` which contains a `HttpRequest` instance.
The request is stored in `self.request` for later use. The storage
instance is stored in `self.storage`.
After processing the request using the `dispatch` method, the
response gets updated by the storage engine (for example add cookies).
"""
# add the storage engine to the current wizardview instance
self.prefix = self.get_prefix(*args, **kwargs)
self.storage = get_storage(self.storage_name, self.prefix, request,
getattr(self, 'file_storage', None))
self.steps = StepsHelper(self)
response = super(WizardView, self).dispatch(request, *args, **kwargs)
# update the response (e.g. adding cookies)
self.storage.update_response(response)
return response
def get(self, request, *args, **kwargs):
"""
This method handles GET requests.
If a GET request reaches this point, the wizard assumes that the user
just starts at the first step or wants to restart the process.
        The data of the wizard will be reset before rendering the first step.
"""
self.storage.reset()
# reset the current step to the first step.
self.storage.current_step = self.steps.first
return self.render(self.get_form())
def post(self, *args, **kwargs):
"""
This method handles POST requests.
The wizard will render either the current step (if form validation
        wasn't successful), the next step (if the current step was stored
        successfully) or the done view (if no more steps are available)
"""
# Look for a wizard_goto_step element in the posted data which
# contains a valid step name. If one was found, render the requested
# form. (This makes stepping back a lot easier).
wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
if wizard_goto_step and wizard_goto_step in self.get_form_list():
return self.render_goto_step(wizard_goto_step)
# Check if form was refreshed
management_form = ManagementForm(self.request.POST, prefix=self.prefix)
if not management_form.is_valid():
raise ValidationError(
_('ManagementForm data is missing or has been tampered.'),
code='missing_management_form',
)
form_current_step = management_form.cleaned_data['current_step']
if (form_current_step != self.steps.current and
self.storage.current_step is not None):
# form refreshed, change current step
self.storage.current_step = form_current_step
# get the form for the current step
form = self.get_form(data=self.request.POST, files=self.request.FILES)
# and try to validate
if form.is_valid():
# if the form is valid, store the cleaned data and files.
self.storage.set_step_data(self.steps.current, self.process_step(form))
self.storage.set_step_files(self.steps.current, self.process_step_files(form))
# check if the current step is the last step
if self.steps.current == self.steps.last:
# no more steps, render done view
return self.render_done(form, **kwargs)
else:
# proceed to the next step
return self.render_next_step(form)
return self.render(form)
def render_next_step(self, form, **kwargs):
"""
This method gets called when the next step/form should be rendered.
`form` contains the last/current form.
"""
# get the form instance based on the data from the storage backend
# (if available).
next_step = self.steps.next
new_form = self.get_form(next_step,
data=self.storage.get_step_data(next_step),
files=self.storage.get_step_files(next_step))
# change the stored current step
self.storage.current_step = next_step
return self.render(new_form, **kwargs)
def render_goto_step(self, goto_step, **kwargs):
"""
This method gets called when the current step has to be changed.
`goto_step` contains the requested step to go to.
"""
self.storage.current_step = goto_step
form = self.get_form(
data=self.storage.get_step_data(self.steps.current),
files=self.storage.get_step_files(self.steps.current))
return self.render(form)
def render_done(self, form, **kwargs):
"""
This method gets called when all forms passed. The method should also
re-validate all steps to prevent manipulation. If any form fails to
validate, `render_revalidation_failure` should get called.
If everything is fine call `done`.
"""
final_forms = OrderedDict()
# walk through the form list and try to validate the data again.
for form_key in self.get_form_list():
form_obj = self.get_form(step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key))
if not form_obj.is_valid():
return self.render_revalidation_failure(form_key, form_obj, **kwargs)
final_forms[form_key] = form_obj
# render the done view and reset the wizard before returning the
# response. This is needed to prevent from rendering done with the
# same data twice.
done_response = self.done(final_forms.values(), form_dict=final_forms, **kwargs)
self.storage.reset()
return done_response
def get_form_prefix(self, step=None, form=None):
"""
Returns the prefix which will be used when calling the actual form for
the given step. `step` contains the step-name, `form` the form which
will be called with the returned prefix.
If no step is given, the form_prefix will determine the current step
automatically.
"""
if step is None:
step = self.steps.current
return str(step)
def get_form_initial(self, step):
"""
Returns a dictionary which will be passed to the form for `step`
        as `initial`. If no initial data was provided while initializing the
        form wizard, an empty dictionary will be returned.
"""
return self.initial_dict.get(step, {})
def get_form_instance(self, step):
"""
        Returns an object which will be passed to the form for `step`
        as `instance`. If no instance object was provided while initializing
the form wizard, None will be returned.
"""
return self.instance_dict.get(step, None)
def get_form_kwargs(self, step=None):
"""
Returns the keyword arguments for instantiating the form
(or formset) on the given step.
"""
return {}
def get_form(self, step=None, data=None, files=None):
"""
Constructs the form for a given `step`. If no `step` is defined, the
current step will be determined automatically.
The form will be initialized using the `data` argument to prefill the
new form. If needed, instance or queryset (for `ModelForm` or
`ModelFormSet`) will be added too.
"""
if step is None:
step = self.steps.current
form_class = self.form_list[step]
# prepare the kwargs for the form instance.
kwargs = self.get_form_kwargs(step)
kwargs.update({
'data': data,
'files': files,
'prefix': self.get_form_prefix(step, form_class),
'initial': self.get_form_initial(step),
})
if issubclass(form_class, (forms.ModelForm, forms.models.BaseInlineFormSet)):
# If the form is based on ModelForm or InlineFormSet,
# add instance if available and not previously set.
kwargs.setdefault('instance', self.get_form_instance(step))
elif issubclass(form_class, forms.models.BaseModelFormSet):
# If the form is based on ModelFormSet, add queryset if available
            # and not previously set.
kwargs.setdefault('queryset', self.get_form_instance(step))
return form_class(**kwargs)
def process_step(self, form):
"""
This method is used to postprocess the form data. By default, it
returns the raw `form.data` dictionary.
"""
return self.get_form_step_data(form)
def process_step_files(self, form):
"""
This method is used to postprocess the form files. By default, it
returns the raw `form.files` dictionary.
"""
return self.get_form_step_files(form)
def render_revalidation_failure(self, step, form, **kwargs):
"""
Gets called when a form doesn't validate when rendering the done
        view. By default, it changes the current step to the failing form's step
and renders the form.
"""
self.storage.current_step = step
return self.render(form, **kwargs)
def get_form_step_data(self, form):
"""
Is used to return the raw form data. You may use this method to
manipulate the data.
"""
return form.data
def get_form_step_files(self, form):
"""
Is used to return the raw form files. You may use this method to
manipulate the data.
"""
return form.files
def get_all_cleaned_data(self):
"""
Returns a merged dictionary of all step cleaned_data dictionaries.
If a step contains a `FormSet`, the key will be prefixed with
'formset-' and contain a list of the formset cleaned_data dictionaries.
"""
cleaned_data = {}
for form_key in self.get_form_list():
form_obj = self.get_form(
step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key)
)
if form_obj.is_valid():
if isinstance(form_obj.cleaned_data, (tuple, list)):
cleaned_data.update({
'formset-%s' % form_key: form_obj.cleaned_data
})
else:
cleaned_data.update(form_obj.cleaned_data)
return cleaned_data
def get_cleaned_data_for_step(self, step):
"""
Returns the cleaned data for a given `step`. Before returning the
cleaned data, the stored values are revalidated through the form.
If the data doesn't validate, None will be returned.
"""
if step in self.form_list:
form_obj = self.get_form(step=step,
data=self.storage.get_step_data(step),
files=self.storage.get_step_files(step))
if form_obj.is_valid():
return form_obj.cleaned_data
return None
def get_next_step(self, step=None):
"""
Returns the next step after the given `step`. If no more steps are
available, None will be returned. If the `step` argument is None, the
current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
keys = list(form_list.keys())
key = keys.index(step) + 1
if len(keys) > key:
return keys[key]
return None
def get_prev_step(self, step=None):
"""
Returns the previous step before the given `step`. If there are no
steps available, None will be returned. If the `step` argument is
None, the current step will be determined automatically.
"""
if step is None:
step = self.steps.current
form_list = self.get_form_list()
keys = list(form_list.keys())
key = keys.index(step) - 1
if key >= 0:
return keys[key]
return None
def get_step_index(self, step=None):
"""
Returns the index for the given `step` name. If no step is given,
the current step will be used to get the index.
"""
if step is None:
step = self.steps.current
return list(self.get_form_list().keys()).index(step)
def get_context_data(self, form, **kwargs):
"""
        Returns the template context for a step. You can override this method
to add more data for all or some steps. This method returns a
dictionary containing the rendered form step. Available template
context variables are:
* all extra data stored in the storage backend
* `form` - form instance of the current step
* `wizard` - the wizard instance itself
Example:
.. code-block:: python
class MyWizard(WizardView):
def get_context_data(self, form, **kwargs):
context = super(MyWizard, self).get_context_data(form=form, **kwargs)
if self.steps.current == 'my_step_name':
context.update({'another_var': True})
return context
"""
context = super(WizardView, self).get_context_data(form=form, **kwargs)
context.update(self.storage.extra_data)
context['wizard'] = {
'form': form,
'steps': self.steps,
'management_form': ManagementForm(prefix=self.prefix, initial={
'current_step': self.steps.current,
}),
}
return context
def render(self, form=None, **kwargs):
"""
Returns a ``HttpResponse`` containing all needed context data.
"""
form = form or self.get_form()
context = self.get_context_data(form=form, **kwargs)
return self.render_to_response(context)
def done(self, form_list, **kwargs):
"""
This method must be overridden by a subclass to process to form data
after processing all steps.
"""
raise NotImplementedError("Your %s class has not defined a done() "
"method, which is required." % self.__class__.__name__)
class SessionWizardView(WizardView):
"""
A WizardView with pre-configured SessionStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieWizardView(WizardView):
"""
A WizardView with pre-configured CookieStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
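# Hedged illustration (not part of Django): a minimal wizard subclass showing
# the done() override that WizardView requires. The form fields and redirect
# target below are assumptions made only for this example.
class _ExampleContactForm(forms.Form):
    # Minimal form used only by the illustrative wizard below.
    subject = forms.CharField(max_length=100)
    message = forms.CharField(widget=forms.Textarea)
class _ExampleContactWizard(CookieWizardView):
    """
    Sketch of a concrete wizard: form_list holds the steps and done() is
    called once every step has validated.
    """
    form_list = [_ExampleContactForm]
    def done(self, form_list, **kwargs):
        # All steps validated; real code would persist or e-mail the cleaned
        # data collected below before redirecting.
        cleaned_data = [form.cleaned_data for form in form_list]
        return redirect('/contact/thanks/')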
class NamedUrlWizardView(WizardView):
"""
A WizardView with URL named steps support.
"""
url_name = None
done_step_name = None
@classmethod
def get_initkwargs(cls, *args, **kwargs):
"""
We require a url_name to reverse URLs later. Additionally users can
pass a done_step_name to change the URL name of the "done" view.
"""
assert 'url_name' in kwargs, 'URL name is needed to resolve correct wizard URLs'
extra_kwargs = {
'done_step_name': kwargs.pop('done_step_name', 'done'),
'url_name': kwargs.pop('url_name'),
}
initkwargs = super(NamedUrlWizardView, cls).get_initkwargs(*args, **kwargs)
initkwargs.update(extra_kwargs)
assert initkwargs['done_step_name'] not in initkwargs['form_list'], \
'step name "%s" is reserved for "done" view' % initkwargs['done_step_name']
return initkwargs
def get_step_url(self, step):
return reverse(self.url_name, kwargs={'step': step})
def get(self, *args, **kwargs):
"""
This renders the form or, if needed, does the http redirects.
"""
step_url = kwargs.get('step', None)
if step_url is None:
if 'reset' in self.request.GET:
self.storage.reset()
self.storage.current_step = self.steps.first
if self.request.GET:
query_string = "?%s" % self.request.GET.urlencode()
else:
query_string = ""
return redirect(self.get_step_url(self.steps.current)
+ query_string)
# is the current step the "done" name/view?
elif step_url == self.done_step_name:
last_step = self.steps.last
return self.render_done(self.get_form(step=last_step,
data=self.storage.get_step_data(last_step),
files=self.storage.get_step_files(last_step)
), **kwargs)
# is the url step name not equal to the step in the storage?
# if yes, change the step in the storage (if name exists)
elif step_url == self.steps.current:
# URL step name and storage step name are equal, render!
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.current_step_files,
), **kwargs)
elif step_url in self.get_form_list():
self.storage.current_step = step_url
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.current_step_files,
), **kwargs)
# invalid step name, reset to first and redirect.
else:
self.storage.current_step = self.steps.first
return redirect(self.get_step_url(self.steps.first))
def post(self, *args, **kwargs):
"""
        Do a redirect if the user presses the previous step button. The rest of this
is super'd from WizardView.
"""
wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
if wizard_goto_step and wizard_goto_step in self.get_form_list():
return self.render_goto_step(wizard_goto_step)
return super(NamedUrlWizardView, self).post(*args, **kwargs)
def get_context_data(self, form, **kwargs):
"""
NamedUrlWizardView provides the url_name of this wizard in the context
dict `wizard`.
"""
context = super(NamedUrlWizardView, self).get_context_data(form=form, **kwargs)
context['wizard']['url_name'] = self.url_name
return context
def render_next_step(self, form, **kwargs):
"""
When using the NamedUrlWizardView, we have to redirect to update the
browser's URL to match the shown step.
"""
next_step = self.get_next_step()
self.storage.current_step = next_step
return redirect(self.get_step_url(next_step))
def render_goto_step(self, goto_step, **kwargs):
"""
This method gets called when the current step has to be changed.
`goto_step` contains the requested step to go to.
"""
self.storage.current_step = goto_step
return redirect(self.get_step_url(goto_step))
def render_revalidation_failure(self, failed_step, form, **kwargs):
"""
When a step fails, we have to redirect the user to the first failing
step.
"""
self.storage.current_step = failed_step
return redirect(self.get_step_url(failed_step))
def render_done(self, form, **kwargs):
"""
When rendering the done view, we have to redirect first (if the URL
name doesn't fit).
"""
if kwargs.get('step', None) != self.done_step_name:
return redirect(self.get_step_url(self.done_step_name))
return super(NamedUrlWizardView, self).render_done(form, **kwargs)
class NamedUrlSessionWizardView(NamedUrlWizardView):
"""
A NamedUrlWizardView with pre-configured SessionStorage backend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class NamedUrlCookieWizardView(NamedUrlWizardView):
"""
A NamedUrlFormWizard with pre-configured CookieStorageBackend.
"""
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| bsd-3-clause | 8,514,781,733,380,186,000 | 37.836735 | 90 | 0.607217 | false |
larsks/ansible-modules-core | packaging/language/gem.py | 67 | 8053 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Johan Wiren <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: gem
short_description: Manage Ruby gems
description:
- Manage installation and uninstallation of Ruby gems.
version_added: "1.1"
options:
name:
description:
- The name of the gem to be managed.
required: true
state:
description:
- The desired state of the gem. C(latest) ensures that the latest version is installed.
required: false
choices: [present, absent, latest]
default: present
gem_source:
description:
- The path to a local gem used as installation source.
required: false
include_dependencies:
description:
- Whether to include dependencies or not.
required: false
choices: [ "yes", "no" ]
default: "yes"
repository:
description:
- The repository from which the gem will be installed
required: false
aliases: [source]
user_install:
description:
- Install gem in user's local gems cache or for all users
required: false
default: "yes"
version_added: "1.3"
executable:
description:
- Override the path to the gem executable
required: false
version_added: "1.4"
version:
description:
- Version of the gem to be installed/removed.
required: false
pre_release:
description:
- Allow installation of pre-release versions of the gem.
required: false
default: "no"
version_added: "1.6"
include_doc:
description:
- Install with or without docs.
required: false
default: "no"
version_added: "2.0"
build_flags:
description:
- Allow adding build flags for gem compilation
required: false
version_added: "2.0"
author:
- "Ansible Core Team"
- "Johan Wiren"
'''
EXAMPLES = '''
# Installs version 1.0 of vagrant.
- gem: name=vagrant version=1.0 state=present
# Installs latest available version of rake.
- gem: name=rake state=latest
# Installs rake version 1.0 from a local gem on disk.
- gem: name=rake gem_source=/path/to/gems/rake-1.0.gem state=present
'''
import re
def get_rubygems_path(module):
if module.params['executable']:
return module.params['executable'].split(' ')
else:
return [ module.get_bin_path('gem', True) ]
def get_rubygems_version(module):
cmd = get_rubygems_path(module) + [ '--version' ]
(rc, out, err) = module.run_command(cmd, check_rc=True)
match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
if not match:
return None
return tuple(int(x) for x in match.groups())
def get_installed_versions(module, remote=False):
cmd = get_rubygems_path(module)
cmd.append('query')
if remote:
cmd.append('--remote')
if module.params['repository']:
cmd.extend([ '--source', module.params['repository'] ])
cmd.append('-n')
cmd.append('^%s$' % module.params['name'])
(rc, out, err) = module.run_command(cmd, check_rc=True)
installed_versions = []
for line in out.splitlines():
match = re.match(r"\S+\s+\((.+)\)", line)
if match:
versions = match.group(1)
for version in versions.split(', '):
installed_versions.append(version.split()[0])
return installed_versions
def exists(module):
if module.params['state'] == 'latest':
remoteversions = get_installed_versions(module, remote=True)
if remoteversions:
module.params['version'] = remoteversions[0]
installed_versions = get_installed_versions(module)
if module.params['version']:
if module.params['version'] in installed_versions:
return True
else:
if installed_versions:
return True
return False
def uninstall(module):
if module.check_mode:
return
cmd = get_rubygems_path(module)
cmd.append('uninstall')
if module.params['version']:
cmd.extend([ '--version', module.params['version'] ])
else:
cmd.append('--all')
cmd.append('--executable')
cmd.append(module.params['name'])
module.run_command(cmd, check_rc=True)
def install(module):
if module.check_mode:
return
ver = get_rubygems_version(module)
if ver:
major = ver[0]
else:
major = None
cmd = get_rubygems_path(module)
cmd.append('install')
if module.params['version']:
cmd.extend([ '--version', module.params['version'] ])
if module.params['repository']:
cmd.extend([ '--source', module.params['repository'] ])
if not module.params['include_dependencies']:
cmd.append('--ignore-dependencies')
else:
if major and major < 2:
cmd.append('--include-dependencies')
if module.params['user_install']:
cmd.append('--user-install')
else:
cmd.append('--no-user-install')
if module.params['pre_release']:
cmd.append('--pre')
if not module.params['include_doc']:
cmd.append('--no-rdoc')
cmd.append('--no-ri')
cmd.append(module.params['gem_source'])
if module.params['build_flags']:
cmd.extend([ '--', module.params['build_flags'] ])
module.run_command(cmd, check_rc=True)
def main():
module = AnsibleModule(
argument_spec = dict(
executable = dict(required=False, type='str'),
gem_source = dict(required=False, type='str'),
include_dependencies = dict(required=False, default=True, type='bool'),
name = dict(required=True, type='str'),
repository = dict(required=False, aliases=['source'], type='str'),
state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'),
user_install = dict(required=False, default=True, type='bool'),
pre_release = dict(required=False, default=False, type='bool'),
include_doc = dict(required=False, default=False, type='bool'),
version = dict(required=False, type='str'),
build_flags = dict(required=False, type='str'),
),
supports_check_mode = True,
mutually_exclusive = [ ['gem_source','repository'], ['gem_source','version'] ],
)
if module.params['version'] and module.params['state'] == 'latest':
module.fail_json(msg="Cannot specify version when state=latest")
if module.params['gem_source'] and module.params['state'] == 'latest':
module.fail_json(msg="Cannot maintain state=latest when installing from local source")
if not module.params['gem_source']:
module.params['gem_source'] = module.params['name']
changed = False
if module.params['state'] in [ 'present', 'latest']:
if not exists(module):
install(module)
changed = True
elif module.params['state'] == 'absent':
if exists(module):
uninstall(module)
changed = True
result = {}
result['name'] = module.params['name']
result['state'] = module.params['state']
if module.params['version']:
result['version'] = module.params['version']
result['changed'] = changed
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 5,974,740,311,679,299,000 | 30.457031 | 126 | 0.620142 | false |
vpramo/contrail-controller | src/config/common/setup.py | 8 | 1181 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from setuptools import setup
import re
def requirements(filename):
with open(filename) as f:
lines = f.read().splitlines()
c = re.compile(r'\s*#.*')
return filter(bool, map(lambda y: c.sub('', y).strip(), lines))
setup(
name='cfgm_common',
version='0.1dev',
packages=['cfgm_common',
'cfgm_common.ifmap',
'cfgm_common.uve',
'cfgm_common.uve.acl',
'cfgm_common.uve.service_instance',
'cfgm_common.uve.vnc_api',
'cfgm_common.uve.virtual_machine',
'cfgm_common.uve.virtual_network',
'cfgm_common.uve.physical_router',
'cfgm_common.uve.cfgm_cpuinfo',
'cfgm_common.uve.cfgm_cpuinfo.cpuinfo',
'cfgm_common.uve.cfgm_cpuinfo.process_info'
],
package_data={'': ['*.html', '*.css', '*.xml']},
zip_safe=False,
long_description="VNC Configuration Common Utils",
install_requires=requirements('requirements.txt'),
tests_require=requirements('test-requirements.txt'),
test_suite='tests.test_suite',
)
| apache-2.0 | -5,403,071,694,133,926,000 | 32.742857 | 67 | 0.585097 | false |
hclhkbu/dlbench | tools/mxnet/rnn/train_rnn.py | 2 | 10054 | from common import find_mxnet
import numpy as np
import os
import mxnet as mx
import argparse
parser = argparse.ArgumentParser(description="Train RNN on Penn Tree Bank",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, help='the input data directory')
parser.add_argument('--sequence-lens', type=str, default="32", help='the sequence lengths, e.g "8,16,32,64,128"')
parser.add_argument('--num-examples', type=str, help='Flag kept for consistency; not used by the RNN script')
parser.add_argument('--test', default=False, action='store_true',
help='whether to do testing instead of training')
parser.add_argument('--model-prefix', type=str, default=None,
help='path to save/load model')
parser.add_argument('--load-epoch', type=int, default=0,
help='load from epoch')
parser.add_argument('--num-layers', type=int, default=2,
help='number of stacked RNN layers')
parser.add_argument('--num-hidden', type=int, default=256,
help='hidden layer size')
parser.add_argument('--num-embed', type=int, default=256,
help='embedding layer size')
parser.add_argument('--bidirectional', type=bool, default=False,
help='whether to use bidirectional layers')
parser.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ' \
'Increase batch size when using multiple gpus for best performance.')
parser.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
parser.add_argument('--num-epochs', type=int, default=25,
help='max num of epochs')
parser.add_argument('--lr', type=float, default=0.01,
help='initial learning rate')
parser.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
parser.add_argument('--mom', type=float, default=0.0,
help='momentum for sgd')
parser.add_argument('--wd', type=float, default=0.00001,
help='weight decay for sgd')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--disp-batches', type=int, default=50,
help='show progress for every n batches')
parser.add_argument('--stack-rnn', default=False,
help='stack fused RNN cells to reduce communication overhead')
parser.add_argument('--dropout', type=float, default='0.0',
help='dropout probability (1.0 - keep probability)')
args = parser.parse_args()
#buckets = [64]
buckets = [int(i) for i in args.sequence_lens.split(',')]
start_label = 1
invalid_label = 0
data_dir = os.environ["HOME"] + "/data/mxnet/ptb/" if args.data_dir is None else args.data_dir
def tokenize_text(fname, vocab=None, invalid_label=-1, start_label=0):
lines = open(fname).readlines()
lines = [filter(None, i.split(' ')) for i in lines]
sentences, vocab = mx.rnn.encode_sentences(lines, vocab=vocab, invalid_label=invalid_label, start_label=start_label)
return sentences, vocab
def get_data(layout):
train_sent, vocab = tokenize_text(data_dir + "ptb.train.txt", start_label=start_label,
invalid_label=invalid_label)
val_sent, _ = tokenize_text(data_dir + "ptb.test.txt", vocab=vocab, start_label=start_label,
invalid_label=invalid_label)
data_train = mx.rnn.BucketSentenceIter(train_sent, args.batch_size, buckets=buckets, invalid_label=invalid_label, layout=layout)
data_val = mx.rnn.BucketSentenceIter(val_sent, args.batch_size, buckets=buckets, invalid_label=invalid_label, layout=layout)
return data_train, data_val, vocab
def train(args):
data_train, data_val, vocab = get_data('TN')
sample_size = 0
for x in data_train.data:
sample_size += len(x)
print("len of data train===================== " + str(sample_size))
if args.stack_rnn:
cell = mx.rnn.SequentialRNNCell()
for i in range(args.num_layers):
cell.add(mx.rnn.FusedRNNCell(args.num_hidden, num_layers=1,
mode='lstm', prefix='lstm_l%d'%i,
bidirectional=args.bidirectional))
if args.dropout > 0 and i < args.num_layers - 1:
cell.add(mx.rnn.DropoutCell(args.dropout, prefix='lstm_d%d'%i))
else:
cell = mx.rnn.FusedRNNCell(args.num_hidden, num_layers=args.num_layers, dropout=args.dropout,
mode='lstm', bidirectional=args.bidirectional)
def sym_gen(seq_len):
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
embed = mx.sym.Embedding(data=data, input_dim=len(vocab), output_dim=args.num_embed,name='embed')
output, _ = cell.unroll(seq_len, inputs=embed, merge_outputs=True, layout='TNC')
pred = mx.sym.Reshape(output,
shape=(-1, args.num_hidden*(1+args.bidirectional)))
pred = mx.sym.FullyConnected(data=pred, num_hidden=len(vocab), name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred, ('data',), ('softmax_label',)
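    # Shape sketch (illustrative, assuming the defaults --batch-size 128 and
    # --num-hidden 256 with a 32-token bucket): layout 'TNC' makes the unrolled
    # output (seq_len, batch, hidden) = (32, 128, 256); the Reshape inside
    # sym_gen flattens it to (4096, 256) so a single FullyConnected +
    # SoftmaxOutput scores every position against the vocabulary, matching the
    # label reshaped to (-1,).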
if args.gpus:
contexts = [mx.gpu(int(i)) for i in args.gpus.split(',')]
else:
contexts = mx.cpu(0)
model = mx.mod.BucketingModule(
sym_gen = sym_gen,
default_bucket_key = data_train.default_bucket_key,
context = contexts)
if args.load_epoch:
_, arg_params, aux_params = mx.rnn.load_rnn_checkpoint(
cell, args.model_prefix, args.load_epoch)
else:
arg_params = None
aux_params = None
opt_params = {
'learning_rate': args.lr,
'wd': args.wd,
'clip_gradient': 5.0
}
if args.optimizer not in ['adadelta', 'adagrad', 'adam', 'rmsprop']:
opt_params['momentum'] = args.mom
    print(str(int((sample_size - args.batch_size) / args.batch_size)))
model.fit(
train_data = data_train,
eval_data = data_val,
eval_metric = mx.metric.Perplexity(invalid_label),
kvstore = args.kv_store,
optimizer = args.optimizer,
optimizer_params = opt_params,
#initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
#initializer = mx.initializer.Uniform(scale=0.1),
initializer = mx.init.Uniform(scale=0.1),
arg_params = arg_params,
aux_params = aux_params,
begin_epoch = args.load_epoch,
num_epoch = args.num_epochs,
#batch_end_callback = mx.callback.Speedometer(args.batch_size, args.disp_batches),
batch_end_callback = mx.callback.Speedometer(args.batch_size, int((sample_size-args.batch_size)/args.batch_size) - 1),
epoch_end_callback = mx.rnn.do_rnn_checkpoint(cell, args.model_prefix, 1)
if args.model_prefix else None)
def test(args):
    assert args.model_prefix, "Must specify path to load from"
_, data_val, vocab = get_data('NT')
if not args.stack_rnn:
stack = mx.rnn.FusedRNNCell(args.num_hidden, num_layers=args.num_layers,
mode='lstm', bidirectional=args.bidirectional).unfuse()
else:
stack = mx.rnn.SequentialRNNCell()
for i in range(args.num_layers):
cell = mx.rnn.LSTMCell(num_hidden=args.num_hidden, prefix='lstm_%dl0_'%i)
if args.bidirectional:
cell = mx.rnn.BidirectionalCell(
cell,
mx.rnn.LSTMCell(num_hidden=args.num_hidden, prefix='lstm_%dr0_'%i),
output_prefix='bi_lstm_%d'%i)
stack.add(cell)
def sym_gen(seq_len):
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
embed = mx.sym.Embedding(data=data, input_dim=len(vocab),
output_dim=args.num_embed, name='embed')
stack.reset()
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs,
shape=(-1, args.num_hidden*(1+args.bidirectional)))
pred = mx.sym.FullyConnected(data=pred, num_hidden=len(vocab), name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred, ('data',), ('softmax_label',)
if args.gpus:
contexts = [mx.gpu(int(i)) for i in args.gpus.split(',')]
else:
contexts = mx.cpu(0)
model = mx.mod.BucketingModule(
sym_gen = sym_gen,
default_bucket_key = data_val.default_bucket_key,
context = contexts)
model.bind(data_val.provide_data, data_val.provide_label, for_training=False)
# note here we load using SequentialRNNCell instead of FusedRNNCell.
_, arg_params, aux_params = mx.rnn.load_rnn_checkpoint(stack, args.model_prefix, args.load_epoch)
model.set_params(arg_params, aux_params)
model.score(data_val, mx.metric.Perplexity(invalid_label),
batch_end_callback=mx.callback.Speedometer(args.batch_size, 5))
if __name__ == '__main__':
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
    if args.num_layers >= 4 and args.gpus and len(args.gpus.split(',')) >= 4 and not args.stack_rnn:
print('WARNING: stack-rnn is recommended to train complex model on multiple GPUs')
if args.test:
# Demonstrates how to load a model trained with CuDNN RNN and predict
# with non-fused MXNet symbol
test(args)
else:
train(args)
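# Example invocations (a sketch; the script name, GPU id and checkpoint prefix
# are assumptions, and ptb.train.txt / ptb.test.txt must exist under the data
# directory resolved above):
#
#   python bucketing_lstm.py --gpus 0 --sequence-lens 8,16,32,64 \
#       --num-layers 2 --num-hidden 256 --num-epochs 25 --model-prefix ptb_lstm
#
#   python bucketing_lstm.py --test --model-prefix ptb_lstm --load-epoch 25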
| mit | -3,025,118,791,386,855,400 | 43.486726 | 133 | 0.602248 | false |
sysadminmatmoz/odoo-clearcorp | product_category_sequence/product_product.py | 3 | 2553 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class productProductinherit(models.Model):
_inherit = 'product.product'
    ir_sequence_id = fields.Many2one('ir.sequence', related='categ_id.ir_sequence_cat_id', store=True, string="Product Sequence")
    # Recompute the product sequence when the assigned category changes
    @api.onchange('categ_id')
def onchange_categ_id(self):
if self.categ_id:
cat_obj = self.env['product.category'].browse(self.categ_id.id)
if cat_obj.ir_sequence_cat_id:
return {'value': {'ir_sequence_id': cat_obj.ir_sequence_cat_id.id}}
else:
return {'value': {'ir_sequence_id': False}}
return {'value': {'ir_sequence_id': False}}
    # Override create: for new products without a default_code, take one from the sequence of the assigned category
@api.model
def create(self, values):
seq_obj = self.env['ir.sequence']
        if not values.get('default_code'):
if 'categ_id' in values.keys():
categ_obj = self.env['product.category'].browse(values['categ_id'])
if categ_obj.ir_sequence_cat_id:
default_code = seq_obj.next_by_id(categ_obj.ir_sequence_cat_id.id)
values['default_code'] = default_code
res = super(productProductinherit, self).create(values)
return res
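    # Usage sketch (hypothetical data): if the product's category has
    # ir_sequence_cat_id pointing at a sequence with prefix 'PROD' and padding 4,
    #
    #   self.env['product.product'].create({'name': 'Widget', 'categ_id': categ.id})
    #
    # fills default_code with something like 'PROD0001', while an explicit
    # default_code passed in values is left untouched.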
| agpl-3.0 | -447,596,186,688,590,460 | 45.418182 | 131 | 0.602037 | false |
victorywang80/Maintenance | saltstack/src/salt/modules/pacman.py | 1 | 13895 | # -*- coding: utf-8 -*-
'''
A module to wrap pacman calls, since Arch is the best
(https://wiki.archlinux.org/index.php/Arch_is_the_best)
'''
# Import python libs
import copy
import logging
import re
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
'''
Set the virtual pkg module if the os is Arch
'''
if __grains__['os'] in ('Arch', 'Arch ARM'):
return __virtualname__
return False
def _list_removed(old, new):
'''
List the packages which have been removed between the two package objects
'''
return [x for x in old if x not in new]
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
refresh = salt.utils.is_true(kwargs.pop('refresh', True))
if len(names) == 0:
return ''
# Refresh before looking for the latest version available
if refresh:
refresh_db()
ret = {}
# Initialize the dict with empty strings
for name in names:
ret[name] = ''
cmd = 'pacman -Sp --needed --print-format "%n %v" ' \
'{0}'.format(' '.join(names))
for line in __salt__['cmd.run_stdout'](cmd).splitlines():
try:
name, version_num = line.split()
# Only add to return dict if package is in the list of packages
# passed, otherwise dependencies will make their way into the
# return data.
if name in names:
ret[name] = version_num
except (ValueError, IndexError):
pass
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = latest_version
def upgrade_available(name):
'''
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
return latest_version(name) != ''
def list_upgrades():
'''
List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
upgrades = {}
lines = __salt__['cmd.run'](
'pacman -Sypu --print-format "%n %v" | egrep -v ' r'"^\s|^:"'
).splitlines()
for line in lines:
comps = line.split(' ')
if len(comps) < 2:
continue
upgrades[comps[0]] = comps[1]
return upgrades
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
'''
return __salt__['pkg_resource.version'](*names, **kwargs)
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
'''
versions_as_list = salt.utils.is_true(versions_as_list)
# 'removed' not yet implemented or not applicable
if salt.utils.is_true(kwargs.get('removed')):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
cmd = 'pacman -Q'
ret = {}
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if not line:
continue
try:
name, version_num = line.split()[0:2]
except ValueError:
log.error('Problem parsing pacman -Q: Unexpected formatting in '
'line: "{0}"'.format(line))
else:
__salt__['pkg_resource.add_pkg'](ret, name, version_num)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
def refresh_db():
'''
Just run a ``pacman -Sy``, return a dict::
{'<database name>': Bool}
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
cmd = 'LANG=C pacman -Sy'
ret = {}
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if line.strip().startswith('::'):
continue
if not line:
continue
key = line.strip().split()[0]
if 'is up to date' in line:
ret[key] = False
elif 'downloading' in line:
key = line.strip().split()[1].split('.')[0]
ret[key] = True
return ret
def install(name=None,
refresh=True,
pkgs=None,
sources=None,
**kwargs):
'''
Install the passed package, add refresh=True to install with an -Sy.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to refresh the package database before installing.
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list. A specific version number can be specified
by using a single-element dict representing the package and its
version. As with the ``version`` parameter above, comparison operators
can be used to target a specific version of a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-4"}]'
salt '*' pkg.install pkgs='["foo", {"bar": "<1.2.3-4"}]'
sources
A list of packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.pkg.tar.xz"},{"bar": "salt://bar.pkg.tar.xz"}]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name,
pkgs,
sources,
**kwargs)
if pkg_params is None or len(pkg_params) == 0:
return {}
version_num = kwargs.get('version')
if version_num:
if pkgs is None and sources is None:
# Allow "version" to work for single package target
pkg_params = {name: version_num}
else:
log.warning('"version" parameter will be ignored for multiple '
'package targets')
if pkg_type == 'file':
cmd = 'pacman -U --noprogressbar --noconfirm ' \
'{0}'.format(' '.join(pkg_params))
targets = pkg_params
elif pkg_type == 'repository':
targets = []
problems = []
for param, version_num in pkg_params.iteritems():
if version_num is None:
targets.append(param)
else:
match = re.match('^([<>])?(=)?([^<>=]+)$', version_num)
if match:
gt_lt, eq, verstr = match.groups()
prefix = gt_lt or ''
prefix += eq or ''
# If no prefix characters were supplied, use '='
prefix = prefix or '='
targets.append('{0}{1}{2}'.format(param, prefix, verstr))
else:
msg = 'Invalid version string "{0}" for package ' \
'"{1}"'.format(version_num, name)
problems.append(msg)
if problems:
for problem in problems:
log.error(problem)
return {}
if salt.utils.is_true(refresh):
cmd = 'pacman -Syu --noprogressbar --noconfirm ' \
'"{0}"'.format('" "'.join(targets))
else:
cmd = 'pacman -S --noprogressbar --noconfirm ' \
'"{0}"'.format('" "'.join(targets))
old = list_pkgs()
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.compare_dicts(old, new)
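# Illustration of how install() assembles its pacman call (values are examples,
# not output captured from a real run):
#   pkg.install pkgs='["foo", {"bar": "<1.2.3-4"}]' builds the targets
#   ['foo', 'bar<1.2.3-4'] and, with the default refresh=True, runs
#   pacman -Syu --noprogressbar --noconfirm "foo" "bar<1.2.3-4"
# The return value is the change dict from salt.utils.compare_dicts, e.g.
# {'bar': {'old': '', 'new': '1.2.2-1'}}.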
def upgrade():
'''
Run a full system upgrade, a pacman -Syu
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
old = list_pkgs()
cmd = 'pacman -Syu --noprogressbar --noconfirm'
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.compare_dicts(old, new)
def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
'''
remove and purge do identical things but with different pacman commands,
this function performs the common logic.
'''
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
remove_arg = '-Rsc' if action == 'purge' else '-R'
cmd = 'pacman {0} --noprogressbar --noconfirm {1}'.format(remove_arg,
' '.join(targets))
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.compare_dicts(old, new)
def remove(name=None, pkgs=None, **kwargs):
'''
Remove packages with ``pacman -R``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
return _uninstall(action='remove', name=name, pkgs=pkgs)
def purge(name=None, pkgs=None, **kwargs):
'''
    Recursively remove a package and all dependencies which were installed
    with it. This calls ``pacman -Rsc``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return _uninstall(action='purge', name=name, pkgs=pkgs)
def file_list(*packages):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
errors = []
ret = []
cmd = 'pacman -Ql {0}'.format(' '.join(packages))
for line in __salt__['cmd.run'](cmd).splitlines():
if line.startswith('error'):
errors.append(line)
else:
comps = line.split()
ret.append(' '.join(comps[1:]))
return {'errors': errors, 'files': ret}
def file_dict(*packages):
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
package database (not generally recommended).
CLI Examples:
.. code-block:: bash
        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
        salt '*' pkg.file_dict
'''
errors = []
ret = {}
cmd = 'pacman -Ql {0}'.format(' '.join(packages))
for line in __salt__['cmd.run'](cmd).splitlines():
if line.startswith('error'):
errors.append(line)
else:
comps = line.split()
if not comps[0] in ret:
ret[comps[0]] = []
ret[comps[0]].append((' '.join(comps[1:])))
return {'errors': errors, 'packages': ret}
| apache-2.0 | -4,393,131,029,616,571,400 | 27.649485 | 110 | 0.556531 | false |
cnobile2012/dcolumn | example_site/books/admin.py | 1 | 2681 | #
# example_site/books/admin.py
#
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from dcolumn.common.admin_mixins import UserAdminMixin
from dcolumn.dcolumns.admin import KeyValueInline
from .models import Promotion, Book, Author, Publisher
from .forms import PromotionForm, BookForm, AuthorForm, PublisherForm
#
# Promotion
#
@admin.register(Promotion)
class PromotionAdmin(UserAdminMixin):
fieldsets = (
(None, {'fields': ('name',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('column_collection', 'active', 'creator',
'created', 'updater', 'updated',)}),
)
readonly_fields = ('creator', 'created', 'updater', 'updated',)
list_display = ('name', 'column_collection', 'active', 'updater_producer',
'updated', 'detail_producer',)
list_editable = ('active',)
inlines = (KeyValueInline,)
#
# Author
#
@admin.register(Author)
class AuthorAdmin(UserAdminMixin):
fieldsets = (
(None, {'fields': ('name',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('column_collection', 'active', 'creator',
'created', 'updater', 'updated',)}),
)
readonly_fields = ('creator', 'created', 'updater', 'updated',)
list_display = ('name', 'column_collection', 'updater_producer',
'updated', 'detail_producer',)
inlines = (KeyValueInline,)
#
# Publisher
#
@admin.register(Publisher)
class PublisherAdmin(UserAdminMixin):
fieldsets = (
(None, {'fields': ('name',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('column_collection', 'active', 'creator',
'created', 'updater', 'updated',)}),
)
readonly_fields = ('creator', 'created', 'updater', 'updated',)
list_display = ('name', 'column_collection', 'updater_producer',
'updated', 'detail_producer',)
inlines = (KeyValueInline,)
#
# Book
#
@admin.register(Book)
class BookAdmin(UserAdminMixin):
fieldsets = (
(None, {'fields': ('title',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('column_collection', 'active', 'creator',
'created', 'updater', 'updated',)}),
)
list_editable = ('active',)
readonly_fields = ('creator', 'created', 'updater', 'updated',)
list_display = ('title', 'column_collection', 'active', 'updater_producer',
'updated', 'detail_producer',)
inlines = (KeyValueInline,)
| mit | 7,266,502,489,189,427,000 | 31.695122 | 79 | 0.556882 | false |
9thSenseRobotics/bosh_server | nbxmpp-0.1/build/lib.linux-i686-2.7/nbxmpp/smacks.py | 2 | 4527 | from protocol import Acks
from protocol import NS_STREAM_MGMT
import logging
log = logging.getLogger('nbxmpp.smacks')
class Smacks():
'''
    Smacks is the Stream Management (XEP-0198) class. It takes care of requesting
and sending acks. Also, it keeps track of the unhandled outgoing stanzas.
The dispatcher has to be able to access this class to increment the
number of handled stanzas
'''
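    # Rough shape of the exchange this class manages (an XEP-0198 illustration;
    # the namespace shown is an example, the real one comes from NS_STREAM_MGMT):
    #
    #   client: <enable xmlns='urn:xmpp:sm:3' resume='true'/>
    #   server: <enabled xmlns='urn:xmpp:sm:3' id='abc123' resume='true'/>
    #   ... stanzas flow, each side counting what it has handled ...
    #   server: <r xmlns='urn:xmpp:sm:3'/>        (please ack)
    #   client: <a xmlns='urn:xmpp:sm:3' h='5'/>  (5 stanzas handled so far)
    #
    # out_h/in_h hold those counters and uqueue keeps unacked outgoing stanzas
    # so they can be resent after a resume.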
def __init__(self, con):
self.con = con # Connection object
self.out_h = 0 # Outgoing stanzas handled
self.in_h = 0 # Incoming stanzas handled
self.uqueue = [] # Unhandled stanzas queue
self.session_id = None
self.resumption = False # If server supports resume
# Max number of stanzas in queue before making a request
self.max_queue = 5
self._owner = None
self.resuming = False
self.enabled = False # If SM is enabled
self.location = None
def set_owner(self, owner):
self._owner = owner
# Register handlers
owner.Dispatcher.RegisterNamespace(NS_STREAM_MGMT)
owner.Dispatcher.RegisterHandler('enabled', self._neg_response,
xmlns=NS_STREAM_MGMT)
owner.Dispatcher.RegisterHandler('r', self.send_ack,
xmlns=NS_STREAM_MGMT)
owner.Dispatcher.RegisterHandler('a', self.check_ack,
xmlns=NS_STREAM_MGMT)
owner.Dispatcher.RegisterHandler('resumed', self.check_ack,
xmlns=NS_STREAM_MGMT)
owner.Dispatcher.RegisterHandler('failed', self.error_handling,
xmlns=NS_STREAM_MGMT)
def _neg_response(self, disp, stanza):
r = stanza.getAttr('resume')
if r == 'true' or r == 'True' or r == '1':
self.resumption = True
self.session_id = stanza.getAttr('id')
if r == 'false' or r == 'False' or r == '0':
self.negociate(False)
l = stanza.getAttr('location')
if l:
self.location = l
def negociate(self, resume=True):
        # Every time we attempt to negotiate, we must erase all previous info
# about any previous session
self.uqueue = []
self.in_h = 0
self.out_h = 0
self.session_id = None
self.enabled = True
stanza = Acks()
stanza.buildEnable(resume)
self._owner.Connection.send(stanza, now=True)
def resume_request(self):
if not self.session_id:
self.resuming = False
log.error('Attempted to resume without a valid session id ')
return
resume = Acks()
resume.buildResume(self.in_h, self.session_id)
self._owner.Connection.send(resume, False)
def send_ack(self, disp, stanza):
ack = Acks()
ack.buildAnswer(self.in_h)
self._owner.Connection.send(ack, False)
def request_ack(self):
r = Acks()
r.buildRequest()
self._owner.Connection.send(r, False)
def check_ack(self, disp, stanza):
'''
Checks if the number of stanzas sent are the same as the
number of stanzas received by the server. Pops stanzas that were
handled by the server from the queue.
'''
h = int(stanza.getAttr('h'))
diff = self.out_h - h
if len(self.uqueue) < diff or diff < 0:
            log.error('Mismatch between the number of stanzas handled by the server and the client')
else:
while (len(self.uqueue) > diff):
self.uqueue.pop(0)
if stanza.getName() == 'resumed':
self.enabled = True
self.resuming = True
self.con.set_oldst()
if self.uqueue != []:
for i in self.uqueue:
self._owner.Connection.send(i, False)
def error_handling(self, disp, stanza):
# If the server doesn't recognize previd, forget about resuming
# Ask for service discovery, etc..
if stanza.getTag('item-not-found'):
self.resuming = False
self.enabled = False
# we need to bind a resource
self._owner.NonBlockingBind.resuming = False
self._owner._on_auth_bind(None)
return
# Doesn't support resumption
if stanza.getTag('feature-not-implemented'):
self.negociate(False)
return
if stanza.getTag('unexpected-request'):
self.enabled = False
            log.error('Gajim failed to negotiate Stream Management')
return
| gpl-3.0 | -1,592,018,348,041,819,600 | 33.295455 | 78 | 0.588027 | false |
pystruct/pystruct | pystruct/utils/graph_functions.py | 5 | 1063 | import numpy as np
def is_forest(edges, n_vertices=None):
if n_vertices is not None and len(edges) > n_vertices - 1:
return False
n_vertices = np.max(edges) + 1
parents = -np.ones(n_vertices)
visited = np.zeros(n_vertices, dtype=np.bool)
neighbors = [[] for i in range(n_vertices)]
for edge in edges:
neighbors[edge[0]].append(edge[1])
neighbors[edge[1]].append(edge[0])
lonely = 0
while lonely < n_vertices:
for i in range(lonely, n_vertices):
if not visited[i]:
queue = [i]
lonely = i + 1
visited[i] = True
break
lonely = n_vertices
while queue:
node = queue.pop()
for neighbor in neighbors[node]:
if not visited[neighbor]:
parents[neighbor] = node
queue.append(neighbor)
visited[neighbor] = True
elif not parents[node] == neighbor:
return False
return True
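# Minimal usage sketch (hypothetical example, not part of the original module):
# the path 0-1-2 is a forest, while closing it with the edge (2, 0) adds a cycle.
if __name__ == "__main__":
    path_edges = np.array([[0, 1], [1, 2]])
    cycle_edges = np.array([[0, 1], [1, 2], [2, 0]])
    print(is_forest(path_edges))   # expected: True
    print(is_forest(cycle_edges))  # expected: False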
| bsd-2-clause | -707,808,938,073,425,700 | 31.212121 | 62 | 0.516463 | false |
jordotech/sherri_satchmo | satchmo/apps/satchmo_store/shop/templatetags/satchmo_cart.py | 12 | 5435 | from django import template
from livesettings import config_value
from l10n.utils import moneyfmt
from tax.templatetags.satchmo_tax import CartitemLineTaxedTotalNode, CartTaxedTotalNode
import logging
log = logging.getLogger('shop.templatetags.satchmo_cart')
register = template.Library()
class CartitemTotalNode(template.Node):
"""Show the total for the cartitem"""
def __init__(self, cartitem, show_currency, show_tax):
self.cartitem = template.Variable(cartitem)
self.raw_cartitem = cartitem
self.show_currency = template.Variable(show_currency)
self.raw_currency = show_currency
self.show_tax = template.Variable(show_tax)
self.raw_tax = show_tax
def render(self, context):
try:
show_tax = self.show_tax.resolve(context)
except template.VariableDoesNotExist:
show_tax = self.raw_tax
if show_tax:
tag = CartitemLineTaxedTotalNode(self.raw_cartitem, self.raw_currency)
return tag.render(context)
try:
cartitem = self.cartitem.resolve(context)
except template.VariableDoesNotExist:
log.warn('Could not resolve template variable: %s', self.cartitem)
return ''
try:
show_currency = self.show_currency.resolve(context)
except template.VariableDoesNotExist:
show_currency = self.raw_currency
if show_currency:
return moneyfmt(cartitem.line_total)
else:
return cartitem.line_total
def cartitem_custom_details(cartitem):
is_custom = "CustomProduct" in cartitem.product.get_subtypes()
return {
'cartitem' : cartitem,
'is_custom' : is_custom
}
register.inclusion_tag("product/cart_detail_customproduct.html", takes_context=False)(cartitem_custom_details)
def cartitem_subscription_details(cartitem):
log.debug('sub details')
return {
'cartitem' : cartitem,
'is_subscription' : cartitem.product.is_subscription
}
register.inclusion_tag("product/cart_detail_subscriptionproduct.html", takes_context=False)(cartitem_subscription_details)
def cartitem_total(parser, token):
"""Returns the line total for the cartitem, possibly with tax added. If currency evaluates true,
then return the total formatted through moneyfmt.
Example::
{% cartitem_total cartitem [show_tax] [currency] %}
"""
tokens = token.contents.split()
if len(tokens) < 2:
raise template.TemplateSyntaxError, "'%s' tag requires a cartitem argument" % tokens[0]
cartitem = tokens[1]
if len(tokens) > 2:
show_tax = tokens[2]
else:
show_tax = config_value('TAX', 'DEFAULT_VIEW_TAX')
if len(tokens) >3:
show_currency = tokens[3]
else:
show_currency = 'True'
return CartitemTotalNode(cartitem, show_currency, show_tax)
cartitem_total = register.tag(cartitem_total)
class CartTotalNode(template.Node):
"""Show the total for the cart"""
def __init__(self, cart, show_currency, show_tax, show_discount):
self.cart = template.Variable(cart)
self.raw_cart = cart
self.show_currency = template.Variable(show_currency)
self.raw_currency = show_currency
self.show_tax = template.Variable(show_tax)
self.raw_tax = show_tax
self.show_discount = template.Variable(show_discount)
self.raw_show_discount = show_discount
def render(self, context):
try:
show_tax = self.show_tax.resolve(context)
except template.VariableDoesNotExist:
show_tax = self.raw_tax
if show_tax:
tag = CartTaxedTotalNode(self.raw_cart, self.raw_currency)
return tag.render(context)
try:
cart = self.cart.resolve(context)
except template.VariableDoesNotExist:
log.warn('Could not resolve template variable: %s', self.cart)
return ''
try:
show_currency = self.show_currency.resolve(context)
except template.VariableDoesNotExist:
show_currency = self.raw_currency
try:
show_discount = self.show_discount.resolve(context)
except template.VariableDoesNotExist:
show_discount = self.raw_show_discount
if show_discount:
total = cart.total_undiscounted
else:
total = cart.total
        if show_currency:
            return moneyfmt(total)
        else:
            return total
def cart_total(parser, token):
"""Returns the total for the cart, possibly with tax added. If currency evaluates true,
then return the total formatted through moneyfmt.
Example::
{% cart_total cart [show_tax] [currency] [discounted] %}
"""
tokens = token.contents.split()
if len(tokens) < 2:
raise template.TemplateSyntaxError, "'%s' tag requires a cart argument" % tokens[0]
cart = tokens[1]
if len(tokens) > 2:
show_tax = tokens[2]
else:
show_tax = config_value('TAX', 'DEFAULT_VIEW_TAX')
if len(tokens) > 3:
show_currency = tokens[3]
else:
show_currency = True
if len(tokens) > 4:
show_discount = tokens[4]
else:
show_discount = False
return CartTotalNode(cart, show_currency, show_tax, show_discount)
cart_total = register.tag(cart_total)
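# Template usage sketch (assumes `cart` and `cartitem` objects are available in
# the template context and that this library is loaded as satchmo_cart):
#
#   {% load satchmo_cart %}
#   {% cartitem_total cartitem %}   line total (taxed if TAX DEFAULT_VIEW_TAX is enabled)
#   {% cart_total cart 0 1 %}       untaxed cart total, formatted with moneyfmt
#
# The optional arguments are resolved as template variables first and fall back
# to their literal token text, so both context variables and literals work.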
| bsd-3-clause | 1,507,412,427,990,449,700 | 29.363128 | 122 | 0.640294 | false |
joshuahellier/PhDStuff | codes/kmc/batchJobs/rateCaculation/mainStuff/concFlow.py | 1 | 14456 | import sys
import os
import math
import shutil
resultDir = os.environ.get('RESULTS')
if resultDir == None :
print ("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
# Expecting input botConc, topConc, rateConstFull, sysSize, analInterval, numStepsEquilib, numStepsSnapshot, numStepsAnal, numStepsReq, numPasses, timeInterval, fileCode
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
from RateCalc import *
from DensHist import *
from BlockStats import *
from foldyFloatList import *
botConc = float(sys.argv[1])
topConc = float(sys.argv[2])
rateConstFull = float(sys.argv[3])
sysSize = int(sys.argv[4])
analInterval = int(sys.argv[5])
numStepsEquilib = int(sys.argv[6])
numStepsSnapshot = int(sys.argv[7])
numStepsAnal = int(sys.argv[8])
numStepsReq = int(sys.argv[9])
numPasses = int(sys.argv[10])
timeInterval = float(sys.argv[11])
fileInfo = sys.argv[12]
resultsPlace = resultDir+"/"+fileInfo+"/"
if not os.path.exists(resultsPlace):
os.makedirs(resultsPlace)
with open(resultsPlace+'settings', 'w') as f:
f.write('BotConcentration = ' + str(botConc) +'\n')
f.write('TopConcentration = ' + str(topConc) +'\n')
f.write('FullRate = ' + str(rateConstFull) +'\n')
f.write('SysSize = ' + str(sysSize) +'\n')
f.write('TimeInterval = ' + str(timeInterval) +'\n')
f.write('AnalInterval = ' +str(analInterval) + '\n')
f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
f.write('NumStepsSnapshot = '+str(numStepsSnapshot)+'\n')
f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 1d, so everything's a bit trivial
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = 1
yRep = 1
zRep = sysSize
numPoints = xRep*(zRep+4)*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep+4),
periodic=(False, False, True))
# Generate the initial types. There's a double-layered section of "To" at the top and "Bo" at the bottom
avConc = 0.5*(botConc+topConc)
types = ["V"]*numPoints
types[0] = "BoV"
types[1] = "BoV"
types[-2] = "ToV"
types[-1] = "ToV"
for i in range(int(zRep*avConc)):
# find a site which is not yet occupied by a "O" type.
pos = int(numpy.random.rand()*zRep+2.0)
while (types[pos] != "V"):
pos = int(numpy.random.rand()*zRep+2.0)
# Set the type.
types[pos] = "O"
"""
for i in range(2, numPoints-2):
if i < numPoints/2:
types[i] = "O"
else:
types[i] = "V"
"""
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
types=types,
possible_types=["O","V","ToV","BoV", "ToO", "BoO"])
# Rates.
rateConstEmpty = 1.0
topSpawn = math.sqrt(topConc/(1.0-topConc))
botSpawn = math.sqrt(botConc/(1.0-botConc))
topDespawn = 1.0/topSpawn
botDespawn = 1.0/botSpawn
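# Why sqrt(c/(1-c)): with despawn = 1/spawn, a reservoir site relaxes to an
# occupancy p satisfying spawn*(1-p) = despawn*p, i.e. p/(1-p) = spawn**2 =
# c/(1-c), so p = c as intended. For example (illustrative numbers only),
# botConc = 0.25 gives botSpawn = sqrt(1/3) ~ 0.577 and botDespawn ~ 1.732.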
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Bulk processes
# Up, empty.
#0
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Down, empty.
#1
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Now for Oxygen annihilation at the top boundary
#2
elements_before = ["O", "ToV"]
elements_after = ["V", "ToV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise the rate constant
# Oxygen creation at the top boundary
#3
elements_before = ["ToO", "V"]
elements_after = ["ToO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Now for Oxygen annihilation at the bottom boundary
#4
elements_before = ["O", "BoV"]
elements_after = ["V", "BoV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Obviously the rate constant will be customised
# Oxygen creation at the bottom boundary
#5
elements_before = ["BoO", "V"]
elements_after = ["BoO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the bottom boundary
#6
elements_before = ["BoV"]
elements_after = ["BoO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#7
elements_before = ["BoO"]
elements_after = ["BoV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the top boundary
#8
elements_before = ["ToV"]
elements_after = ["ToO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#9
elements_before = ["ToO"]
elements_after = ["ToV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class lolModelRates(KMCRateCalculatorPlugin):
# Class for defining the custom rates function for the KMCLib paper.
def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
if process_number == 0:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 1:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 2:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 4:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 3:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 5:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 6:
return botSpawn
if process_number == 7:
return botDespawn
if process_number == 8:
return topSpawn
if process_number == 9:
return topDespawn
def cutoff(self):
# Overloaded base class API function
return 1.0
interactions.setRateCalculator(rate_calculator=lolModelRates)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
compositionTracker = Composition(time_interval=timeInterval)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib+1)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq+1)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal+1)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=("/dev/null"))
with open(resultsPlace+"inBot.dat", 'w') as f:
pass
with open(resultsPlace+"outBot.dat", 'w') as f:
pass
with open(resultsPlace+"inTop.dat", 'w') as f:
pass
with open(resultsPlace+"outTop.dat", 'w') as f:
pass
if not os.path.exists(resultsPlace+"numHists"):
os.makedirs(resultsPlace+"numHists")
if not os.path.exists(resultsPlace+"blockStats"):
os.makedirs(resultsPlace+"blockStats")
ovNumHist = []
for index in range(0, sysSize):
ovNumHist.append(0.0)
ovBlockHist = []
for index in range(0, sysSize):
ovBlockHist.append(0.0)
for passNum in range(0, numPasses):
processStatsOxInBot = RateCalc(processes=[5])
processStatsOxOutBot = RateCalc(processes=[4])
processStatsOxInTop = RateCalc(processes=[3])
processStatsOxOutTop = RateCalc(processes=[2])
numHist = DensHist(spec=["O"], inProc=[5, 3], outProc=[4, 2])
blockStat = BlockStats(blockComp = ["O"])
model.run(control_parameters_req, trajectory_filename=("/dev/null"))
model.run(control_parameters_anal, trajectory_filename=("/dev/null"), analysis=[processStatsOxInBot, processStatsOxOutBot, processStatsOxInTop, processStatsOxOutTop, numHist, blockStat])
with open(resultsPlace+"inBot.dat", 'a') as f:
processStatsOxInBot.printResults(f)
with open(resultsPlace+"outBot.dat", 'a') as f:
processStatsOxOutBot.printResults(f)
with open(resultsPlace+"inTop.dat", 'a') as f:
processStatsOxInTop.printResults(f)
with open(resultsPlace+"outTop.dat", 'a') as f:
processStatsOxOutTop.printResults(f)
with open(resultsPlace+"numHists/numHist"+str(passNum)+".dat", 'w') as f:
pass
with open(resultsPlace+"numHists/numHist"+str(passNum)+".dat", 'a') as f:
numHist.printResults(f)
with open(resultsPlace+"numHists/numHist"+str(passNum)+".dat", 'r') as f:
lines = f.readlines()
for index in range(0, sysSize):
words = lines[index].split()
ovNumHist[index] += float(words[1])
os.remove(resultsPlace+"numHists/numHist"+str(passNum)+".dat")
with open(resultsPlace+"blockStats/blockStat"+str(passNum)+".dat", 'w') as f:
pass
with open(resultsPlace+"blockStats/blockStat"+str(passNum)+".dat", 'a') as f:
blockStat.printResults(f)
with open(resultsPlace+"blockStats/blockStat"+str(passNum)+".dat", 'r') as f:
lines = f.readlines()
for index in range(0, sysSize):
words = lines[index].split()
ovBlockHist[index] += float(words[1])
os.remove(resultsPlace+"blockStats/blockStat"+str(passNum)+".dat")
with open(resultsPlace+"ovNumHist.dat", 'w') as f:
for index in range(0, sysSize):
f.write(str(index)+" "+str(ovNumHist[index])+"\n")
with open(resultsPlace+"ovBlockHist.dat", 'w') as f:
for index in range(0, sysSize):
f.write(str(index)+" "+str(ovBlockHist[index])+"\n")
shutil.rmtree(resultsPlace+"blockStats", ignore_errors=True)
shutil.rmtree(resultsPlace+"numHists", ignore_errors=True)
print("Process would appear to have succesfully terminated! How very suspicious...")
| mit | -5,669,884,973,100,589,000 | 35.14 | 190 | 0.606253 | false |
dparks1134/STAMP | stamp/GUI/statsTableDlg.py | 1 | 3326 | #=======================================================================
# Author: Donovan Parks
#
# Dialog box used to set program preferences.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
from PyQt4 import QtGui, QtCore
from statsTableDlgUI import Ui_StatsTableDlg
from stamp.metagenomics.GenericTable import GenericTable
class StatsTableDlg(QtGui.QDockWidget):
def __init__(self, preferences, parent=None, info=None):
QtGui.QDockWidget.__init__(self, parent)
# initialize GUI
self.ui = Ui_StatsTableDlg()
self.ui.setupUi(self)
self.preferences = preferences
self.table = ''
# signals
self.connect(self.ui.btnSave, QtCore.SIGNAL("clicked()"), self.saveTable)
self.connect(self.ui.chkShowActiveFeatures, QtCore.SIGNAL("clicked()"), self.__updateTable)
def updateTable(self, statsTest):
self.statsTest = statsTest
self.__updateTable()
def __updateTable(self):
if self.statsTest.results.profile != None:
tableData, tableHeadings = self.statsTest.results.tableData(self.ui.chkShowActiveFeatures.isChecked())
self.table = GenericTable(tableData, tableHeadings, self)
self.table.sort(0,QtCore.Qt.AscendingOrder) # start with features in alphabetical order
self.ui.tableStatisticalSummary.setModel(self.table)
self.ui.tableStatisticalSummary.verticalHeader().setVisible(False)
            # resize columns to fit contents by sampling the first 100 rows
#self.ui.tableStatisticalSummary.resizeColumnsToContents()
for colIndex in xrange(0, self.table.columnCount(None)):
fm = self.ui.tableStatisticalSummary.fontMetrics()
maxWidth = fm.width(tableHeadings[colIndex]) + 10
for i in xrange(0, 100): # sample first 100 rows to estimate column width, this is strictly for efficiency
width = fm.width(self.ui.tableStatisticalSummary.model().data(self.ui.tableStatisticalSummary.model().index(i,colIndex), QtCore.Qt.DisplayRole).toString()) + 10
if width > maxWidth:
maxWidth = width
self.ui.tableStatisticalSummary.setColumnWidth(colIndex, maxWidth)
def saveTable(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save table...', self.preferences['Last directory'],
'Tab-separated values (*.tsv);;' +
'Text file (*.txt);;' +
'All files (*.*)')
if filename != '':
self.preferences['Last directory'] = filename[0:filename.lastIndexOf('/')]
try:
if self.table != '':
self.table.save(filename)
except IOError:
QtGui.QMessageBox.information(self, 'Failed to save table', 'Write permission for file denied.', QtGui.QMessageBox.Ok)
if __name__ == "__main__":
pass | gpl-3.0 | 8,051,999,409,962,092,000 | 38.141176 | 165 | 0.695129 | false |
apache/airflow | airflow/jobs/local_task_job.py | 2 | 11763 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import signal
from typing import Optional
import psutil
from sqlalchemy.exc import OperationalError
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.jobs.base_job import BaseJob
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.sentry import Sentry
from airflow.stats import Stats
from airflow.task.task_runner import get_task_runner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import with_row_locks
from airflow.utils.state import State
class LocalTaskJob(BaseJob):
"""LocalTaskJob runs a single task instance."""
__mapper_args__ = {'polymorphic_identity': 'LocalTaskJob'}
def __init__(
self,
task_instance: TaskInstance,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
pickle_id: Optional[str] = None,
pool: Optional[str] = None,
*args,
**kwargs,
):
self.task_instance = task_instance
self.dag_id = task_instance.dag_id
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_runner = None
        # terminating state is used so that a job doesn't try to
        # terminate multiple times
self.terminating = False
super().__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.on_kill()
self.task_instance.refresh_from_db()
if self.task_instance.state not in State.finished:
self.task_instance.set_state(State.FAILED)
self.task_instance._run_finished_callback(error="task received sigterm")
raise AirflowException("LocalTaskJob received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance.check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool,
):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
heartbeat_time_limit = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
# task callback invocation happens either here or in
# self.heartbeat() instead of taskinstance._run_raw_task to
# avoid race conditions
#
# When self.terminating is set to True by heartbeat_callback, this
# loop should not be restarted. Otherwise self.handle_task_exit
# will be invoked and we will end up with duplicated callbacks
while not self.terminating:
# Monitor the task to see if it's done. Wait in a syscall
# (`os.wait`) for as long as possible so we notice the
# subprocess finishing as quick as we can
max_wait_time = max(
0, # Make sure this value is never negative,
min(
(
heartbeat_time_limit
- (timezone.utcnow() - self.latest_heartbeat).total_seconds() * 0.75
),
self.heartrate,
),
)
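                # Illustrative numbers (assuming the default job_heartbeat_sec of 5
                # and scheduler_zombie_task_threshold of 300): a heartbeat 100s old
                # gives min(300 - 75, 5) = 5s, so we normally sleep one heartrate per
                # iteration; the max(0, ...) only guards against a negative timeout
                # when the last heartbeat is very stale.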
return_code = self.task_runner.return_code(timeout=max_wait_time)
if return_code is not None:
self.handle_task_exit(return_code)
return
self.heartbeat()
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
# This can only really happen if the worker can't read the DB for a long time
time_since_last_heartbeat = (timezone.utcnow() - self.latest_heartbeat).total_seconds()
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limit exceeded!")
raise AirflowException(
"Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s).".format(time_since_last_heartbeat, heartbeat_time_limit)
)
finally:
self.on_kill()
def handle_task_exit(self, return_code: int) -> None:
"""Handle case where self.task_runner exits by itself"""
self.log.info("Task exited with return code %s", return_code)
self.task_instance.refresh_from_db()
# task exited by itself, so we need to check for error file
# in case it failed due to runtime exception/error
error = None
if self.task_instance.state == State.RUNNING:
# This is for a case where the task received a sigkill
# while running
self.task_instance.set_state(State.FAILED)
if self.task_instance.state != State.SUCCESS:
error = self.task_runner.deserialize_run_error()
self.task_instance._run_finished_callback(error=error)
if not self.task_instance.test_mode:
if conf.getboolean('scheduler', 'schedule_after_task_execution', fallback=True):
self._run_mini_scheduler_on_child_tasks()
self._update_dagrun_state_for_paused_dag()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
if not same_hostname:
self.log.warning(
"The recorded hostname %s " "does not match this instance's hostname " "%s",
ti.hostname,
fqdn,
)
raise AirflowException("Hostname of job runner does not match")
current_pid = self.task_runner.process.pid
same_process = ti.pid == current_pid
if ti.run_as_user:
same_process = psutil.Process(ti.pid).ppid() == current_pid
if ti.pid is not None and not same_process:
self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif self.task_runner.return_code() is None and hasattr(self.task_runner, 'process'):
self.log.warning(
"State of this instance has been externally set to %s. " "Terminating instance.", ti.state
)
self.task_runner.terminate()
if ti.state == State.SUCCESS:
error = None
else:
                    # if ti.state was not set by taskinstance.handle_failure, the
                    # error file will not be populated and the state must have been
                    # updated by an external source such as the web UI
error = self.task_runner.deserialize_run_error() or "task marked as failed externally"
ti._run_finished_callback(error=error)
self.terminating = True
@provide_session
@Sentry.enrich_errors
def _run_mini_scheduler_on_child_tasks(self, session=None) -> None:
try:
# Re-select the row with a lock
dag_run = with_row_locks(
session.query(DagRun).filter_by(
dag_id=self.dag_id,
execution_date=self.task_instance.execution_date,
),
session=session,
).one()
# Get a partial dag with just the specific tasks we want to
# examine. In order for dep checks to work correctly, we
# include ourself (so TriggerRuleDep can check the state of the
# task we just executed)
task = self.task_instance.task
partial_dag = task.dag.partial_subset(
task.downstream_task_ids,
include_downstream=False,
include_upstream=False,
include_direct_upstream=True,
)
dag_run.dag = partial_dag
info = dag_run.task_instance_scheduling_decisions(session)
skippable_task_ids = {
task_id for task_id in partial_dag.task_ids if task_id not in task.downstream_task_ids
}
schedulable_tis = [ti for ti in info.schedulable_tis if ti.task_id not in skippable_task_ids]
for schedulable_ti in schedulable_tis:
if not hasattr(schedulable_ti, "task"):
schedulable_ti.task = task.dag.get_task(schedulable_ti.task_id)
num = dag_run.schedule_tis(schedulable_tis)
self.log.info("%d downstream tasks scheduled from follow-on schedule check", num)
session.commit()
except OperationalError as e:
# Any kind of DB error here is _non fatal_ as this block is just an optimisation.
self.log.info(
"Skipping mini scheduling run due to exception: %s",
e.statement,
exc_info=True,
)
session.rollback()
@provide_session
def _update_dagrun_state_for_paused_dag(self, session=None):
"""
Checks for paused dags with DagRuns in the running state and
update the DagRun state if possible
"""
dag = self.task_instance.task.dag
if dag.get_is_paused():
dag_run = self.task_instance.get_dagrun(session=session)
if dag_run:
dag_run.dag = dag
dag_run.update_state(session=session, execute_callbacks=True)
| apache-2.0 | 5,801,869,318,638,379,000 | 41.31295 | 109 | 0.599932 | false |
alex/pinax | pinax/core/management/commands/clone_project.py | 2 | 7917 | import glob
import os
import optparse
import sys
import shutil
import re
import random
import pinax
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
EXCLUDED_PATTERNS = ('.svn','.pyc',)
DEFAULT_PINAX_ROOT = None # fallback to the normal PINAX_ROOT in settings.py.
PINAX_ROOT_RE = re.compile(r'PINAX_ROOT\s*=.*$', re.M)
SECRET_KEY_RE = re.compile(r'SECRET_KEY\s*=.*$', re.M)
ROOT_URLCONF_RE = re.compile(r'ROOT_URLCONF\s*=.*$', re.M)
VIRTUALENV_BASE_RE = re.compile(r'VIRTUALENV_BASE\s*=.*$', re.M)
CHARS = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
def get_pinax_root(default_pinax_root):
if default_pinax_root is None:
return os.path.abspath(os.path.dirname(pinax.__file__))
return default_pinax_root
def get_projects_dir(pinax_root):
return os.path.join(pinax_root, 'projects')
def get_projects(pinax_root):
projects = []
for item in glob.glob(os.path.join(get_projects_dir(pinax_root), '*')):
if os.path.isdir(item):
projects.append(item)
return projects
try:
WindowsError
except NameError:
WindowsError = None
def copytree(src, dst, symlinks=False):
"""
Backported from the Python 2.6 source tree, then modified for this script's
purposes.
"""
names = os.listdir(src)
os.makedirs(dst)
errors = []
for name in names:
ignore = False
for pattern in EXCLUDED_PATTERNS:
if pattern in os.path.join(src, name):
ignore = True
if ignore:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks)
else:
shutil.copy2(srcname, dstname)
except (IOError, os.error), why:
errors.append((srcname, dstname, str(why)))
except shutil.Error, err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error, errors
def update_settings(pinax_root, path, old_name, new_name):
settings_file = open(path, 'r')
settings = settings_file.read()
settings_file.close()
settings = settings.replace(old_name, new_name)
if pinax_root is not None:
settings = PINAX_ROOT_RE.sub("PINAX_ROOT = '%s'" % (pinax_root,),
settings)
new_secret_key = ''.join([random.choice(CHARS) for i in xrange(50)])
settings = SECRET_KEY_RE.sub("SECRET_KEY = '%s'" % (new_secret_key,),
settings)
new_root_urlconf = '%s.urls' % new_name
settings = ROOT_URLCONF_RE.sub("ROOT_URLCONF = '%s'" % new_root_urlconf,
settings)
settings_file = open(path, 'w')
settings_file.write(settings)
settings_file.close()
def update_rename_deploy_files(path, old_name, new_name):
for deploy_file in glob.glob(os.path.join(path, "pinax") + '*'):
df = open(deploy_file, 'r')
deploy_settings = df.read()
df.close()
deploy_settings = deploy_settings.replace(old_name, new_name)
df = open(deploy_file, 'w')
df.write(deploy_settings)
df.close()
# fix modpython.py
modpython_file = open(os.path.join(path, "modpython.py"), "rb")
modpython = modpython_file.read()
modpython_file.close()
virtualenv_base = os.environ.get("VIRTUAL_ENV", "")
modpython = VIRTUALENV_BASE_RE.sub('VIRTUALENV_BASE = "%s"' % virtualenv_base, modpython)
modpython_file = open(os.path.join(path, "modpython.py"), "wb")
modpython_file.write(modpython)
modpython_file.close()
def main(default_pinax_root, project_name, destination, verbose=True):
try:
# check to see if the destination copies an existing module name
__import__(destination)
except ImportError:
# The module does not exist so we let Pinax create it as a project
pass
else:
# The module exists so we raise a CommandError and exit
raise CommandError("'%s' conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % destination)
if os.path.exists(destination):
raise CommandError("Files already exist at this path: %s" % (destination,))
user_project_name = os.path.basename(destination)
pinax_root = get_pinax_root(default_pinax_root)
if project_name in map(os.path.basename, get_projects(pinax_root)):
source = os.path.join(get_projects_dir(pinax_root), project_name)
else:
if not os.path.exists(project_name):
print "Project template does not exist at this path: %s" % (
project_name,)
sys.exit(1)
source = project_name
if verbose:
print "Copying your project to its new location"
copytree(source, destination)
if verbose:
print "Updating settings.py for your new project"
update_settings(default_pinax_root, os.path.join(destination, 'settings.py'),
project_name, user_project_name)
if verbose:
print "Renaming and updating your deployment files"
update_rename_deploy_files(os.path.join(destination, 'deploy'), project_name,
user_project_name)
if verbose:
print "Finished cloning your project, now you may enjoy Pinax!"
class Command(BaseCommand):
help = "Clones a Pinax starter project to <new_project_name> (which can be a path)."
args = "<original_project> <new_project_name>"
clone_project_options = (
make_option('-l', '--list-projects', dest='list_projects',
action = 'store_true',
help = 'lists the projects that are available on this system'),
make_option('-r', '--pinax-root', dest='pinax_root',
default = DEFAULT_PINAX_ROOT,
action = 'store_true',
help = 'where Pinax lives on your system (defaults to Pinax in your virtual environment)'),
make_option('-b', '--verbose', dest='verbose',
action = 'store_false', default=True,
help = 'enables verbose output'),
)
option_list = BaseCommand.option_list + clone_project_options
def handle(self, *args, **options):
"""
Handle clone_project options and run main to perform clone_project
operations.
"""
if options.get('list_projects'):
pinax_root = get_pinax_root(options.get('pinax_root'))
print "Available Projects"
print "------------------"
sys.path.insert(0, get_projects_dir(pinax_root))
for project in map(os.path.basename, get_projects(pinax_root)):
print "%s:" % project
about = getattr(__import__(project), '__about__', '')
for line in about.strip().splitlines():
print ' %s' % line
print
sys.exit(0)
if not args:
# note: --help prints full path to pinax-admin
self.print_help("pinax-admin", "clone_project")
sys.exit(0)
try:
destination = args[1]
except IndexError:
sys.stderr.write("You must provide a destination path for the cloned project.\n\n")
# note: --help prints full path to pinax-admin
self.print_help("pinax-admin", "clone_project")
sys.exit(0)
main(options.get('pinax_root'), args[0], destination,
verbose = options.get('verbose')
)
return 0
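# Usage sketch (project and destination names are made up; assumes Pinax is
# installed in the active virtualenv):
#
#   pinax-admin clone_project --list-projects
#   pinax-admin clone_project basic_project ./mysite
#
# The second form copies the "basic_project" starter project to ./mysite,
# rewrites settings.py (PINAX_ROOT, SECRET_KEY, ROOT_URLCONF) and updates the
# deploy files for the new project name.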
| mit | -2,826,627,347,031,284,000 | 35.483871 | 164 | 0.607427 | false |
berteh/synfig-import-labels | pystache/renderer.py | 2 | 16502 | # coding: utf-8
"""
This module provides a Renderer class to render templates.
"""
import sys
from pystache import defaults
from pystache.common import TemplateNotFoundError, MissingTags, is_string
from pystache.context import ContextStack, KeyNotFoundError
from pystache.loader import Loader
from pystache.parsed import ParsedTemplate
from pystache.renderengine import context_get, RenderEngine
from pystache.specloader import SpecLoader
from pystache.template_spec import TemplateSpec
class Renderer(object):
"""
A class for rendering mustache templates.
This class supports several rendering options which are described in
the constructor's docstring. Other behavior can be customized by
subclassing this class.
For example, one can pass a string-string dictionary to the constructor
to bypass loading partials from the file system:
>>> partials = {'partial': 'Hello, {{thing}}!'}
>>> renderer = Renderer(partials=partials)
>>> # We apply print to make the test work in Python 3 after 2to3.
>>> print(renderer.render('{{>partial}}', {'thing': 'world'}))
Hello, world!
To customize string coercion (e.g. to render False values as ''), one can
subclass this class. For example:
class MyRenderer(Renderer):
def str_coerce(self, val):
if not val:
return ''
else:
return str(val)
"""
def __init__(self, file_encoding=None, string_encoding=None,
decode_errors=None, search_dirs=None, file_extension=None,
escape=None, partials=None, missing_tags=None):
"""
Construct an instance.
Arguments:
file_encoding: the name of the encoding to use by default when
reading template files. All templates are converted to unicode
prior to parsing. Defaults to the package default.
string_encoding: the name of the encoding to use when converting
to unicode any byte strings (type str in Python 2) encountered
during the rendering process. This name will be passed as the
encoding argument to the built-in function unicode().
Defaults to the package default.
decode_errors: the string to pass as the errors argument to the
built-in function unicode() when converting byte strings to
unicode. Defaults to the package default.
search_dirs: the list of directories in which to search when
loading a template by name or file name. If given a string,
the method interprets the string as a single directory.
Defaults to the package default.
file_extension: the template file extension. Pass False for no
extension (i.e. to use extensionless template files).
Defaults to the package default.
partials: an object (e.g. a dictionary) for custom partial loading
during the rendering process.
The object should have a get() method that accepts a string
and returns the corresponding template as a string, preferably
as a unicode string. If there is no template with that name,
the get() method should either return None (as dict.get() does)
or raise an exception.
If this argument is None, the rendering process will use
the normal procedure of locating and reading templates from
the file system -- using relevant instance attributes like
search_dirs, file_encoding, etc.
escape: the function used to escape variable tag values when
rendering a template. The function should accept a unicode
string (or subclass of unicode) and return an escaped string
that is again unicode (or a subclass of unicode).
This function need not handle strings of type `str` because
this class will only pass it unicode strings. The constructor
assigns this function to the constructed instance's escape()
method.
To disable escaping entirely, one can pass `lambda u: u`
as the escape function, for example. One may also wish to
consider using markupsafe's escape function: markupsafe.escape().
This argument defaults to the package default.
missing_tags: a string specifying how to handle missing tags.
If 'strict', an error is raised on a missing tag. If 'ignore',
the value of the tag is the empty string. Defaults to the
package default.
"""
if decode_errors is None:
decode_errors = defaults.DECODE_ERRORS
if escape is None:
escape = defaults.TAG_ESCAPE
if file_encoding is None:
file_encoding = defaults.FILE_ENCODING
if file_extension is None:
file_extension = defaults.TEMPLATE_EXTENSION
if missing_tags is None:
missing_tags = defaults.MISSING_TAGS
if search_dirs is None:
search_dirs = defaults.SEARCH_DIRS
if string_encoding is None:
string_encoding = defaults.STRING_ENCODING
if isinstance(search_dirs, str):
search_dirs = [search_dirs]
self._context = None
self.decode_errors = decode_errors
self.escape = escape
self.file_encoding = file_encoding
self.file_extension = file_extension
self.missing_tags = missing_tags
self.partials = partials
self.search_dirs = search_dirs
self.string_encoding = string_encoding
# This is an experimental way of giving views access to the current context.
# TODO: consider another approach of not giving access via a property,
# but instead letting the caller pass the initial context to the
# main render() method by reference. This approach would probably
# be less likely to be misused.
@property
def context(self):
"""
Return the current rendering context [experimental].
"""
return self._context
# We could not choose str() as the name because 2to3 renames the unicode()
# method of this class to str().
def str_coerce(self, val):
"""
Coerce a non-string value to a string.
This method is called whenever a non-string is encountered during the
rendering process when a string is needed (e.g. if a context value
for string interpolation is not a string). To customize string
coercion, you can override this method.
"""
return str(val)
def _to_unicode_soft(self, s):
"""
Convert a basestring to unicode, preserving any unicode subclass.
"""
# We type-check to avoid "TypeError: decoding Unicode is not supported".
# We avoid the Python ternary operator for Python 2.4 support.
if isinstance(s, str):
return s
return self.str(s)
def _to_unicode_hard(self, s):
"""
Convert a basestring to a string with type unicode (not subclass).
"""
return str(self._to_unicode_soft(s))
def _escape_to_unicode(self, s):
"""
Convert a basestring to unicode (preserving any unicode subclass), and escape it.
Returns a unicode string (not subclass).
"""
return str(self.escape(self._to_unicode_soft(s)))
def str(self, b, encoding=None):
"""
Convert a byte string to unicode, using string_encoding and decode_errors.
Arguments:
b: a byte string.
encoding: the name of an encoding. Defaults to the string_encoding
attribute for this instance.
Raises:
TypeError: Because this method calls Python's built-in unicode()
function, this method raises the following exception if the
given string is already unicode:
TypeError: decoding Unicode is not supported
"""
if encoding is None:
encoding = self.string_encoding
# TODO: Wrap UnicodeDecodeErrors with a message about setting
# the string_encoding and decode_errors attributes.
return str(b, encoding, self.decode_errors)
def _make_loader(self):
"""
Create a Loader instance using current attributes.
"""
return Loader(file_encoding=self.file_encoding, extension=self.file_extension,
to_unicode=self.str, search_dirs=self.search_dirs)
def _make_load_template(self):
"""
Return a function that loads a template by name.
"""
loader = self._make_loader()
def load_template(template_name):
return loader.load_name(template_name)
return load_template
def _make_load_partial(self):
"""
Return a function that loads a partial by name.
"""
if self.partials is None:
return self._make_load_template()
# Otherwise, create a function from the custom partial loader.
partials = self.partials
def load_partial(name):
# TODO: consider using EAFP here instead.
# http://docs.python.org/glossary.html#term-eafp
# This would mean requiring that the custom partial loader
# raise a KeyError on name not found.
template = partials.get(name)
if template is None:
raise TemplateNotFoundError("Name %s not found in partials: %s" %
(repr(name), type(partials)))
# RenderEngine requires that the return value be unicode.
return self._to_unicode_hard(template)
return load_partial
def _is_missing_tags_strict(self):
"""
Return whether missing_tags is set to strict.
"""
val = self.missing_tags
if val == MissingTags.strict:
return True
elif val == MissingTags.ignore:
return False
raise Exception("Unsupported 'missing_tags' value: %s" % repr(val))
def _make_resolve_partial(self):
"""
Return the resolve_partial function to pass to RenderEngine.__init__().
"""
load_partial = self._make_load_partial()
if self._is_missing_tags_strict():
return load_partial
# Otherwise, ignore missing tags.
def resolve_partial(name):
try:
return load_partial(name)
except TemplateNotFoundError:
return ''
return resolve_partial
def _make_resolve_context(self):
"""
Return the resolve_context function to pass to RenderEngine.__init__().
"""
if self._is_missing_tags_strict():
return context_get
# Otherwise, ignore missing tags.
def resolve_context(stack, name):
try:
return context_get(stack, name)
except KeyNotFoundError:
return ''
return resolve_context
def _make_render_engine(self):
"""
Return a RenderEngine instance for rendering.
"""
resolve_context = self._make_resolve_context()
resolve_partial = self._make_resolve_partial()
engine = RenderEngine(literal=self._to_unicode_hard,
escape=self._escape_to_unicode,
resolve_context=resolve_context,
resolve_partial=resolve_partial,
to_str=self.str_coerce)
return engine
# TODO: add unit tests for this method.
def load_template(self, template_name):
"""
Load a template by name from the file system.
"""
load_template = self._make_load_template()
return load_template(template_name)
def _render_object(self, obj, *context, **kwargs):
"""
Render the template associated with the given object.
"""
loader = self._make_loader()
# TODO: consider an approach that does not require using an if
# block here. For example, perhaps this class's loader can be
# a SpecLoader in all cases, and the SpecLoader instance can
# check the object's type. Or perhaps Loader and SpecLoader
# can be refactored to implement the same interface.
if isinstance(obj, TemplateSpec):
loader = SpecLoader(loader)
template = loader.load(obj)
else:
template = loader.load_object(obj)
context = [obj] + list(context)
return self._render_string(template, *context, **kwargs)
def render_name(self, template_name, *context, **kwargs):
"""
Render the template with the given name using the given context.
See the render() docstring for more information.
"""
loader = self._make_loader()
template = loader.load_name(template_name)
return self._render_string(template, *context, **kwargs)
def render_path(self, template_path, *context, **kwargs):
"""
Render the template at the given path using the given context.
Read the render() docstring for more information.
"""
loader = self._make_loader()
template = loader.read(template_path)
return self._render_string(template, *context, **kwargs)
def _render_string(self, template, *context, **kwargs):
"""
Render the given template string using the given context.
"""
# RenderEngine.render() requires that the template string be unicode.
template = self._to_unicode_hard(template)
render_func = lambda engine, stack: engine.render(template, stack)
return self._render_final(render_func, *context, **kwargs)
# All calls to render() should end here because it prepares the
# context stack correctly.
def _render_final(self, render_func, *context, **kwargs):
"""
Arguments:
render_func: a function that accepts a RenderEngine and ContextStack
instance and returns a template rendering as a unicode string.
"""
stack = ContextStack.create(*context, **kwargs)
self._context = stack
engine = self._make_render_engine()
return render_func(engine, stack)
def render(self, template, *context, **kwargs):
"""
Render the given template string, view template, or parsed template.
Returns a unicode string.
Prior to rendering, this method will convert a template that is a
byte string (type str in Python 2) to unicode using the string_encoding
and decode_errors attributes. See the constructor docstring for
more information.
Arguments:
template: a template string that is unicode or a byte string,
a ParsedTemplate instance, or another object instance. In the
final case, the function first looks for the template associated
to the object by calling this class's get_associated_template()
method. The rendering process also uses the passed object as
the first element of the context stack when rendering.
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments are skipped. Items in the *context list are added to
the context stack in order so that later items in the argument
list take precedence over earlier items.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list.
"""
if is_string(template):
return self._render_string(template, *context, **kwargs)
if isinstance(template, ParsedTemplate):
render_func = lambda engine, stack: template.render(engine, stack)
return self._render_final(render_func, *context, **kwargs)
# Otherwise, we assume the template is an object.
return self._render_object(template, *context, **kwargs)
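# Usage sketch (not part of the module; template strings and context values
# are invented for illustration):
#
#   renderer = Renderer(partials={'user': '{{name}} <{{email}}>'},
#                       missing_tags='strict')
#   renderer.render('Hello {{>user}}', {'name': 'Ada', 'email': 'ada@example.org'})
#
# With missing_tags='strict', rendering a template that references an unknown
# tag raises an error instead of silently producing the empty string.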
| gpl-2.0 | -5,483,383,119,718,152,000 | 34.796095 | 89 | 0.620713 | false |
realgo/luigi | test/cmdline_test.py | 3 | 10442 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import mock
import os
import subprocess
from helpers import unittest
from luigi import six
import luigi
from luigi.mock import MockTarget
class SomeTask(luigi.Task):
n = luigi.IntParameter()
def output(self):
return MockTarget('/tmp/test_%d' % self.n)
def run(self):
f = self.output().open('w')
f.write('done')
f.close()
class AmbiguousClass(luigi.Task):
pass
class AmbiguousClass(luigi.Task): # NOQA
pass
class TaskWithSameName(luigi.Task):
def run(self):
self.x = 42
class TaskWithSameName(luigi.Task): # NOQA
# there should be no ambiguity
def run(self):
self.x = 43
class WriteToFile(luigi.Task):
filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.filename)
def run(self):
f = self.output().open('w')
print('foo', file=f)
f.close()
class FooBaseClass(luigi.Task):
x = luigi.Parameter(default='foo_base_default')
class FooSubClass(FooBaseClass):
pass
class CmdlineTest(unittest.TestCase):
def setUp(self):
MockTarget.fs.clear()
@mock.patch("logging.getLogger")
def test_cmdline_main_task_cls(self, logger):
luigi.run(['--local-scheduler', '--no-lock', '--n', '100'], main_task_cls=SomeTask)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_100': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_local_scheduler(self, logger):
luigi.run(['SomeTask', '--no-lock', '--n', '101'], local_scheduler=True)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_101': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_other_task(self, logger):
luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000'])
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_ambiguous_class(self, logger):
self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass'])
@mock.patch("logging.getLogger")
@mock.patch("logging.StreamHandler")
def test_setup_interface_logging(self, handler, logger):
handler.return_value = mock.Mock(name="stream_handler")
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
luigi.interface.setup_interface_logging()
self.assertEqual([mock.call(handler.return_value)], logger.return_value.addHandler.call_args_list)
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
if six.PY2:
error = ConfigParser.NoSectionError
else:
error = KeyError
self.assertRaises(error, luigi.interface.setup_interface_logging, '/blah')
@mock.patch("warnings.warn")
@mock.patch("luigi.interface.setup_interface_logging")
def test_cmdline_logger(self, setup_mock, warn):
with mock.patch("luigi.interface.core") as env_params:
env_params.return_value.logging_conf_file = None
luigi.run(['SomeTask', '--n', '7', '--local-scheduler', '--no-lock'])
self.assertEqual([mock.call(None)], setup_mock.call_args_list)
with mock.patch("luigi.configuration.get_config") as getconf:
getconf.return_value.get.side_effect = ConfigParser.NoOptionError(section='foo', option='bar')
getconf.return_value.getint.return_value = 0
luigi.interface.setup_interface_logging.call_args_list = []
luigi.run(['SomeTask', '--n', '42', '--local-scheduler', '--no-lock'])
self.assertEqual([], setup_mock.call_args_list)
@mock.patch('argparse.ArgumentParser.print_usage')
def test_non_existent_class(self, print_usage):
self.assertRaises(luigi.task_register.TaskClassNotFoundException,
luigi.run, ['--local-scheduler', '--no-lock', 'XYZ'])
@mock.patch('argparse.ArgumentParser.print_usage')
def test_no_task(self, print_usage):
self.assertRaises(SystemExit, luigi.run, ['--local-scheduler', '--no-lock'])
class InvokeOverCmdlineTest(unittest.TestCase):
def _run_cmdline(self, args):
env = os.environ.copy()
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + ':.:test'
print('Running: ' + ' '.join(args)) # To simplify rerunning failing tests
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = p.communicate() # Unfortunately subprocess.check_output is 2.7+
return p.returncode, stdout, stderr
def test_bin_luigi(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['./bin/luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', 'test/cmdline_test.py', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_python_module(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', '-m', 'luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_direct_python_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_python_module_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', '--help-all'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_bin_luigi_help_no_module(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertTrue(stdout.find(b'usage:') != -1)
def test_bin_luigi_help_not_spammy(self):
"""
Test that `luigi --help` fits on one screen
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertLessEqual(len(stdout.splitlines()), 15)
def test_bin_luigi_all_help_spammy(self):
"""
Test that `luigi --help-all` doesn't fit on a screen
Naturally, I don't mind this test breaking, but it convinces me that
the "not spammy" test is actually testing what it claims too.
"""
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help-all'])
self.assertGreater(len(stdout.splitlines()), 15)
    def test_error_message_on_misspelled_task(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', 'RangeDaili'])
self.assertTrue(stderr.find(b'RangeDaily') != -1)
def test_bin_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_python_module_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_bin_luigi_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_python_module_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', '-m', 'luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
def test_bin_luigi_options_before_task(self):
args = ['./bin/luigi', '--module', 'cmdline_test', '--no-lock', '--local-scheduler', '--FooBaseClass-x', 'hello', 'FooBaseClass']
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
def test_bin_fail_on_unrecognized_args(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--no-lock', '--local-scheduler', 'Task', '--unknown-param', 'hiiii'])
self.assertNotEqual(0, returncode)
def test_deps_py_script(self):
"""
Test the deps.py script.
"""
args = 'python luigi/tools/deps.py --module examples.top_artists ArtistToplistToDatabase --date-interval 2015-W10'.split()
returncode, stdout, stderr = self._run_cmdline(args)
self.assertEqual(0, returncode)
self.assertTrue(stdout.find(b'[FileSystem] data/streams_2015_03_04_faked.tsv') != -1)
self.assertTrue(stdout.find(b'[DB] localhost') != -1)
if __name__ == '__main__':
# Needed for one of the tests
luigi.run()
| apache-2.0 | -7,938,638,837,689,764,000 | 38.403774 | 141 | 0.633499 | false |
shastikk/youtube-dl | youtube_dl/extractor/arte.py | 29 | 9937 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
find_xpath_attr,
unified_strdate,
get_element_by_attribute,
int_or_none,
qualities,
)
# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.
class ArteTvIE(InfoExtractor):
_VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
IE_NAME = 'arte.tv'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
lang = mobj.group('lang')
video_id = mobj.group('id')
ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
ref_xml_doc = self._download_xml(
ref_xml_url, video_id, note='Downloading metadata')
config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
config_xml_url = config_node.attrib['ref']
config = self._download_xml(
config_xml_url, video_id, note='Downloading configuration')
formats = [{
'format_id': q.attrib['quality'],
# The playpath starts at 'mp4:', if we don't manually
# split the url, rtmpdump will incorrectly parse them
'url': q.text.split('mp4:', 1)[0],
'play_path': 'mp4:' + q.text.split('mp4:', 1)[1],
'ext': 'flv',
'quality': 2 if q.attrib['quality'] == 'hd' else 1,
} for q in config.findall('./urls/url')]
self._sort_formats(formats)
title = config.find('.//name').text
thumbnail = config.find('.//firstThumbnailUrl').text
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
class ArteTVPlus7IE(InfoExtractor):
IE_NAME = 'arte.tv:+7'
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
@classmethod
def _extract_url_info(cls, url):
mobj = re.match(cls._VALID_URL, url)
lang = mobj.group('lang')
# This is not a real id, it can be for example AJT for the news
# http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
video_id = mobj.group('id')
return video_id, lang
def _real_extract(self, url):
video_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, video_id)
return self._extract_from_webpage(webpage, video_id, lang)
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(
[r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
webpage, 'json vp url', default=None)
if not json_url:
iframe_url = self._html_search_regex(
r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
webpage, 'iframe url', group='url')
json_url = compat_parse_qs(
compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
return self._extract_from_json_url(json_url, video_id, lang)
def _extract_from_json_url(self, json_url, video_id, lang):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = player_info.get('VDA', '').split(' ')[0]
title = player_info['VTI'].strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle
info_dict = {
'id': player_info['VID'],
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
formats = []
for format_id, format_dict in player_info['VSR'].items():
f = dict(format_dict)
versionCode = f.get('versionCode')
langcode = {
'fr': 'F',
'de': 'A',
}.get(lang, lang)
lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
lang_pref = (
None if versionCode is None else (
10 if any(re.match(r, versionCode) for r in lang_rexs)
else -10))
source_pref = 0
if versionCode is not None:
# The original version with subtitles has lower relevance
if re.match(r'VO-ST(F|A)', versionCode):
source_pref -= 10
            # The version with subtitles for the deaf/hard-of-hearing ("sourds/malentendants") also has lower relevance
elif re.match(r'VO?(F|A)-STM\1', versionCode):
source_pref -= 9
format = {
'format_id': format_id,
'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
'language_preference': lang_pref,
'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'tbr': int_or_none(f.get('bitrate')),
'quality': qfunc(f.get('quality')),
'source_preference': source_pref,
}
if f.get('mediaType') == 'rtmp':
format['url'] = f['streamer']
format['play_path'] = 'mp4:' + f['url']
format['ext'] = 'flv'
else:
format['url'] = f['url']
formats.append(format)
self._check_formats(formats, video_id)
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
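    # Worked example of the preference logic above (hypothetical versionCode
    # values): with lang == 'fr' the langcode is 'F', so a format whose
    # versionCode is 'VF' or 'VOF' matches r'VO?F' and gets
    # language_preference 10, while a German-only 'VA' format gets -10;
    # 'VO-STF' still gets language_preference 10 via r'VO?.-STF' but loses 10
    # source_preference points because it is the subtitled original version.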
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:creative'
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
'info_dict': {
'id': '72176',
'ext': 'mp4',
'title': 'Folge 2 - Corporate Design',
'upload_date': '20131004',
},
}, {
'url': 'http://creative.arte.tv/fr/Monty-Python-Reunion',
'info_dict': {
'id': '160676',
'ext': 'mp4',
'title': 'Monty Python live (mostly)',
'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
'upload_date': '20140805',
}
}]
class ArteTVFutureIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:future'
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
_TEST = {
'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
'info_dict': {
'id': '5201',
'ext': 'mp4',
'title': 'Les champignons au secours de la planète',
'upload_date': '20131101',
},
}
def _real_extract(self, url):
anchor_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, anchor_id)
row = self._search_regex(
r'(?s)id="%s"[^>]*>.+?(<div[^>]*arte_vp_url[^>]*>)' % anchor_id,
webpage, 'row')
return self._extract_from_webpage(row, anchor_id, lang)
class ArteTVDDCIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:ddc'
_VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
def _real_extract(self, url):
video_id, lang = self._extract_url_info(url)
if lang == 'folge':
lang = 'de'
elif lang == 'emission':
lang = 'fr'
webpage = self._download_webpage(url, video_id)
scriptElement = get_element_by_attribute('class', 'visu_video_block', webpage)
script_url = self._html_search_regex(r'src="(.*?)"', scriptElement, 'script url')
javascriptPlayerGenerator = self._download_webpage(script_url, video_id, 'Download javascript player generator')
json_url = self._search_regex(r"json_url=(.*)&rendering_place.*", javascriptPlayerGenerator, 'json url')
return self._extract_from_json_url(json_url, video_id, lang)
class ArteTVConcertIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:concert'
_VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
_TEST = {
'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
'md5': '9ea035b7bd69696b67aa2ccaaa218161',
'info_dict': {
'id': '186',
'ext': 'mp4',
'title': 'The Notwist im Pariser Konzertclub "Divan du Monde"',
'upload_date': '20140128',
'description': 'md5:486eb08f991552ade77439fe6d82c305',
},
}
class ArteTVEmbedIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:embed'
_VALID_URL = r'''(?x)
http://www\.arte\.tv
/playerv2/embed\.php\?json_url=
(?P<json_url>
http://arte\.tv/papi/tvguide/videos/stream/player/
(?P<lang>[^/]+)/(?P<id>[^/]+)[^&]*
)
'''
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
lang = mobj.group('lang')
json_url = mobj.group('json_url')
return self._extract_from_json_url(json_url, video_id, lang)
| unlicense | 4,908,092,684,769,792,000 | 36.609848 | 140 | 0.53943 | false |
grungi-ankhfire/gbromhack | scripts/jw_signs.py | 1 | 3330 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""Usage: jw_signs.py extract <romfile> <tablefile> [-o <file>] [-s <address> -e <address>]
Handle the extraction of sign data from the game Jungle Wars (GB).
Options:
-o <file> --output <file> Name of the output file to write [default: signs_output.yaml]
-s <address> --start <address> Address of the signs pointer table start
-e <address> --end <address> Address of the signs pointer table end
Arguments
<romfile> The file to extract from
<tablefile> The translation table file to use for interpreting the text.
"""
import pyaml
from docopt import docopt
from hexint import HexInt, hexint_representer
from translation_table import TranslationTable
from jw_memorymap import SIGNS_DATA_POINTERS_START, SIGNS_DATA_POINTERS_END
pyaml.add_representer(HexInt, hexint_representer)
class PointerTableExtractor:
def __init__(self, rom, start, end):
self.rom = rom
self.start = start
self.end = end
self.pointers = []
self.pointers_locations = {}
def read_pointers(self):
offset = self.start
self.pointers = []
bank = offset // 0x4000
self.rom.seek(offset)
while offset < self.end:
raw_pointer = int.from_bytes(self.rom.read(2), "little")
pointer = raw_pointer + 0x4000 * (bank-1)
self.pointers.append(pointer)
self.pointers_locations[pointer] = offset
offset += 2
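# Worked example of the banked-pointer arithmetic above (made-up numbers): a
# table entry located in ROM bank 2 that stores the little-endian value 0x5123
# points into the switchable 0x4000-0x7FFF window, so the corresponding file
# offset is 0x5123 + 0x4000 * (2 - 1) = 0x9123.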
def extract_signs(rom, offset_start, offset_end, table):
pointer_table = PointerTableExtractor(rom, offset_start, offset_end)
pointer_table.read_pointers()
result = {}
sign_id = 0
for pointer in pointer_table.pointers:
rom.seek(pointer)
sign = {}
# This postulates that all signs have 3 lines, as there is no termination character
for l in range(3):
line_length = int.from_bytes(rom.read(1), 'little')
line_text = bytearray()
for _ in range(line_length):
line_text += rom.read(1)
line_text = table.convert_bytearray(line_text)
sign[f'line{l}_original_length'] = line_length
sign[f'line{l}_original_text'] = line_text
sign[f'line{l}_translated_text'] = f'TODO{sign_id:02}{l}'
sign['pointer_location'] = HexInt(pointer_table.pointers_locations[pointer], digits=5)
result[HexInt(sign_id, digits=2)] = sign
sign_id += 1
return result
if __name__ == '__main__':
arguments = docopt(__doc__, version='1.0')
print(arguments)
if arguments["extract"]:
pointers_start = SIGNS_DATA_POINTERS_START
if arguments['--start']:
pointers_start = int(arguments['--start'], 16)
pointers_end = SIGNS_DATA_POINTERS_END
if arguments['--end']:
pointers_end = int(arguments['--end'], 16)
table = TranslationTable(arguments['<tablefile>'])
rom = open(arguments["<romfile>"], 'rb+')
res = {"signs" : extract_signs(rom, pointers_start, pointers_end, table)}
rom.close()
f = open(arguments['--output'], 'w', encoding='utf-8')
f.write(pyaml.dump(res, indent=2, vspacing=[2, 1], width=float("inf"), string_val_style='"'))
f.close()
| gpl-3.0 | -1,061,818,902,053,977,200 | 29.833333 | 101 | 0.608108 | false |
XiangyiKong/flask-snippets | decorators/http_access_control.py | 2 | 2108 | # -*- coding: utf-8 -*-
"""
    decorators.http_access_control
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Decorator for the HTTP Access Control
http://flask.pocoo.org/snippets/56/
"""
from datetime import timedelta
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from functools import update_wrapper
from flask import make_response, request, current_app, jsonify
from app import app
def crossdomain(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
@app.route('/my_service')
@crossdomain(origin='*')
def my_service():
return jsonify(foo='cross domain ftw')
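# Example of the effect (hypothetical request; the exact method list and its
# order come from Flask's default OPTIONS response): an OPTIONS preflight to
# /my_service is answered with headers along the lines of
#   Access-Control-Allow-Origin: *
#   Access-Control-Allow-Methods: GET, HEAD, OPTIONS
#   Access-Control-Max-Age: 21600
# because automatic_options is True and no explicit methods were passed to the
# decorator.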
if __name__ == "__main__":
app.run()
| bsd-3-clause | -2,955,192,279,439,878,000 | 29.550725 | 79 | 0.606736 | false |
zhakui/oryol | fips-generators/MessageProtocol.py | 6 | 15000 | '''
Code generator for message protocol xml files.
'''
import os
import sys
import yaml
import genutil as util
Version = 11
#-------------------------------------------------------------------------------
def writeHeaderTop(f, desc) :
'''
Write header area for the generated C++ header.
'''
f.write('#pragma once\n')
f.write('//-----------------------------------------------------------------------------\n')
f.write('/* #version:{}#\n'.format(Version))
f.write(' machine generated, do not edit!\n')
f.write('*/\n')
f.write('#include <cstring>\n')
#-------------------------------------------------------------------------------
def writeIncludes(f, desc) :
'''
Write include statements in the generated C++ header.
'''
f.write('#include "Messaging/Message.h"\n')
f.write('#include "Messaging/Serializer.h"\n')
parentHdr = desc.get('parentProtocolHeader', 'Messaging/Protocol.h')
f.write('#include "{}"\n'.format(parentHdr))
for hdr in desc.get('headers', []) :
f.write('#include "{}"\n'.format(hdr))
f.write('\n')
#-------------------------------------------------------------------------------
def writeProtocolMethods(f, desc) :
'''
Write the protocol methods
'''
f.write(' static ProtocolIdType GetProtocolId() {\n')
f.write(" return '{}';\n".format(desc['id']))
f.write(' };\n')
#-------------------------------------------------------------------------------
def writeMessageIdEnum(f, desc) :
'''
Write the enum with message ids
'''
protocol = desc['name']
parentProtocol = desc.get('parentProtocol', 'Protocol')
f.write(' class MessageId {\n')
f.write(' public:\n')
f.write(' enum {\n')
msgCount = 0
for msg in desc['messages'] :
if msgCount == 0:
f.write(' ' + msg['name'] + 'Id = ' + parentProtocol + '::MessageId::NumMessageIds, \n')
else :
f.write(' ' + msg['name'] + 'Id,\n')
msgCount += 1
f.write(' NumMessageIds\n')
f.write(' };\n')
f.write(' static const char* ToString(MessageIdType c) {\n')
f.write(' switch (c) {\n')
for msg in desc['messages'] :
msgName = msg['name'] + 'Id'
f.write(' case ' + msgName + ': return "' + msgName + '";\n')
f.write(' default: return "InvalidMessageId";\n')
f.write(' }\n')
f.write(' };\n')
f.write(' static MessageIdType FromString(const char* str) {\n')
for msg in desc['messages'] :
msgName = msg['name'] + 'Id'
f.write(' if (std::strcmp("' + msgName + '", str) == 0) return ' + msgName + ';\n')
f.write(' return InvalidMessageId;\n')
f.write(' };\n')
f.write(' };\n')
f.write(' typedef Ptr<Message> (*CreateCallback)();\n')
f.write(' static CreateCallback jumpTable[' + protocol + '::MessageId::NumMessageIds];\n')
#-------------------------------------------------------------------------------
def writeFactoryClassDecl(f, desc) :
'''
Writes the message factory for this protocol
'''
f.write(' class Factory {\n')
f.write(' public:\n')
f.write(' static Ptr<Message> Create(MessageIdType id);\n')
f.write(' };\n')
#-------------------------------------------------------------------------------
def writeFactoryClassImpl(f, desc) :
'''
Writes the factory class implementation
'''
protocol = desc['name']
parentProtocol = desc.get('parentProtocol', 'Protocol')
f.write(protocol + '::CreateCallback ' + protocol + '::jumpTable[' + protocol + '::MessageId::NumMessageIds] = { \n')
for msg in desc['messages'] :
f.write(' &' + protocol + '::' + msg['name'] + '::FactoryCreate,\n')
f.write('};\n')
f.write('Ptr<Message>\n')
f.write(protocol + '::Factory::Create(MessageIdType id) {\n')
f.write(' if (id < ' + parentProtocol + '::MessageId::NumMessageIds) {\n')
f.write(' return ' + parentProtocol + '::Factory::Create(id);\n')
f.write(' }\n')
f.write(' else {\n')
f.write(' o_assert(id < ' + protocol + '::MessageId::NumMessageIds);\n')
f.write(' return jumpTable[id - ' + parentProtocol + '::MessageId::NumMessageIds]();\n')
f.write(' };\n')
f.write('}\n')
#-------------------------------------------------------------------------------
def getAttrDefaultValue(attr) :
'''
Get the default value for a given attribute
'''
defValue = attr.get('default')
attrType = attr['type']
if attrType in ('int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64') :
if not defValue :
defValue = '0'
elif attrType in ('char', 'unsigned char', 'int', 'unsigned int', 'short', 'unsigned short', 'long', 'unsigned long') :
if not defValue :
defValue = '0'
elif attrType == 'bool' :
if not defValue :
defValue = 'false'
elif attrType in ('float32', 'float') :
if not defValue :
defValue = '0.0f'
elif attrType in ('float64', 'double') :
if not defValue :
defValue = '0.0'
return defValue;
#-------------------------------------------------------------------------------
def getRefType(attrType) :
'''
Get the reference type string for an attribute type
'''
if attrType in ('int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64') :
return attrType
elif attrType in ('bool', 'char', 'unsigned char', 'int', 'unsigned int', 'short', 'unsigned short', 'long', 'unsigned long') :
return attrType
elif attrType in ('float32', 'float') :
return attrType
elif attrType in ('float64', 'double') :
return attrType
else :
return 'const ' + attrType + '&'
#-------------------------------------------------------------------------------
def getValueType(attrType) :
'''
Get the value type string for an attribute type
'''
return attrType
#-------------------------------------------------------------------------------
def isArrayType(attrType) :
'''
Test if the type string is an array type (Array<TYPE>)
'''
return attrType.startswith('Array<') and attrType.endswith('>')
#-------------------------------------------------------------------------------
def getArrayType(attrType) :
'''
Get the element type of an array type.
'''
# strip the 'Array<' at the left, and the '>' at the right
    return attrType[len('Array<'):-1]
#-------------------------------------------------------------------------------
def writeMessageClasses(f, desc) :
'''
Write the message classes to the generated C++ header
'''
protocolId = desc['id']
for msg in desc['messages'] :
msgClassName = msg['name']
msgParentClassName = msg.get('parent', 'Message')
f.write(' class ' + msgClassName + ' : public ' + msgParentClassName + ' {\n')
f.write(' OryolClassDecl(' + msgClassName + ');\n')
f.write(' OryolTypeDecl(' + msgClassName + ',' + msgParentClassName + ');\n')
f.write(' public:\n')
# write constructor
f.write(' ' + msgClassName + '() {\n')
f.write(' this->msgId = MessageId::' + msgClassName + 'Id;\n')
for attr in msg.get('attrs', []) :
attrName = attr['name'].lower()
defValue = getAttrDefaultValue(attr)
if defValue :
f.write(' this->' + attrName + ' = ' + defValue + ';\n')
f.write(' };\n')
# special factory create method
f.write(' static Ptr<Message> FactoryCreate() {\n')
f.write(' return Create();\n')
f.write(' };\n')
# special class message id static method
f.write(' static MessageIdType ClassMessageId() {\n')
f.write(' return MessageId::' + msgClassName + 'Id;\n')
f.write(' };\n')
# virtual method which checks whether the method belongs to a protocol
f.write(' virtual bool IsMemberOf(ProtocolIdType protId) const override {\n')
f.write(" if (protId == '" + protocolId + "') return true;\n")
f.write(' else return ' + msgParentClassName + '::IsMemberOf(protId);\n')
f.write(' };\n')
# write serializer methods
if msg.get('serialize', False) :
f.write(' virtual int32 EncodedSize() const override;\n')
f.write(' virtual uint8* Encode(uint8* dstPtr, const uint8* maxValidPtr) const override;\n')
f.write(' virtual const uint8* Decode(const uint8* srcPtr, const uint8* maxValidPtr) override;\n')
# write setters/getters
for attr in msg.get('attrs', []) :
attrName = attr['name']
attrType = attr['type']
f.write(' void Set' + attrName + '(' + getRefType(attrType) + ' val) {\n')
f.write(' this->' + attrName.lower() + ' = val;\n')
f.write(' };\n')
f.write(' ' + getRefType(attrType) + ' Get' + attrName + '() const {\n')
f.write(' return this->' + attrName.lower() + ';\n')
f.write(' };\n')
# write members
f.write('private:\n')
for attr in msg.get('attrs', []) :
attrName = attr['name'].lower()
attrType = attr['type']
f.write(' ' + getValueType(attrType) + ' ' + attrName + ';\n')
f.write(' };\n')
#-------------------------------------------------------------------------------
def writeSerializeMethods(f, desc) :
'''
Writes the serializer methods of the message to the source file.
'''
for msg in desc['messages'] :
if msg.get('serialize', False) :
protocol = desc['name']
msgClassName = msg['name']
msgParentClassName = msg.get('parent', 'Message')
# EncodedSize()
f.write('int32 ' + protocol + '::' + msgClassName + '::EncodedSize() const {\n')
f.write(' int32 s = ' + msgParentClassName + '::EncodedSize();\n')
for attr in msg.get('attrs', []) :
attrName = attr['name'].lower()
attrType = attr['type']
if isArrayType(attrType) :
elmType = getArrayType(attrType)
f.write(' s += Serializer::EncodedArraySize<' + elmType + '>(this->' + attrName + ');\n')
else :
f.write(' s += Serializer::EncodedSize<' + attrType + '>(this->' + attrName + ');\n')
f.write(' return s;\n')
f.write('}\n')
# Encode
            # FIXME: I think we need to differentiate between "send" and "receive" attributes!
# ... so: EncodeSend/DecodeSend, EncodeReceive/DecodeReceive
f.write('uint8* ' + protocol + '::' + msgClassName + '::Encode(uint8* dstPtr, const uint8* maxValidPtr) const {\n')
f.write(' dstPtr = ' + msgParentClassName + '::Encode(dstPtr, maxValidPtr);\n')
for attr in msg.get('attrs', []) :
attrName = attr['name'].lower()
attrType = attr['type']
if isArrayType(attrType) :
elmType = getArrayType(attrType)
f.write(' dstPtr = Serializer::EncodeArray<' + elmType + '>(this->' + attrName + ', dstPtr, maxValidPtr);\n')
else :
f.write(' dstPtr = Serializer::Encode<' + attrType + '>(this->' + attrName + ', dstPtr, maxValidPtr);\n')
f.write(' return dstPtr;\n')
f.write('}\n')
# Decode
f.write('const uint8* ' + protocol + '::' + msgClassName + '::Decode(const uint8* srcPtr, const uint8* maxValidPtr) {\n')
f.write(' srcPtr = ' + msgParentClassName + '::Decode(srcPtr, maxValidPtr);\n')
for attr in msg.get('attrs', []) :
attrName = attr['name'].lower()
attrType = attr['type']
if isArrayType(attrType) :
elmType = getArrayType(attrType)
f.write(' srcPtr = Serializer::DecodeArray<' + elmType + '>(srcPtr, maxValidPtr, this->' + attrName + ');\n')
else :
f.write(' srcPtr = Serializer::Decode<' + attrType + '>(srcPtr, maxValidPtr, this->' + attrName + ');\n')
f.write(' return srcPtr;\n')
f.write('}\n')
#-------------------------------------------------------------------------------
def generateHeader(desc, absHeaderPath) :
'''
Generate the C++ header file
'''
f = open(absHeaderPath, 'w')
protocol = desc['name']
writeHeaderTop(f, desc)
writeIncludes(f, desc)
f.write('namespace Oryol {\n')
f.write('class ' + protocol + ' {\n')
f.write('public:\n')
writeProtocolMethods(f, desc)
writeMessageIdEnum(f, desc)
writeFactoryClassDecl(f, desc)
writeMessageClasses(f, desc)
f.write('};\n')
f.write('}\n')
f.close()
#-------------------------------------------------------------------------------
def writeSourceTop(f, desc, absSourcePath) :
'''
Write source file header area
'''
path, hdrFileAndExt = os.path.split(absSourcePath)
hdrFile, ext = os.path.splitext(hdrFileAndExt)
f.write('//-----------------------------------------------------------------------------\n')
f.write('// #version:{}# machine generated, do not edit!\n'.format(Version))
f.write('//-----------------------------------------------------------------------------\n')
f.write('#include "Pre.h"\n')
f.write('#include "' + hdrFile + '.h"\n')
f.write('\n')
#-------------------------------------------------------------------------------
def generateSource(desc, absSourcePath) :
'''
Generate the C++ source file
'''
protocol = desc['name']
f = open(absSourcePath, 'w')
writeSourceTop(f, desc, absSourcePath)
f.write('namespace Oryol {\n')
for msg in desc['messages'] :
msgClassName = msg['name']
f.write('OryolClassImpl(' + protocol + '::' + msgClassName + ');\n')
writeFactoryClassImpl(f, desc)
writeSerializeMethods(f, desc)
f.write('}\n')
f.close()
#-------------------------------------------------------------------------------
def generate(input, out_src, out_hdr) :
if util.isDirty(Version, [input], [out_src, out_hdr]) :
with open(input, 'r') as f :
desc = yaml.load(f)
generateHeader(desc, out_hdr)
generateSource(desc, out_src)
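# Sketch of the YAML input this generator consumes (field names are taken from
# the code above; the protocol and message names are invented):
#
#   name: TestProtocol
#   id: TSTP
#   messages:
#     - name: Hello
#       serialize: true
#       attrs:
#         - { name: Count, type: int32, default: '1' }
#         - { name: Values, type: 'Array<float32>' }
#
# Calling generate('TestProtocol.yml', 'TestProtocol.cc', 'TestProtocol.h')
# would then emit a C++ header/source pair containing a TestProtocol::Hello
# message class with Count/Values setters, getters and serializers.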
| mit | 9,010,717,530,135,525,000 | 40.436464 | 133 | 0.472867 | false |
divx118/kboot | fs/kboot/opt/menu/image.py | 2 | 2759 | #! /usr/bin/python
import os, pygame
from menu import *
# ---[ READ THE NOTE - THIS IS NOT PART OF THE EXAMPLE ]------------------------
# NOTE! This function is PURPOSELY not commented since it is not part of this
# example menu system, but is used to load some images to use as buttons for
# demonstration. Please see my graphics class to see a better load_image
# function and how to use it more effectively.
def load_image(file_name, folder, colorkey = None):
full_name = os.path.join(folder, file_name)
try:
image = pygame.image.load(full_name)
except pygame.error, message:
print 'Cannot load image:', full_name
raise SystemExit, message
image = image.convert_alpha()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
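# Usage sketch (hypothetical file and folder names): once a display mode has
# been set, something like
#   button = load_image('button.png', 'images', colorkey=-1)
# loads images/button.png and keys out the colour found at its top-left pixel.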
# ---[ READ THE NOTE - THIS IS NOT PART OF THE EXAMPLE ]------------------------
# This is just used to describe the current menu to the user - this is not
# implemented "well" since it is not what I am trying to show in this menu
# example (i.e. when this is updated, it changes the ENTIRE screen instead
# of only the portion that changed as we do with the menu system).
DESC = [['MENU 0 - This menu is positioned using the top left corner and '
'contains only text buttons',
'Press enter to select a button. Press \'r\' to remove the currently '
'selected button.',
'Select Exit on any menu or press ESC to exit the program'],
['MENU 1 - This menu is positioned using the top left corner and '
'contains only text buttons',
'The menu has multiple rows and columns (use the arrow keys). The '
'colors can also be changed!',
'Select a button'],
['MENU 2 - The center of this menu is placed in the center of the '
'screen and it contains only picture',
'buttons. Can you have a menu with text buttons and images?',
'Select a button to continue (All buttons go to the next menu)'],
['MENU 3 - A mix of text and graphical images! Add will dynamically '
'add some buttons. Center will',
'center the menu on the screen. Set (0, 0) will set the top left of '
'the menu in the top left of the screen.',
'Rand Colors/Config will change some menu parameters (see console '
'output for new values).']]
TEXT = []
pygame.font.init()
desc_font = pygame.font.Font(None, 24) # Font to use
for text in DESC:
tmp = []
tmp.append(desc_font.render(text[0], True, WHITE))
tmp.append(desc_font.render(text[1], True, WHITE))
tmp.append(desc_font.render(text[2], True, WHITE))
TEXT.append(tmp)
| gpl-2.0 | 1,177,759,665,556,089,600 | 45.762712 | 80 | 0.644436 | false |
Pexego/PXGO_00049_2013_PCG | project-addons/history_product_code/history_product_code.py | 1 | 3864 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class history_product_code(orm.Model):
_inherit = 'product.product'
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if vals.get('default_code', False):
for id in ids:
product = self.browse(cr, uid, id, context)
if product.default_code:
self.pool.get("historial.product.code").create(cr, uid, {'product_id':product.id, 'code':product.default_code}, context)
return super(history_product_code,self).write(cr, uid, ids, vals, context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
vals = super(history_product_code, self).name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit)
if not vals:
vals = []
historial_ids = self.pool.get("historial.product.code").search(cr, user, [('code', '=', name)], offset=0, limit=limit-len(vals), order=None, context=context, count=False)
if not historial_ids:
historial_ids = self.pool.get("historial.product.code").search(cr, user, [('code', operator, name)], offset=0, limit=limit-len(vals), order=None, context=context, count=False)
if not historial_ids:
return vals
historials = self.pool.get("historial.product.code").browse(cr, user, historial_ids, context)
product_ids = []
for historial in historials:
product_ids.append(historial.product_id.id)
product_ids = self.search(cr, user, [('id', 'in', product_ids)] + args, offset=0, limit=limit-len(vals), order=None, context=context, count=False)
results = self.name_get(cr, user, product_ids, context)
for result in results:
if not result in vals:
vals.append(result)
return vals
class historial_product_code(orm.Model):
_name = 'historial.product.code'
_columns = {
        'code': fields.char('Code', size=64, required=True, readonly=True),
        'product_id': fields.many2one('product.product', 'Product', required=False),
}
class product_template(orm.Model):
_inherit = "product.template"
def action_view_history_code(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'history_product_code.act_product_code_history_open', context=context)
if len(ids) == 1 and len(products) == 1:
ctx = "{'default_product_id': %s, 'search_default_product_id': %s}" \
% (products[0], products[0])
result['context'] = ctx
else:
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
| agpl-3.0 | 4,415,528,005,096,436,000 | 47.898734 | 187 | 0.60497 | false |
robobrobro/ballin-octo-shame | lib/Python-3.4.3/Lib/test/pickletester.py | 8 | 77270 | import copyreg
import io
import pickle
import pickletools
import random
import struct
import sys
import unittest
import weakref
from http.cookies import SimpleCookie
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest,
)
from pickle import bytes_types
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
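# Illustrative example (not used by the tests below): a protocol-2 pickle of a
# small int such as pickle.dumps(1, 2) == b'\x80\x02K\x01.' contains one PROTO
# opcode and one BININT1 opcode, so
#     opcode_in_pickle(pickle.BININT1, pickle.dumps(1, 2))  -> True
#     count_opcode(pickle.PROTO, pickle.dumps(1, 2))        -> 1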
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H(object):
pass
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
# DATA0 .. DATA2 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\nac'
b'builtins\ncomplex\n'
b'p1\n(F3.0\nF0.0\ntp2\nRp'
b'3\naL1L\naL-1L\naL255L\naL-'
b'255L\naL-256L\naL65535L\na'
b'L-65535L\naL-65536L\naL2'
b'147483647L\naL-2147483'
b'647L\naL-2147483648L\na('
b'Vabc\np4\ng4\nccopyreg'
b'\n_reconstructor\np5\n('
b'c__main__\nC\np6\ncbu'
b'iltins\nobject\np7\nNt'
b'p8\nRp9\n(dp10\nVfoo\np1'
b'1\nL1L\nsVbar\np12\nL2L\nsb'
b'g9\ntp13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL 'builtins complex'
39: p PUT 1
42: ( MARK
43: F FLOAT 3.0
48: F FLOAT 0.0
53: t TUPLE (MARK at 42)
54: p PUT 2
57: R REDUCE
58: p PUT 3
61: a APPEND
62: L LONG 1
66: a APPEND
67: L LONG -1
72: a APPEND
73: L LONG 255
79: a APPEND
80: L LONG -255
87: a APPEND
88: L LONG -256
95: a APPEND
96: L LONG 65535
104: a APPEND
105: L LONG -65535
114: a APPEND
115: L LONG -65536
124: a APPEND
125: L LONG 2147483647
138: a APPEND
139: L LONG -2147483647
153: a APPEND
154: L LONG -2147483648
168: a APPEND
169: ( MARK
170: V UNICODE 'abc'
175: p PUT 4
178: g GET 4
181: c GLOBAL 'copyreg _reconstructor'
205: p PUT 5
208: ( MARK
209: c GLOBAL '__main__ C'
221: p PUT 6
224: c GLOBAL 'builtins object'
241: p PUT 7
244: N NONE
245: t TUPLE (MARK at 208)
246: p PUT 8
249: R REDUCE
250: p PUT 9
253: ( MARK
254: d DICT (MARK at 253)
255: p PUT 10
259: V UNICODE 'foo'
264: p PUT 11
268: L LONG 1
272: s SETITEM
273: V UNICODE 'bar'
278: p PUT 12
282: L LONG 2
286: s SETITEM
287: b BUILD
288: g GET 9
291: t TUPLE (MARK at 169)
292: p PUT 13
296: a APPEND
297: g GET 13
301: a APPEND
302: L LONG 5
306: a APPEND
307: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopyreg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06cbuiltins\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL 'builtins complex'
35: q BINPUT 1
37: ( MARK
38: G BINFLOAT 3.0
47: G BINFLOAT 0.0
56: t TUPLE (MARK at 37)
57: q BINPUT 2
59: R REDUCE
60: q BINPUT 3
62: K BININT1 1
64: J BININT -1
69: K BININT1 255
71: J BININT -255
76: J BININT -256
81: M BININT2 65535
84: J BININT -65535
89: J BININT -65536
94: J BININT 2147483647
99: J BININT -2147483647
104: J BININT -2147483648
109: ( MARK
110: X BINUNICODE 'abc'
118: q BINPUT 4
120: h BINGET 4
122: c GLOBAL 'copyreg _reconstructor'
146: q BINPUT 5
148: ( MARK
149: c GLOBAL '__main__ C'
161: q BINPUT 6
163: c GLOBAL 'builtins object'
180: q BINPUT 7
182: N NONE
183: t TUPLE (MARK at 148)
184: q BINPUT 8
186: R REDUCE
187: q BINPUT 9
189: } EMPTY_DICT
190: q BINPUT 10
192: ( MARK
193: X BINUNICODE 'foo'
201: q BINPUT 11
203: K BININT1 1
205: X BINUNICODE 'bar'
213: q BINPUT 12
215: K BININT1 2
217: u SETITEMS (MARK at 192)
218: b BUILD
219: h BINGET 9
221: t TUPLE (MARK at 109)
222: q BINPUT 13
224: h BINGET 13
226: K BININT1 5
228: e APPENDS (MARK at 3)
229: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'foo'
153: q BINPUT 8
155: K BININT1 1
157: X BINUNICODE 'bar'
165: q BINPUT 9
167: K BININT1 2
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA3 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA4 = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA5 = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA6 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# Exception objects without arguments pickled from 2.x with protocol 2
DATA7 = {
exception :
exception_pickle.replace(b'?', exception.__name__.encode("ascii"))
for exception in python2_exceptions_without_args
}
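# For example (illustrative), the DATA7 entry for KeyError is
#   b'\x80\x02cexceptions\nKeyError\nq\x00)Rq\x01.'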
# StandardError is mapped to Exception, test that separately
DATA8 = exception_pickle.replace(b'?', b'StandardError')
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA9 = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = create_data()
def setUp(self):
pass
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of old style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
15: p PUT 0
18: ( MARK
19: d DICT (MARK at 18)
20: p PUT 1
23: b BUILD
24: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 0)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
17: q BINPUT 0
19: o OBJ (MARK at 2)
20: q BINPUT 1
22: } EMPTY_DICT
23: q BINPUT 2
25: b BUILD
26: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertTrue(x is x[0])
def test_recursive_tuple(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertEqual(len(x[0]), 1)
self.assertTrue(x is x[0][0])
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertTrue(x[1] is x)
def test_recursive_set(self):
h = H()
y = set({h})
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertIs(list(x)[0].attr, x)
self.assertEqual(len(x), 1)
def test_recursive_frozenset(self):
h = H()
y = frozenset({h})
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, frozenset)
self.assertIs(list(x)[0].attr, x)
self.assertEqual(len(x), 1)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def test_get(self):
self.assertRaises(KeyError, self.loads, b'g0\np0')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_ints(self):
import sys
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Try too with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.assertRaises(ValueError, self.loads, data)
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got,
# we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_pop_empty_stack(self):
# Test issue7455
s = b'0'
self.assertRaises((pickle.UnpicklingError, IndexError), self.loads, s)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = object.__new__(SimpleNewObj) # avoid __init__
x.abc = 666
for proto in protocols:
s = self.dumps(x, proto)
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto < 4)
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
proto >= 4)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
        # Protocol 0 is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except (pickle.PickleError):
pass
try:
self.dumps(D(), proto)
except (pickle.PickleError):
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA3)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA4)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA5)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
for (exc, data) in DATA7.items():
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
loaded = self.loads(DATA8)
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA9)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA4)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA6)
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_int_pickling_efficiency(self):
        # Test compactness of int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def check_negative_32b_binXXX(self, dumped):
if sys.maxsize > 2**32:
self.skipTest("test is only meaningful on 32-bit builds")
# XXX Pure Python pickle reads lengths as signed and passes
# them directly to read() (hence the EOFError)
with self.assertRaises((pickle.UnpicklingError, EOFError,
ValueError, OverflowError)):
self.loads(dumped)
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
self.check_negative_32b_binXXX(b'\x80\x03B\xff\xff\xff\xffxyzq\x00.')
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
self.check_negative_32b_binXXX(b'\x80\x03X\xff\xff\xff\xffxyzq\x00.')
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.assertRaises(ValueError, self.loads, dumped)
def test_negative_32b_binput(self):
# Issue #12847
if sys.maxsize > 2**32:
self.skipTest("test is only meaningful on 32-bit builds")
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.assertRaises(ValueError, self.loads, dumped)
def test_badly_escaped_string(self):
self.assertRaises(ValueError, self.loads, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.assertRaises(pickle.UnpicklingError, self.loads, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
"""
frame_opcode_size = 9
last_arg = last_pos = None
for op, arg, pos in pickletools.genops(pickled):
if op.name != 'FRAME':
continue
if last_pos is not None:
# The previous frame's size should be equal to the number
# of bytes up to the current frame.
frame_size = pos - last_pos - frame_opcode_size
self.assertEqual(frame_size, last_arg)
last_arg, last_pos = arg, pos
# The last frame's size should be equal to the number of bytes up
# to the pickle's end.
frame_size = len(pickled) - last_pos - frame_opcode_size
self.assertEqual(frame_size, last_arg)
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
obj = [b'x' * N, b'y' * N, b'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
self.assertGreaterEqual(n_frames, len(obj))
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
obj = [bytes([i]) * frame_size for i in range(num_frames)]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2GB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4GB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
    # All protocols use 1 byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
        return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
        return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(object):
def __init__(self, a, b, c):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return self.__dict__ == other.__dict__
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, pickle.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "wb")
try:
f.close()
            self.assertRaises(ValueError, pickle.load, f)
finally:
os.remove(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
pickle.dump(data, stream)
stream.seek(0)
unpickled = pickle.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 4)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
pickle.dump(123, f, -1)
pickle.dump(123, file=f, protocol=-1)
pickle.dumps(123, -1)
pickle.dumps(123, protocol=-1)
pickle.Pickler(f, -1)
pickle.Pickler(f, protocol=-1)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(pickle.Pickler):
def __init__(self): pass
class BadUnpickler(pickle.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def test_bad_input(self):
# Test issue4298
s = bytes([0x58, 0, 0, 0, 0x54])
self.assertRaises(EOFError, pickle.loads, s)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if f.seekable():
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if f.seekable():
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
pass
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(3):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
| mit | -7,512,519,983,446,190,000 | 33.17249 | 82 | 0.522041 | false |
NicolasHug/Surprise | examples/load_custom_dataset_predefined_folds.py | 1 | 1287 | """
This module describes how to load a custom dataset when folds (for
cross-validation) are predefined by train and test files.
As a custom dataset we will actually use the movielens-100k dataset, but act as
if it were not built-in.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from surprise import SVD
from surprise import Dataset
from surprise import Reader
from surprise import accuracy
from surprise.model_selection import PredefinedKFold
# path to dataset folder
files_dir = os.path.expanduser('~/.surprise_data/ml-100k/ml-100k/')
# This time, we'll use the built-in reader.
reader = Reader('ml-100k')
# folds_files is a list of tuples containing file paths:
# [(u1.base, u1.test), (u2.base, u2.test), ... (u5.base, u5.test)]
train_file = files_dir + 'u%d.base'
test_file = files_dir + 'u%d.test'
folds_files = [(train_file % i, test_file % i) for i in (1, 2, 3, 4, 5)]
data = Dataset.load_from_folds(folds_files, reader=reader)
pkf = PredefinedKFold()
algo = SVD()
for trainset, testset in pkf.split(data):
# train and test algorithm.
algo.fit(trainset)
predictions = algo.test(testset)
# Compute and print Root Mean Squared Error
accuracy.rmse(predictions, verbose=True)
| bsd-3-clause | 3,786,601,404,914,213,000 | 28.930233 | 79 | 0.710179 | false |
micolous/cfsprinter | src/pagerprinter/plugins/tts.py | 1 | 1808 | """
pyttsx (text to speech)
Copyright 2014 Shane Rees <https://github.com/Shaggs/>
This plug-in is designed to read out a copy of the received turnout
page for those that may be in the station or for those coming in
and need details.
It reads only parts of the page: RESPOND (job type), alarm level,
address, and any extra info after the ==.
The speech rate is hard-coded at this stage.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from . import BasePlugin
import pyttsx
import re
import time
engine = pyttsx.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', 100)
volume = engine.getProperty('volume')
engine.setProperty('volume', 100)
class TTS(BasePlugin):
def execute(self, msg, unit, address, when, printer, print_copies):
res = str('%s - %s' % (msg, unit))
rem = re.compile('.*(RESPOND.*?ALARM\sLEVEL:\s\d)')
resp = rem.match(res)
more = str('%s - %s' % (msg, unit))
inf = re.compile('.*==(.*?\s:)')
info = inf.match(more)
if resp:
for group in resp.groups():
if info:
for group2 in info.groups():
for x in range(3):
engine.say(group)
engine.say(group2)
engine.runAndWait()
time.sleep(180)
PLUGIN = TTS
| gpl-3.0 | -2,769,014,519,169,296,400 | 29.644068 | 74 | 0.712389 | false |
zhuyongyong/crosswalk-test-suite | webapi/tct-cors-w3c-tests/cors-py/support/cors_allow_origin_asterisk.py | 25 | 2055 | # Copyright (c) 2012 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan,Weiwei <[email protected]>
def main(request, response):
response.headers.update([("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Credentials", "false"),
("Access-Control-Expose-Headers",
"Access-Control-Allow-Origin"),
("Cache-Control", "no-cache"),
("Pragma", "no-cache"),
("Content-Type", "text/plain")
])
return "HelloWorld"
| bsd-3-clause | 1,334,555,304,954,264,800 | 51.692308 | 82 | 0.686131 | false |
SEA000/uw-empathica | empathica/gluon/contrib/login_methods/dropbox_account.py | 1 | 3886 | #!/usr/bin/env python
# coding: utf8
"""
Dropbox Authentication for web2py
Developed by Massimo Di Pierro (2011)
Same License as Web2py License
"""
# mind here session is dropbox session, not current.session
import os
import re
import urllib
from dropbox import client, rest, session
from gluon import *
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json
class DropboxAccount(object):
"""
from gluon.contrib.login_methods.dropbox_account import DropboxAccount
auth.settings.actions_disabled=['register','change_password','request_reset_password']
auth.settings.login_form = DropboxAccount(request,
key="...",
secret="...",
access_type="...",
url = "http://localhost:8000/%s/default/user/login" % request.application)
when logged in
client = auth.settings.login_form.client
"""
def __init__(self,
request,
key = "",
secret = "",
access_type="app_folder",
login_url = "",
on_login_failure=None,
):
self.request=request
self.key=key
self.secret=secret
self.access_type=access_type
self.login_url = login_url
self.on_login_failure = on_login_failure
self.sess = session.DropboxSession(
self.key,self.secret,self.access_type)
def get_user(self):
request = self.request
token = current.session.dropbox_token
try:
access_token = self.sess.obtain_access_token(token)
except:
access_token = None
if access_token:
user = Storage()
self.client = client.DropboxClient(self.sess)
data = self.client.account_info()
display_name = data.get('display_name','').split(' ',1)
user = dict(email = data.get('email',None),
first_name = display_name[0],
last_name = display_name[-1],
registration_id = data.get('uid',None))
if not user['registration_id'] and self.on_login_failure:
redirect(self.on_login_failure)
return user
return None
def login_form(self):
token = self.sess.obtain_request_token()
current.session.dropbox_token = token
dropbox_url = self.sess.build_authorize_url(token,self.login_url)
redirect(dropbox_url)
form = IFRAME(_src=dropbox_url,
_scrolling="no",
_frameborder="no",
_style="width:400px;height:240px;")
return form
def logout_url(self, next = "/"):
current.session.dropbox_token=None
current.session.auth=None
redirect('https://www.dropbox.com/logout')
return next
def put(self,filename,file):
return json.loads(self.client.put_file(filename,file))['bytes']
def get(self,filename,file):
return self.client.get_file(filename)
def dir(self,path):
return json.loads(self.client.metadata(path))
def use_dropbox(auth,filename='private/dropbox.key',**kwargs):
path = os.path.join(current.request.folder,filename)
if os.path.exists(path):
request = current.request
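        # The key file is expected to hold a single line of the form
        # <key>:<secret>:<access_type>, e.g. "abc123:def456:app_folder"
        # (example values are made up for illustration).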
key,secret,access_type = open(path,'r').read().strip().split(':')
host = current.request.env.http_host
login_url = "http://%s/%s/default/user/login" % \
(host,request.application)
auth.settings.actions_disabled = \
['register','change_password','request_reset_password']
auth.settings.login_form = DropboxAccount(
request,key=key,secret=secret,access_type=access_type,
login_url = login_url,**kwargs)
| mit | 2,806,511,071,998,391,000 | 34.651376 | 90 | 0.58492 | false |
holgern/pyedflib | pyedflib/highlevel.py | 1 | 31130 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2020 Simon Kern
# Copyright (c) 2015 - 2020 Holger Nahrstaedt
# Copyright (c) 2011, 2015, Chris Lee-Messer
# Copyright (c) 2016-2017 The pyedflib Developers
# <https://github.com/holgern/pyedflib>
# See LICENSE for license details.
"""
Created on Tue Jan 7 12:13:47 2020
This file contains high-level functions to work with pyedflib.
Includes
- Reading and writing EDFs
- Anonymizing EDFs
- Comparing EDFs
- Renaming Channels from EDF files
- Dropping Channels from EDF files
@author: skjerns
"""
import os
import numpy as np
import warnings
import pyedflib
from copy import deepcopy
from datetime import datetime
# from . import EdfWriter
# from . import EdfReader
def _get_sample_frequency(signal_header):
# Temporary conditional assignment while we deprecate 'sample_rate' as a channel attribute
# in favor of 'sample_frequency', supporting the use of either to give
# users time to switch to the new interface.
return (signal_header['sample_rate']
if signal_header.get('sample_frequency') is None
else signal_header['sample_frequency'])
def tqdm(iteratable, *args, **kwargs):
"""
    This is an optional dependency that shows a progress bar for some
    of the functions, e.g. loading.
    Install this dependency with `pip install tqdm`;
    if it is not installed this is just a pass-through iterator.
"""
try:
from tqdm import tqdm as iterator
return iterator(iteratable, *args, **kwargs)
except:
return iteratable
def _parse_date(string):
"""
A simple dateparser that detects common date formats
Parameters
----------
string : str
a date string in format as denoted below.
Returns
-------
datetime.datetime
datetime object of a time.
"""
# some common formats.
formats = ['%Y-%m-%d', '%d-%m-%Y', '%d.%m.%Y', '%Y.%m.%d', '%d %b %Y',
'%Y/%m/%d', '%d/%m/%Y']
for f in formats:
try:
return datetime.strptime(string, f)
except:
pass
try:
import dateparser
return dateparser.parse(string)
except:
        print('dateparser is not installed. To convert strings to dates, '\
              'install it via `pip install dateparser`.')
raise ValueError('birthdate must be datetime object or of format'\
' `%d-%m-%Y`, eg. `24-01-2020`')
def dig2phys(signal, dmin, dmax, pmin, pmax):
"""
converts digital edf values to physical values
Parameters
----------
signal : np.ndarray or int
A numpy array with int values (digital values) or an int.
dmin : int
digital minimum value of the edf file (eg -2048).
dmax : int
digital maximum value of the edf file (eg 2048).
pmin : float
        physical minimum value of the edf file (eg -200.0).
pmax : float
physical maximum value of the edf file (eg 200.0).
Returns
-------
physical : np.ndarray or float
converted physical values
"""
m = (pmax-pmin) / (dmax-dmin)
b = pmax / m - dmax
physical = m * (signal + b)
return physical
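# Worked example (illustrative, values assumed): with dmin=-32768, dmax=32767,
# pmin=-200.0 and pmax=200.0 the scaling is m = 400/65535 and the offset is
# b = pmax/m - dmax = 0.5, so a digital value of 32767 maps to
# m * (32767 + 0.5) = 200.0 and -32768 maps to -200.0.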
def phys2dig(signal, dmin, dmax, pmin, pmax):
"""
converts physical values to digital values
Parameters
----------
signal : np.ndarray or int
A numpy array with int values (digital values) or an int.
dmin : int
digital minimum value of the edf file (eg -2048).
dmax : int
digital maximum value of the edf file (eg 2048).
pmin : float
        physical minimum value of the edf file (eg -200.0).
pmax : float
physical maximum value of the edf file (eg 200.0).
Returns
-------
digital : np.ndarray or int
converted digital values
"""
m = (pmax-pmin) / (dmax-dmin)
b = pmax / m - dmax
digital = signal/m - b
return digital
def make_header(technician='', recording_additional='', patientname='',
patient_additional='', patientcode= '', equipment= '',
admincode= '', gender= '', startdate=None, birthdate= ''):
"""
A convenience function to create an EDF header (a dictionary) that
can be used by pyedflib to update the main header of the EDF
Parameters
----------
technician : str, optional
name of the technician. The default is ''.
recording_additional : str, optional
comments etc. The default is ''.
patientname : str, optional
the name of the patient. The default is ''.
patient_additional : TYPE, optional
more info about the patient. The default is ''.
patientcode : str, optional
alphanumeric code. The default is ''.
equipment : str, optional
which system was used. The default is ''.
admincode : str, optional
code of the admin. The default is ''.
gender : str, optional
gender of patient. The default is ''.
startdate : datetime.datetime, optional
startdate of recording. The default is None.
birthdate : str/datetime.datetime, optional
date of birth of the patient. The default is ''.
Returns
-------
header : dict
a dictionary with the values given filled in.
"""
if not birthdate=='' and isinstance(birthdate, str):
birthdate = _parse_date(birthdate)
if startdate is None:
now = datetime.now()
startdate = datetime(now.year, now.month, now.day,
now.hour, now.minute, now.second)
del now
if isinstance(birthdate, datetime):
birthdate = birthdate.strftime('%d %b %Y').lower()
local = locals()
header = {}
for var in local:
if isinstance(local[var], datetime):
header[var] = local[var]
else:
header[var] = str(local[var])
return header
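# Usage sketch (illustrative, not part of the original module; the patient
# details below are made-up values):
#
#     header = make_header(patientname='X', gender='Female',
#                          birthdate='24-01-1980', technician='tech')
#     # the resulting dict can be passed to write_edf() as the main header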
def make_signal_header(label, dimension='uV', sample_rate=256, sample_frequency=None,
physical_min=-200, physical_max=200, digital_min=-32768,
digital_max=32767, transducer='', prefiler=''):
"""
A convenience function that creates a signal header for a given signal.
This can be used to create a list of signal headers that is used by
pyedflib to create an edf. With this, different sampling frequencies
can be indicated.
Parameters
----------
label : str
the name of the channel.
dimension : str, optional
dimension, eg mV. The default is 'uV'.
sample_rate : int, optional
sampling frequency. The default is 256. Deprecated: use 'sample_frequency' instead.
sample_frequency : int, optional
sampling frequency. The default is 256.
physical_min : float, optional
minimum value in dimension. The default is -200.
physical_max : float, optional
maximum value in dimension. The default is 200.
digital_min : int, optional
digital minimum of the ADC. The default is -32768.
digital_max : int, optional
digital maximum of the ADC. The default is 32767.
transducer : str, optional
electrode type that was used. The default is ''.
prefiler : str, optional
filtering and sampling method. The default is ''.
Returns
-------
signal_header : dict
a signal header that can be used to save a channel to an EDF.
"""
signal_header = {'label': label,
'dimension': dimension,
'sample_rate': sample_rate,
'sample_frequency': sample_frequency,
'physical_min': physical_min,
'physical_max': physical_max,
'digital_min': digital_min,
'digital_max': digital_max,
'transducer': transducer,
'prefilter': prefiler}
return signal_header
def make_signal_headers(list_of_labels, dimension='uV', sample_rate=256,
sample_frequency=None, physical_min=-200.0, physical_max=200.0,
digital_min=-32768, digital_max=32767,
transducer='', prefiler=''):
"""
A function that creates signal headers for a given list of channel labels.
This can only be used if each channel has the same sampling frequency
Parameters
----------
list_of_labels : list of str
A list with labels for each channel.
dimension : str, optional
dimension, eg mV. The default is 'uV'.
sample_rate : int, optional
sampling frequency. The default is 256. Deprecated: use 'sample_frequency' instead.
sample_frequency : int, optional
sampling frequency. The default is 256.
physical_min : float, optional
minimum value in dimension. The default is -200.
physical_max : float, optional
maximum value in dimension. The default is 200.
digital_min : int, optional
digital minimum of the ADC. The default is -32768.
digital_max : int, optional
digital maximum of the ADC. The default is 32767.
transducer : str, optional
electrode type that was used. The default is ''.
prefiler : str, optional
filtering and sampling method. The default is ''.
Returns
-------
signal_headers : list of dict
returns n signal headers as a list to save several signal headers.
"""
signal_headers = []
for label in list_of_labels:
header = make_signal_header(label, dimension=dimension, sample_rate=sample_rate,
sample_frequency=sample_frequency,
physical_min=physical_min, physical_max=physical_max,
digital_min=digital_min, digital_max=digital_max,
transducer=transducer, prefiler=prefiler)
signal_headers.append(header)
return signal_headers
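# Usage sketch (illustrative; the channel labels are assumptions):
#
#     labels = ['EEG Fz', 'EEG Cz', 'EEG Pz']
#     signal_headers = make_signal_headers(labels, sample_frequency=256,
#                                          physical_min=-200, physical_max=200)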
def read_edf(edf_file, ch_nrs=None, ch_names=None, digital=False, verbose=False):
"""
Convenience function for reading EDF+/BDF data with pyedflib.
Will load the edf and return the signals, the headers of the signals
and the header of the EDF. If all signals have the same sample frequency
will return a numpy array, else a list with the individual signals
Parameters
----------
edf_file : str
link to an edf file.
ch_nrs : list of int, optional
The indices of the channels to read. The default is None.
ch_names : list of str, optional
The names of channels to read. The default is None.
digital : bool, optional
will return the signals as digital values (ADC). The default is False.
verbose : bool, optional
Print progress bar while loading or not. The default is False.
Returns
-------
signals : np.ndarray or list
the signals of the chosen channels contained in the EDF.
signal_headers : list
one signal header for each channel in the EDF.
header : dict
the main header of the EDF file containing meta information.
"""
assert (ch_nrs is None) or (ch_names is None), \
'names xor numbers should be supplied'
if ch_nrs is not None and not isinstance(ch_nrs, list): ch_nrs = [ch_nrs]
if ch_names is not None and \
not isinstance(ch_names, list): ch_names = [ch_names]
with pyedflib.EdfReader(edf_file) as f:
# see which channels we want to load
available_chs = [ch.upper() for ch in f.getSignalLabels()]
n_chrs = f.signals_in_file
# find out which number corresponds to which channel
if ch_names is not None:
ch_nrs = []
for ch in ch_names:
if not ch.upper() in available_chs:
warnings.warn('{} is not in source file (contains {})'\
.format(ch, available_chs))
print('will be ignored.')
else:
ch_nrs.append(available_chs.index(ch.upper()))
# if there ch_nrs is not given, load all channels
if ch_nrs is None: # no numbers means we load all
ch_nrs = range(n_chrs)
# convert negative numbers into positives
ch_nrs = [n_chrs+ch if ch<0 else ch for ch in ch_nrs]
        # load headers, signal information and annotations
header = f.getHeader()
signal_headers = [f.getSignalHeaders()[c] for c in ch_nrs]
# add annotations to header
annotations = f.readAnnotations()
annotations = [[s, d, a] for s,d,a in zip(*annotations)]
header['annotations'] = annotations
signals = []
for i,c in enumerate(tqdm(ch_nrs, desc='Reading Channels',
disable=not verbose)):
signal = f.readSignal(c, digital=digital)
signals.append(signal)
# we can only return a np.array if all signals have the same samplefreq
sfreqs = [_get_sample_frequency(shead) for shead in signal_headers]
all_sfreq_same = sfreqs[1:]==sfreqs[:-1]
if all_sfreq_same:
            dtype = np.int32 if digital else float
signals = np.array(signals, dtype=dtype)
    assert len(signals)==len(signal_headers), 'Something went wrong, length'\
                                              ' of headers does not match length of signals'
del f
return signals, signal_headers, header
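# Usage sketch (illustrative; 'recording.edf' and the channel name are
# assumptions, not files or channels shipped with the package):
#
#     signals, signal_headers, header = read_edf('recording.edf',
#                                                ch_names=['EEG Fpz-Cz'])
#     print(header['startdate'], signals.shape)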
def write_edf(edf_file, signals, signal_headers, header=None, digital=False,
file_type=-1, block_size=1):
"""
Write signals to an edf_file. Header can be generated on the fly with
generic values. EDF+/BDF+ is selected based on the filename extension,
but can be overwritten by setting filetype to pyedflib.FILETYPE_XXX
Parameters
----------
edf_file : np.ndarray or list
where to save the EDF file
signals : list
The signals as a list of arrays or a ndarray.
signal_headers : list of dict
a list with one signal header(dict) for each signal.
See pyedflib.EdfWriter.setSignalHeader..
header : dict
a main header (dict) for the EDF file, see
pyedflib.EdfWriter.setHeader for details.
If no header present, will create an empty header
digital : bool, optional
whether the signals are in digital format (ADC). The default is False.
    file_type : int, optional
choose filetype for saving.
EDF = 0, EDF+ = 1, BDF = 2, BDF+ = 3, automatic from extension = -1
block_size : int
set the block size for writing. Should be divisor of signal length
in seconds. Higher values mean faster writing speed, but if it
is not a divisor of the signal duration, it will append zeros.
        Can be any value from 1 to 60; -1 will auto-infer the fastest value.
Returns
-------
bool
True if successful, False if failed.
"""
assert header is None or isinstance(header, dict), \
'header must be dictioniary or None'
assert isinstance(signal_headers, list), \
'signal headers must be list'
assert len(signal_headers)==len(signals), \
'signals and signal_headers must be same length'
assert file_type in [-1, 0, 1, 2, 3], \
'filetype must be in range -1, 3'
assert block_size<=60 and block_size>=-1 and block_size!=0, \
'blocksize must be smaller or equal to 60'
    # copy objects to prevent accidental changes to mutable objects
header = deepcopy(header)
signal_headers = deepcopy(signal_headers)
if file_type==-1:
ext = os.path.splitext(edf_file)[-1]
if ext.lower() == '.edf':
file_type = pyedflib.FILETYPE_EDFPLUS
elif ext.lower() == '.bdf':
file_type = pyedflib.FILETYPE_BDFPLUS
else:
raise ValueError('Unknown extension {}'.format(ext))
n_channels = len(signals)
# if there is no header, we create one with dummy values
if header is None:
header = {}
default_header = make_header()
default_header.update(header)
header = default_header
# block_size sets the size of each writing block and should be a divisor
# of the length of the signal. If it is not, the remainder of the file
# will be filled with zeros.
signal_duration = len(signals[0]) // _get_sample_frequency(signal_headers[0])
if block_size == -1:
block_size = max([d for d in range(1, 61) if signal_duration % d == 0])
elif signal_duration % block_size != 0:
        warnings.warn('Signal length is not divisible by block_size. '+
                      'The file will have zeros appended.')
# check dmin, dmax and pmin, pmax dont exceed signal min/max
for sig, shead in zip(signals, signal_headers):
dmin, dmax = shead['digital_min'], shead['digital_max']
pmin, pmax = shead['physical_min'], shead['physical_max']
label = shead['label']
if digital: # exception as it will lead to clipping
assert dmin<=sig.min(), \
                'digital_min is {}, but signal_min is {} ' \
                'for channel {}'.format(dmin, sig.min(), label)
            assert dmax>=sig.max(), \
                'digital_max is {}, but signal_max is {} ' \
                'for channel {}'.format(dmax, sig.max(), label)
assert pmin != pmax, \
'physical_min {} should be different from physical_max {}'.format(pmin,pmax)
else: # only warning, as this will not lead to clipping
assert pmin<=sig.min(), \
'phys_min is {}, but signal_min is {} ' \
'for channel {}'.format(pmin, sig.min(), label)
assert pmax>=sig.max(), \
'phys_max is {}, but signal_max is {} ' \
'for channel {}'.format(pmax, sig.max(), label)
frequency_key = 'sample_rate' if shead.get('sample_frequency') is None else 'sample_frequency'
shead[frequency_key] *= block_size
# get annotations, in format [[timepoint, duration, description], [...]]
annotations = header.get('annotations', [])
with pyedflib.EdfWriter(edf_file, n_channels=n_channels, file_type=file_type) as f:
f.setDatarecordDuration(int(100000 * block_size))
f.setSignalHeaders(signal_headers)
f.setHeader(header)
f.writeSamples(signals, digital=digital)
for annotation in annotations:
f.writeAnnotation(*annotation)
del f
return os.path.isfile(edf_file)
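# Usage sketch (illustrative; the data and file name are assumptions):
#
#     signals = np.random.rand(3, 256 * 60) * 200   # 3 channels, 60 s at 256 Hz
#     signal_headers = make_signal_headers(['ch1', 'ch2', 'ch3'],
#                                          sample_frequency=256)
#     header = make_header(technician='tech')
#     write_edf('out.edf', signals, signal_headers, header)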
def write_edf_quick(edf_file, signals, sfreq, digital=False):
"""
wrapper for write_pyedf without creating headers.
Use this if you don't care about headers or channel names and just
want to dump some signals with the same sampling freq. to an edf
Parameters
----------
edf_file : str
where to store the data/edf.
signals : np.ndarray
The signals you want to store as numpy array.
sfreq : int
the sampling frequency of the signals.
digital : bool, optional
if the data is present digitally (int) or as mV/uV.The default is False.
Returns
-------
bool
True if successful, else False or raise Error.
"""
signals = np.atleast_2d(signals)
header = make_header(technician='pyedflib-quickwrite')
labels = ['CH_{}'.format(i) for i in range(len(signals))]
pmin, pmax = signals.min(), signals.max()
signal_headers = make_signal_headers(labels, sample_frequency=sfreq,
physical_min=pmin, physical_max=pmax)
return write_edf(edf_file, signals, signal_headers, header, digital=digital)
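# Usage sketch (illustrative; the data and file name are assumptions):
#
#     sfreq = 256
#     signals = np.random.rand(2, sfreq * 10)   # 2 channels, 10 seconds
#     write_edf_quick('quick.edf', signals, sfreq)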
def read_edf_header(edf_file, read_annotations=True):
"""
Reads the header and signal headers of an EDF file and it's annotations
Parameters
----------
edf_file : str
EDF/BDF file to read.
Returns
-------
summary : dict
header of the edf file as dictionary.
"""
assert os.path.isfile(edf_file), 'file {} does not exist'.format(edf_file)
with pyedflib.EdfReader(edf_file) as f:
summary = f.getHeader()
summary['Duration'] = f.getFileDuration()
summary['SignalHeaders'] = f.getSignalHeaders()
summary['channels'] = f.getSignalLabels()
if read_annotations:
annotations = f.read_annotation()
annotations = [[float(t)/10000000, d if d else -1, x.decode()] for t,d,x in annotations]
summary['annotations'] = annotations
del f
return summary
def compare_edf(edf_file1, edf_file2, verbose=False):
"""
Loads two edf files and checks whether the values contained in
them are the same. Does not check the header or annotations data.
Mainly to verify that other options (eg anonymization) produce the
same EDF file.
Parameters
----------
edf_file1 : str
edf file 1 to compare.
edf_file2 : str
edf file 2 to compare.
verbose : bool, optional
print progress or not. The default is False.
Returns
-------
bool
True if signals are equal, else raises error.
"""
signals1, shead1, _ = read_edf(edf_file1, digital=True, verbose=verbose)
signals2, shead2, _ = read_edf(edf_file2, digital=True, verbose=verbose)
for i, sigs in enumerate(zip(signals1, signals2)):
s1, s2 = sigs
if np.array_equal(s1, s2): continue # early stopping
s1 = np.abs(s1)
s2 = np.abs(s2)
if np.array_equal(s1, s2): continue # early stopping
close = np.mean(np.isclose(s1, s2))
assert close>0.99, 'Error, digital values of {}'\
' and {} for ch {}: {} are not the same: {:.3f}'.format(
edf_file1, edf_file2, shead1[i]['label'],
shead2[i]['label'], close)
dmin1, dmax1 = shead1[i]['digital_min'], shead1[i]['digital_max']
pmin1, pmax1 = shead1[i]['physical_min'], shead1[i]['physical_max']
dmin2, dmax2 = shead2[i]['digital_min'], shead2[i]['digital_max']
pmin2, pmax2 = shead2[i]['physical_min'], shead2[i]['physical_max']
for i, sigs in enumerate(zip(signals1, signals2)):
s1, s2 = sigs
# convert to physical values, no need to load all data again
s1 = dig2phys(s1, dmin1, dmax1, pmin1, pmax1)
s2 = dig2phys(s2, dmin2, dmax2, pmin2, pmax2)
# compare absolutes in case of inverted signals
if np.array_equal(s1, s2): continue # early stopping
s1 = np.abs(s1)
s2 = np.abs(s2)
if np.array_equal(s1, s2): continue # early stopping
min_dist = np.abs(dig2phys(1, dmin1, dmax1, pmin1, pmax1))
close = np.mean(np.isclose(s1, s2, atol=min_dist))
assert close>0.99, 'Error, physical values of {}'\
' and {} for ch {}: {} are not the same: {:.3f}'.format(
edf_file1, edf_file2, shead1[i]['label'],
shead2[i]['label'], close)
return True
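# Usage sketch (illustrative; the file names are assumptions):
#
#     compare_edf('recording.edf', 'recording_anonymized.edf')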
def drop_channels(edf_source, edf_target=None, to_keep=None, to_drop=None,
verbose=False):
"""
Remove channels from an edf file. Save the file.
For safety reasons, no source files can be overwritten.
Parameters
----------
edf_source : str
The source edf file from which to drop channels.
edf_target : str, optional
Where to save the file.If None, will be edf_source+'dropped.edf'.
The default is None.
to_keep : list, optional
A list of channel names or indices that will be kept.
Strings will always be interpreted as channel names.
'to_keep' will overwrite any droppings proposed by to_drop.
The default is None.
to_drop : list, optional
A list of channel names/indices that should be dropped.
Strings will be interpreted as channel names. The default is None.
verbose : bool, optional
print progress or not. The default is False.
Returns
-------
edf_target : str
the target filename with the dropped channels.
"""
# convert to list if necessary
if isinstance(to_keep, (int, str)): to_keep = [to_keep]
if isinstance(to_drop, (int, str)): to_drop = [to_drop]
# check all parameters are good
assert to_keep is None or to_drop is None,'Supply only to_keep xor to_drop'
if to_keep is not None:
assert all([isinstance(ch, (str, int)) for ch in to_keep]),\
'channels must be int or string'
if to_drop is not None:
assert all([isinstance(ch, (str, int)) for ch in to_drop]),\
'channels must be int or string'
assert os.path.exists(edf_source), \
'source file {} does not exist'.format(edf_source)
    assert edf_source!=edf_target, 'For safety, target must not be the source file.'
if edf_target is None:
edf_target = os.path.splitext(edf_source)[0] + '_dropped.edf'
if os.path.exists(edf_target):
warnings.warn('Target file will be overwritten')
ch_names = read_edf_header(edf_source)['channels']
# convert to all lowercase for compatibility
ch_names = [ch.lower() for ch in ch_names]
ch_nrs = list(range(len(ch_names)))
if to_keep is not None:
for i,ch in enumerate(to_keep):
if isinstance(ch,str):
ch_idx = ch_names.index(ch.lower())
to_keep[i] = ch_idx
load_channels = list(to_keep) # copy list compatible with py2.7
elif to_drop is not None:
for i,ch in enumerate(to_drop):
if isinstance(ch,str):
ch_idx = ch_names.index(ch.lower())
to_drop[i] = ch_idx
to_drop = [len(ch_nrs)+ch if ch<0 else ch for ch in to_drop]
[ch_nrs.remove(ch) for ch in to_drop]
load_channels = list(ch_nrs)
else:
raise ValueError
signals, signal_headers, header = read_edf(edf_source,
ch_nrs=load_channels,
digital=True, verbose=verbose)
write_edf(edf_target, signals, signal_headers, header, digital=True)
return edf_target
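# Usage sketch (illustrative; file and channel names are assumptions):
#
#     drop_channels('recording.edf', to_keep=['EEG Fz', 'EEG Cz'])
#     drop_channels('recording.edf', to_drop=['EMG', 'ECG'])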
def anonymize_edf(edf_file, new_file=None,
to_remove=['patientname', 'birthdate'],
new_values=['xxx', ''], verify=False, verbose=False):
"""Anonymize an EDF file by replacing values of header fields.
This function can be used to overwrite all header information that is
patient specific, for example birthdate and patientname. All header fields
can be overwritten this way (i.e., all header.keys() given
_, _, header = read_edf(edf_file, digital=True)).
Parameters
----------
edf_file : str
Filename of an EDF/BDF.
new_file : str | None
The filename of the anonymized file. If None, the input filename
appended with '_anonymized' is used. Defaults to None.
to_remove : list of str
List of attributes to overwrite in the `edf_file`. Defaults to
['patientname', 'birthdate'].
new_values : list of str
List of values used for overwriting the attributes specified in
`to_remove`. Each item in `to_remove` must have a corresponding item
in `new_values`. Defaults to ['xxx', ''].
verify : bool
Compare `edf_file` and `new_file` for equality (i.e., double check that
values are same). Defaults to False
verbose : bool, optional
print progress or not. The default is False.
Returns
-------
bool
True if successful, or if `verify` is False. Raises an error otherwise.
"""
if not len(to_remove) == len(new_values):
raise AssertionError('Each `to_remove` must have one `new_value`')
if new_file is None:
file, ext = os.path.splitext(edf_file)
new_file = file + '_anonymized' + ext
signals, signal_headers, header = read_edf(edf_file, digital=True,
verbose=verbose)
for new_val, attr in zip(new_values, to_remove):
header[attr] = new_val
write_edf(new_file, signals, signal_headers, header, digital=True)
if verify:
compare_edf(edf_file, new_file, verbose=verbose)
return True
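# Usage sketch (illustrative; the file name is an assumption):
#
#     anonymize_edf('recording.edf', new_file='recording_anonymized.edf',
#                   to_remove=['patientname', 'birthdate'],
#                   new_values=['xxx', ''], verify=True)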
def rename_channels(edf_file, mapping, new_file=None, verbose=False):
"""
A convenience function to rename channels in an EDF file.
Parameters
----------
edf_file : str
an string pointing to an edf file.
mapping : dict
a dictionary with channel mappings as key:value.
eg: {'M1-O2':'A1-O2'}
new_file : str, optional
the new filename. If None will be edf_file + '_renamed'
The default is None.
verbose : bool, optional
print progress or not. The default is False.
Returns
-------
bool
True if successful, False if failed.
"""
header = read_edf_header(edf_file)
channels = header['channels']
if new_file is None:
file, ext = os.path.splitext(edf_file)
new_file = file + '_renamed' + ext
signal_headers = []
signals = []
for ch_nr in tqdm(range(len(channels)), disable=not verbose):
signal, signal_header, _ = read_edf(edf_file, digital=True,
ch_nrs=ch_nr, verbose=verbose)
ch = signal_header[0]['label']
if ch in mapping :
if verbose: print('{} to {}'.format(ch, mapping[ch]))
ch = mapping[ch]
signal_header[0]['label']=ch
else:
if verbose: print('no mapping for {}, leave as it is'.format(ch))
signal_headers.append(signal_header[0])
signals.append(signal.squeeze())
return write_edf(new_file, signals, signal_headers, header, digital=True)
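# Usage sketch (illustrative; file name and mapping are assumptions):
#
#     rename_channels('recording.edf', {'M1-O2': 'A1-O2'},
#                     new_file='recording_renamed.edf')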
def change_polarity(edf_file, channels, new_file=None, verify=True,
verbose=False):
"""
Change polarity of certain channels
Parameters
----------
edf_file : str
from which file to change polarity.
channels : list of int
the indices of the channels.
new_file : str, optional
where to save the edf with inverted channels. The default is None.
verify : bool, optional
whether to verify the two edfs for similarity. The default is True.
verbose : str, optional
print progress or not. The default is True.
Returns
-------
bool
True if success.
"""
if new_file is None:
new_file = os.path.splitext(edf_file)[0] + '.edf'
if isinstance(channels, str): channels=[channels]
channels = [c.lower() for c in channels]
signals, signal_headers, header = read_edf(edf_file, digital=True,
verbose=verbose)
for i,sig in enumerate(signals):
label = signal_headers[i]['label'].lower()
if label in channels:
if verbose: print('inverting {}'.format(label))
signals[i] = -sig
    write_edf(new_file, signals, signal_headers, header, digital=True)
if verify: compare_edf(edf_file, new_file)
return True
| bsd-2-clause | 6,544,950,857,852,173,000 | 34.822785 | 102 | 0.607645 | false |
judy2k/stupid-python-tricks | stupid_metaclass_tricks.py | 1 | 2671 | #!/usr/bin/env python3
class AutoPropertyMeta(type):
def __init__(cls, name, bases, attrd):
# Find all getter methods:
for prop_name in [name[4:] for name in attrd if name.startswith("get_")]:
# Obtain references to the getter and optionally setter methods:
getter = attrd.get("get_" + prop_name)
setter = attrd.get("set_" + prop_name, None)
# Create a new property using the getter and optional setter:
prop = property(getter, setter)
# Apply the new property, and remove the getter and setter from
# the class:
attrd[prop_name] = prop
setattr(cls, prop_name, prop)
delattr(cls, "get_" + prop_name)
if setter:
delattr(cls, "set_" + prop_name)
return super(AutoPropertyMeta, cls).__init__(name, bases, attrd)
class AutoProperty(metaclass=AutoPropertyMeta):
pass
class MyRecord(AutoProperty):
"""This class magically has its get and set methods replaced with
properties, because its metaclass is AutoPropertyMeta:
# Say hello to Bernard:
>>> bernard = MyRecord('Bernard', 'H', 'Fitch')
# Print out the full name:
>>> print(bernard)
Bernard H. Fitch
# We now have a read-only property, 'first':
>>> print(bernard.first)
Bernard
# The defined 'get_first' method has disappeared:
>>> print(bernard.get_first())
Traceback (most recent call last):
...
AttributeError: 'MyRecord' object has no attribute 'get_first'
# We can also access Bernard's middle initial:
>>> print(bernard.initial)
H
# Because there's a setter, we can also set the initial:
>>> bernard.initial = 'J'
# ... and let's just make sure that's changed the value of initial:
>>> print(bernard)
Bernard J. Fitch
# And if we do a 'dir' on Bernard, we can't see any magic:
>>> print(sorted([attr for attr in dir(bernard) if not attr.startswith('_')]))
['first', 'initial', 'last']
"""
def __init__(self, first, initial, last):
self._first = first
self._last = last
self._initial = initial
def get_first(self):
return self._first
def get_initial(self):
return self._initial
def set_initial(self, initial):
self._initial = initial
def get_last(self):
return self._last
def __str__(self):
return "%s %s. %s" % (self._first, self._initial, self._last)
if __name__ == "__main__":
import doctest
doctest.testmod()
| unlicense | 3,150,536,984,689,845,000 | 27.351648 | 82 | 0.577686 | false |
sharad/calibre | setup/server.py | 10 | 3817 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import subprocess, tempfile, os, time, sys, telnetlib
from threading import RLock
from setup import Command
try:
from pyinotify import WatchManager, ThreadedNotifier, EventsCodes, ProcessEvent
except:
wm = None
else:
wm = WatchManager()
flags = EventsCodes.ALL_FLAGS
mask = flags['IN_MODIFY']
class ProcessEvents(ProcessEvent):
def __init__(self, command):
ProcessEvent.__init__(self)
self.command = command
def process_default(self, event):
name = getattr(event,
'name', None)
if not name:
return
ext = os.path.splitext(name)[1]
reload = False
if ext == '.py':
reload = True
print
print name, 'changed'
self.command.kill_server()
self.command.launch_server()
print self.command.prompt,
sys.stdout.flush()
if reload:
self.command.reload_browser(delay=1)
class Server(Command):
description = 'Run the calibre server in development mode conveniently'
MONOCLE_PATH = '../monocle'
def rebuild_monocole(self):
subprocess.check_call(['sprocketize', '-C', self.MONOCLE_PATH,
'-I', 'src', 'src/monocle.js'],
stdout=open('resources/content_server/read/monocle.js', 'wb'))
def launch_server(self):
print 'Starting server...\n'
with self.lock:
self.rebuild_monocole()
self.server_proc = p = subprocess.Popen(['calibre-server', '--develop'],
stderr=subprocess.STDOUT, stdout=self.server_log)
time.sleep(0.2)
if p.poll() is not None:
print 'Starting server failed'
raise SystemExit(1)
return p
def kill_server(self):
print 'Killing server...\n'
if self.server_proc is not None:
with self.lock:
if self.server_proc.poll() is None:
self.server_proc.terminate()
while self.server_proc.poll() is None:
time.sleep(0.1)
def watch(self):
if wm is not None:
self.notifier = ThreadedNotifier(wm, ProcessEvents(self))
self.notifier.start()
self.wdd = wm.add_watch(os.path.abspath('src'), mask, rec=True)
def reload_browser(self, delay=0.1):
time.sleep(delay)
try:
t = telnetlib.Telnet('localhost', 4242)
t.read_until("repl>")
t.write('BrowserReload();')
t.read_until("repl>")
t.close()
except:
print 'Failed to reload browser'
import traceback
traceback.print_exc()
def run(self, opts):
self.lock = RLock()
tdir = tempfile.gettempdir()
logf = os.path.join(tdir, 'calibre-server.log')
self.server_log = open(logf, 'ab')
self.prompt = 'Press Enter to kill/restart server. Ctrl+C to quit: '
print 'Server log available at:', logf
print
self.watch()
first = True
while True:
self.launch_server()
if not first:
self.reload_browser()
first = False
try:
raw_input(self.prompt)
except:
print
self.kill_server()
break
else:
self.kill_server()
print
if hasattr(self, 'notifier'):
self.notifier.stop()
| gpl-3.0 | 3,463,302,451,723,760,000 | 28.820313 | 84 | 0.532093 | false |
smaiLee/smarthome | plugins/netio230b/__init__.py | 12 | 3694 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#
# Copyright 2013 KNX-User-Forum e.V. http://knx-user-forum.de/
#
# This file is part of SmartHome.py. http://mknx.github.io/smarthome/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import re
logger = logging.getLogger('netio230b')
class NetIO230B():
def __init__(self, smarthome, address, user, password, netio_id=1):
self._sh = smarthome
self._cycle = 10
self._ports = dict()
self._error = []
self._timeout = 2
self._address = address
self._user = user
self._password = password
self._netio_id = int(netio_id)
def run(self):
self._sh.scheduler.add(
'NetIO230B' + str(self._netio_id),
self.update_status,
cycle=self._cycle)
self.alive = True
def stop(self):
self.alive = False
def parse_item(self, item):
if 'netio_id' in item.conf:
netio_id = int(item.conf['netio_id'])
else:
netio_id = 1
if netio_id != self._netio_id:
return None
if 'netio_port' in item.conf:
self._ports[item.conf['netio_port']] = item
return self.update_item
else:
if 'netio_id' in item.conf:
self._error.append(item)
return None
def update_item(self, item, caller=None, source=None, dest=None):
if caller != 'netio230b':
if 'netio_port' in item.conf:
self.set_port(item.conf['netio_port'], item())
def update_status(self):
url = 'http://' + self._address + '/tgi/control.cgi?login=p:' \
+ self._user + ':' + self._password + '&p=l'
# read html response of format '<html>1 0 1 0 </html>'
html = self._sh.tools.fetch_url(url, timeout=2)
if (html):
r = re.compile('[^0^1]')
cur_state = [_f for _f in r.split(html.decode("utf-8")) if _f]
# reset error state to False
for key in self._error:
key(False)
# assign values to items
for key in list(self._ports.keys()):
try:
if cur_state[int(key)] == '0':
self._ports[key](False, caller='netio230b')
else:
if cur_state[int(key)] == '1':
self._ports[key](True, caller='netio230b')
except IndexError as e:
logger.error("no state for port: %s", str(e))
else:
# set error state to True
for key in self._error:
key(True)
def set_port(self, port, state):
req = list('uuuu')
if int(port) in range(0, 4):
req[int(port)] = '%d' % state
url = 'http://' + self._address + '/tgi/control.cgi?login=p:' \
+ self._user + ':' + self._password + '&p=' + ''.join(req)
self._sh.tools.fetch_url(url, timeout=2)
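        # Example (illustrative): switching port 2 on while leaving the other
        # ports unchanged sends p=uu1u, i.e. a request of the form
        #   http://<address>/tgi/control.cgi?login=p:<user>:<password>&p=uu1u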
| gpl-3.0 | -8,568,121,446,500,732,000 | 31.690265 | 74 | 0.548457 | false |
rven/odoo | addons/mail/wizard/mail_compose_message.py | 1 | 29124 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import base64
import re
from odoo import _, api, fields, models, tools
from odoo.exceptions import UserError
# main mako-like expression pattern
EXPRESSION_PATTERN = re.compile('(\$\{.+?\})')
def _reopen(self, res_id, model, context=None):
# save original model in context, because selecting the list of available
# templates requires a model in context
context = dict(context or {}, default_model=model)
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
'context': context,
}
class MailComposer(models.TransientModel):
""" Generic message composition wizard. You may inherit from this wizard
at model and view levels to provide specific features.
The behavior of the wizard depends on the composition_mode field:
- 'comment': post on a record. The wizard is pre-populated via ``get_record_data``
- 'mass_mail': wizard in mass mailing mode where the mail details can
contain template placeholders that will be merged with actual data
before being sent to each recipient.
"""
_name = 'mail.compose.message'
_description = 'Email composition wizard'
_log_access = True
_batch_size = 500
@api.model
def default_get(self, fields):
""" Handle composition mode. Some details about context keys:
- comment: default mode, model and ID of a record the user comments
- default_model or active_model
- default_res_id or active_id
- mass_mail: model and IDs of records the user mass-mails
- active_ids: record IDs
- default_model or active_model
"""
result = super(MailComposer, self).default_get(fields)
# author
missing_author = 'author_id' in fields and 'author_id' not in result
missing_email_from = 'email_from' in fields and 'email_from' not in result
if missing_author or missing_email_from:
author_id, email_from = self.env['mail.thread']._message_compute_author(result.get('author_id'), result.get('email_from'), raise_exception=False)
if missing_email_from:
result['email_from'] = email_from
if missing_author:
result['author_id'] = author_id
if 'model' in fields and 'model' not in result:
result['model'] = self._context.get('active_model')
if 'res_id' in fields and 'res_id' not in result:
result['res_id'] = self._context.get('active_id')
if 'no_auto_thread' in fields and 'no_auto_thread' not in result and result.get('model'):
# doesn't support threading
if result['model'] not in self.env or not hasattr(self.env[result['model']], 'message_post'):
result['no_auto_thread'] = True
if 'active_domain' in self._context: # not context.get() because we want to keep global [] domains
result['active_domain'] = '%s' % self._context.get('active_domain')
if result.get('composition_mode') == 'comment' and (set(fields) & set(['model', 'res_id', 'partner_ids', 'record_name', 'subject'])):
result.update(self.get_record_data(result))
filtered_result = dict((fname, result[fname]) for fname in result if fname in fields)
return filtered_result
# content
subject = fields.Char('Subject')
body = fields.Html('Contents', default='', sanitize_style=True)
parent_id = fields.Many2one(
'mail.message', 'Parent Message', index=True, ondelete='set null',
help="Initial thread message.")
template_id = fields.Many2one(
'mail.template', 'Use template', index=True,
domain="[('model', '=', model)]")
attachment_ids = fields.Many2many(
'ir.attachment', 'mail_compose_message_ir_attachments_rel',
'wizard_id', 'attachment_id', 'Attachments')
layout = fields.Char('Layout', copy=False) # xml id of layout
add_sign = fields.Boolean(default=True)
# origin
email_from = fields.Char('From', help="Email address of the sender. This field is set when no matching partner is found and replaces the author_id field in the chatter.")
author_id = fields.Many2one(
'res.partner', 'Author', index=True,
help="Author of the message. If not set, email_from may hold an email address that did not match any partner.")
# composition
composition_mode = fields.Selection(selection=[
('comment', 'Post on a document'),
('mass_mail', 'Email Mass Mailing'),
('mass_post', 'Post on Multiple Documents')], string='Composition mode', default='comment')
model = fields.Char('Related Document Model', index=True)
res_id = fields.Integer('Related Document ID', index=True)
record_name = fields.Char('Message Record Name', help="Name get of the related document.")
use_active_domain = fields.Boolean('Use active domain')
active_domain = fields.Text('Active domain', readonly=True)
# characteristics
message_type = fields.Selection([
('comment', 'Comment'),
('notification', 'System notification')],
'Type', required=True, default='comment',
help="Message type: email for email message, notification for system "
"message, comment for other messages such as user replies")
subtype_id = fields.Many2one(
'mail.message.subtype', 'Subtype', ondelete='set null', index=True,
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment'))
mail_activity_type_id = fields.Many2one(
'mail.activity.type', 'Mail Activity Type',
index=True, ondelete='set null')
# destination
reply_to = fields.Char('Reply-To', help='Reply email address. Setting the reply_to bypasses the automatic thread creation.')
no_auto_thread = fields.Boolean(
'No threading for answers',
help='Answers do not go in the original document discussion thread. This has an impact on the generated message-id.')
is_log = fields.Boolean('Log an Internal Note',
help='Whether the message is an internal note (comment mode only)')
partner_ids = fields.Many2many(
'res.partner', 'mail_compose_message_res_partner_rel',
'wizard_id', 'partner_id', 'Additional Contacts',
domain=[('type', '!=', 'private')])
# mass mode options
notify = fields.Boolean('Notify followers', help='Notify followers of the document (mass post only)')
auto_delete = fields.Boolean('Delete Emails',
help='This option permanently removes any track of email after it\'s been sent, including from the Technical menu in the Settings, in order to preserve storage space of your Odoo database.')
auto_delete_message = fields.Boolean('Delete Message Copy', help='Do not keep a copy of the email in the document communication history (mass mailing only)')
mail_server_id = fields.Many2one('ir.mail_server', 'Outgoing mail server')
@api.model
def get_record_data(self, values):
""" Returns a defaults-like dict with initial values for the composition
wizard when sending an email related a previous email (parent_id) or
a document (model, res_id). This is based on previously computed default
values. """
result, subject = {}, False
if values.get('parent_id'):
parent = self.env['mail.message'].browse(values.get('parent_id'))
result['record_name'] = parent.record_name,
subject = tools.ustr(parent.subject or parent.record_name or '')
if not values.get('model'):
result['model'] = parent.model
if not values.get('res_id'):
result['res_id'] = parent.res_id
partner_ids = values.get('partner_ids', list()) + parent.partner_ids.ids
result['partner_ids'] = partner_ids
elif values.get('model') and values.get('res_id'):
doc_name_get = self.env[values.get('model')].browse(values.get('res_id')).name_get()
result['record_name'] = doc_name_get and doc_name_get[0][1] or ''
subject = tools.ustr(result['record_name'])
re_prefix = _('Re:')
if subject and not (subject.startswith('Re:') or subject.startswith(re_prefix)):
subject = "%s %s" % (re_prefix, subject)
result['subject'] = subject
return result
# ------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------
    # action buttons call with positional arguments only, so we need an intermediary function
# to ensure the context is passed correctly
def action_send_mail(self):
self.send_mail()
return {'type': 'ir.actions.act_window_close'}
def send_mail(self, auto_commit=False):
""" Process the wizard content and proceed with sending the related
email(s), rendering any template patterns on the fly if needed. """
notif_layout = self._context.get('custom_layout')
# Several custom layouts make use of the model description at rendering, e.g. in the
# 'View <document>' button. Some models are used for different business concepts, such as
        # 'purchase.order' which is used for an RFQ and a PO. To avoid confusion, we must use a
# different wording depending on the state of the object.
# Therefore, we can set the description in the context from the beginning to avoid falling
# back on the regular display_name retrieved in '_notify_prepare_template_context'.
model_description = self._context.get('model_description')
for wizard in self:
# Duplicate attachments linked to the email.template.
# Indeed, basic mail.compose.message wizard duplicates attachments in mass
# mailing mode. But in 'single post' mode, attachments of an email template
# also have to be duplicated to avoid changing their ownership.
if wizard.attachment_ids and wizard.composition_mode != 'mass_mail' and wizard.template_id:
new_attachment_ids = []
for attachment in wizard.attachment_ids:
if attachment in wizard.template_id.attachment_ids:
new_attachment_ids.append(attachment.copy({'res_model': 'mail.compose.message', 'res_id': wizard.id}).id)
else:
new_attachment_ids.append(attachment.id)
new_attachment_ids.reverse()
wizard.write({'attachment_ids': [(6, 0, new_attachment_ids)]})
# Mass Mailing
mass_mode = wizard.composition_mode in ('mass_mail', 'mass_post')
ActiveModel = self.env[wizard.model] if wizard.model and hasattr(self.env[wizard.model], 'message_post') else self.env['mail.thread']
if wizard.composition_mode == 'mass_post':
# do not send emails directly but use the queue instead
# add context key to avoid subscribing the author
ActiveModel = ActiveModel.with_context(mail_notify_force_send=False, mail_create_nosubscribe=True)
# wizard works in batch mode: [res_id] or active_ids or active_domain
if mass_mode and wizard.use_active_domain and wizard.model:
res_ids = self.env[wizard.model].search(ast.literal_eval(wizard.active_domain)).ids
elif mass_mode and wizard.model and self._context.get('active_ids'):
res_ids = self._context['active_ids']
else:
res_ids = [wizard.res_id]
batch_size = int(self.env['ir.config_parameter'].sudo().get_param('mail.batch_size')) or self._batch_size
sliced_res_ids = [res_ids[i:i + batch_size] for i in range(0, len(res_ids), batch_size)]
if wizard.composition_mode == 'mass_mail' or wizard.is_log or (wizard.composition_mode == 'mass_post' and not wizard.notify): # log a note: subtype is False
subtype_id = False
elif wizard.subtype_id:
subtype_id = wizard.subtype_id.id
else:
subtype_id = self.env['ir.model.data'].xmlid_to_res_id('mail.mt_comment')
for res_ids in sliced_res_ids:
# mass mail mode: mail are sudo-ed, as when going through get_mail_values
# standard access rights on related records will be checked when browsing them
# to compute mail values. If people have access to the records they have rights
                # to create lots of emails in sudo as it is considered as a technical model.
batch_mails_sudo = self.env['mail.mail'].sudo()
all_mail_values = wizard.get_mail_values(res_ids)
for res_id, mail_values in all_mail_values.items():
if wizard.composition_mode == 'mass_mail':
batch_mails_sudo |= self.env['mail.mail'].sudo().create(mail_values)
else:
post_params = dict(
message_type=wizard.message_type,
subtype_id=subtype_id,
email_layout_xmlid=notif_layout,
add_sign=not bool(wizard.template_id),
mail_auto_delete=wizard.template_id.auto_delete if wizard.template_id else self._context.get('mail_auto_delete', True),
model_description=model_description)
post_params.update(mail_values)
if ActiveModel._name == 'mail.thread':
if wizard.model:
post_params['model'] = wizard.model
post_params['res_id'] = res_id
if not ActiveModel.message_notify(**post_params):
# if message_notify returns an empty record set, no recipients where found.
raise UserError(_("No recipient found."))
else:
ActiveModel.browse(res_id).message_post(**post_params)
if wizard.composition_mode == 'mass_mail':
batch_mails_sudo.send(auto_commit=auto_commit)
def get_mail_values(self, res_ids):
"""Generate the values that will be used by send_mail to create mail_messages
or mail_mails. """
self.ensure_one()
results = dict.fromkeys(res_ids, False)
rendered_values = {}
mass_mail_mode = self.composition_mode == 'mass_mail'
# render all template-based value at once
if mass_mail_mode and self.model:
rendered_values = self.render_message(res_ids)
# compute alias-based reply-to in batch
reply_to_value = dict.fromkeys(res_ids, None)
if mass_mail_mode and not self.no_auto_thread:
records = self.env[self.model].browse(res_ids)
reply_to_value = records._notify_get_reply_to(default=self.email_from)
blacklisted_rec_ids = set()
if mass_mail_mode and issubclass(type(self.env[self.model]), self.pool['mail.thread.blacklist']):
self.env['mail.blacklist'].flush(['email'])
self._cr.execute("SELECT email FROM mail_blacklist WHERE active=true")
blacklist = {x[0] for x in self._cr.fetchall()}
if blacklist:
targets = self.env[self.model].browse(res_ids).read(['email_normalized'])
# First extract email from recipient before comparing with blacklist
blacklisted_rec_ids.update(target['id'] for target in targets
if target['email_normalized'] in blacklist)
for res_id in res_ids:
# static wizard (mail.message) values
mail_values = {
'subject': self.subject,
'body': self.body or '',
'parent_id': self.parent_id and self.parent_id.id,
'partner_ids': [partner.id for partner in self.partner_ids],
'attachment_ids': [attach.id for attach in self.attachment_ids],
'author_id': self.author_id.id,
'email_from': self.email_from,
'record_name': self.record_name,
'no_auto_thread': self.no_auto_thread,
'mail_server_id': self.mail_server_id.id,
'mail_activity_type_id': self.mail_activity_type_id.id,
}
            # mass mailing: rendering overrides wizard static values
if mass_mail_mode and self.model:
record = self.env[self.model].browse(res_id)
mail_values['headers'] = record._notify_email_headers()
# keep a copy unless specifically requested, reset record name (avoid browsing records)
mail_values.update(notification=not self.auto_delete_message, model=self.model, res_id=res_id, record_name=False)
# auto deletion of mail_mail
if self.auto_delete or self.template_id.auto_delete:
mail_values['auto_delete'] = True
# rendered values using template
email_dict = rendered_values[res_id]
mail_values['partner_ids'] += email_dict.pop('partner_ids', [])
mail_values.update(email_dict)
if not self.no_auto_thread:
mail_values.pop('reply_to')
if reply_to_value.get(res_id):
mail_values['reply_to'] = reply_to_value[res_id]
if self.no_auto_thread and not mail_values.get('reply_to'):
mail_values['reply_to'] = mail_values['email_from']
# mail_mail values: body -> body_html, partner_ids -> recipient_ids
mail_values['body_html'] = mail_values.get('body', '')
mail_values['recipient_ids'] = [(4, id) for id in mail_values.pop('partner_ids', [])]
# process attachments: should not be encoded before being processed by message_post / mail_mail create
mail_values['attachments'] = [(name, base64.b64decode(enc_cont)) for name, enc_cont in email_dict.pop('attachments', list())]
attachment_ids = []
for attach_id in mail_values.pop('attachment_ids'):
new_attach_id = self.env['ir.attachment'].browse(attach_id).copy({'res_model': self._name, 'res_id': self.id})
attachment_ids.append(new_attach_id.id)
attachment_ids.reverse()
mail_values['attachment_ids'] = self.env['mail.thread'].with_context(attached_to=record)._message_post_process_attachments(
mail_values.pop('attachments', []),
attachment_ids,
{'model': 'mail.message', 'res_id': 0}
)['attachment_ids']
# Filter out the blacklisted records by setting the mail state to cancel -> Used for Mass Mailing stats
if res_id in blacklisted_rec_ids:
mail_values['state'] = 'cancel'
# Do not post the mail into the recipient's chatter
mail_values['notification'] = False
results[res_id] = mail_values
return results
# ------------------------------------------------------------
# TEMPLATES
# ------------------------------------------------------------
@api.onchange('template_id')
def onchange_template_id_wrapper(self):
self.ensure_one()
values = self.onchange_template_id(self.template_id.id, self.composition_mode, self.model, self.res_id)['value']
for fname, value in values.items():
setattr(self, fname, value)
def onchange_template_id(self, template_id, composition_mode, model, res_id):
""" - mass_mailing: we cannot render, so return the template values
- normal mode: return rendered values
            /!\ for x2many fields, this onchange returns commands instead of ids
"""
if template_id and composition_mode == 'mass_mail':
template = self.env['mail.template'].browse(template_id)
fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']
values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
if template.attachment_ids:
values['attachment_ids'] = [att.id for att in template.attachment_ids]
if template.mail_server_id:
values['mail_server_id'] = template.mail_server_id.id
elif template_id:
values = self.generate_email_for_composer(
template_id, [res_id],
['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']
)[res_id]
            # transform attachments into attachment_ids; not attached to the document because this
            # will be done later in the posting process, allowing the database to be cleaned up if the email is not sent
attachment_ids = []
Attachment = self.env['ir.attachment']
for attach_fname, attach_datas in values.pop('attachments', []):
data_attach = {
'name': attach_fname,
'datas': attach_datas,
'res_model': 'mail.compose.message',
'res_id': 0,
'type': 'binary', # override default_type from context, possibly meant for another model!
}
attachment_ids.append(Attachment.create(data_attach).id)
if values.get('attachment_ids', []) or attachment_ids:
values['attachment_ids'] = [(6, 0, values.get('attachment_ids', []) + attachment_ids)]
else:
default_values = self.with_context(default_composition_mode=composition_mode, default_model=model, default_res_id=res_id).default_get(['composition_mode', 'model', 'res_id', 'parent_id', 'partner_ids', 'subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'])
values = dict((key, default_values[key]) for key in ['subject', 'body', 'partner_ids', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'] if key in default_values)
if values.get('body_html'):
values['body'] = values.pop('body_html')
        # This onchange should return commands instead of ids for x2many fields.
values = self._convert_to_write(values)
return {'value': values}
def save_as_template(self):
""" hit save as template button: current form value will be a new
template attached to the current document. """
for record in self:
model = self.env['ir.model']._get(record.model or 'mail.message')
model_name = model.name or ''
template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
values = {
'name': template_name,
'subject': record.subject or False,
'body_html': record.body or False,
'model_id': model.id or False,
'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
}
template = self.env['mail.template'].create(values)
# generate the saved template
record.write({'template_id': template.id})
record.onchange_template_id_wrapper()
return _reopen(self, record.id, record.model, context=self._context)
# ------------------------------------------------------------
# RENDERING
# ------------------------------------------------------------
def render_message(self, res_ids):
"""Generate template-based values of wizard, for the document records given
by res_ids. This method is meant to be inherited by email_template that
will produce a more complete dictionary, using Jinja2 templates.
        Each template is generated for all res_ids, allowing to parse the template
        once and render it multiple times. This is useful for mass mailing where
        template rendering represents a significant part of the process.
        Default recipients are also computed, based on the mail_thread method
        _message_get_default_recipients. This ensures a mass mailing always has
        some recipients specified.
:param list res_ids: list of record ids
:return dict results: for each res_id, the generated template values for
subject, body, email_from and reply_to
"""
self.ensure_one()
multi_mode = True
if isinstance(res_ids, int):
multi_mode = False
res_ids = [res_ids]
subjects = self.env['mail.render.mixin']._render_template(self.subject, self.model, res_ids)
bodies = self.env['mail.render.mixin']._render_template(self.body, self.model, res_ids, post_process=True)
emails_from = self.env['mail.render.mixin']._render_template(self.email_from, self.model, res_ids)
replies_to = self.env['mail.render.mixin']._render_template(self.reply_to, self.model, res_ids)
default_recipients = {}
if not self.partner_ids:
records = self.env[self.model].browse(res_ids).sudo()
default_recipients = records._message_get_default_recipients()
results = dict.fromkeys(res_ids, False)
for res_id in res_ids:
results[res_id] = {
'subject': subjects[res_id],
'body': bodies[res_id],
'email_from': emails_from[res_id],
'reply_to': replies_to[res_id],
}
results[res_id].update(default_recipients.get(res_id, dict()))
# generate template-based values
if self.template_id:
template_values = self.generate_email_for_composer(
self.template_id.id, res_ids,
['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'])
else:
template_values = {}
for res_id in res_ids:
if template_values.get(res_id):
# recipients are managed by the template
results[res_id].pop('partner_ids', None)
results[res_id].pop('email_to', None)
results[res_id].pop('email_cc', None)
# remove attachments from template values as they should not be rendered
template_values[res_id].pop('attachment_ids', None)
else:
template_values[res_id] = dict()
# update template values by composer values
template_values[res_id].update(results[res_id])
return multi_mode and template_values or template_values[res_ids[0]]
@api.model
def generate_email_for_composer(self, template_id, res_ids, fields):
""" Call email_template.generate_email(), get fields relevant for
mail.compose.message, transform email_cc and email_to into partner_ids """
multi_mode = True
if isinstance(res_ids, int):
multi_mode = False
res_ids = [res_ids]
returned_fields = fields + ['partner_ids', 'attachments']
values = dict.fromkeys(res_ids, False)
template_values = self.env['mail.template'].with_context(tpl_partners_only=True).browse(template_id).generate_email(res_ids, fields)
for res_id in res_ids:
res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))
res_id_values['body'] = res_id_values.pop('body_html', '')
values[res_id] = res_id_values
return multi_mode and values or values[res_ids[0]]
@api.autovacuum
def _gc_lost_attachments(self):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = fields.Datetime.subtract(fields.Datetime.now(), days=1)
self.env['ir.attachment'].search([
('res_model', '=', self._name),
('res_id', '=', 0),
('create_date', '<', limit_date),
('write_date', '<', limit_date)]
).unlink()
| agpl-3.0 | 5,659,500,227,603,437,000 | 53.437383 | 295 | 0.591608 | false |
voidJeff/String-Jumble | stringjumble.py | 1 | 1776 | """
stringjumble.py
Author: Jeff
Credit: Google
Assignment:
The purpose of this challenge is to gain proficiency with
manipulating lists.
Write and submit a Python program that accepts a string from
the user and prints it back in three different ways:
* With all letters in reverse.
* With words in reverse order, but letters within each word in
the correct order.
* With all words in correct order, but letters reversed within
the words.
Output of your program should look like this:
Please enter a string of text (the bigger the better): There are a few techniques or tricks that you may find handy
You entered "There are a few techniques or tricks that you may find handy". Now jumble it:
ydnah dnif yam uoy taht skcirt ro seuqinhcet wef a era erehT
handy find may you that tricks or techniques few a are There
erehT era a wef seuqinhcet ro skcirt taht uoy yam dnif ydnah
"""
text = input("Please enter a string of text (the bigger the better): ")
print('You entered "{0}". Now jumble it:'.format(text))
text_l = list(text)
tlen = len(text)
n1 = tlen
while n1 > 0:
print(text_l[n1-1], end="")
n1 -= 1
print("")
i = tlen
word = []
a = 0
text_l2 = list(text_l)
text_l2.insert(0, " ")
while i >= 0:
if text_l2[i] != " ":
word.append(text_l2[i])
a += 1
else:
while a > 0:
print(word[a-1], end="")
a -= 1
word = []
a = 0
print(" ", end="")
i -= 1
print("")
i = 0
word = []
a = 0
text_l3 = text_l
text_l3.append(" ")
while i <= tlen:
if text_l3[i] != " ":
word.append(text_l3[i])
a += 1
else:
while a > 0:
print(word[a-1], end="")
a -= 1
word = []
a = 0
print(" ", end="")
i += 1
| mit | 2,530,514,769,684,547,000 | 22.368421 | 115 | 0.606419 | false |
jqk6/wilddog-doc | deploy.py | 1 | 1145 | #!/usr/bin/env python
import pexpect
import sys
import os
import datetime
time=datetime.datetime.now().strftime('%Y%m%d%H%M%S')
localPath = os.getcwd()
pathSp = localPath.split('/')
localDir = pathSp[len(pathSp)-1]
remote = '[email protected]'
remotePath = '/data/www'
remoteDir = 'z.wilddog.com'
tgz = localDir+".tar.gz"
pexpect.run('tar -cvf %s %s '%(tgz,localDir),cwd='..')
pexpect.run('scp %s %s:%s'%(tgz,remote,remotePath),cwd="..")
#ssh
ssh = pexpect.spawn('ssh',[remote])
ssh.logfile = sys.stdout
ssh.expect('#')
# cd to /data/www
ssh.sendline("cd %s"%(remotePath))
ssh.expect('#')
#backupfile
ssh.sendline("mv %s %s"%(remoteDir,"backup/"+remoteDir+"."+time))
ssh.expect('#')
#unzip file
ssh.sendline('tar -xvf %s'%(tgz))
ssh.expect('#')
#cleanup
ssh.sendline('rm -f %s'%(tgz))
ssh.expect('#')
#replece z.wilddog.com
ssh.sendline('mv %s %s'%(localDir,remoteDir))
ssh.expect('#')
#cd to z.wilddog.com
ssh.sendline('cd %s'%(remoteDir))
ssh.expect('#')
# stop
ssh.sendline('forever stop ./bin/www')
ssh.expect('#')
# start
ssh.sendline('forever start ./bin/www')
ssh.expect('#')
# cleanup
pexpect.run('rm -f %s'%(tgz),cwd='..')
| mit | -5,607,280,941,589,783,000 | 18.40678 | 65 | 0.654148 | false |
daenamkim/ansible | lib/ansible/module_utils/network/eos/eos.py | 6 | 15689 | #
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import time
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
_DEVICE_CONNECTION = None
eos_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS'])),
'use_ssl': dict(default=True, type='bool'),
'validate_certs': dict(default=True, type='bool'),
'timeout': dict(type='int'),
'transport': dict(default='cli', choices=['cli', 'eapi'])
}
eos_argument_spec = {
'provider': dict(type='dict', options=eos_provider_spec),
}
eos_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(removed_in_version=2.9, no_log=True),
'use_ssl': dict(removed_in_version=2.9, type='bool'),
'validate_certs': dict(removed_in_version=2.9, type='bool'),
'timeout': dict(removed_in_version=2.9, type='int'),
'transport': dict(removed_in_version=2.9, choices=['cli', 'eapi'])
}
eos_argument_spec.update(eos_top_spec)
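# Illustrative usage sketch (assumption, not part of this utility module): an EOS module
# built on these helpers would typically be wired up roughly as follows:
#   from ansible.module_utils.basic import AnsibleModule
#   module = AnsibleModule(argument_spec=eos_argument_spec, supports_check_mode=True)
#   warnings = list()
#   check_args(module, warnings)
#   responses = run_commands(module, ['show version'])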
def get_provider_argspec():
return eos_provider_spec
def check_args(module, warnings):
pass
def load_params(module):
provider = module.params.get('provider') or dict()
for key, value in iteritems(provider):
if key in eos_argument_spec:
if module.params.get(key) is None and value is not None:
module.params[key] = value
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
load_params(module)
if is_eapi(module):
conn = Eapi(module)
else:
conn = Cli(module)
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._session_support = None
@property
def supports_sessions(self):
if self._session_support is not None:
return self._session_support
rc, out, err = self.exec_command('show configuration sessions')
self._session_support = rc == 0
return self._session_support
def exec_command(self, command):
if isinstance(command, dict):
command = self._module.jsonify(command)
return exec_command(self._module, command)
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
conn = get_connection(self)
rc, out, err = self.exec_command(cmd)
out = to_text(out, errors='surrogate_then_replace')
if rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
cfg = str(out).strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return results
"""
responses = list()
for cmd in to_list(commands):
rc, out, err = self.exec_command(cmd)
out = to_text(out, errors='surrogate_then_replace')
if check_rc and rc != 0:
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
try:
out = self._module.from_json(out)
except ValueError:
out = str(out).strip()
responses.append(out)
return responses
def send_config(self, commands):
multiline = False
rc = 0
for command in to_list(commands):
if command == 'end':
continue
if command.startswith('banner') or multiline:
multiline = True
command = self._module.jsonify({'command': command, 'sendonly': True})
elif command == 'EOF' and multiline:
multiline = False
rc, out, err = self.exec_command(command)
if rc != 0:
return (rc, out, to_text(err, errors='surrogate_then_replace'))
return (rc, 'ok', '')
def configure(self, commands):
"""Sends configuration commands to the remote device
"""
conn = get_connection(self)
rc, out, err = self.exec_command('configure')
if rc != 0:
self._module.fail_json(msg='unable to enter configuration mode', output=to_text(err, errors='surrogate_then_replace'))
rc, out, err = self.send_config(commands)
if rc != 0:
self.exec_command('abort')
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
self.exec_command('end')
return {}
def load_config(self, commands, commit=False, replace=False):
"""Loads the config commands onto the remote device
"""
use_session = os.getenv('ANSIBLE_EOS_USE_SESSIONS', True)
try:
use_session = int(use_session)
except ValueError:
pass
if not all((bool(use_session), self.supports_sessions)):
            return self.configure(commands)
conn = get_connection(self)
session = 'ansible_%s' % int(time.time())
result = {'session': session}
rc, out, err = self.exec_command('configure session %s' % session)
if rc != 0:
self._module.fail_json(msg='unable to enter configuration mode', output=to_text(err, errors='surrogate_then_replace'))
if replace:
self.exec_command('rollback clean-config')
rc, out, err = self.send_config(commands)
if rc != 0:
self.exec_command('abort')
self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), commands=commands)
rc, out, err = self.exec_command('show session-config diffs')
if rc == 0 and out:
result['diff'] = to_text(out, errors='surrogate_then_replace').strip()
if commit:
self.exec_command('commit')
else:
self.exec_command('abort')
return result
class Eapi:
def __init__(self, module):
self._module = module
self._enable = None
self._session_support = None
self._device_configs = {}
host = module.params['provider']['host']
port = module.params['provider']['port']
self._module.params['url_username'] = self._module.params['username']
self._module.params['url_password'] = self._module.params['password']
if module.params['provider']['use_ssl']:
proto = 'https'
else:
proto = 'http'
module.params['validate_certs'] = module.params['provider']['validate_certs']
self._url = '%s://%s:%s/command-api' % (proto, host, port)
if module.params['auth_pass']:
self._enable = {'cmd': 'enable', 'input': module.params['auth_pass']}
else:
self._enable = 'enable'
@property
def supports_sessions(self):
if self._session_support:
return self._session_support
response = self.send_request(['show configuration sessions'])
self._session_support = 'error' not in response
return self._session_support
def _request_builder(self, commands, output, reqid=None):
params = dict(version=1, cmds=commands, format=output)
return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
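    # For reference (editorial comment): the request built above has the JSON-RPC shape
    #   {"jsonrpc": "2.0", "id": None, "method": "runCmds",
    #    "params": {"version": 1, "cmds": ["show version"], "format": "text"}}
    # which is what the eAPI endpoint at /command-api expects.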
def send_request(self, commands, output='text'):
commands = to_list(commands)
if self._enable:
commands.insert(0, self._enable)
body = self._request_builder(commands, output)
data = self._module.jsonify(body)
headers = {'Content-Type': 'application/json-rpc'}
timeout = self._module.params['timeout']
response, headers = fetch_url(
self._module, self._url, data=data, headers=headers,
method='POST', timeout=timeout
)
if headers['status'] != 200:
self._module.fail_json(**headers)
try:
data = response.read()
response = self._module.from_json(to_text(data, errors='surrogate_then_replace'))
except ValueError:
self._module.fail_json(msg='unable to load response from device', data=data)
if self._enable and 'result' in response:
response['result'].pop(0)
return response
def run_commands(self, commands):
"""Runs list of commands on remote device and returns results
"""
output = None
queue = list()
responses = list()
def _send(commands, output):
response = self.send_request(commands, output=output)
if 'error' in response:
err = response['error']
self._module.fail_json(msg=err['message'], code=err['code'])
return response['result']
for item in to_list(commands):
if is_json(item['command']):
item['command'] = str(item['command']).replace('| json', '')
item['output'] = 'json'
if output and output != item['output']:
responses.extend(_send(queue, output))
queue = list()
output = item['output'] or 'json'
queue.append(item['command'])
if queue:
responses.extend(_send(queue, output))
for index, item in enumerate(commands):
try:
responses[index] = responses[index]['output'].strip()
except KeyError:
pass
return responses
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
out = self.send_request(cmd)
cfg = str(out['result'][0]['output']).strip()
self._device_configs[cmd] = cfg
return cfg
def configure(self, commands):
"""Sends the ordered set of commands to the device
"""
cmds = ['configure terminal']
cmds.extend(commands)
        responses = self.send_request(cmds)
if 'error' in responses:
err = responses['error']
self._module.fail_json(msg=err['message'], code=err['code'])
        return responses['result'][1:]
def load_config(self, config, commit=False, replace=False):
"""Loads the configuration onto the remote devices
If the device doesn't support configuration sessions, this will
        fall back to using configure() to load the commands. If that happens,
there will be no returned diff or session values
"""
if not self.supports_sessions:
            return self.configure(config)
session = 'ansible_%s' % int(time.time())
result = {'session': session}
commands = ['configure session %s' % session]
if replace:
commands.append('rollback clean-config')
commands.extend(config)
response = self.send_request(commands)
if 'error' in response:
commands = ['configure session %s' % session, 'abort']
self.send_request(commands)
err = response['error']
self._module.fail_json(msg=err['message'], code=err['code'])
commands = ['configure session %s' % session, 'show session-config diffs']
if commit:
commands.append('commit')
else:
commands.append('abort')
response = self.send_request(commands, output='text')
diff = response['result'][1]['output']
if len(diff) > 0:
result['diff'] = diff
return result
def is_json(cmd):
return to_native(cmd, errors='surrogate_then_replace').endswith('| json')
def is_eapi(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
return 'eapi' in (transport, provider_transport)
def to_command(module, commands):
if is_eapi(module):
default_output = 'json'
else:
default_output = 'text'
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default=default_output),
prompt=dict(),
answer=dict()
), module)
return transform(to_list(commands))
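# Editorial illustration (assumption): given a plain string such as 'show version',
# to_command() above yields an entry of the form
#   {'command': 'show version', 'output': 'text', 'prompt': None, 'answer': None}
# with output defaulting to 'json' when the eapi transport is in use.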
def get_config(module, flags=None):
    flags = [] if flags is None else flags
conn = get_connection(module)
return conn.get_config(flags)
def run_commands(module, commands):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands))
def load_config(module, config, commit=False, replace=False):
conn = get_connection(module)
return conn.load_config(config, commit, replace)
| gpl-3.0 | 6,160,052,965,808,525,000 | 32.958874 | 130 | 0.607942 | false |
MissionCriticalCloud/marvin | marvin/cloudstackAPI/updateVPC.py | 1 | 16897 | """Updates a VPC"""
from baseCmd import *
from baseResponse import *
class updateVPCCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""the id of the VPC"""
"""Required"""
self.id = None
self.typeInfo['id'] = 'uuid'
"""an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"""
self.customid = None
self.typeInfo['customid'] = 'string'
"""the display text of the VPC"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""an optional field, whether to the display the vpc to the end user or not"""
self.fordisplay = None
self.typeInfo['fordisplay'] = 'boolean'
"""the name of the VPC"""
self.name = None
self.typeInfo['name'] = 'string'
"""The new VPC offering ID to switch to. This will result in a restart+cleanup of the VPC"""
self.vpcofferingid = None
self.typeInfo['vpcofferingid'] = 'uuid'
self.required = ["id", ]
class updateVPCResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the id of the VPC"""
self.id = None
self.typeInfo['id'] = 'string'
"""the owner of the VPC"""
self.account = None
self.typeInfo['account'] = 'string'
"""the cidr the VPC"""
self.cidr = None
self.typeInfo['cidr'] = 'string'
"""the date this VPC was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""an alternate display text of the VPC."""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""is VPC uses distributed router for one hop forwarding and host based network ACL's"""
self.distributedvpcrouter = None
self.typeInfo['distributedvpcrouter'] = 'boolean'
"""the domain name of the owner"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the domain id of the VPC owner"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""is vpc for display to the regular user"""
self.fordisplay = None
self.typeInfo['fordisplay'] = 'boolean'
"""the name of the VPC"""
self.name = None
self.typeInfo['name'] = 'string'
"""the network domain of the VPC"""
self.networkdomain = None
self.typeInfo['networkdomain'] = 'string'
"""the project name of the VPC"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the VPC"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""if this VPC has redundant router"""
self.redundantvpcrouter = None
self.typeInfo['redundantvpcrouter'] = 'boolean'
"""true if VPC is region level"""
self.regionlevelvpc = None
self.typeInfo['regionlevelvpc'] = 'boolean'
"""true VPC requires restart"""
self.restartrequired = None
self.typeInfo['restartrequired'] = 'boolean'
"""state of the VPC. Can be Inactive/Enabled"""
self.state = None
self.typeInfo['state'] = 'string'
"""display text of the vpc offering the vpc is created from"""
self.vpcofferingdisplaytext = None
self.typeInfo['vpcofferingdisplaytext'] = 'string'
"""vpc offering id the VPC is created from"""
self.vpcofferingid = None
self.typeInfo['vpcofferingid'] = 'string'
"""name of the vpc offering the vpc is created from"""
self.vpcofferingname = None
self.typeInfo['vpcofferingname'] = 'string'
"""zone id of the vpc"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the zone the VPC belongs to"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""the list of networks belongign to the VPC"""
self.network = []
"""the list of supported services"""
self.service = []
"""the list of resource tags associated with the project"""
self.tags = []
class capability:
def __init__(self):
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
class provider:
def __init__(self):
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class service:
def __init__(self):
""""the service name"""
self.name = None
""""the list of capabilities"""
self.capability = []
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
""""the service provider name"""
self.provider = []
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class capability:
def __init__(self):
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
class provider:
def __init__(self):
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class network:
def __init__(self):
""""the id of the network"""
self.id = None
""""the owner of the network"""
self.account = None
""""ACL Id associated with the VPC network"""
self.aclid = None
""""acl type - access type to the network"""
self.acltype = None
""""Broadcast domain type of the network"""
self.broadcastdomaintype = None
""""broadcast uri of the network. This parameter is visible to ROOT admins only"""
self.broadcasturi = None
""""list networks available for vm deployment"""
self.canusefordeploy = None
""""Cloudstack managed address space, all CloudStack managed VMs get IP address from CIDR"""
self.cidr = None
""""an optional field, whether to the display the network to the end user or not."""
self.displaynetwork = None
""""the displaytext of the network"""
self.displaytext = None
""""the first DNS for the network"""
self.dns1 = None
""""the second DNS for the network"""
self.dns2 = None
""""the domain name of the network owner"""
self.domain = None
""""the domain id of the network owner"""
self.domainid = None
""""the network's gateway"""
self.gateway = None
""""the cidr of IPv6 network"""
self.ip6cidr = None
""""the gateway of IPv6 network"""
self.ip6gateway = None
""""list of ip addresses and/or ranges of addresses to be excluded from the network for assignment"""
self.ipexclusionlist = None
""""true if network is default, false otherwise"""
self.isdefault = None
""""list networks that are persistent"""
self.ispersistent = None
""""true if network is system, false otherwise"""
self.issystem = None
""""the name of the network"""
self.name = None
""""the network's netmask"""
self.netmask = None
""""the network CIDR of the guest network configured with IP reservation. It is the summation of CIDR and RESERVED_IP_RANGE"""
self.networkcidr = None
""""the network domain"""
self.networkdomain = None
""""availability of the network offering the network is created from"""
self.networkofferingavailability = None
""""true if network offering is ip conserve mode enabled"""
self.networkofferingconservemode = None
""""display text of the network offering the network is created from"""
self.networkofferingdisplaytext = None
""""network offering id the network is created from"""
self.networkofferingid = None
""""name of the network offering the network is created from"""
self.networkofferingname = None
""""the physical network id"""
self.physicalnetworkid = None
""""the project name of the address"""
self.project = None
""""the project id of the ipaddress"""
self.projectid = None
""""related to what other network configuration"""
self.related = None
""""the network's IP range not to be used by CloudStack guest VMs and can be used for non CloudStack purposes"""
self.reservediprange = None
""""true network requires restart"""
self.restartrequired = None
""""true if network supports specifying ip ranges, false otherwise"""
self.specifyipranges = None
""""state of the network"""
self.state = None
""""true if network can span multiple zones"""
self.strechedl2subnet = None
""""true if users from subdomains can access the domain level network"""
self.subdomainaccess = None
""""the traffic type of the network"""
self.traffictype = None
""""the type of the network"""
self.type = None
""""The vlan of the network. This parameter is visible to ROOT admins only"""
self.vlan = None
""""VPC the network belongs to"""
self.vpcid = None
""""zone id of the network"""
self.zoneid = None
""""the name of the zone the network belongs to"""
self.zonename = None
""""If a network is enabled for 'streched l2 subnet' then represents zones on which network currently spans"""
self.zonesnetworkspans = None
""""the list of services"""
self.service = []
""""the service name"""
self.name = None
""""the list of capabilities"""
self.capability = []
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
""""the service provider name"""
self.provider = []
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
""""the list of resource tags associated with network"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class capability:
def __init__(self):
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
class provider:
def __init__(self):
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class service:
def __init__(self):
""""the service name"""
self.name = None
""""the list of capabilities"""
self.capability = []
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
""""the service provider name"""
self.provider = []
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
| apache-2.0 | 6,767,991,617,231,225,000 | 37.577626 | 134 | 0.585074 | false |
276361270/sqlalchemy | test/orm/test_onetoone.py | 29 | 2244 | import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session
from sqlalchemy.testing import fixtures
class O2OTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('jack', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('number', String(50)),
Column('status', String(20)),
Column('subroom', String(5)))
Table('port', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(30)),
Column('description', String(100)),
Column('jack_id', Integer, ForeignKey("jack.id")))
@classmethod
def setup_mappers(cls):
class Jack(cls.Basic):
pass
class Port(cls.Basic):
pass
def test_basic(self):
Port, port, jack, Jack = (self.classes.Port,
self.tables.port,
self.tables.jack,
self.classes.Jack)
mapper(Port, port)
mapper(Jack, jack,
order_by=[jack.c.number],
properties=dict(
port=relationship(Port, backref='jack',
uselist=False,
)),
)
session = create_session()
j = Jack(number='101')
session.add(j)
p = Port(name='fa0/1')
session.add(p)
        j.port = p
        session.flush()
        jid = j.id
        pid = p.id
        j = session.query(Jack).get(jid)
        p = session.query(Port).get(pid)
assert p.jack is not None
assert p.jack is j
assert j.port is not None
p.jack = None
assert j.port is None
session.expunge_all()
j = session.query(Jack).get(jid)
p = session.query(Port).get(pid)
        j.port = None
self.assert_(p.jack is None)
session.flush()
session.delete(j)
session.flush()
| mit | -2,577,341,153,112,151,000 | 27.769231 | 85 | 0.53164 | false |
TangXT/GreatCatMOOC | lms/djangoapps/instructor/tests/test_access.py | 5 | 6354 | """
Test instructor.access
"""
from nose.tools import raises
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from django.test.utils import override_settings
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from courseware.roles import CourseBetaTesterRole, CourseStaffRole
from django_comment_common.models import (Role,
FORUM_ROLE_MODERATOR)
from instructor.access import (allow_access,
revoke_access,
list_with_level,
update_forum_role)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAccessList(ModuleStoreTestCase):
""" Test access listings. """
def setUp(self):
self.course = CourseFactory.create()
self.instructors = [UserFactory.create() for _ in xrange(4)]
for user in self.instructors:
allow_access(self.course, user, 'instructor')
self.beta_testers = [UserFactory.create() for _ in xrange(4)]
for user in self.beta_testers:
allow_access(self.course, user, 'beta')
def test_list_instructors(self):
instructors = list_with_level(self.course, 'instructor')
self.assertEqual(set(instructors), set(self.instructors))
def test_list_beta(self):
beta_testers = list_with_level(self.course, 'beta')
self.assertEqual(set(beta_testers), set(self.beta_testers))
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAccessAllow(ModuleStoreTestCase):
""" Test access allow. """
def setUp(self):
self.course = CourseFactory.create()
def test_allow(self):
user = UserFactory()
allow_access(self.course, user, 'staff')
self.assertTrue(CourseStaffRole(self.course.location).has_user(user))
def test_allow_twice(self):
user = UserFactory()
allow_access(self.course, user, 'staff')
allow_access(self.course, user, 'staff')
self.assertTrue(CourseStaffRole(self.course.location).has_user(user))
def test_allow_beta(self):
""" Test allow beta against list beta. """
user = UserFactory()
allow_access(self.course, user, 'beta')
self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(user))
@raises(ValueError)
def test_allow_badlevel(self):
user = UserFactory()
allow_access(self.course, user, 'robot-not-a-level')
@raises(Exception)
def test_allow_noneuser(self):
user = None
allow_access(self.course, user, 'staff')
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAccessRevoke(ModuleStoreTestCase):
""" Test access revoke. """
def setUp(self):
self.course = CourseFactory.create()
self.staff = [UserFactory.create() for _ in xrange(4)]
for user in self.staff:
allow_access(self.course, user, 'staff')
self.beta_testers = [UserFactory.create() for _ in xrange(4)]
for user in self.beta_testers:
allow_access(self.course, user, 'beta')
def test_revoke(self):
user = self.staff[0]
revoke_access(self.course, user, 'staff')
self.assertFalse(CourseStaffRole(self.course.location).has_user(user))
    def test_revoke_twice(self):
        user = self.staff[0]
        revoke_access(self.course, user, 'staff')
        self.assertFalse(CourseStaffRole(self.course.location).has_user(user))
        revoke_access(self.course, user, 'staff')
        self.assertFalse(CourseStaffRole(self.course.location).has_user(user))
def test_revoke_beta(self):
user = self.beta_testers[0]
revoke_access(self.course, user, 'beta')
self.assertFalse(CourseBetaTesterRole(self.course.location).has_user(user))
@raises(ValueError)
def test_revoke_badrolename(self):
user = UserFactory()
revoke_access(self.course, user, 'robot-not-a-level')
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAccessForum(ModuleStoreTestCase):
"""
Test forum access control.
"""
def setUp(self):
self.course = CourseFactory.create()
self.mod_role = Role.objects.create(
course_id=self.course.id,
name=FORUM_ROLE_MODERATOR
)
self.moderators = [UserFactory.create() for _ in xrange(4)]
for user in self.moderators:
self.mod_role.users.add(user)
def test_allow(self):
user = UserFactory.create()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow')
self.assertIn(user, self.mod_role.users.all())
def test_allow_twice(self):
user = UserFactory.create()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow')
self.assertIn(user, self.mod_role.users.all())
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow')
self.assertIn(user, self.mod_role.users.all())
@raises(Role.DoesNotExist)
def test_allow_badrole(self):
user = UserFactory.create()
update_forum_role(self.course.id, user, 'robot-not-a-real-role', 'allow')
def test_revoke(self):
user = self.moderators[0]
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
def test_revoke_twice(self):
user = self.moderators[0]
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
def test_revoke_notallowed(self):
user = UserFactory()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
@raises(Role.DoesNotExist)
def test_revoke_badrole(self):
user = self.moderators[0]
update_forum_role(self.course.id, user, 'robot-not-a-real-role', 'allow')
@raises(ValueError)
def test_bad_mode(self):
user = UserFactory()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'robot-not-a-mode')
| agpl-3.0 | -771,990,380,837,534,100 | 36.376471 | 89 | 0.656122 | false |
slyphon/pants | src/python/pants/goal/aggregated_timings.py | 15 | 1737 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from pants.util.dirutil import safe_mkdir_for
class AggregatedTimings(object):
"""Aggregates timings over multiple invocations of 'similar' work.
If filepath is not none, stores the timings in that file. Useful for finding bottlenecks."""
def __init__(self, path=None):
# Map path -> timing in seconds (a float)
self._timings_by_path = defaultdict(float)
self._tool_labels = set()
self._path = path
safe_mkdir_for(self._path)
def add_timing(self, label, secs, is_tool=False):
"""Aggregate timings by label.
secs - a double, so fractional seconds are allowed.
is_tool - whether this label represents a tool invocation.
"""
self._timings_by_path[label] += secs
if is_tool:
self._tool_labels.add(label)
# Check existence in case we're a clean-all. We don't want to write anything in that case.
if self._path and os.path.exists(os.path.dirname(self._path)):
with open(self._path, 'w') as f:
for x in self.get_all():
f.write('{label}: {timing}\n'.format(**x))
def get_all(self):
"""Returns all the timings, sorted in decreasing order.
    Each value is a dict: { label: <label>, timing: <timing in seconds>, is_tool: <whether it is a tool invocation> }
"""
return [{'label': x[0], 'timing': x[1], 'is_tool': x[0] in self._tool_labels}
for x in sorted(self._timings_by_path.items(), key=lambda x: x[1], reverse=True)]
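# Illustrative usage sketch (editorial assumption, not part of the original module):
#   timings = AggregatedTimings(path='/tmp/pants-timings.txt')
#   timings.add_timing('resolve', 1.5)
#   timings.add_timing('compile', 3.25, is_tool=True)
#   timings.get_all()  # -> [{'label': 'compile', 'timing': 3.25, 'is_tool': True}, ...]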
| apache-2.0 | -3,060,271,103,790,619,000 | 35.957447 | 94 | 0.663788 | false |
sudheesh001/oh-mainline | mysite/base/feeds.py | 15 | 1473 | # This file is part of OpenHatch.
# Copyright (C) 2011 Jack Grigg
# Copyright (C) 2011 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.syndication.views import Feed, FeedDoesNotExist
from django.core.exceptions import ObjectDoesNotExist
from django.utils.feedgenerator import Atom1Feed
from mysite.profile.models import Person
from mysite.search.models import Answer, WannaHelperNote
class RecentActivityFeed(Feed):
feed_type = Atom1Feed
title = "Recent activity"
subtitle = "Recent activity on OpenHatch"
link = "/"
def items(self):
feed_items = list(Answer.objects.order_by('-modified_date')[:15])
feed_items.extend(
WannaHelperNote.objects.order_by('-modified_date')[:15])
feed_items.sort(key=lambda x: x.modified_date, reverse=True)
return feed_items[:15]
| agpl-3.0 | 2,729,559,182,556,996,600 | 39.916667 | 77 | 0.742023 | false |
corona10/Simple-MiniC-Compiler | lib/llvm-3.5.0.src/test/CodeGen/SystemZ/Large/branch-range-12.py | 13 | 4106 | # Test 64-bit COMPARE LOGICAL IMMEDIATE AND BRANCH in cases where the sheer
# number of instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffb4 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 18 bytes if it uses a short
# branch and 24 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x4c - 6) / 18 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x4c / 18 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CLGIJL instead...
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 56, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 101, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 102, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 103, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffb4
print 'define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%da = load i64 *%%stopa' % i
print ' %%bcur%db = load i64 *%%stopb' % i
print ' %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i)
print ' %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%da = load i64 *%%stopa' % i
print ' %%acur%db = load i64 *%%stopb' % i
print ' %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i)
print ' %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
| mit | 4,197,643,926,326,120,400 | 31.330709 | 75 | 0.56113 | false |
adammaikai/OmicsPipe2.0 | omics_pipe/modules/htseq_gencode.py | 2 | 1379 | #!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def htseq_gencode(sample, htseq_flag):
'''Runs htseq-count to get raw count data from alignments.
input:
Aligned.out.sort.bam
output:
counts.txt
citation:
Simon Anders, EMBL
link:
http://www-huber.embl.de/users/anders/HTSeq/doc/overview.html
parameters from parameters file:
STAR_RESULTS:
HTSEQ_OPTIONS:
REF_GENES_GENCODE:
HTSEQ_GENCODE_RESULTS:
TEMP_DIR:
SAMTOOLS_VERSION:
BAM_FILE_NAME:
'''
spawn_job(jobname = 'htseq_gencode', SAMPLE = sample, LOG_PATH = p.LOG_PATH, RESULTS_EMAIL = p.RESULTS_EMAIL, SCHEDULER = p.SCHEDULER, walltime = "120:00:00", queue = p.QUEUE, nodes = 1, ppn = 8, memory = "15gb", script = "/htseq_drmaa.sh", args_list = [sample,p.STAR_RESULTS,p.HTSEQ_OPTIONS,p.REF_GENES_GENCODE,p.HTSEQ_GENCODE_RESULTS,p.TEMP_DIR,p.SAMTOOLS_VERSION, p.BAM_FILE_NAME])
job_status(jobname = 'htseq_gencode', resultspath = p.HTSEQ_GENCODE_RESULTS, SAMPLE = sample, outputfilename = sample + "_counts.txt", FLAG_PATH = p.FLAG_PATH)
return
if __name__ == '__main__':
htseq_gencode(sample, htseq_flag)
    sys.exit(0)
| mit | 4,979,695,044,538,874,000 | 34.384615 | 388 | 0.632342 | false |
bblay/iris | lib/iris/tests/unit/util/test_new_axis.py | 2 | 5122 | # (C) British Crown Copyright 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Test function :func:`iris.util.new_axis`."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import copy
import numpy as np
import unittest
import iris
from iris.util import new_axis
class Test(tests.IrisTest):
def setUp(self):
self.data = np.array([[1, 2], [1, 2]])
self.cube = iris.cube.Cube(self.data)
lat = iris.coords.DimCoord([1, 2], standard_name='latitude')
lon = iris.coords.DimCoord([1, 2], standard_name='longitude')
time = iris.coords.DimCoord([1], standard_name='time')
wibble = iris.coords.AuxCoord([1], long_name='wibble')
self.cube.add_dim_coord(lat, 0)
self.cube.add_dim_coord(lon, 1)
self.cube.add_aux_coord(time, None)
self.cube.add_aux_coord(wibble, None)
self.coords = {'lat': lat, 'lon': lon, 'time': time, 'wibble': wibble}
def _assert_cube_notis(self, cube_a, cube_b):
for coord_a, coord_b in zip(cube_a.coords(), cube_b.coords()):
self.assertIsNot(coord_a, coord_b)
self.assertIsNot(cube_a.metadata, cube_b.metadata)
for factory_a, factory_b in zip(
cube_a.aux_factories, cube_b.aux_factories):
self.assertIsNot(factory_a, factory_b)
def test_no_coord(self):
# Providing no coordinate to promote.
res = new_axis(self.cube)
com = iris.cube.Cube(self.data[None])
com.add_dim_coord(self.coords['lat'].copy(), 1)
com.add_dim_coord(self.coords['lon'].copy(), 2)
com.add_aux_coord(self.coords['time'].copy(), None)
com.add_aux_coord(self.coords['wibble'].copy(), None)
self.assertEqual(res, com)
self._assert_cube_notis(res, self.cube)
def test_scalar_dimcoord(self):
# Providing a scalar coordinate to promote.
res = new_axis(self.cube, 'time')
com = iris.cube.Cube(self.data[None])
com.add_dim_coord(self.coords['lat'].copy(), 1)
com.add_dim_coord(self.coords['lon'].copy(), 2)
com.add_aux_coord(self.coords['time'].copy(), 0)
com.add_aux_coord(self.coords['wibble'].copy(), None)
self.assertEqual(res, com)
self._assert_cube_notis(res, self.cube)
def test_scalar_auxcoord(self):
# Providing a scalar coordinate to promote.
res = new_axis(self.cube, 'wibble')
com = iris.cube.Cube(self.data[None])
com.add_dim_coord(self.coords['lat'].copy(), 1)
com.add_dim_coord(self.coords['lon'].copy(), 2)
com.add_aux_coord(self.coords['time'].copy(), None)
com.add_aux_coord(self.coords['wibble'].copy(), 0)
self.assertEqual(res, com)
self._assert_cube_notis(res, self.cube)
def test_maint_factory(self):
# Ensure that aux factory persists.
data = np.arange(12, dtype='i8').reshape((3, 4))
orography = iris.coords.AuxCoord(
[10, 25, 50, 5], standard_name='surface_altitude', units='m')
model_level = iris.coords.AuxCoord(
[2, 1, 0], standard_name='model_level_number')
level_height = iris.coords.DimCoord(
[100, 50, 10], long_name='level_height', units='m',
attributes={'positive': 'up'},
bounds=[[150, 75], [75, 20], [20, 0]])
sigma = iris.coords.AuxCoord(
[0.8, 0.9, 0.95], long_name='sigma',
bounds=[[0.7, 0.85], [0.85, 0.97], [0.97, 1.0]])
hybrid_height = iris.aux_factory.HybridHeightFactory(
level_height, sigma, orography)
cube = iris.cube.Cube(
data, standard_name='air_temperature', units='K',
dim_coords_and_dims=[(level_height, 0)],
aux_coords_and_dims=[(orography, 1), (model_level, 0), (sigma, 0)],
aux_factories=[hybrid_height])
com = iris.cube.Cube(
data[None], standard_name='air_temperature', units='K',
dim_coords_and_dims=[(copy.copy(level_height), 1)],
aux_coords_and_dims=[(copy.copy(orography), 2),
(copy.copy(model_level), 1),
(copy.copy(sigma), 1)],
aux_factories=[copy.copy(hybrid_height)])
res = new_axis(cube)
self.assertEqual(res, com)
self._assert_cube_notis(res, cube)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,208,233,724,755,391,000 | 36.661765 | 79 | 0.605232 | false |
bardin-lab/readtagger | readtagger/cluster_base.py | 1 | 1991 | import logging
import os
from itertools import chain
from cached_property import cached_property
from .gff_io import (
sort_gff,
write_gff_cluster
)
from .vcf_io import write_vcf
logger = logging.getLogger(__name__)
class SampleNameMixin(object):
"""Provide a sample name property."""
@cached_property
def sample_name(self):
"""Return sample name if passed in manually, else guess sample name from input file."""
if not self._sample_name:
basename = os.path.basename(self.input_path)
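            # e.g. a hypothetical '/data/run1/sample_A.bam' would become
            # 'sample_A' after the extension split below.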
if '.' in basename:
basename = basename.rsplit('.', 1)[0]
return basename
else:
return self._sample_name
class ToOutput(object):
"""Provides logic for writing clusters and softclip clusters."""
    def to_output(self, output_path, write_func, sort_func):
        """Write clusters to output_path using the given write and sort functions."""
        logger.info("Writing clusters (%s)", self.region or 0)
if output_path:
if hasattr(self, 'softclip_finder'):
clusters = chain(self.clusters, self.softclip_finder.clusters)
else:
clusters = self.clusters
write_func(clusters=clusters,
header=self.header,
output_path=output_path,
sample_name=self.sample_name,
threads=self.threads)
if self.threads < 2:
sort_func(input_path=output_path, output_path=output_path)
class ToGffMixin(ToOutput):
"""Provide a `to_gff` function."""
def to_gff(self, output_path):
"""Write clusters as GFF file."""
self.to_output(output_path, write_func=write_gff_cluster, sort_func=sort_gff)
class ToVcfMixin(ToOutput):
"""Provide a `to_vcf` function."""
def to_vcf(self, output_path):
"""Write clusters as VCF file."""
self.to_output(output_path=output_path, write_func=write_vcf, sort_func=sort_gff)
| mit | -6,055,043,630,086,990,000 | 30.603175 | 95 | 0.603717 | false |
schlichtanders/pyparsing-2.0.3-OrderedDict | examples/test_bibparse.py | 1 | 7869 | """ Test for bibparse grammar """
from os.path import join as pjoin, dirname
from pyparsingOD import ParseException
from btpyparse import Macro
import btpyparse as bp
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
def test_names():
# check various types of names
# All names can contains alphas, but not some special chars
bad_chars = '"#%\'(),={}'
for name_type, dig1f in ((bp.macro_def, False),
(bp.field_name, False),
(bp.entry_type, False),
(bp.cite_key, True)):
if dig1f: # can start with digit
assert_equal(name_type.parseString('2t')[0], '2t')
else:
assert_raises(ParseException, name_type.parseString, '2t')
# All of the names cannot contain some characters
for char in bad_chars:
assert_raises(ParseException, name_type.parseString, char)
# standard strings all OK
assert_equal(name_type.parseString('simple_test')[0], 'simple_test')
# Test macro ref
mr = bp.macro_ref
# can't start with digit
assert_raises(ParseException, mr.parseString, '2t')
for char in bad_chars:
assert_raises(ParseException, mr.parseString, char)
assert_equal(mr.parseString('simple_test')[0].name, 'simple_test')
def test_numbers():
assert_equal(bp.number.parseString('1066')[0], '1066')
assert_equal(bp.number.parseString('0')[0], '0')
assert_raises(ParseException, bp.number.parseString, '-4')
assert_raises(ParseException, bp.number.parseString, '+4')
assert_raises(ParseException, bp.number.parseString, '.4')
# something point something leaves a trailing .4 unmatched
assert_equal(bp.number.parseString('0.4')[0], '0')
def test_parse_string():
# test string building blocks
assert_equal(bp.chars_no_quotecurly.parseString('x')[0], 'x')
assert_equal(bp.chars_no_quotecurly.parseString("a string")[0], 'a string')
assert_equal(bp.chars_no_quotecurly.parseString('a "string')[0], 'a ')
assert_equal(bp.chars_no_curly.parseString('x')[0], 'x')
assert_equal(bp.chars_no_curly.parseString("a string")[0], 'a string')
assert_equal(bp.chars_no_curly.parseString('a {string')[0], 'a ')
assert_equal(bp.chars_no_curly.parseString('a }string')[0], 'a ')
# test more general strings together
for obj in (bp.curly_string, bp.string, bp.field_value):
assert_equal(obj.parseString('{}').asList(), [])
assert_equal(obj.parseString('{a "string}')[0], 'a "string')
assert_equal(obj.parseString('{a {nested} string}').asList(),
['a ', ['nested'], ' string'])
assert_equal(obj.parseString('{a {double {nested}} string}').asList(),
['a ', ['double ', ['nested']], ' string'])
for obj in (bp.quoted_string, bp.string, bp.field_value):
assert_equal(obj.parseString('""').asList(), [])
assert_equal(obj.parseString('"a string"')[0], 'a string')
assert_equal(obj.parseString('"a {nested} string"').asList(),
['a ', ['nested'], ' string'])
assert_equal(obj.parseString('"a {double {nested}} string"').asList(),
['a ', ['double ', ['nested']], ' string'])
# check macro def in string
assert_equal(bp.string.parseString('someascii')[0], Macro('someascii'))
assert_raises(ParseException, bp.string.parseString, '%#= validstring')
# check number in string
assert_equal(bp.string.parseString('1994')[0], '1994')
def test_parse_field():
# test field value - hashes included
fv = bp.field_value
# Macro
assert_equal(fv.parseString('aname')[0], Macro('aname'))
assert_equal(fv.parseString('ANAME')[0], Macro('aname'))
# String and macro
assert_equal(fv.parseString('aname # "some string"').asList(),
[Macro('aname'), 'some string'])
# Nested string
assert_equal(fv.parseString('aname # {some {string}}').asList(),
[Macro('aname'), 'some ', ['string']])
# String and number
assert_equal(fv.parseString('"a string" # 1994').asList(),
['a string', '1994'])
# String and number and macro
assert_equal(fv.parseString('"a string" # 1994 # a_macro').asList(),
['a string', '1994', Macro('a_macro')])
def test_comments():
res = bp.comment.parseString('@Comment{about something}')
assert_equal(res.asList(), ['comment', '{about something}'])
assert_equal(
bp.comment.parseString('@COMMENT{about something').asList(),
['comment', '{about something'])
assert_equal(
bp.comment.parseString('@comment(about something').asList(),
['comment', '(about something'])
assert_equal(
bp.comment.parseString('@COMment about something').asList(),
['comment', ' about something'])
assert_raises(ParseException, bp.comment.parseString,
'@commentabout something')
assert_raises(ParseException, bp.comment.parseString,
'@comment+about something')
assert_raises(ParseException, bp.comment.parseString,
'@comment"about something')
def test_preamble():
res = bp.preamble.parseString('@preamble{"about something"}')
assert_equal(res.asList(), ['preamble', 'about something'])
assert_equal(bp.preamble.parseString(
'@PREamble{{about something}}').asList(),
['preamble', 'about something'])
assert_equal(bp.preamble.parseString("""@PREamble{
{about something}
}""").asList(),
['preamble', 'about something'])
def test_macro():
res = bp.macro.parseString('@string{ANAME = "about something"}')
assert_equal(res.asList(), ['string', 'aname', 'about something'])
assert_equal(
bp.macro.parseString('@string{aname = {about something}}').asList(),
['string', 'aname', 'about something'])
def test_entry():
txt = """@some_entry{akey, aname = "about something",
another={something else}}"""
res = bp.entry.parseString(txt)
assert_equal(res.asList(),
['some_entry', 'akey',
['aname', 'about something'], ['another', 'something else']])
# Case conversion
txt = """@SOME_ENTRY{akey, ANAME = "about something",
another={something else}}"""
res = bp.entry.parseString(txt)
assert_equal(res.asList(),
['some_entry', 'akey',
['aname', 'about something'], ['another', 'something else']])
def test_bibfile():
txt = """@some_entry{akey, aname = "about something",
another={something else}}"""
res = bp.bibfile.parseString(txt)
assert_equal(res.asList(),
[['some_entry', 'akey',
['aname', 'about something'],
['another', 'something else']]])
def test_bib1():
# First pass whole bib-like tests
txt = """
Some introductory text
(implicit comment)
@ARTICLE{Brett2002marsbar,
author = {Matthew Brett and Jean-Luc Anton and Romain Valabregue and Jean-Baptise
Poline},
title = {{Region of interest analysis using an SPM toolbox}},
journal = {Neuroimage},
year = {2002},
volume = {16},
pages = {1140--1141},
number = {2}
}
@some_entry{akey, aname = "about something",
another={something else}}
"""
res = bp.bibfile.parseString(txt)
assert_equal(len(res), 3)
res2 = bp.parse_str(txt)
assert_equal(res.asList(), res2.asList())
res3 = [r.asList()[0] for r, start, end in bp.definitions.scanString(txt)]
assert_equal(res.asList(), res3)
if __name__ == '__main__':
import nose
nose.main()
| mit | 1,246,147,764,180,435,000 | 38.353846 | 83 | 0.594993 | false |
nanolearning/edx-platform | common/djangoapps/terrain/stubs/youtube.py | 19 | 5279 | """
Stub implementation of YouTube for acceptance tests.
To start this stub server on its own from Vagrant:
1.) Locally, modify your Vagrantfile so that it contains:
config.vm.network :forwarded_port, guest: 8031, host: 8031
2.) From within Vagrant dev environment do:
cd common/djangoapps/terrain
python -m stubs.start youtube 8031
3.) Locally, try accessing http://localhost:8031/ and see that
you get "Unused url" message inside the browser.
"""
import textwrap
from .http import StubHttpRequestHandler, StubHttpService
import json
import time
import requests
from urlparse import urlparse
from collections import OrderedDict
IFRAME_API_RESPONSE = textwrap.dedent(
"if (!window['YT']) {var YT = {loading: 0,loaded: 0};}if (!window['YTConfig']) {var YTConfig"
" = {};}if (!YT.loading) {YT.loading = 1;(function(){var l = [];YT.ready = function(f) {if ("
"YT.loaded) {f();} else {l.push(f);}};window.onYTReady = function() {YT.loaded = 1;for (var "
"i = 0; i < l.length; i++) {try {l[i]();} catch (e) {}}};YT.setConfig = function(c) {for (var"
" k in c) {if (c.hasOwnProperty(k)) {YTConfig[k] = c[k];}}};var a = document.createElement"
"('script');a.id = 'www-widgetapi-script';a.src = 'http:' + '"
"//s.ytimg.com/yts/jsbin/www-widgetapi-vflxHr_AR.js';a.async = true;var b = "
"document.getElementsByTagName('script')[0];b.parentNode.insertBefore(a, b);})();}")
class StubYouTubeHandler(StubHttpRequestHandler):
"""
A handler for Youtube GET requests.
"""
# Default number of seconds to delay the response to simulate network latency.
DEFAULT_DELAY_SEC = 0.5
def do_DELETE(self): # pylint: disable=C0103
"""
Allow callers to delete all the server configurations using the /del_config URL.
"""
if self.path == "/del_config" or self.path == "/del_config/":
self.server.config = dict()
self.log_message("Reset Server Configuration.")
self.send_response(200)
else:
self.send_response(404)
def do_GET(self):
"""
Handle a GET request from the client and sends response back.
"""
self.log_message(
"Youtube provider received GET request to path {}".format(self.path)
)
if 'test_transcripts_youtube' in self.path:
if 't__eq_exist' in self.path:
status_message = "".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.0" dur="1.0">',
'Equal transcripts</text></transcript>'
])
self.send_response(
200, content=status_message, headers={'Content-type': 'application/xml'}
)
elif 't_neq_exist' in self.path:
status_message = "".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.1" dur="5.5">',
'Transcripts sample, different that on server',
'</text></transcript>'
])
self.send_response(
200, content=status_message, headers={'Content-type': 'application/xml'}
)
else:
self.send_response(404)
elif 'test_youtube' in self.path:
params = urlparse(self.path)
youtube_id = params.path.split('/').pop()
self._send_video_response(youtube_id, "I'm youtube.")
elif 'get_youtube_api' in self.path:
if self.server.config.get('youtube_api_blocked'):
self.send_response(404, content='', headers={'Content-type': 'text/plain'})
else:
self.send_response(200, content=IFRAME_API_RESPONSE, headers={'Content-type': 'text/html'})
else:
self.send_response(
404, content="Unused url", headers={'Content-type': 'text/plain'}
)
def _send_video_response(self, youtube_id, message):
"""
Send message back to the client for video player requests.
Requires sending back callback id.
"""
# Delay the response to simulate network latency
time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))
# Construct the response content
callback = self.get_params['callback']
youtube_metadata = json.loads(
requests.get(
"http://gdata.youtube.com/feeds/api/videos/{id}?v=2&alt=jsonc".format(id=youtube_id)
).text
)
data = OrderedDict({
'data': OrderedDict({
'id': youtube_id,
'message': message,
'duration': youtube_metadata['data']['duration'],
})
})
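        # The JSONP wrapper built below looks roughly like the following
        # (callback name and values are hypothetical):
        #     myCallback({"data": {"id": "abc123", "message": "I'm youtube.",
        #                          "duration": 60}})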
response = "{cb}({data})".format(cb=callback, data=json.dumps(data))
self.send_response(200, content=response, headers={'Content-type': 'text/html'})
self.log_message("Youtube: sent response {}".format(message))
class StubYouTubeService(StubHttpService):
"""
A stub Youtube provider server that responds to GET requests to localhost.
"""
HANDLER_CLASS = StubYouTubeHandler
| agpl-3.0 | 6,063,858,674,805,412,000 | 35.157534 | 107 | 0.57795 | false |
lucacorsato/feincms-in-a-box | fbox/$PROJECT_NAME/migrate/medialibrary/0001_initial.py | 1 | 3541 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import feincms.translations
import feincms.extensions
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(verbose_name='title', max_length=200)),
('slug', models.SlugField(verbose_name='slug', max_length=150)),
('parent', models.ForeignKey(null=True, verbose_name='parent', blank=True, to='medialibrary.Category')),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ['parent__title', 'title'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MediaFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('file', models.FileField(verbose_name='file', max_length=255, upload_to='medialibrary/%Y/%m/')),
('type', models.CharField(choices=[('image', 'Image'), ('video', 'Video'), ('audio', 'Audio'), ('pdf', 'PDF document'), ('swf', 'Flash'), ('txt', 'Text'), ('rtf', 'Rich Text'), ('zip', 'Zip archive'), ('doc', 'Microsoft Word'), ('xls', 'Microsoft Excel'), ('ppt', 'Microsoft PowerPoint'), ('other', 'Binary')], verbose_name='file type', max_length=12, editable=False)),
('created', models.DateTimeField(verbose_name='created', editable=False, default=django.utils.timezone.now)),
('copyright', models.CharField(verbose_name='copyright', blank=True, max_length=200)),
('file_size', models.IntegerField(verbose_name='file size', null=True, blank=True, editable=False)),
('categories', models.ManyToManyField(verbose_name='categories', null=True, blank=True, to='medialibrary.Category')),
],
options={
'verbose_name': 'media file',
'verbose_name_plural': 'media files',
'abstract': False,
'ordering': ['-created'],
},
bases=(models.Model, feincms.extensions.ExtensionsMixin, feincms.translations.TranslatedObjectMixin),
),
migrations.CreateModel(
name='MediaFileTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('language_code', models.CharField(choices=[('de', 'German')], verbose_name='language', max_length=10, default='de', editable=False)),
('caption', models.CharField(verbose_name='caption', max_length=200)),
('description', models.TextField(verbose_name='description', blank=True)),
('parent', models.ForeignKey(to='medialibrary.MediaFile')),
],
options={
'verbose_name': 'media file translation',
'verbose_name_plural': 'media file translations',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='mediafiletranslation',
unique_together=set([('parent', 'language_code')]),
),
]
| bsd-3-clause | 6,217,907,947,511,045,000 | 50.318841 | 385 | 0.569896 | false |
aneeshusa/android-quill | jni/libhpdf-2.3.0RC2/if/python/demo/encoding_list.py | 32 | 5633 | ###
## * << Haru Free PDF Library 2.0.0 >> -- encoding_list.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <[email protected]>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    # Add the directory 'up' levels above this file to sys.path (portable form).
    path = os.path.normpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)), *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
PAGE_WIDTH = 420
PAGE_HEIGHT = 400
CELL_WIDTH = 20
CELL_HEIGHT = 20
CELL_HEADER = 10
def draw_graph (page):
# Draw 16 X 15 cells
# Draw vertical lines.
HPDF_Page_SetLineWidth (page, 0.5)
for i in range(18):
x = i * CELL_WIDTH + 40
HPDF_Page_MoveTo (page, x, PAGE_HEIGHT - 60)
HPDF_Page_LineTo (page, x, 40)
HPDF_Page_Stroke (page)
if (i > 0 and i <= 16):
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, x + 5, PAGE_HEIGHT - 75)
buf="%X" %(i - 1)
HPDF_Page_ShowText (page, buf)
HPDF_Page_EndText (page)
# Draw horizontal lines.
for i in range(16):
y = i * CELL_HEIGHT + 40
HPDF_Page_MoveTo (page, 40, y)
HPDF_Page_LineTo (page, PAGE_WIDTH - 40, y)
HPDF_Page_Stroke (page)
if (i < 14):
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, 45, y + 5)
buf="%X" %( 15 - i)
HPDF_Page_ShowText (page, buf)
HPDF_Page_EndText (page)
def draw_fonts (page):
HPDF_Page_BeginText (page)
# Draw all character from 0x20 to 0xFF to the canvas.
for i in range(1,17):
for j in range(1,17):
buf=[None, None]
y = PAGE_HEIGHT - 55 - ((i - 1) * CELL_HEIGHT)
x = j * CELL_WIDTH + 50
buf[1] = 0x00
buf[0] = (i - 1) * 16 + (j - 1)
if (buf[0] >= 32):
d = x - HPDF_Page_TextWidth (page, buf) / 2
HPDF_Page_TextOut (page, d, y, buf)
HPDF_Page_EndText (page)
def main ():
encodings=[
"StandardEncoding",
"MacRomanEncoding",
"WinAnsiEncoding",
"ISO8859-2",
"ISO8859-3",
"ISO8859-4",
"ISO8859-5",
"ISO8859-9",
"ISO8859-10",
"ISO8859-13",
"ISO8859-14",
"ISO8859-15",
"ISO8859-16",
"CP1250",
"CP1251",
"CP1252",
"CP1254",
"CP1257",
"KOI8-R",
"Symbol-Set",
"ZapfDingbats-Set",
NULL
]
    # pdf is declared global so that error_handler (which calls HPDF_Free(pdf))
    # can reach it if an error is raised.
    global pdf
    pdf = HPDF_NewEx (error_handler, NULL, NULL, 0, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
# set compression mode
HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
# Set page mode to use outlines.
HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
# get default font
font = HPDF_GetFont (pdf, "Helvetica", NULL)
# load font object
font_name = HPDF_LoadType1FontFromFile (pdf, "type1/a010013l.afm",
"type1/a010013l.pfb")
# create outline root.
root = HPDF_CreateOutline (pdf, NULL, "Encoding list", NULL)
HPDF_Outline_SetOpened (root, HPDF_TRUE)
i=0
while (encodings[i]):
page = HPDF_AddPage (pdf)
HPDF_Page_SetWidth (page, PAGE_WIDTH)
HPDF_Page_SetHeight (page, PAGE_HEIGHT)
outline = HPDF_CreateOutline (pdf, root, encodings[i], NULL)
dst = HPDF_Page_CreateDestination (page)
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page), 1)
# HPDF_Destination_SetFitB(dst);
HPDF_Outline_SetDestination(outline, dst)
HPDF_Page_SetFontAndSize (page, font, 15)
draw_graph (page)
HPDF_Page_BeginText (page)
HPDF_Page_SetFontAndSize (page, font, 20)
HPDF_Page_MoveTextPos (page, 40, PAGE_HEIGHT - 50)
HPDF_Page_ShowText (page, encodings[i])
HPDF_Page_ShowText (page, " Encoding")
HPDF_Page_EndText (page)
if encodings[i]=="Symbol-Set":
font2 = HPDF_GetFont (pdf, "Symbol", NULL)
elif encodings[i]=="ZapfDingbats-Set":
font2 = HPDF_GetFont (pdf, "ZapfDingbats", NULL)
else:
font2 = HPDF_GetFont (pdf, font_name, encodings[i])
HPDF_Page_SetFontAndSize (page, font2, 14)
draw_fonts (page)
i+=1
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
return 0
main() | gpl-3.0 | -2,405,763,116,444,155,000 | 25.354369 | 80 | 0.54447 | false |
mansonul/events | events/contrib/plugins/form_elements/fields/select_multiple_with_max/fields.py | 1 | 1343 | from django.core.exceptions import ValidationError
from django.forms.fields import MultipleChoiceField
from django.utils.translation import ugettext_lazy as _
__title__ = 'fobi.contrib.plugins.form_elements.fields.' \
'select_multiple_with_max.fields'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('MultipleChoiceWithMaxField',)
class MultipleChoiceWithMaxField(MultipleChoiceField):
"""Multiple choice with max field."""
def __init__(self, max_choices=None, choices=(), required=True,
widget=None, label=None, initial=None, help_text='', *args,
**kwargs):
"""Constructor."""
super(MultipleChoiceWithMaxField, self).__init__(
choices=choices, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs
)
self.max_choices = max_choices
def validate(self, value):
"""Validate."""
super(MultipleChoiceWithMaxField, self).validate(value)
if self.max_choices:
if len(value) > self.max_choices:
raise ValidationError(_("You must choose no more than {0} "
"values.".format(self.max_choices)))
| mit | 8,567,387,226,112,469,000 | 40.96875 | 76 | 0.631422 | false |
campagnola/acq4 | acq4/util/LogWidgetTemplate.py | 3 | 9022 | # -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file 'acq4\LogWidgetTemplate.ui'
#
# Created: Fri Jan 03 01:38:54 2014
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(633, 437)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
self.gridLayout_2 = QtGui.QGridLayout(Form)
self.gridLayout_2.setMargin(3)
self.gridLayout_2.setSpacing(3)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.splitter = QtGui.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.widget = QtGui.QWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(6)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName(_fromUtf8("widget"))
self.gridLayout = QtGui.QGridLayout(self.widget)
self.gridLayout.setSpacing(3)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.exportHtmlBtn = QtGui.QPushButton(self.widget)
self.exportHtmlBtn.setObjectName(_fromUtf8("exportHtmlBtn"))
self.gridLayout.addWidget(self.exportHtmlBtn, 0, 2, 1, 1)
self.dirLabel = QtGui.QLabel(self.widget)
self.dirLabel.setText(_fromUtf8(""))
self.dirLabel.setObjectName(_fromUtf8("dirLabel"))
self.gridLayout.addWidget(self.dirLabel, 0, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(148, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 1, 1, 1)
self.output = QtGui.QTextBrowser(self.widget)
self.output.setOpenLinks(False)
self.output.setObjectName(_fromUtf8("output"))
self.gridLayout.addWidget(self.output, 1, 0, 1, 3)
self.widget1 = QtGui.QWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget1.sizePolicy().hasHeightForWidth())
self.widget1.setSizePolicy(sizePolicy)
self.widget1.setObjectName(_fromUtf8("widget1"))
self.verticalLayout = QtGui.QVBoxLayout(self.widget1)
self.verticalLayout.setSpacing(3)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.filterTree = TreeWidget(self.widget1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.filterTree.sizePolicy().hasHeightForWidth())
self.filterTree.setSizePolicy(sizePolicy)
self.filterTree.setMinimumSize(QtCore.QSize(210, 0))
self.filterTree.setMaximumSize(QtCore.QSize(8777205, 16777215))
self.filterTree.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.filterTree.setProperty("showDropIndicator", False)
self.filterTree.setDragEnabled(True)
self.filterTree.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.filterTree.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.filterTree.setObjectName(_fromUtf8("filterTree"))
item_0 = QtGui.QTreeWidgetItem(self.filterTree)
item_0.setCheckState(0, QtCore.Qt.Unchecked)
item_0.setFlags(QtCore.Qt.ItemIsUserCheckable|QtCore.Qt.ItemIsEnabled)
item_0 = QtGui.QTreeWidgetItem(self.filterTree)
item_0.setCheckState(0, QtCore.Qt.Checked)
item_0.setFlags(QtCore.Qt.ItemIsDropEnabled|QtCore.Qt.ItemIsUserCheckable|QtCore.Qt.ItemIsEnabled)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1.setCheckState(0, QtCore.Qt.Checked)
item_1.setFlags(QtCore.Qt.ItemIsUserCheckable|QtCore.Qt.ItemIsEnabled)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1.setCheckState(0, QtCore.Qt.Checked)
item_1.setFlags(QtCore.Qt.ItemIsUserCheckable|QtCore.Qt.ItemIsEnabled)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1.setCheckState(0, QtCore.Qt.Checked)
item_1.setFlags(QtCore.Qt.ItemIsUserCheckable|QtCore.Qt.ItemIsEnabled)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1.setCheckState(0, QtCore.Qt.Checked)
item_1.setFlags(QtCore.Qt.ItemIsUserCheckable|QtCore.Qt.ItemIsEnabled)
self.verticalLayout.addWidget(self.filterTree)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2 = QtGui.QLabel(self.widget1)
font = QtGui.QFont()
font.setPointSize(11)
self.label_2.setFont(font)
self.label_2.setIndent(0)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.label = QtGui.QLabel(self.widget1)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.label_3 = QtGui.QLabel(self.widget1)
font = QtGui.QFont()
font.setPointSize(11)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout.addWidget(self.label_3)
self.verticalLayout.addLayout(self.horizontalLayout)
self.importanceSlider = QtGui.QSlider(self.widget1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.importanceSlider.sizePolicy().hasHeightForWidth())
self.importanceSlider.setSizePolicy(sizePolicy)
self.importanceSlider.setMaximum(9)
self.importanceSlider.setPageStep(0)
self.importanceSlider.setProperty("value", 4)
self.importanceSlider.setTracking(True)
self.importanceSlider.setOrientation(QtCore.Qt.Horizontal)
self.importanceSlider.setTickPosition(QtGui.QSlider.TicksAbove)
self.importanceSlider.setTickInterval(1)
self.importanceSlider.setObjectName(_fromUtf8("importanceSlider"))
self.verticalLayout.addWidget(self.importanceSlider)
self.gridLayout_2.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.exportHtmlBtn.setText(_translate("Form", "Export HTML", None))
self.filterTree.headerItem().setText(0, _translate("Form", "Display:", None))
__sortingEnabled = self.filterTree.isSortingEnabled()
self.filterTree.setSortingEnabled(False)
self.filterTree.topLevelItem(0).setText(0, _translate("Form", "Current directory only", None))
self.filterTree.topLevelItem(1).setText(0, _translate("Form", "All message types:", None))
self.filterTree.topLevelItem(1).child(0).setText(0, _translate("Form", "user", None))
self.filterTree.topLevelItem(1).child(1).setText(0, _translate("Form", "status", None))
self.filterTree.topLevelItem(1).child(2).setText(0, _translate("Form", "warning", None))
self.filterTree.topLevelItem(1).child(3).setText(0, _translate("Form", "error", None))
self.filterTree.setSortingEnabled(__sortingEnabled)
self.label_2.setText(_translate("Form", "Low", None))
self.label.setText(_translate("Form", "Importance Filter:", None))
self.label_3.setText(_translate("Form", "High", None))
from acq4.pyqtgraph import TreeWidget
| mit | -6,845,810,853,680,141,000 | 52.070588 | 106 | 0.708934 | false |
cleophasmashiri/oppia | core/domain/exp_services.py | 6 | 46287 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on explorations.
All functions here should be agnostic of how ExplorationModel objects are
stored in the database. In particular, the various query methods should
delegate to the Exploration model class. This will enable the exploration
storage model to be changed without affecting this module and others above it.
"""
__author__ = 'Sean Lip'
import copy
import datetime
import logging
import os
import StringIO
import zipfile
from core.domain import event_services
from core.domain import exp_domain
from core.domain import fs_domain
from core.domain import rights_manager
from core.platform import models
import feconf
memcache_services = models.Registry.import_memcache_services()
search_services = models.Registry.import_search_services()
taskqueue_services = models.Registry.import_taskqueue_services()
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
import utils
# This takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'
# Name for the exploration search index.
SEARCH_INDEX_EXPLORATIONS = 'explorations'
# Repository GET methods.
def _get_exploration_memcache_key(exploration_id, version=None):
"""Returns a memcache key for an exploration."""
if version:
return 'exploration-version:%s:%s' % (exploration_id, version)
else:
return 'exploration:%s' % exploration_id
def get_exploration_from_model(exploration_model):
return exp_domain.Exploration(
exploration_model.id, exploration_model.title,
exploration_model.category, exploration_model.objective,
exploration_model.language_code, exploration_model.skill_tags,
exploration_model.blurb, exploration_model.author_notes,
exploration_model.default_skin, exploration_model.init_state_name,
exploration_model.states, exploration_model.param_specs,
exploration_model.param_changes, exploration_model.version,
exploration_model.created_on, exploration_model.last_updated)
def get_exploration_summary_from_model(exp_summary_model):
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.skill_tags,
exp_summary_model.status, exp_summary_model.community_owned,
exp_summary_model.owner_ids, exp_summary_model.editor_ids,
exp_summary_model.viewer_ids, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated)
def get_exploration_by_id(exploration_id, strict=True, version=None):
"""Returns a domain object representing an exploration."""
exploration_memcache_key = _get_exploration_memcache_key(
exploration_id, version=version)
memcached_exploration = memcache_services.get_multi(
[exploration_memcache_key]).get(exploration_memcache_key)
if memcached_exploration is not None:
return memcached_exploration
else:
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=strict, version=version)
if exploration_model:
exploration = get_exploration_from_model(exploration_model)
memcache_services.set_multi({
exploration_memcache_key: exploration})
return exploration
else:
return None
def get_exploration_summary_by_id(exploration_id):
"""Returns a domain object representing an exploration summary."""
# TODO(msl): Maybe use memcache similarly to get_exploration_by_id.
exp_summary_model = exp_models.ExpSummaryModel.get(
exploration_id)
if exp_summary_model:
exp_summary = get_exploration_summary_from_model(exp_summary_model)
return exp_summary
else:
return None
def get_multiple_explorations_by_id(exp_ids, strict=True):
"""Returns a dict of domain objects representing explorations with the
given ids as keys. If an exp_id is not present it is not included in the
return dict.
"""
exp_ids = set(exp_ids)
result = {}
uncached = []
memcache_keys = [_get_exploration_memcache_key(i) for i in exp_ids]
cache_result = memcache_services.get_multi(memcache_keys)
for exp_obj in cache_result.itervalues():
result[exp_obj.id] = exp_obj
for _id in exp_ids:
if _id not in result:
uncached.append(_id)
db_exp_models = exp_models.ExplorationModel.get_multi(uncached)
db_results_dict = {}
not_found = []
for i, eid in enumerate(uncached):
model = db_exp_models[i]
if model:
exploration = get_exploration_from_model(model)
db_results_dict[eid] = exploration
else:
logging.info('Tried to fetch exploration with id %s, but no such '
'exploration exists in the datastore' % eid)
not_found.append(eid)
if strict and not_found:
raise ValueError(
'Couldn\'t find explorations with the following ids:\n%s'
% '\n'.join(not_found))
cache_update = {
eid: db_results_dict[eid] for eid in db_results_dict.iterkeys()
if db_results_dict[eid] is not None
}
if cache_update:
memcache_services.set_multi(cache_update)
result.update(db_results_dict)
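    # At this point the result maps each found id to its domain object, e.g.
    # (with hypothetical ids): {'exp_a': <Exploration>, 'exp_b': <Exploration>};
    # ids that could not be found are simply absent (or an error was raised
    # above if strict is True).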
return result
def get_new_exploration_id():
"""Returns a new exploration id."""
return exp_models.ExplorationModel.get_new_id('')
def is_exp_summary_editable(exp_summary, user_id=None):
    """Checks whether the given user may edit the exploration described by
    the given summary domain object."""
return user_id is not None and (
user_id in exp_summary.editor_ids
or user_id in exp_summary.owner_ids
or exp_summary.community_owned)
# Query methods.
def get_exploration_titles_and_categories(exp_ids):
"""Returns exploration titles and categories for the given ids.
The result is a dict with exploration ids as keys. The corresponding values
are dicts with the keys 'title' and 'category'.
Any invalid exp_ids will not be included in the return dict. No error will
be raised.
"""
explorations = [
(get_exploration_from_model(e) if e else None)
for e in exp_models.ExplorationModel.get_multi(exp_ids)]
result = {}
for ind, exploration in enumerate(explorations):
if exploration is None:
logging.error(
'Could not find exploration corresponding to id')
else:
result[exploration.id] = {
'title': exploration.title,
'category': exploration.category,
}
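    # The result therefore looks like, e.g. (hypothetical values):
    #     {'exp_id_1': {'title': 'Fractions', 'category': 'Mathematics'}}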
return result
def _get_exploration_summary_dicts_from_models(exp_summary_models):
"""Given an iterable of ExpSummaryModel instances, create a dict containing
corresponding exploration summary domain objects, keyed by id."""
exploration_summaries = [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models]
result = {}
for exp_summary in exploration_summaries:
result[exp_summary.id] = exp_summary
return result
def get_exploration_summaries_matching_ids(exp_ids):
"""Given a list of exploration ids, return a list with the corresponding
summary domain objects (or None if the corresponding summary does not
exist).
"""
return [
(get_exploration_summary_from_model(model) if model else None)
for model in exp_models.ExpSummaryModel.get_multi(exp_ids)]
def get_exploration_summaries_matching_query(query_string, cursor=None):
"""Returns a list with all exploration summary domain objects matching the
given search query string, as well as a search cursor for future fetches.
This method returns exactly feconf.GALLERY_PAGE_SIZE results if there are
at least that many, otherwise it returns all remaining results. (If this
behaviour does not occur, an error will be logged.) The method also returns
a search cursor.
"""
    MAX_ITERATIONS = 10
    summary_models = []
    # Advance the cursor on each retry so that stale results are not re-fetched.
    search_cursor = cursor
    for i in range(MAX_ITERATIONS):
        remaining_to_fetch = feconf.GALLERY_PAGE_SIZE - len(summary_models)
        exp_ids, search_cursor = search_explorations(
            query_string, remaining_to_fetch, cursor=search_cursor)
invalid_exp_ids = []
for ind, model in enumerate(
exp_models.ExpSummaryModel.get_multi(exp_ids)):
if model is not None:
summary_models.append(model)
else:
invalid_exp_ids.append(exp_ids[ind])
if len(summary_models) == feconf.GALLERY_PAGE_SIZE or (
search_cursor is None):
break
else:
logging.error(
'Search index contains stale exploration ids: %s' %
', '.join(invalid_exp_ids))
if (len(summary_models) < feconf.GALLERY_PAGE_SIZE
and search_cursor is not None):
logging.error(
'Could not fulfill search request for query string %s; at least '
'%s retries were needed.' % (query_string, MAX_ITERATIONS))
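    # Callers typically page through results by feeding the returned cursor
    # back in until it is None, e.g. (the query string is hypothetical):
    #     summaries, cursor = get_exploration_summaries_matching_query('algebra')
    #     more, cursor = get_exploration_summaries_matching_query(
    #         'algebra', cursor=cursor)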
return ([
get_exploration_summary_from_model(summary_model)
for summary_model in summary_models
], search_cursor)
def get_non_private_exploration_summaries():
"""Returns a dict with all non-private exploration summary domain objects,
keyed by their id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_non_private())
def get_all_exploration_summaries():
"""Returns a dict with all exploration summary domain objects,
keyed by their id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_all())
def get_private_at_least_viewable_exploration_summaries(user_id):
"""Returns a dict with all exploration summary domain objects that are
at least viewable by given user. The dict is keyed by exploration id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_private_at_least_viewable(
user_id=user_id))
def get_at_least_editable_exploration_summaries(user_id):
"""Returns a dict with all exploration summary domain objects that are
at least editable by given user. The dict is keyed by exploration id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_at_least_editable(
user_id=user_id))
def count_explorations():
"""Returns the total number of explorations."""
return exp_models.ExplorationModel.get_exploration_count()
# Methods for exporting states and explorations to other formats.
def export_to_zip_file(exploration_id, version=None):
"""Returns a ZIP archive of the exploration."""
exploration = get_exploration_by_id(exploration_id, version=version)
yaml_repr = exploration.to_yaml()
o = StringIO.StringIO()
with zipfile.ZipFile(o, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr('%s.yaml' % exploration.title, yaml_repr)
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
for filepath in dir_list:
# Currently, the version number of all files is 1, since they are
# not modifiable post-upload.
# TODO(sll): When allowing editing of files, implement versioning
# for them.
file_contents = fs.get(filepath, version=1)
str_filepath = 'assets/%s' % filepath
assert isinstance(str_filepath, str)
unicode_filepath = str_filepath.decode('utf-8')
zf.writestr(unicode_filepath, file_contents)
return o.getvalue()
def export_states_to_yaml(exploration_id, version=None, width=80):
"""Returns a python dictionary of the exploration, whose keys are state
names and values are yaml strings representing the state contents with
lines wrapped at 'width' characters."""
exploration = get_exploration_by_id(exploration_id, version=version)
exploration_dict = {}
for state in exploration.states:
exploration_dict[state] = utils.yaml_from_dict(
exploration.states[state].to_dict(), width=width)
return exploration_dict
# Repository SAVE and DELETE methods.
def apply_change_list(exploration_id, change_list):
"""Applies a changelist to a pristine exploration and returns the result.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
the resulting exploration domain object.
"""
exploration = get_exploration_by_id(exploration_id)
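    # Sketch of a typical change_list, based on the commands handled below
    # (state names and values are hypothetical):
    #     [{'cmd': 'edit_exploration_property',
    #       'property_name': 'title', 'new_value': 'A new title'},
    #      {'cmd': 'add_state', 'state_name': 'State 2'},
    #      {'cmd': 'rename_state',
    #       'old_state_name': 'State 2', 'new_state_name': 'Final state'}]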
try:
changes = [exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
for change in changes:
if change.cmd == 'add_state':
exploration.add_states([change.state_name])
elif change.cmd == 'rename_state':
exploration.rename_state(
change.old_state_name, change.new_state_name)
elif change.cmd == 'delete_state':
exploration.delete_state(change.state_name)
elif change.cmd == 'edit_state_property':
state = exploration.states[change.state_name]
if (change.property_name ==
exp_domain.STATE_PROPERTY_PARAM_CHANGES):
state.update_param_changes(change.new_value)
elif change.property_name == exp_domain.STATE_PROPERTY_CONTENT:
state.update_content(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ID):
state.update_interaction_id(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS):
state.update_interaction_customization_args(
change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_HANDLERS):
state.update_interaction_handlers(change.new_value)
elif change.cmd == 'edit_exploration_property':
if change.property_name == 'title':
exploration.update_title(change.new_value)
elif change.property_name == 'category':
exploration.update_category(change.new_value)
elif change.property_name == 'objective':
exploration.update_objective(change.new_value)
elif change.property_name == 'language_code':
exploration.update_language_code(change.new_value)
elif change.property_name == 'skill_tags':
exploration.update_skill_tags(change.new_value)
elif change.property_name == 'blurb':
exploration.update_blurb(change.new_value)
elif change.property_name == 'author_notes':
exploration.update_author_notes(change.new_value)
elif change.property_name == 'param_specs':
exploration.update_param_specs(change.new_value)
elif change.property_name == 'param_changes':
exploration.update_param_changes(change.new_value)
elif change.property_name == 'default_skin_id':
exploration.update_default_skin_id(change.new_value)
elif change.property_name == 'init_state_name':
exploration.update_init_state_name(change.new_value)
return exploration
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, exploration_id, change_list)
)
raise
def get_summary_of_change_list(base_exploration, change_list):
"""Applies a changelist to a pristine exploration and returns a summary.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
a dict with five keys:
exploration_property_changes: a dict, where each key is a property_name
of the exploration, and the corresponding values are dicts with keys
old_value and new_value.
state_property_changes: a dict, where each key is a state name, and the
corresponding values are dicts; the keys of these dicts represent
properties of the state, and the corresponding values are dicts with
keys old_value and new_value. If a state name is changed, this is
listed as a property name change under the old state name in the
outer dict.
changed_states: a list of state names. This indicates that the state
has changed but we do not know what the changes are. This can happen
for complicated operations like removing a state and later adding a
new state with the same name as the removed state.
added_states: a list of added state names.
deleted_states: a list of deleted state names.
"""
# TODO(sll): This really needs tests, especially the diff logic. Probably
# worth comparing with the actual changed exploration.
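    # Illustrative output for a changelist that edits the title and renames a
    # state (all values hypothetical):
    #     {'exploration_property_changes': {
    #          'title': {'old_value': 'Old title', 'new_value': 'New title'}},
    #      'state_property_changes': {
    #          'State 1': {'name': {'old_value': 'State 1',
    #                               'new_value': 'First state'}}},
    #      'changed_states': [], 'added_states': [], 'deleted_states': []}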
# Ensure that the original exploration does not get altered.
exploration = copy.deepcopy(base_exploration)
changes = [
exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
exploration_property_changes = {}
state_property_changes = {}
changed_states = []
added_states = []
deleted_states = []
original_state_names = {
state_name: state_name for state_name in exploration.states.keys()
}
for change in changes:
if change.cmd == 'add_state':
if change.state_name in changed_states:
continue
elif change.state_name in deleted_states:
changed_states.append(change.state_name)
del state_property_changes[change.state_name]
deleted_states.remove(change.state_name)
else:
added_states.append(change.state_name)
original_state_names[change.state_name] = change.state_name
elif change.cmd == 'rename_state':
orig_state_name = original_state_names[change.old_state_name]
original_state_names[change.new_state_name] = orig_state_name
if orig_state_name in changed_states:
continue
if orig_state_name not in state_property_changes:
state_property_changes[orig_state_name] = {}
if 'name' not in state_property_changes[orig_state_name]:
state_property_changes[orig_state_name]['name'] = {
'old_value': change.old_state_name
}
state_property_changes[orig_state_name]['name']['new_value'] = (
change.new_state_name)
elif change.cmd == 'delete_state':
orig_state_name = original_state_names[change.state_name]
if orig_state_name in changed_states:
continue
elif orig_state_name in added_states:
added_states.remove(orig_state_name)
else:
deleted_states.append(orig_state_name)
elif change.cmd == 'edit_state_property':
orig_state_name = original_state_names[change.state_name]
if orig_state_name in changed_states:
continue
property_name = change.property_name
if orig_state_name not in state_property_changes:
state_property_changes[orig_state_name] = {}
if property_name not in state_property_changes[orig_state_name]:
state_property_changes[orig_state_name][property_name] = {
'old_value': change.old_value
}
state_property_changes[orig_state_name][property_name][
'new_value'] = change.new_value
elif change.cmd == 'edit_exploration_property':
property_name = change.property_name
if property_name not in exploration_property_changes:
exploration_property_changes[property_name] = {
'old_value': change.old_value
}
exploration_property_changes[property_name]['new_value'] = (
change.new_value)
unchanged_exploration_properties = []
for property_name in exploration_property_changes:
if (exploration_property_changes[property_name]['old_value'] ==
exploration_property_changes[property_name]['new_value']):
unchanged_exploration_properties.append(property_name)
for property_name in unchanged_exploration_properties:
del exploration_property_changes[property_name]
unchanged_state_names = []
for state_name in state_property_changes:
unchanged_state_properties = []
changes = state_property_changes[state_name]
for property_name in changes:
if (changes[property_name]['old_value'] ==
changes[property_name]['new_value']):
unchanged_state_properties.append(property_name)
for property_name in unchanged_state_properties:
del changes[property_name]
if len(changes) == 0:
unchanged_state_names.append(state_name)
for state_name in unchanged_state_names:
del state_property_changes[state_name]
return {
'exploration_property_changes': exploration_property_changes,
'state_property_changes': state_property_changes,
'changed_states': changed_states,
'added_states': added_states,
'deleted_states': deleted_states,
}
def _save_exploration(
committer_id, exploration, commit_message, change_list):
"""Validates an exploration and commits it to persistent storage.
If successful, increments the version number of the incoming exploration
domain object by 1.
"""
if change_list is None:
change_list = []
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exploration_model = exp_models.ExplorationModel.get(
exploration.id, strict=False)
if exploration_model is None:
exploration_model = exp_models.ExplorationModel(id=exploration.id)
else:
if exploration.version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
elif exploration.version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
exploration_model.category = exploration.category
exploration_model.title = exploration.title
exploration_model.objective = exploration.objective
exploration_model.language_code = exploration.language_code
exploration_model.skill_tags = exploration.skill_tags
exploration_model.blurb = exploration.blurb
exploration_model.author_notes = exploration.author_notes
exploration_model.default_skin = exploration.default_skin
exploration_model.init_state_name = exploration.init_state_name
exploration_model.states = {
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()}
exploration_model.param_specs = exploration.param_specs_dict
exploration_model.param_changes = exploration.param_change_dicts
exploration_model.commit(
committer_id, commit_message, change_list)
memcache_services.delete(_get_exploration_memcache_key(exploration.id))
event_services.ExplorationContentChangeEventHandler.record(exploration.id)
index_explorations_given_ids([exploration.id])
exploration.version += 1
def _create_exploration(
committer_id, exploration, commit_message, commit_cmds):
"""Ensures that rights for a new exploration are saved first.
This is because _save_exploration() depends on the rights object being
present to tell it whether to do strict validation or not.
"""
# This line is needed because otherwise a rights object will be created,
# but the creation of an exploration object will fail.
exploration.validate(allow_null_interaction=True)
rights_manager.create_new_exploration_rights(exploration.id, committer_id)
model = exp_models.ExplorationModel(
id=exploration.id,
category=exploration.category,
title=exploration.title,
objective=exploration.objective,
language_code=exploration.language_code,
skill_tags=exploration.skill_tags,
blurb=exploration.blurb,
author_notes=exploration.author_notes,
default_skin=exploration.default_skin,
init_state_name=exploration.init_state_name,
states={
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()},
param_specs=exploration.param_specs_dict,
param_changes=exploration.param_change_dicts,
)
model.commit(committer_id, commit_message, commit_cmds)
event_services.ExplorationContentChangeEventHandler.record(exploration.id)
exploration.version += 1
create_exploration_summary(exploration.id)
def save_new_exploration(committer_id, exploration):
commit_message = (
'New exploration created with title \'%s\'.' % exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
def delete_exploration(committer_id, exploration_id, force_deletion=False):
"""Deletes the exploration with the given exploration_id.
IMPORTANT: Callers of this function should ensure that committer_id has
permissions to delete this exploration, prior to calling this function.
If force_deletion is True the exploration and its history are fully deleted
and are unrecoverable. Otherwise, the exploration and all its history are
marked as deleted, but the corresponding models are still retained in the
datastore. This last option is the preferred one.
"""
# TODO(sll): Delete the files too?
exploration_rights_model = exp_models.ExplorationRightsModel.get(
exploration_id)
exploration_rights_model.delete(
committer_id, '', force_deletion=force_deletion)
exploration_model = exp_models.ExplorationModel.get(exploration_id)
exploration_model.delete(
committer_id, feconf.COMMIT_MESSAGE_EXPLORATION_DELETED,
force_deletion=force_deletion)
# This must come after the exploration is retrieved. Otherwise the memcache
# key will be reinstated.
exploration_memcache_key = _get_exploration_memcache_key(exploration_id)
memcache_services.delete(exploration_memcache_key)
# Delete the exploration from search.
delete_documents_from_search_index([exploration_id])
# Delete the summary of the exploration.
delete_exploration_summary(exploration_id, force_deletion=force_deletion)
# Operations on exploration snapshots.
def _get_simple_changelist_summary(
exploration_id, version_number, change_list):
"""Returns an auto-generated changelist summary for the history logs."""
# TODO(sll): Get this from memcache where possible. It won't change, so we
# can keep it there indefinitely.
base_exploration = get_exploration_by_id(
exploration_id, version=version_number)
if (len(change_list) == 1 and change_list[0]['cmd'] in
['create_new', 'AUTO_revert_version_number']):
# An automatic summary is not needed here, because the original commit
# message is sufficiently descriptive.
return ''
else:
full_summary = get_summary_of_change_list(
base_exploration, change_list)
short_summary_fragments = []
if full_summary['added_states']:
short_summary_fragments.append(
'added \'%s\'' % '\', \''.join(full_summary['added_states']))
if full_summary['deleted_states']:
short_summary_fragments.append(
'deleted \'%s\'' % '\', \''.join(
full_summary['deleted_states']))
if (full_summary['changed_states'] or
full_summary['state_property_changes']):
affected_states = (
full_summary['changed_states'] +
full_summary['state_property_changes'].keys())
short_summary_fragments.append(
'edited \'%s\'' % '\', \''.join(affected_states))
if full_summary['exploration_property_changes']:
short_summary_fragments.append(
'edited exploration properties %s' % ', '.join(
full_summary['exploration_property_changes'].keys()))
return '; '.join(short_summary_fragments)
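# Editor's illustration (not part of the original module): for a change list
# that adds a state named 'Intro' and edits a state named 'Question 1', the
# code above produces the summary string:
#     "added 'Intro'; edited 'Question 1'"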
def get_exploration_snapshots_metadata(exploration_id):
"""Returns the snapshots for this exploration, as dicts.
Args:
exploration_id: str. The id of the exploration in question.
Returns:
list of dicts, each representing a recent snapshot. Each dict has the
following keys: committer_id, commit_message, commit_cmds, commit_type,
created_on, version_number. The version numbers are consecutive and in
ascending order. There are exploration.version items in the
returned list.
"""
exploration = get_exploration_by_id(exploration_id)
current_version = exploration.version
version_nums = range(1, current_version + 1)
return exp_models.ExplorationModel.get_snapshots_metadata(
exploration_id, version_nums)
def update_exploration(
committer_id, exploration_id, change_list, commit_message):
"""Update an exploration. Commits changes.
Args:
- committer_id: str. The id of the user who is performing the update
action.
- exploration_id: str. The exploration id.
- change_list: list of dicts, each representing a _Change object. These
changes are applied in sequence to produce the resulting exploration.
- commit_message: str or None. A description of changes made to the state.
For published explorations, this must be present; for unpublished
explorations, it should be equal to None.
"""
is_public = rights_manager.is_exploration_public(exploration_id)
if is_public and not commit_message:
raise ValueError(
'Exploration is public so expected a commit message but '
'received none.')
exploration = apply_change_list(exploration_id, change_list)
_save_exploration(committer_id, exploration, commit_message, change_list)
# Update the summary of the changed exploration.
update_exploration_summary(exploration.id)
def create_exploration_summary(exploration_id):
"""Create summary of an exploration and store in datastore."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = get_summary_of_exploration(exploration)
_save_exploration_summary(exp_summary)
def update_exploration_summary(exploration_id):
"""Update the summary of an exploration."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = get_summary_of_exploration(exploration)
_save_exploration_summary(exp_summary)
def get_summary_of_exploration(exploration):
"""Create ExplorationSummary domain object for a given Exploration
domain object and return it.
"""
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exploration.id)
exploration_model_last_updated = exploration.last_updated
exploration_model_created_on = exploration.created_on
exp_summary = exp_domain.ExplorationSummary(
exploration.id,
exploration.title,
exploration.category,
exploration.objective,
exploration.language_code,
exploration.skill_tags,
exp_rights.status,
exp_rights.community_owned,
exp_rights.owner_ids,
exp_rights.editor_ids,
exp_rights.viewer_ids,
exploration.version,
exploration_model_created_on,
exploration_model_last_updated
)
return exp_summary
def _save_exploration_summary(exp_summary):
"""Save exploration summary domain object as ExpSummaryModel
entity in datastore."""
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_summary.id,
title=exp_summary.title,
category=exp_summary.category,
objective=exp_summary.objective,
language_code=exp_summary.language_code,
skill_tags=exp_summary.skill_tags,
status=exp_summary.status,
community_owned=exp_summary.community_owned,
owner_ids=exp_summary.owner_ids,
editor_ids=exp_summary.editor_ids,
viewer_ids=exp_summary.viewer_ids,
version=exp_summary.version,
exploration_model_last_updated=(
exp_summary.exploration_model_last_updated),
exploration_model_created_on=(
exp_summary.exploration_model_created_on)
)
exp_summary_model.put()
def delete_exploration_summary(exploration_id, force_deletion=False):
"""Delete an exploration summary model."""
exp_models.ExpSummaryModel.get(exploration_id).delete()
def revert_exploration(
committer_id, exploration_id, current_version, revert_to_version):
"""Reverts an exploration to the given version number. Commits changes."""
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=False)
if current_version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, current_version))
elif current_version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, current_version))
# Validate the previous version of the exploration before committing the
# change.
exploration = get_exploration_by_id(
exploration_id, version=revert_to_version)
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exploration_model.revert(
committer_id, 'Reverted exploration to version %s' % revert_to_version,
revert_to_version)
memcache_services.delete(_get_exploration_memcache_key(exploration_id))
update_exploration_summary(exploration_id)
# Creation and deletion methods.
def get_demo_exploration_components(demo_path):
"""Gets the content of `demo_path` in the sample explorations folder.
Args:
demo_path: the file or folder path for the content of an exploration
in SAMPLE_EXPLORATIONS_DIR. E.g.: 'adventure.yaml' or 'tar/'.
Returns:
a 2-tuple, the first element of which is a yaml string, and the second
element of which is a list of (filepath, content) 2-tuples. The filepath
does not include the assets/ prefix.
"""
demo_filepath = os.path.join(feconf.SAMPLE_EXPLORATIONS_DIR, demo_path)
if demo_filepath.endswith('yaml'):
file_contents = utils.get_file_contents(demo_filepath)
return file_contents, []
elif os.path.isdir(demo_filepath):
return utils.get_exploration_components_from_dir(demo_filepath)
else:
raise Exception('Unrecognized file path: %s' % demo_path)
def save_new_exploration_from_yaml_and_assets(
committer_id, yaml_content, title, category, exploration_id,
assets_list):
if assets_list is None:
assets_list = []
exploration = exp_domain.Exploration.from_yaml(
exploration_id, title, category, yaml_content)
commit_message = (
'New exploration created from YAML file with title \'%s\'.'
% exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
for (asset_filename, asset_content) in assets_list:
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
fs.commit(committer_id, asset_filename, asset_content)
def delete_demo(exploration_id):
"""Deletes a single demo exploration."""
if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration = get_exploration_by_id(exploration_id, strict=False)
if not exploration:
logging.info('Exploration with id %s was not deleted, because it '
'does not exist.' % exploration_id)
else:
delete_exploration(
feconf.ADMIN_COMMITTER_ID, exploration_id, force_deletion=True)
def load_demo(exploration_id):
"""Loads a demo exploration.
The resulting exploration will have version 2 (one for its initial
creation and one for its subsequent modification).
"""
# TODO(sll): Speed this method up. It is too slow.
delete_demo(exploration_id)
if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration_info = feconf.DEMO_EXPLORATIONS[int(exploration_id)]
if len(exploration_info) == 3:
(exp_filename, title, category) = exploration_info
else:
raise Exception('Invalid demo exploration: %s' % exploration_info)
yaml_content, assets_list = get_demo_exploration_components(exp_filename)
save_new_exploration_from_yaml_and_assets(
feconf.ADMIN_COMMITTER_ID, yaml_content, title, category,
exploration_id, assets_list)
rights_manager.publish_exploration(
feconf.ADMIN_COMMITTER_ID, exploration_id)
# Release ownership of all explorations.
rights_manager.release_ownership(
feconf.ADMIN_COMMITTER_ID, exploration_id)
index_explorations_given_ids([exploration_id])
logging.info('Exploration with id %s was loaded.' % exploration_id)
def get_next_page_of_all_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None):
"""Returns a page of commits to all explorations in reverse time order.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_commits(
page_size, urlsafe_start_cursor))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def get_next_page_of_all_non_private_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None,
max_age=None):
"""Returns a page of non-private commits in reverse time order. If max_age
is given, it should be a datetime.timedelta instance.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
if max_age is not None and not isinstance(max_age, datetime.timedelta):
raise ValueError(
"max_age must be a datetime.timedelta instance. or None.")
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_non_private_commits(
page_size, urlsafe_start_cursor, max_age=max_age))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def _exp_rights_to_search_dict(rights):
# Allow searches like "is:featured".
doc = {}
if rights.status == rights_manager.EXPLORATION_STATUS_PUBLICIZED:
doc['is'] = 'featured'
return doc
def _should_index(exp):
rights = rights_manager.get_exploration_rights(exp.id)
return rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE
def _get_search_rank(exp_id):
"""Returns an integer determining the document's rank in search.
Featured explorations get a ranking bump, and so do explorations that
have been more recently updated.
"""
# TODO(sll): Improve this calculation.
exploration = get_exploration_by_id(exp_id)
rights = rights_manager.get_exploration_rights(exp_id)
rank = (
3000 if rights.status == rights_manager.EXPLORATION_STATUS_PUBLICIZED
else 0)
_BEGINNING_OF_TIME = datetime.datetime(2013, 6, 30)
time_delta_days = (exploration.last_updated - _BEGINNING_OF_TIME).days
rank += int(time_delta_days)
return rank
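# Worked example for the ranking above (editor's addition): a publicized
# exploration last updated 100 days after 2013-06-30 gets rank
# 3000 + 100 = 3100, while a non-publicized public exploration updated on the
# same day gets rank 0 + 100 = 100.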
def _exp_to_search_dict(exp):
rights = rights_manager.get_exploration_rights(exp.id)
doc = {
'id': exp.id,
'language_code': exp.language_code,
'title': exp.title,
'category': exp.category,
'skills': exp.skill_tags,
'blurb': exp.blurb,
'objective': exp.objective,
'author_notes': exp.author_notes,
'rank': _get_search_rank(exp.id),
}
doc.update(_exp_rights_to_search_dict(rights))
return doc
def clear_search_index():
"""WARNING: This runs in-request, and may therefore fail if there are too
many entries in the index.
"""
search_services.clear_index(SEARCH_INDEX_EXPLORATIONS)
def index_explorations_given_ids(exp_ids):
# We pass 'strict=False' so as not to index deleted explorations.
exploration_models = get_multiple_explorations_by_id(exp_ids, strict=False)
search_services.add_documents_to_index([
_exp_to_search_dict(exp) for exp in exploration_models.values()
if _should_index(exp)
], SEARCH_INDEX_EXPLORATIONS)
def patch_exploration_search_document(exp_id, update):
"""Patches an exploration's current search document, with the values
from the 'update' dictionary."""
doc = search_services.get_document_from_index(
exp_id, SEARCH_INDEX_EXPLORATIONS)
doc.update(update)
search_services.add_documents_to_index([doc], SEARCH_INDEX_EXPLORATIONS)
def update_exploration_status_in_search(exp_id):
rights = rights_manager.get_exploration_rights(exp_id)
if rights.status == rights_manager.EXPLORATION_STATUS_PRIVATE:
delete_documents_from_search_index([exp_id])
else:
patch_exploration_search_document(
rights.id, _exp_rights_to_search_dict(rights))
def delete_documents_from_search_index(exploration_ids):
search_services.delete_documents_from_index(
exploration_ids, SEARCH_INDEX_EXPLORATIONS)
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
args:
- query: the query string to search for.
- sort: a string indicating how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field name
to sort on. When this is None, results are based on 'rank'. See
_exp_to_search_dict to see how rank is determined.
- limit: the maximum number of results to return.
- cursor: A cursor, used to get the next page of results.
If there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
returns: a tuple:
- a list of exploration ids that match the query.
- a cursor if there are more matching explorations to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string that
can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True)
| apache-2.0 | 4,618,016,028,863,785,000 | 39.110052 | 79 | 0.66887 | false |
pancentric/django-cms | cms/test_utils/testcases.py | 3 | 14383 | # -*- coding: utf-8 -*-
from cms.models import Page
from cms.test_utils.util.context_managers import (UserLoginContext,
SettingsOverride)
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser, Permission
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.test import testcases
from django.test.client import RequestFactory
from django.utils.translation import activate
from menus.menu_pool import menu_pool
from cms.utils.compat.urls import urljoin, unquote
import sys
import warnings
import json
from cms.utils.permissions import set_current_user
URL_CMS_PAGE = "/en/admin/cms/page/"
URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/")
URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/")
URL_CMS_PAGE_ADVANCED_CHANGE = urljoin(URL_CMS_PAGE, "%d/advanced-settings/")
URL_CMS_PAGE_PERMISSION_CHANGE = urljoin(URL_CMS_PAGE, "%d/permission-settings/")
URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s"
URL_CMS_PAGE_CHANGE_TEMPLATE = URL_CMS_PAGE_CHANGE + "change_template/"
URL_CMS_PAGE_PUBLISH = URL_CMS_PAGE_CHANGE + "publish/"
URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/")
URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE, "add-plugin/")
URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE, "edit-plugin/")
URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE, "move-plugin/")
URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE, "delete-plugin/")
URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/")
URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/")
URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/")
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# Disable the per-module cache for every module otherwise if the warning
# which the caller is expecting us to collect was already emitted it won't
# be re-emitted by the call to f which happens below.
for v in sys.modules.values():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
# some wacky object raises some wacky exception in response to
# the setattr attempt.
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
class CMSTestCase(testcases.TestCase):
counter = 1
def _fixture_setup(self):
super(CMSTestCase, self)._fixture_setup()
self.create_fixtures()
activate("en")
def create_fixtures(self):
pass
def _post_teardown(self):
# Needed to clean the menu keys cache, see menu.menu_pool.clear()
menu_pool.clear()
super(CMSTestCase, self)._post_teardown()
set_current_user(None)
def login_user_context(self, user):
return UserLoginContext(self, user)
def _create_user(self, username, is_staff=False, is_superuser=False,
is_active=True, add_default_permissions=False, permissions=None):
"""
Use this method to create users.
Default permissions on page and text plugin are added if creating a
non-superuser and `add_default_permissions` is set.
Set the `permissions` parameter to an iterable of permission codenames to add
custom permissions.
"""
user = User(username=username, email=username+'@django-cms.org',
is_staff=is_staff, is_active=is_active, is_superuser=is_superuser)
user.set_password(username)
user.save()
if is_staff and not is_superuser and add_default_permissions:
user.user_permissions.add(Permission.objects.get(codename='add_text'))
user.user_permissions.add(Permission.objects.get(codename='delete_text'))
user.user_permissions.add(Permission.objects.get(codename='change_text'))
user.user_permissions.add(Permission.objects.get(codename='publish_page'))
user.user_permissions.add(Permission.objects.get(codename='add_page'))
user.user_permissions.add(Permission.objects.get(codename='change_page'))
user.user_permissions.add(Permission.objects.get(codename='delete_page'))
if is_staff and not is_superuser and permissions:
for permission in permissions:
user.user_permissions.add(Permission.objects.get(codename=permission))
return user
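# Illustrative usage of _create_user (editor's addition; the usernames and
# permission codenames below are hypothetical, not fixtures used elsewhere):
#
#     editor = self._create_user('editor', is_staff=True,
#                                add_default_permissions=True)
#     reviewer = self._create_user('reviewer', is_staff=True,
#                                  permissions=['change_page'])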
def get_superuser(self):
try:
admin = User.objects.get(username="admin")
except User.DoesNotExist:
admin = self._create_user("admin", is_staff=True, is_superuser=True)
return admin
def get_staff_user_with_no_permissions(self):
"""
Used in security tests
"""
staff = self._create_user("staff", is_staff=True, is_superuser=False)
return staff
def get_staff_user_with_std_permissions(self):
"""
This is a non-superuser staff user
"""
staff = self._create_user("staff", is_staff=True, is_superuser=False,
add_default_permissions=True)
return staff
def get_new_page_data(self, parent_id=''):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
'parent': parent_id,
'site': 1,
}
# required only if user has can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
self.counter = self.counter + 1
return page_data
def get_new_page_data_dbfields(self, parent=None, site=None,
language=None,
template='nav_playground.html',):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0] if not language else language,
'template': template,
'parent': parent if parent else None,
'site': site if site else Site.objects.get_current(),
}
self.counter = self.counter + 1
return page_data
def get_pagedata_from_dbfields(self, page_data):
"""Converts data created by get_new_page_data_dbfields to data
created by get_new_page_data so you can switch between test cases
in api.create_page and client.post"""
page_data['site'] = page_data['site'].id
page_data['parent'] = page_data['parent'].id if page_data['parent'] else ''
# required only if user has can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
return page_data
def print_page_structure(self, qs):
"""Just a helper to see the page struct.
"""
for page in qs.order_by('tree_id', 'lft'):
ident = " " * page.level
print(u"%s%s (%s), lft: %s, rght: %s, tree_id: %s" % (ident, page,
page.pk, page.lft, page.rght, page.tree_id))
def print_node_structure(self, nodes, *extra):
def _rec(nodes, level=0):
ident = level * ' '
for node in nodes:
raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra]
attrs = ', '.join(['%s: %r' % data for data in raw_attrs])
print(u"%s%s: %s" % (ident, node.title, attrs))
_rec(node.children, level + 1)
_rec(nodes)
def assertObjectExist(self, qs, **filter):
try:
return qs.get(**filter)
except ObjectDoesNotExist:
pass
raise self.failureException("ObjectDoesNotExist raised for filter %s" % filter)
def assertObjectDoesNotExist(self, qs, **filter):
try:
qs.get(**filter)
except ObjectDoesNotExist:
return
raise self.failureException("ObjectDoesNotExist not raised for filter %s" % filter)
def copy_page(self, page, target_page):
from cms.utils.page import get_available_slug
data = {
'position': 'last-child',
'target': target_page.pk,
'site': 1,
'copy_permissions': 'on',
'copy_moderation': 'on',
}
response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data)
self.assertEquals(response.status_code, 200)
# Altered to reflect the new django-js jsonified response messages
expected = {"status": 200, "content": "ok"}
self.assertEquals(json.loads(response.content.decode('utf8')), expected)
title = page.title_set.all()[0]
copied_slug = get_available_slug(title)
copied_page = self.assertObjectExist(Page.objects, title_set__slug=copied_slug, parent=target_page)
return copied_page
def move_page(self, page, target_page, position="first-child"):
page.move_page(target_page, position)
return self.reload_page(page)
def reload_page(self, page):
"""
Returns a fresh instance of the page from the database
"""
return self.reload(page)
def reload(self, obj):
return obj.__class__.objects.get(pk=obj.pk)
def get_pages_root(self):
return unquote(reverse("pages-root"))
def get_context(self, path=None, page=None):
if not path:
path = self.get_pages_root()
context = {}
request = self.get_request(path, page=page)
context['request'] = request
return Context(context)
def get_request(self, path=None, language=None, post_data=None, enforce_csrf_checks=False, page=None):
factory = RequestFactory()
if not path:
path = self.get_pages_root()
if not language:
if settings.USE_I18N:
language = settings.LANGUAGES[0][0]
else:
language = settings.LANGUAGE_CODE
if post_data:
request = factory.post(path, post_data)
else:
request = factory.get(path)
request.session = self.client.session
request.user = getattr(self, 'user', AnonymousUser())
request.LANGUAGE_CODE = language
request._dont_enforce_csrf_checks = not enforce_csrf_checks
if page:
request.current_page = page
class MockStorage(object):
def __len__(self):
return 0
def __iter__(self):
return iter([])
def add(self, level, message, extra_tags=''):
pass
def update(self, response):
pass
request._messages = MockStorage()
return request
def check_published_page_attributes(self, page):
public_page = page.publisher_public
if page.parent:
self.assertEqual(page.parent_id, public_page.parent.publisher_draft.id)
self.assertEqual(page.level, public_page.level)
# TODO: add check for siblings
draft_siblings = list(page.get_siblings(True).filter(
publisher_is_draft=True
).order_by('tree_id', 'parent', 'lft'))
public_siblings = list(public_page.get_siblings(True).filter(
publisher_is_draft=False
).order_by('tree_id', 'parent', 'lft'))
skip = 0
for i, sibling in enumerate(draft_siblings):
if not sibling.publisher_public_id:
skip += 1
continue
self.assertEqual(sibling.id,
public_siblings[i - skip].publisher_draft.id)
def failUnlessWarns(self, category, message, f, *args, **kwargs):
warningsShown = []
result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
if not warningsShown:
self.fail("No warnings emitted")
first = warningsShown[0]
for other in warningsShown[1:]:
if ((other.message, other.category)
!= (first.message, first.category)):
self.fail("Can't handle different warnings")
self.assertEqual(first.message, message)
self.assertTrue(first.category is category)
return result
assertWarns = failUnlessWarns
class SettingsOverrideTestCase(CMSTestCase):
settings_overrides = {}
def _pre_setup(self):
self._enter_settings_override()
super(SettingsOverrideTestCase, self)._pre_setup()
def _enter_settings_override(self):
self._settings_ctx_manager = SettingsOverride(**self.settings_overrides)
self._settings_ctx_manager.__enter__()
def _post_teardown(self):
super(SettingsOverrideTestCase, self)._post_teardown()
self._exit_settings_override()
def _exit_settings_override(self):
self._settings_ctx_manager.__exit__(None, None, None)
| bsd-3-clause | 3,114,386,973,136,896,000 | 36.949868 | 107 | 0.614614 | false |
UManPychron/pychron | pychron/hardware/thermo_spectrometer_controller.py | 3 | 1290 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits, on_trait_change, Str, Int, Float, Button
# from traitsui.api import View, Item, Group, HGroup, VGroup
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.core.core_device import CoreDevice
class ArgusController(CoreDevice):
pass
class HelixController(CoreDevice):
pass
# ============= EOF =============================================
| apache-2.0 | 339,919,871,310,038,660 | 36.941176 | 81 | 0.562016 | false |
shubhdev/openedx | common/djangoapps/util/module_utils.py | 18 | 1152 | """
Utility library containing operations used/shared by multiple courseware modules
"""
def yield_dynamic_descriptor_descendents(descriptor, module_creator): # pylint: disable=invalid-name
"""
This returns all of the descendants of a descriptor. If the descriptor
has dynamic children, the module will be created using module_creator
and the children (as descriptors) of that module will be returned.
"""
stack = [descriptor]
while len(stack) > 0:
next_descriptor = stack.pop()
stack.extend(get_dynamic_descriptor_children(next_descriptor, module_creator))
yield next_descriptor
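# Illustrative usage sketch (editor's addition; `course_descriptor` and
# `create_module` are hypothetical caller-supplied objects, not part of this
# module):
#
#     for descriptor in yield_dynamic_descriptor_descendents(
#             course_descriptor, create_module):
#         print descriptor.location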
def get_dynamic_descriptor_children(descriptor, module_creator, usage_key_filter=None):
"""
Returns the children of the given descriptor, while supporting descriptors with dynamic children.
"""
module_children = []
if descriptor.has_dynamic_children():
module = module_creator(descriptor)
if module is not None:
module_children = module.get_child_descriptors()
else:
module_children = descriptor.get_children(usage_key_filter)
return module_children
| agpl-3.0 | 9,151,138,095,491,664,000 | 36.16129 | 101 | 0.708333 | false |
andresailer/DIRAC | Resources/Computing/SudoComputingElement.py | 7 | 8112 | """ A computing element class that uses sudo
"""
import os
import pwd
import stat
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
__RCSID__ = "$Id$"
class SudoComputingElement( ComputingElement ):
#############################################################################
def __init__( self, ceUniqueID ):
""" Standard constructor.
"""
super(SudoComputingElement, self).__init__(ceUniqueID)
self.submittedJobs = 0
#############################################################################
def _addCEConfigDefaults( self ):
"""Method to make sure all necessary Configuration Parameters are defined
"""
# Assure that any global parameters are loaded
super(SudoComputingElement, self)._addCEConfigDefaults()
#############################################################################
def submitJob( self, executableFile, proxy, **kwargs ):
""" Method to submit job, overridden from super-class.
"""
self.log.verbose( 'Setting up proxy for payload' )
result = self.writeProxyToFile( proxy )
if not result['OK']:
return result
payloadProxy = result['Value']
if not 'X509_USER_PROXY' in os.environ:
self.log.error( 'X509_USER_PROXY variable for pilot proxy not found in local environment' )
return S_ERROR( DErrno.EPROXYFIND, "X509_USER_PROXY not found")
pilotProxy = os.environ['X509_USER_PROXY']
self.log.info( 'Pilot proxy X509_USER_PROXY=%s' % pilotProxy )
# See if a fixed value has been given
payloadUsername = self.ceParameters.get( 'PayloadUser' )
if payloadUsername:
self.log.info( 'Payload username %s from PayloadUser in ceParameters' % payloadUsername )
else:
# First username in the sequence to use when running payload job
# If first is pltXXp00 then have pltXXp01, pltXXp02, ...
try:
baseUsername = self.ceParameters.get('BaseUsername')
baseCounter = int( baseUsername[-2:] )
self.log.info( "Base username from BaseUsername in ceParameters : %s" % baseUsername )
except:
baseUsername = os.environ['USER'] + '00p00'
baseCounter = 0
self.log.info( 'Base username from $USER + 00p00 : %s' % baseUsername )
# Next one in the sequence
payloadUsername = baseUsername[:-2] + ( '%02d' % (baseCounter + self.submittedJobs) )
self.log.info( 'Payload username set to %s using jobs counter' % payloadUsername )
try:
payloadUID = pwd.getpwnam(payloadUsername).pw_uid
payloadGID = pwd.getpwnam(payloadUsername).pw_gid
except KeyError:
error = S_ERROR( 'User "' + str(payloadUsername) + '" does not exist!' )
return error
self.log.verbose( 'Starting process for monitoring payload proxy' )
gThreadScheduler.addPeriodicTask( self.proxyCheckPeriod, self.monitorProxy,
taskArgs = ( pilotProxy, payloadProxy, payloadUsername, payloadUID, payloadGID ),
executions = 0, elapsedTime = 0 )
# Submit job
self.log.info( 'Changing permissions of executable (%s) to 0755' % executableFile )
try:
os.chmod( os.path.abspath( executableFile ), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH )
except OSError as x:
self.log.error( 'Failed to change permissions of executable to 0755 with exception',
'\n%s' % ( x ) )
result = self.sudoExecute( os.path.abspath( executableFile ), payloadProxy, payloadUsername, payloadUID, payloadGID )
if not result['OK']:
self.log.error( 'Failed sudoExecute', result )
return result
self.log.debug( 'Sudo CE result OK' )
self.submittedJobs += 1
return S_OK()
#############################################################################
def sudoExecute( self, executableFile, payloadProxy, payloadUsername, payloadUID, payloadGID ):
"""Run sudo with checking of the exit status code.
"""
# We now implement a file giveaway using groups, to avoid any need to sudo to root.
# Each payload user must have their own group. The pilot user must be a member
# of all of these groups. This allows the pilot user to set the group of the
# payloadProxy file to be that of the payload user. The payload user can then
# read it and make a copy of it (/tmp/x509up_uNNNN) that it owns. Some grid
# commands check that the proxy is owned by the current user so the copy stage
# is necessary.
# 1) Make sure the payload user can read its proxy via its per-user group
os.chown( payloadProxy, -1, payloadGID )
os.chmod( payloadProxy, stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP )
# 2) Now create a copy of the proxy owned by the payload user
result = shellCall( 0,
'/usr/bin/sudo -u %s sh -c "cp -f %s /tmp/x509up_u%d ; chmod 0400 /tmp/x509up_u%d"' % ( payloadUsername, payloadProxy, payloadUID, payloadUID ),
callbackFunction = self.sendOutput )
# 3) Make sure the current directory is +rwx by the pilot's group
# (needed for InstallDIRAC but not for LHCbInstallDIRAC, for example)
os.chmod('.', os.stat('.').st_mode | stat.S_IRWXG)
# Run the executable (the wrapper in fact)
cmd = "/usr/bin/sudo -u %s PATH=$PATH DIRACSYSCONFIG=/scratch/%s/pilot.cfg LD_LIBRARY_PATH=$LD_LIBRARY_PATH PYTHONPATH=$PYTHONPATH X509_CERT_DIR=$X509_CERT_DIR X509_USER_PROXY=/tmp/x509up_u%d sh -c '%s'" % ( payloadUsername, os.environ['USER'], payloadUID, executableFile )
self.log.info( 'CE submission command is: %s' % cmd )
result = shellCall( 0, cmd, callbackFunction = self.sendOutput )
if not result['OK']:
result['Value'] = ( 0, '', '' )
return result
resultTuple = result['Value']
status = resultTuple[0]
stdOutput = resultTuple[1]
stdError = resultTuple[2]
self.log.info( "Status after the sudo execution is %s" % str( status ) )
if status > 128:
error = S_ERROR( status )
error['Value'] = ( status, stdOutput, stdError )
return error
return result
#############################################################################
def getCEStatus( self ):
""" Method to return information on running and pending jobs.
"""
result = S_OK()
result['SubmittedJobs'] = 0
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
return result
#############################################################################
def monitorProxy( self, pilotProxy, payloadProxy, payloadUsername, payloadUID, payloadGID ):
""" Monitor the payload proxy and renew as necessary.
"""
retVal = self._monitorProxy( pilotProxy, payloadProxy )
if not retVal['OK']:
# Failed to renew the proxy, nothing else to be done
return retVal
if not retVal['Value']:
# No need to renew the proxy, nothing else to be done
return retVal
self.log.info( 'Re-executing sudo to make renewed payload proxy available as before' )
# New version of the proxy file, so we have to do the copy again
# 1) Make sure the payload user can read its proxy via its per-user group
os.chown( payloadProxy, -1, payloadGID )
os.chmod( payloadProxy, stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP )
# 2) Now recreate the copy of the proxy owned by the payload user
result = shellCall( 0,
'/usr/bin/sudo -u %s sh -c "cp -f %s /tmp/x509up_u%d ; chmod 0400 /tmp/x509up_u%d"'
% ( payloadUsername, payloadProxy, payloadUID, payloadUID ),
callbackFunction = self.sendOutput )
return S_OK( 'Proxy checked' )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| gpl-3.0 | 7,033,879,609,211,570,000 | 43.327869 | 277 | 0.612303 | false |
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/weave/size_check.py | 11 | 9761 | from numpy import ones, ndarray, array, asarray, concatenate, zeros, shape, \
alltrue, equal, divide, arccos, arcsin, arctan, cos, cosh, \
sin, sinh, exp, ceil, floor, fabs, log, log10, sqrt, argmin, \
argmax, argsort, around, absolute, sign, negative, float32
import sys
numericTypes = (int, long, float, complex)
def isnumeric(t):
return isinstance(t, numericTypes)
def time_it():
import time
expr = "ex[:,1:,1:] = ca_x[:,1:,1:] * ex[:,1:,1:]" \
"+ cb_y_x[:,1:,1:] * (hz[:,1:,1:] - hz[:,:-1,1:])" \
"- cb_z_x[:,1:,1:] * (hy[:,1:,1:] - hy[:,1:,:-1])"
ex = ones((10,10,10),dtype=float32)
ca_x = ones((10,10,10),dtype=float32)
cb_y_x = ones((10,10,10),dtype=float32)
cb_z_x = ones((10,10,10),dtype=float32)
hz = ones((10,10,10),dtype=float32)
hy = ones((10,10,10),dtype=float32)
N = 1
t1 = time.time()
for i in range(N):
passed = check_expr(expr,locals())
t2 = time.time()
print 'time per call:', (t2 - t1)/N
print 'passed:', passed
def check_expr(expr,local_vars,global_vars={}):
""" Currently only checks expressions (not suites).
Doesn't check that lhs == rhs; that is checked by the compiled function though.
"""
values ={}
#first handle the globals
for var,val in global_vars.items():
if isinstance(val, ndarray):
values[var] = dummy_array(val,name=var)
elif isnumeric(val):
values[var] = val
#now handle the locals
for var,val in local_vars.items():
if isinstance(val, ndarray):
values[var] = dummy_array(val,name=var)
if isnumeric(val):
values[var] = val
exec(expr,values)
try:
exec(expr,values)
except:
try:
eval(expr,values)
except:
return 0
return 1
empty = array(())
empty_slice = slice(None)
def make_same_length(x,y):
try:
Nx = len(x)
except:
Nx = 0
try:
Ny = len(y)
except:
Ny = 0
if Nx == Ny == 0:
return empty,empty
elif Nx == Ny:
return asarray(x),asarray(y)
else:
diff = abs(Nx - Ny)
front = ones(diff, int)
if Nx > Ny:
return asarray(x), concatenate((front,y))
elif Ny > Nx:
return concatenate((front,x)),asarray(y)
def binary_op_size(xx,yy):
""" This returns the resulting size from operating on xx, and yy
with a binary operator. It accounts for broadcasting, and
throws errors if the array sizes are incompatible.
"""
x,y = make_same_length(xx,yy)
res = zeros(len(x))
for i in range(len(x)):
if x[i] == y[i]:
res[i] = x[i]
elif x[i] == 1:
res[i] = y[i]
elif y[i] == 1:
res[i] = x[i]
else:
# offer more information here about which variables.
raise ValueError("frames are not aligned")
return res
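# Editor's illustration of the broadcasting rule implemented above (assuming
# numpy-style semantics):
#     binary_op_size((3, 1, 5), (4, 5)) -> array([ 3., 4., 5.])
#     binary_op_size((3,), (4,)) -> raises ValueError("frames are not aligned")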
class dummy_array(object):
def __init__(self,ary,ary_is_shape = 0,name=None):
self.name = name
if ary_is_shape:
self.shape = ary
#self.shape = asarray(ary)
else:
try:
self.shape = shape(ary)
except:
self.shape = empty
#self.value = ary
def binary_op(self,other):
try:
x = other.shape
except AttributeError:
x = empty
new_shape = binary_op_size(self.shape,x)
return dummy_array(new_shape,1)
def __cmp__(self,other):
# This isn't an exact compare, but does work for ==
# kludge for Numeric
if isnumeric(other):
return 0
if len(self.shape) == len(other.shape) == 0:
return 0
return not alltrue(equal(self.shape,other.shape),axis=0)
def __add__(self,other): return self.binary_op(other)
def __radd__(self,other): return self.binary_op(other)
def __sub__(self,other): return self.binary_op(other)
def __rsub__(self,other): return self.binary_op(other)
def __mul__(self,other): return self.binary_op(other)
def __rmul__(self,other): return self.binary_op(other)
def __div__(self,other): return self.binary_op(other)
def __rdiv__(self,other): return self.binary_op(other)
def __mod__(self,other): return self.binary_op(other)
def __rmod__(self,other): return self.binary_op(other)
def __lshift__(self,other): return self.binary_op(other)
def __rshift__(self,other): return self.binary_op(other)
# unary ops
def __neg__(self,other): return self
def __pos__(self,other): return self
def __abs__(self,other): return self
def __invert__(self,other): return self
# Not sure what to do with coersion ops. Ignore for now.
#
# not currently supported by compiler.
# __divmod__
# __pow__
# __rpow__
# __and__
# __or__
# __xor__
# item access and slicing
def __setitem__(self,indices,val):
#ignore for now
pass
def __len__(self):
return self.shape[0]
def __getslice__(self,i,j):
i = max(i, 0); j = max(j, 0)
return self.__getitem__((slice(i,j),))
def __getitem__(self,indices):
# ayeyaya this is a mess
#print indices, type(indices), indices.shape
if not isinstance(indices, tuple):
indices = (indices,)
if Ellipsis in indices:
raise IndexError("Ellipsis not currently supported")
new_dims = []
dim = 0
for index in indices:
try:
dim_len = self.shape[dim]
except IndexError:
raise IndexError("To many indices specified")
#if (type(index) is SliceType and index.start == index.stop == index.step):
if (index is empty_slice):
slc_len = dim_len
elif isinstance(index, slice):
beg,end,step = index.start,index.stop,index.step
# handle if they are dummy arrays
#if hasattr(beg,'value') and type(beg.value) != ndarray:
# beg = beg.value
#if hasattr(end,'value') and type(end.value) != ndarray:
# end = end.value
#if hasattr(step,'value') and type(step.value) != ndarray:
# step = step.value
if beg is None: beg = 0
if end == sys.maxint or end is None:
end = dim_len
if step is None:
step = 1
if beg < 0: beg += dim_len
if end < 0: end += dim_len
# the following is list like behavior,
# which isn't adhered to by arrays.
# FIX THIS ANOMALY IN NUMERIC!
if beg < 0: beg = 0
if beg > dim_len: beg = dim_len
if end < 0: end = 0
if end > dim_len: end = dim_len
# This is rubbish.
if beg == end:
beg,end,step = 0,0,1
elif beg >= dim_len and step > 0:
beg,end,step = 0,0,1
#elif index.step > 0 and beg <= end:
elif step > 0 and beg <= end:
pass #slc_len = abs(divide(end-beg-1,step)+1)
# handle [::-1] and [-1::-1] correctly
#elif index.step > 0 and beg > end:
elif step > 0 and beg > end:
beg,end,step = 0,0,1
elif(step < 0 and index.start is None and index.stop is None):
beg,end,step = 0,dim_len,-step
elif(step < 0 and index.start is None):
# +1 because negative stepping is inclusive
beg,end,step = end+1,dim_len,-step
elif(step < 0 and index.stop is None):
beg,end,step = 0,beg+1,-step
elif(step < 0 and beg > end):
beg,end,step = end,beg,-step
elif(step < 0 and beg < end):
beg,end,step = 0,0,-step
slc_len = abs(divide(end-beg-1,step)+1)
new_dims.append(slc_len)
else:
if index < 0: index += dim_len
if index >=0 and index < dim_len:
#this reduces the array dimensions by one
pass
else:
raise IndexError("Index out of range")
dim += 1
new_dims.extend(self.shape[dim:])
if 0 in new_dims:
raise IndexError("Zero length slices not currently supported")
return dummy_array(new_dims,1)
def __repr__(self):
val = str((self.name, str(self.shape)))
return val
def unary(ary):
return ary
def not_implemented(ary):
return ary
#all imported from Numeric and need to be reassigned.
unary_op = [arccos, arcsin, arctan, cos, cosh, sin, sinh,
exp,ceil,floor,fabs,log,log10,sqrt]
unsupported = [argmin,argmax, argsort,around, absolute,sign,negative,floor]
for func in unary_op:
func = unary
for func in unsupported:
func = not_implemented
def reduction(ary,axis=0):
if axis < 0:
axis += len(ary.shape)
if axis < 0 or axis >= len(ary.shape):
raise ValueError("Dimension not in array")
new_dims = list(ary.shape[:axis]) + list(ary.shape[axis+1:])
return dummy_array(new_dims,1)
# functions currently not supported by compiler
# reductions are gonna take some array reordering for the general case,
# so this is gonna take some thought (probably some tree manipulation).
def take(ary,axis=0): raise NotImplementedError
# and all the rest
| gpl-3.0 | 7,040,450,800,996,412,000 | 33.860714 | 87 | 0.529352 | false |
prateekjoshi2013/courses | cs/udacity/cs101-intro-cs/code/lesson6/problem-set/multiplying_rabbits.py | 4 | 1473 | # Rabbits Multiplying
# A (slightly) more realistic model of rabbit multiplication than the Fibonacci
# model, would assume that rabbits eventually die. For this question, some
# rabbits die from month 6 onwards.
#
# Thus, we can model the number of rabbits as:
#
# rabbits(1) = 1 # There is one pair of immature rabbits in Month 1
# rabbits(2) = 1 # There is one pair of mature rabbits in Month 2
#
# For months 3-5:
# Same as Fibonacci model, no rabbits dying yet
# rabbits(n) = rabbits(n - 1) + rabbits(n - 2)
#
#
# For months > 5:
# All the rabbits that are over 5 months old die along with a few others
# so that the number that die is equal to the number alive 5 months ago.
# Before dying, the bunnies reproduce.
# rabbits(n) = rabbits(n - 1) + rabbits(n - 2) - rabbits(n - 5)
#
# This produces the rabbit sequence: 1, 1, 2, 3, 5, 7, 11, 16, 24, 35, 52, ...
#
# Define a procedure rabbits that takes as input a number n, and returns a
# number that is the value of the nth number in the rabbit sequence.
# For example, rabbits(10) -> 35. (It is okay if your procedure takes too
# long to run on inputs above 30.)
def rabbits(n):
if n == 1 or n == 2:
return 1
elif n < 6:
return rabbits(n-1) + rabbits(n-2)
else:
return rabbits(n-1) + rabbits(n-2) - rabbits(n-5)
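# Editor's note: the naive recursion above is exponential in n. Below is a
# minimal memoized sketch of the same recurrence (an illustrative addition,
# not part of the original exercise) that runs in linear time.
_rabbit_cache = {1: 1, 2: 1}
def rabbits_memo(n):
    # Compute each month at most once by caching results in a module-level dict.
    if n not in _rabbit_cache:
        if n < 6:
            _rabbit_cache[n] = rabbits_memo(n - 1) + rabbits_memo(n - 2)
        else:
            _rabbit_cache[n] = (rabbits_memo(n - 1) + rabbits_memo(n - 2)
                                - rabbits_memo(n - 5))
    return _rabbit_cache[n]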
print rabbits(10)
#>>> 35
s = ""
for i in range(1,12):
s = s + str(rabbits(i)) + " "
print s
#>>> 1 1 2 3 5 7 11 16 24 35 52 | mit | -4,106,670,378,788,214,000 | 31.755556 | 79 | 0.643585 | false |
robocomp/learnbot | learnbot_dsl/guis/AddVar.py | 2 | 2292 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/ivan/robocomp/components/learnbot/learnbot_dsl/guis/AddVar.ui',
# licensing of '/home/ivan/robocomp/components/learnbot/learnbot_dsl/guis/AddVar.ui' applies.
#
# Created: Thu Mar 7 12:39:25 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(300, 89)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.text = QtWidgets.QLabel(Dialog)
self.text.setObjectName("text")
self.horizontalLayout_2.addWidget(self.text)
self.nameLineEdit = QtWidgets.QLineEdit(Dialog)
self.nameLineEdit.setObjectName("nameLineEdit")
self.horizontalLayout_2.addWidget(self.nameLineEdit)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.cancelPushButton = QtWidgets.QPushButton(Dialog)
self.cancelPushButton.setAutoDefault(False)
self.cancelPushButton.setObjectName("cancelPushButton")
self.horizontalLayout.addWidget(self.cancelPushButton)
self.okPushButton = QtWidgets.QPushButton(Dialog)
self.okPushButton.setObjectName("okPushButton")
self.horizontalLayout.addWidget(self.okPushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtWidgets.QApplication.translate("Dialog", "Add Variable", None, -1))
self.text.setText(QtWidgets.QApplication.translate("Dialog", "Nombre:", None, -1))
self.cancelPushButton.setText(QtWidgets.QApplication.translate("Dialog", "Cancel", None, -1))
self.okPushButton.setText(QtWidgets.QApplication.translate("Dialog", "Ok", None, -1))
| gpl-3.0 | -5,714,146,100,032,923,000 | 47.765957 | 123 | 0.724258 | false |
csawyerYumaed/pyOwnCloud | csync/csync.py | 1 | 18028 | #!/usr/bin/env python
import os
import sys
import argparse
import ConfigParser
import ctypes
import re
import pprint
import copy
import getpass
import logging
logging.basicConfig(level=logging.DEBUG, format='%(name)s-%(levelname)s: %(message)s')
try:
import keyring
except:
logging.debug('Keyring not available')
keyring = None
try:
from progressbar import ProgressBar, Percentage, Bar, ETA, FileTransferSpeed
except ImportError:
logging.debug('ProgressBar not available')
ProgressBar = None
try:
import csynclib
except ImportError as impError:
logging.critical(impError.message)
sys.exit(1)
import version
#Global variables
VERSION = version.version
PASSWORD_SAFE = '********'
DEBUG = False
def CSYNC_VERSION_INT(a, b, c):
return ((a) << 16 | (b) << 8 | (c))
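# For example, CSYNC_VERSION_INT(0, 81, 0) == (0 << 16) | (81 << 8) | 0 == 20736,
# the packed form of version 0.81.0 used for the csync_version() capability
# check in sync() below.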
class ownCloudSync():
"""This handles the actual syncying with ownCloud
cfg is a {}. should have these things:
user:
pass:
url:
src:
None of them are optional. :)
optional items:
SSLfingerPrint:
"""
def __init__(self, cfg = None):
"""initialize"""
self.auth_callback = None
self.log_callback = None
self.progress_callback = None
self.logger = logging.getLogger("pyOC")
self.cfg = cfg
self.debug = cfg['debug']
self._user = cfg['user']
self._password = cfg['pass']
self._fingerprint = cfg['sslfingerprint'].lower()
self._keyring = cfg['use_keyring']
self.libVersion = csynclib.csync_version(0,40,1)
self.logger.debug('libocsync version: %s', self.libVersion)
c = csynclib.CSYNC()
self.ctx = ctypes.pointer(c)
self.buildURL()
self.logger.info('Syncing %s to %s, logging in as user: %s' , self.cfg['src'],
self.cfg['url'],
self._user,
)
if cfg.has_key('dry_run') and cfg['dry_run']:
return
self.sync()
def buildURL(self):
"""build the URL we use for owncloud"""
url = self.cfg['url']
if not url:
self.logger.error('You must specify a url, use --url, or put in cfg file.')
sys.exit(1)
url = url.replace('https','ownclouds')
url = url.replace('http','owncloud')
#add / if needed
if url[-1:] != '/':
url = ''.join((url,'/'))
url += self.cfg['davPath']
#add / if needed
if url[-1:] != '/':
url = ''.join((url,'/'))
url = ''.join((url, self.cfg['dst']))
#take off any trailing slash.
if url[-1:] == '/':
url = url[:-1]
self.cfg['url'] = url
self.logger.debug('buildURL: %s', url)
return
def get_auth_callback(self):
"""gives back the auth callback:
The actual function is called out of the ownCloudSync object."""
def auth_wrapper(prompt, buffer, bufferLength, echo, verify, userData):
return self.authCallback(prompt, buffer, bufferLength, echo, verify, userData)
if not self.auth_callback:
self.auth_callback = csynclib.csync_auth_callback(auth_wrapper)
return self.auth_callback
def authCallback(self, prompt, buffer, bufferLength, echo, verify, userData):
"""
(const char *prompt, char *buf, size_t len,
int echo, int verify, void *userdata)
called like this:
("Enter your username: ", buf, NE_ABUFSIZ-1, 1, 0, dav_session.userdata )
type is 1 for username, 0 for password.
calls functions username(), password() or ssl(fingerprint)
"""
self.logger.debug("authCallback: '%s', %s, %i, %i, %i, %s", prompt, buffer, bufferLength, echo, verify, userData)
ret = None
if 'username' in prompt:
ret = self.username()
elif 'password' in prompt:
ret = self.password()
elif 'SSL' in prompt:
fingerprint = re.search("fingerprint: ([\\w\\d:]+)", prompt).group(1)
ret = self.ssl(fingerprint)
else:
self.logger.warning("authCallback: unknown prompt: '%s'", prompt)
return -1
for i in range(len(ret)):
ctypes.memset(buffer+i, ord(ret[i]), 1)
if self.debug:
buffString = ctypes.string_at(buffer, len(ret))
if 'password' in prompt:
if ret and ret in buffString:
buffString = buffString.replace(ret, PASSWORD_SAFE)
self.logger.debug("returning: '%s'", buffString)
return 0
def sync(self):
r = csynclib.csync_create(self.ctx, self.cfg['src'], self.cfg['url'])
if r != 0:
self.error('csync_create', r)
csynclib.csync_set_log_callback(self.ctx, self.get_log_callback())
csynclib.csync_set_log_verbosity(self.ctx, self.cfg['verbosity_ocsync'])
self.logger.debug('authCallback setup')
csynclib.csync_set_auth_callback(self.ctx, self.get_auth_callback())
if self.cfg['progress']:
csynclib.csync_set_progress_callback(self.ctx, self.get_progress_callback())
r = csynclib.csync_init(self.ctx)
if r != 0:
self.error('csync_init', r)
self.logger.debug('Initialization done.')
if (self.cfg.has_key('downloadlimit') and self.cfg['downloadlimit']) or \
(self.cfg.has_key('uploadlimit') and self.cfg['uploadlimit']):
if csynclib.csync_version(CSYNC_VERSION_INT(0,81,0)) is None:
self.logger.warning('Bandwidth throttling requires ocsync version >= 0.81.0, ignoring limits')
else:
if self.cfg.has_key('downloadlimit') and self.cfg['downloadlimit']:
dlimit = ctypes.c_int(int(self.cfg['downloadlimit']) * 1000)
self.logger.debug('Download limit: %i', dlimit.value)
csynclib.csync_set_module_property(self.ctx, 'bandwidth_limit_download', ctypes.pointer(dlimit))
if self.cfg.has_key('uploadlimit') and self.cfg['uploadlimit']:
ulimit = ctypes.c_int(int(self.cfg['uploadlimit']) * 1000)
self.logger.debug('Upload limit: %i', ulimit.value)
csynclib.csync_set_module_property(self.ctx,'bandwidth_limit_upload',ctypes.pointer(ulimit))
r = csynclib.csync_update(self.ctx)
if r != 0:
self.error('csync_update', r)
self.logger.debug('Update done.')
r = csynclib.csync_reconcile(self.ctx)
if r != 0:
self.error('csync_reconcile', r)
self.logger.debug('Reconcile done.')
r = csynclib.csync_propagate(self.ctx)
if r != 0:
self.error('csync_propagate', r)
self.logger.debug('Propagate finished, destroying.')
r = csynclib.csync_destroy(self.ctx)
if r != 0:
self.error('csync_destroy', r)
def get_progress_callback(self):
def progress_wrapper(progress_p, userdata_p):
if progress_p:
progress_p = progress_p[0]
if userdata_p:
userdata_p = userdata_p[0]
return self.progress(progress_p, userdata_p)
if not self.progress_callback:
self.progress_callback = csynclib.csync_progress_callback(progress_wrapper)
return self.progress_callback
def progress(self, progress, userdata):
progress_text = {
csynclib.CSYNC_NOTIFY_INVALID: "invalid",
csynclib.CSYNC_NOTIFY_START_SYNC_SEQUENCE: "start syncing",
csynclib.CSYNC_NOTIFY_START_DOWNLOAD: "start downloading",
csynclib.CSYNC_NOTIFY_START_UPLOAD: "start uploading",
csynclib.CSYNC_NOTIFY_PROGRESS: "progress message",
csynclib.CSYNC_NOTIFY_FINISHED_DOWNLOAD: "finished downloading",
csynclib.CSYNC_NOTIFY_FINISHED_UPLOAD: "finished uploading",
csynclib.CSYNC_NOTIFY_FINISHED_SYNC_SEQUENCE: "finished syncing",
csynclib.CSYNC_NOTIFY_START_DELETE: "start deleted",
csynclib.CSYNC_NOTIFY_END_DELETE: "end deleted",
csynclib.CSYNC_NOTIFY_ERROR: "error",
}
if progress.kind in (csynclib.CSYNC_NOTIFY_START_UPLOAD, csynclib.CSYNC_NOTIFY_START_DOWNLOAD, csynclib.CSYNC_NOTIFY_START_DELETE):
maxval = progress.overall_file_count
if progress.kind == csynclib.CSYNC_NOTIFY_START_UPLOAD:
self.progress_mode = "Uploading "
if progress.kind == csynclib.CSYNC_NOTIFY_START_DOWNLOAD:
self.progress_mode = "Downloading "
if progress.kind == csynclib.CSYNC_NOTIFY_START_DELETE:
self.progress_mode = "Deleting "
maxval = progress.overall_transmission_size
fname = progress.path[len(self.cfg['url'])+1:]
widgets = [self.progress_mode, fname, ' ', Percentage(), ' ', Bar(),
' ', ETA(), ' ', FileTransferSpeed()]
self.pbar = ProgressBar(widgets=widgets, maxval=maxval).start()
elif progress.kind in (csynclib.CSYNC_NOTIFY_FINISHED_DOWNLOAD, csynclib.CSYNC_NOTIFY_FINISHED_UPLOAD, csynclib.CSYNC_NOTIFY_END_DELETE):
self.pbar.finish()
return
elif progress.kind == csynclib.CSYNC_NOTIFY_PROGRESS:
self.pbar.update(progress.curr_bytes)
else:
if progress.kind in (csynclib.CSYNC_NOTIFY_START_SYNC_SEQUENCE, csynclib.CSYNC_NOTIFY_FINISHED_SYNC_SEQUENCE):
return
self.logger.debug(progress_text[progress.kind])
self.logger.debug("'%s', %i, %i, %i, %i, %i, %i", progress.path, progress.file_size, progress.curr_bytes, progress.overall_file_count, progress.current_file_no, progress.overall_transmission_size, progress.current_overall_bytes)
def username(self):
"""returns the username"""
return self._user
def password(self):
"""returns the password"""
ret = None
if keyring and self._keyring:
self.logger.debug("using password from keyring")
ret = keyring.get_password('ownCloud', self.username())
if ret is None:
if not self._password:
ret = getpass.getpass('ownCloud password:')
else:
ret = self._password
if keyring and self._keyring:
self.logger.debug("saving password to keyring")
keyring.set_password('ownCloud', self.username(), ret)
return ret
def ssl(self, fingerprint):
"""returns if fingerprint is valid (yes or no as string)"""
if fingerprint.lower() == self._fingerprint:
return 'yes'
else:
self.logger.error('SSL fingerprint: %s not accepted, aborting' , fingerprint)
return 'no'
def get_log_callback(self):
def log_wrapper(ctx, verbosity, function, buffer, userdata):
return self.log(verbosity, function, buffer, userdata)
if not self.log_callback:
self.log_callback = csynclib.csync_log_callback(log_wrapper)
return self.log_callback
def log(self, verbosity, function, buffer, userdata):
"""Log stuff from the ocsync library."""
v2l = {csynclib.CSYNC_LOG_PRIORITY_NOLOG: logging.CRITICAL,
csynclib.CSYNC_LOG_PRIORITY_FATAL: logging.CRITICAL,
csynclib.CSYNC_LOG_PRIORITY_ALERT: logging.CRITICAL,
csynclib.CSYNC_LOG_PRIORITY_CRIT: logging.CRITICAL,
csynclib.CSYNC_LOG_PRIORITY_ERROR: logging.ERROR,
csynclib.CSYNC_LOG_PRIORITY_WARN: logging.WARN,
csynclib.CSYNC_LOG_PRIORITY_NOTICE: logging.INFO,
csynclib.CSYNC_LOG_PRIORITY_INFO: logging.INFO,
csynclib.CSYNC_LOG_PRIORITY_DEBUG: logging.DEBUG,
csynclib.CSYNC_LOG_PRIORITY_TRACE: logging.DEBUG,
csynclib.CSYNC_LOG_PRIORITY_NOTSET: logging.DEBUG,
csynclib.CSYNC_LOG_PRIORITY_UNKNOWN: logging.DEBUG,
}
level = logging.DEBUG
if verbosity in v2l:
level = v2l[verbosity]
logging.getLogger("ocsync").log(level, buffer)
def error(self, cmd, returnCode):
"""handle library errors"""
errNum = csynclib.csync_get_error(self.ctx)
errMsg = csynclib.csync_get_error_string(self.ctx)
if not errMsg:
if errNum == csynclib.CSYNC_ERR_AUTH_SERVER and cmd == 'csync_update':
errMsg = 'The user could not be authenticated with the server, check username/password combination.'
if errNum == csynclib.CSYNC_ERR_NOT_FOUND and cmd == 'csync_update':
errMsg = 'The remote folder "' + self.cfg['dst'] + '" could not be found, check that the remote folder exists on ownCloud.'
self.logger.error('%s exited with %s, csync(%s) error %s: %s',
cmd,
returnCode,
self.libVersion,
errNum,
errMsg,
)
sys.exit(1)
def getConfigPath():
"""get the local configuration file path
"""
if sys.platform.startswith('linux'):
cfgPath = os.path.join('~','.local','share','data','ownCloud')
cfgPath = os.path.expanduser(cfgPath)
elif sys.platform == 'darwin':
cfgPath = os.path.join('~','Library','Application Support','ownCloud')
cfgPath = os.path.expanduser(cfgPath)
elif 'win' in sys.platform:
cfgPath = os.path.join('%LOCALAPPDATA%','ownCloud')
cfgPath = os.path.expandvars(cfgPath)
else:
logging.warning('Unknown/not supported platform %s, please file a bug report. ', sys.platform)
sys.exit(1)
logging.debug('getConfigPath: %s', cfgPath)
return cfgPath
def getConfig(parser):
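# Configuration precedence, lowest to highest: built-in defaults,
# owncloud.cfg, the OCPASS environment variable (password only),
# command line arguments.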
args = vars(parser.parse_args())
if DEBUG:
logging.debug('From args: ')
pargs = copy.copy(args)
if pargs['pass']:
pargs['pass'] = PASSWORD_SAFE
logging.debug(pprint.pformat(pargs))
newArgs = {}
for k, v in args.iteritems():
if v:
newArgs[k] = v
args = newArgs
cfg = {}
cfgFile = None
if args.has_key('config'):
cfgFile = args['config']
else:
cfgPath = getConfigPath()
if os.path.exists(os.path.join(cfgPath,'owncloud.cfg')):
cfgFile = os.path.join(cfgPath, 'owncloud.cfg')
if cfgFile:
with open(cfgFile) as fd:
"""We use the INI file format that Mirall does. we allow more
things in the cfg file...
pass: the password
"""
c = ConfigParser.SafeConfigParser()
c.readfp(fd)
if csynclib.csync_version(CSYNC_VERSION_INT(0,81,0)) is None:
cfg = dict(c.items('ownCloud'))
else:
if c.has_section('BWLimit'):
cfg = dict(c.items('BWLimit') + c.items('ownCloud'))
if not cfg['useuploadlimit']:
cfg['uploadlimit'] = None
if not cfg['usedownloadlimit']:
cfg['downloadlimit'] = None
else:
logging.debug('config file has no section [BWLimit]')
cfg = dict(c.items('ownCloud'))
if DEBUG:
logging.debug('configuration info received from %s:', cfgFile)
pcfg = copy.copy(cfg)
if pcfg.has_key('pass'):
pcfg['pass'] = PASSWORD_SAFE
logging.debug(pprint.pformat(pcfg))
cfg.setdefault('davPath', 'remote.php/webdav/')
cfg.setdefault('sslfingerprint', '')
cfg.setdefault('pass', None)
cfg.setdefault('user', getpass.getuser())
cfg.setdefault('use_keyring', False)
cfg.setdefault('progress', False)
if os.environ.has_key('OCPASS'):
cfg['pass'] = os.environ['OCPASS']
logging.debug('password coming from environment variable OCPASS')
#cmd line arguments win out over config files.
parser.set_defaults(**cfg)
args = vars(parser.parse_args())
cfg.update(args)
if DEBUG:
logging.debug('Finished parsing configuration file:')
pcfg = copy.copy(cfg)
if pcfg.has_key('pass'):
pcfg['pass'] = PASSWORD_SAFE
logging.debug(pprint.pformat(pcfg))
return cfg
def startSync(parser):
cfg = getConfig(parser)
try:
ownCloudSync(cfg)
except KeyError:
exc_type, exc_value, exc_tb = sys.exc_info()
logging.error('This option "%s" is required, but was not found in the configuration.', exc_value)
if DEBUG:
raise
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = 'Synchronize files across machines using ownCloud DAV server.',
epilog = """
oclient supports the ownCloud config file, which is located here:
{cfg}
oclient only supports the 'ownCloud' section of the config.
oclient supports the following keys in the cfg file:
user: username on the ownCloud server
pass: password on the ownCloud server
url: url of the ownCloud server
sslfingerprint: valid SSL fingerprint for the server
src: local directory to sync against
dst: folder on the server to sync against
complete example:
[ownCloud]
user=awesomeSauce
pass=PasswordThisIsSuperSuperSecretReallyISwearLOL
url=https://www.example.org/owncloud/
sslfingerprint=
src=/home/awesomeSauce/ownCloud
dst=clientsync
Password options:
*) You can specify on the cmd line: -p (not very safe)
*) In the environment variable: OCPASS
*) In the owncloud.cfg file as pass = <password>
*) Use keyring to store passwords in a keyring. (keyring package is {keyring}installed)
*) Do none of the above, and it will prompt you for the password.
The choice is yours, if you put it in the cfg file, be careful to
make sure nobody but you can read the file. (0400/0600 file perms).
""".format(cfg = os.path.join(getConfigPath(),'owncloud.cfg'), keyring="" if keyring else "NOT "),
)
v = "%s - repo: %s" % (VERSION.asString, VERSION.asHead)
parser.add_argument('-v', '--version',
action='version',
version = '%(prog)s ' + v)
parser.add_argument('-c', '--config', nargs='?', default = None,
help = "Configuration file to use.")
parser.add_argument('-u', '--user', nargs='?', default = None,
help = "Username on server.")
parser.add_argument('--ssl', nargs='?', default = None,
dest = 'sslfingerprint',
help = "SSL fingerprint on server to accept.")
parser.add_argument('-p', '--pass', nargs='?', default = None,
help = "Password on server. You can also store this in environment variable OCPASS.")
parser.add_argument('--dry-run', action = 'store_true', default = False,
help = "Dry Run, do not actually execute command.")
parser.add_argument('--debug', action = 'store_true', default = False,
help = "Print debug information.")
parser.add_argument('--verbosity-ocsync', default = csynclib.CSYNC_LOG_PRIORITY_WARN, type=int,
help = "Verbosity for libocsync. (0=NOLOG,11=Everything)")
parser.add_argument('-s', '--src', nargs='?',
default = os.path.expanduser(os.path.join('~','ownCloud')),
help = "Local Directory to sync with.")
parser.add_argument('-d', '--dst', nargs='?', default = 'clientsync',
help = "Remote Directory on server to sync to.")
parser.add_argument('--url', nargs='?', default = None,
help = "URL of owncloud server.")
if csynclib.csync_version(CSYNC_VERSION_INT(0,81,0)) is not None:
parser.add_argument('--downloadlimit', nargs = '?', default = None,
help = "Download limit in KB/s.")
parser.add_argument('--uploadlimit', nargs = '?', default = None,
help = "Upload limit in KB/s.")
if keyring:
parser.add_argument('--use-keyring', action = 'store_true', default = False,
help = "Use keyring if available to store password safely.")
if ProgressBar and csynclib.csync_version(CSYNC_VERSION_INT(0,90,0)) is not None:
parser.add_argument('--progress', action = 'store_true', default = False,
help = "Show progress while syncing.")
args = vars(parser.parse_args())
if args['debug']:
global DEBUG
DEBUG = True
logging.debug('Turning debug on')
else:
logging.getLogger('').setLevel(logging.INFO)
startSync(parser)
if __name__ == '__main__':
import signal
def signal_handler(signal, frame):
logging.info('\nYou pressed Ctrl+C, aborting ...')
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
main()
# vim: noet:ts=4:sw=4:sts=4
| gpl-2.0 | -1,965,994,136,070,533,600 | 34.69901 | 231 | 0.694697 | false |
ProteinDF/QCLObot | qclobot/qcframe.py | 1 | 45730 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2002-2014 The ProteinDF project
# see also AUTHORS and README.
#
# This file is part of ProteinDF.
#
# ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
logger = logging.getLogger(__name__)
import math
from collections import OrderedDict
import copy
import shutil
try:
import msgpack
except:
import msgpack_pure as msgpack
import proteindf_bridge as bridge
import proteindf_tools as pdf
from .qcfragment import QcFragment
class QcFrame(object):
_pdfparam_filename = 'pdfparam.mpac'
_db_filename = 'pdfresults.db'
TOO_SMALL = 1.0E-5
# ------------------------------------------------------------------
def __init__(self, name, *args, **kwargs):
'''
create QcFrame object.
name: name of the frame molecule
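A minimal usage sketch (the fragment name and object below are only
illustrative):
frame = QcFrame('model')
frame['frag1'] = some_qc_fragment # register a QcFragment or another QcFrame
frame.calc_sp() # run the single-point calculation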
'''
# mandatory parameter
self._name = name
self._fragments = OrderedDict()
self._charge = 0
self._state = {} # holds calculation state flags
self._cmds = self._get_default_cmds() # external command names
self._initialize()
self._prepare_work_dir()
self._load()
# cache data
self._cache = {}
if ((len(args) > 0) and isinstance(args[0], QcFrame)):
self._copy_constructer(args[0])
# copy constructor
def _copy_constructer(self, rhs):
self._name = rhs._name
self._fragments = copy.deepcopy(rhs._fragments)
self._charge = rhs._charge
self._state = copy.deepcopy(rhs._state)
self._cmds = copy.copy(rhs._cmds)
# def __del__(self):
# self._save()
def _initialize(self, *args, **kwargs):
pass
def _get_default_cmds(self):
answer = {}
answer['mat-extend'] = 'mat-extend'
answer['mat-mul'] = 'mat-mul'
answer['mat-select'] = 'mat-select'
answer['mat-symmetrize'] = 'mat-symmetrize'
answer['mat-transpose'] = 'mat-transpose'
answer['mat-diagonal'] = 'mat-diagonal'
return answer
# save & load ------------------------------------------------------
def _load(self):
path = os.path.join(self.work_dir, 'qcframe.mpac')
if os.path.exists(path):
logger.info('load the fragment state: {}'.format(path))
f = open(path, 'rb')
packed = f.read()
state_dat = msgpack.unpackb(packed)
f.close()
state_dat = bridge.Utils.to_unicode_dict(state_dat)
self.set_by_raw_data(state_dat)
else:
logger.debug('not found the state file')
def save(self):
path = os.path.join(self.work_dir, 'qcframe.mpac')
# logger.info('save the fragment state: {}'.format(path))
state_dat = self.get_raw_data()
packed = msgpack.packb(state_dat)
f = open(path, 'wb')
f.write(packed)
f.close()
def get_raw_data(self):
return self.__getstate__()
def set_by_raw_data(self, raw_data):
self.__setstate__(raw_data)
def __getstate__(self):
state = {}
state['name'] = self.name
tmp_frgs = []
for k, frg in self.fragments():
tmp_frgs.append((k, frg.get_raw_data()))
state['fragments'] = tmp_frgs
state['charge'] = self.charge
state['state'] = self._state
state['cmds'] = self._cmds
return state
def __setstate__(self, state):
assert(isinstance(state, dict))
self._name = state.get('name')
self._fragments = OrderedDict()
if 'fragments' in state:
for (k, frg) in state.get('fragments'):
self._fragments[k] = QcFragment(frg, parent=self)
self.charge = state.get('charge', 0)
self._state = state.get('state', {})
self._cmds = state.get('cmds', self._get_default_cmds())
# pdfparam ---------------------------------------------------------
def _get_pdfparam(self):
'''
Return the pdfparam object.
'''
pdfparam_path = os.path.abspath(os.path.join(self.work_dir,
self._pdfparam_filename))
if 'pdfparam' not in self._cache:
if os.path.exists(pdfparam_path):
mpac_file = open(pdfparam_path, 'rb')
mpac_data = msgpack.unpackb(mpac_file.read())
mpac_data = bridge.Utils.to_unicode_dict(mpac_data)
mpac_file.close()
logger.debug('pdfparam({}) is loaded.'.format(pdfparam_path))
self._cache['pdfparam'] = pdf.PdfParam(mpac_data)
else:
pdfsim = pdf.PdfSim()
self._cache['pdfparam'] = pdf.get_default_pdfparam()
logger.debug('use default pdfparam.')
else:
logger.debug('pdfparam is cached.')
return self._cache['pdfparam']
pdfparam = property(_get_pdfparam)
# DB ---------------------------------------------------------------
def _get_db_path(self):
db_path = os.path.abspath(os.path.join(self.work_dir,
self._db_filename))
return db_path
db_path = property(_get_db_path)
def get_pdfarchive(self):
'''
Return the PdfArchive object.
'''
pdfarc = pdf.PdfArchive(self.db_path)
return pdfarc
# ==================================================================
# PROPERTIES
# ==================================================================
# command alias ----------------------------------------------------
def set_command_alias(self, cmd_alias_dict):
for k, v in cmd_alias_dict.items():
logger.debug("command update: {} -> {}".format(k, v))
self._cmds[k] = v
# work_dir ---------------------------------------------------------
def _get_work_dir(self):
return self._work_dir
work_dir = property(_get_work_dir)
# name -------------------------------------------------------------
def _get_name(self):
return self._name
name = property(_get_name)
# basisset ---------------------------------------------------------
def _set_basisset(self, pdfparam):
for fragment_name, fragment in self.fragments():
fragment.set_basisset(pdfparam)
# frame_molecule ---------------------------------------------------
def _get_frame_molecule(self):
'''
Return the modeled molecular structure as an AtomGroup object.
'''
if 'frame_molecule' not in self._cache:
logger.info('create frame molecule coordinates.')
frame_molecule = bridge.AtomGroup()
for frg_name, frg in self._fragments.items():
logger.info('fragment name={name}: atoms={atoms}, elec={elec}, charge={charge}'.format(
name = frg_name,
atoms = frg.get_number_of_all_atoms(),
elec = frg.sum_of_atomic_number(),
charge = frg.get_AtomGroup().charge))
frame_molecule[frg_name] = frg.get_AtomGroup()
self._cache['frame_molecule'] = frame_molecule
logger.info('')
return self._cache['frame_molecule']
frame_molecule = property(_get_frame_molecule)
# work dir ---------------------------------------------------------
def _prepare_work_dir(self):
'''
Create the working directory, named after this frame, under the
current directory.
'''
# assert(len(self.name) > 0)
if len(self.name) == 0:
logger.critical("frame name is not defined.")
raise RuntimeError("frame name is not defined.")
self._work_dir = os.path.abspath(os.path.join(os.curdir, self.name))
if not os.path.exists(self.work_dir):
logger.info("{header} make work dir: {path}".format(
header=self.header,
path=self.work_dir))
os.mkdir(self.work_dir)
else:
logger.debug("{header} already exist: {path}".format(
header=self.header,
path=self.work_dir))
def cd_work_dir(self, job_name=''):
'''
Change the current directory to this object's work_dir.
'''
logger.info('=' * 20)
logger.info("{header} > {job_name}@{frame_name}".format(
header=self.header,
job_name = job_name,
frame_name = self.name))
logger.debug("{header} work dir: {work_dir}".format(
header=self.header,
work_dir=self.work_dir))
logger.info('=' * 20)
self._prev_dir = os.path.abspath(os.curdir)
os.chdir(self.work_dir)
def restore_cwd(self):
'''
Return to the directory that was current before self.cd_work_dir().
'''
os.chdir(self._prev_dir)
logger.debug("{header} < (prev_dir: {path})".format(
header=self.header,
path=self._prev_dir))
def _check_path(self, path):
if not os.path.exists(path):
logger.warn("{header} NOT FOUND: {path}".format(
header=self.header, path=path))
# charge -----------------------------------------------------------
def _get_charge(self):
return int(self._charge)
def _set_charge(self, charge):
self._charge = int(charge)
charge = property(_get_charge, _set_charge)
# num_of_AOs -------------------------------------------------------
def get_number_of_AOs(self):
'''
return the number of atomic orbitals.
'''
num_of_AOs = 0
for frg_name, frg in self.fragments():
num_of_AOs += frg.get_number_of_AOs()
return num_of_AOs
# ==================================================================
# STATE
# ==================================================================
# guess_density ----------------------------------------------------
def _get_state_finished_guess_density(self):
self._state.setdefault('is_finished_guess_density', False)
return self._state['is_finished_guess_density']
def _set_state_finished_guess_density(self, yn):
self._state['is_finished_guess_density'] = bool(yn)
is_finished_guess_density = property(_get_state_finished_guess_density,
_set_state_finished_guess_density)
# guess_QCLO -------------------------------------------------------
def _get_state_finished_guess_QCLO(self):
self._state.setdefault('is_finished_guess_QCLO', False)
return self._state['is_finished_guess_QCLO']
def _set_state_finished_guess_QCLO(self, yn):
self._state['is_finished_guess_QCLO'] = bool(yn)
is_finished_guess_QCLO = property(_get_state_finished_guess_QCLO,
_set_state_finished_guess_QCLO)
# pre-SCF ----------------------------------------------------------
def _get_state_finished_prescf(self):
self._state.setdefault('is_finished_prescf', False)
return self._state['is_finished_prescf']
def _set_state_finished_prescf(self, yn):
self._state['is_finished_prescf'] = bool(yn)
is_finished_prescf = property(_get_state_finished_prescf,
_set_state_finished_prescf)
# SCF --------------------------------------------------------------
def _get_state_finished_scf(self):
self._state.setdefault('is_finished_scf', False)
return self._state['is_finished_scf']
def _set_state_finished_scf(self, yn):
self._state['is_finished_scf'] = bool(yn)
is_finished_scf = property(_get_state_finished_scf,
_set_state_finished_scf)
# Force ------------------------------------------------------------
def _get_state_finished_force(self):
self._state.setdefault('is_finished_force', False)
return self._state['is_finished_force']
def _set_state_finished_force(self, yn):
self._state['is_finished_force'] = bool(yn)
is_finished_force = property(_get_state_finished_force,
_set_state_finished_force)
# pick density matrix ---------------------------------------------
def _get_state_finished_pickup_density_matrix(self):
self._state.setdefault('is_finished_pickup_density_matrix', False)
return self._state['is_finished_pickup_density_matrix']
def _set_state_finished_pickup_density_matrix(self, yn):
self._state['is_finished_pickup_density_matrix'] = bool(yn)
is_finished_pickup_density_matrix = property(_get_state_finished_pickup_density_matrix,
_set_state_finished_pickup_density_matrix)
# LO ---------------------------------------------------------------
def _get_state_finished_LO(self):
self._state.setdefault('is_finished_LO', False)
return self._state['is_finished_LO']
def _set_state_finished_LO(self, yn):
self._state['is_finished_LO'] = bool(yn)
is_finished_LO = property(_get_state_finished_LO,
_set_state_finished_LO)
# pickup LO --------------------------------------------------------
def _get_state_finished_pickup_LO(self):
self._state.setdefault('is_finished_pickup_LO', False)
return self._state['is_finished_pickup_LO']
def _set_state_finished_pickup_LO(self, yn):
self._state['is_finished_pickup_LO'] = bool(yn)
is_finished_pickup_LO = property(_get_state_finished_pickup_LO,
_set_state_finished_pickup_LO)
# ==================================================================
# GUESS
# ==================================================================
# guess density ----------------------------------------------------
def guess_density(self, run_type ='rks', force=False):
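# Builds the whole-frame initial-guess density matrix by appending each
# fragment's guess density matrix with the 'mat-extend -d' command.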
if ((self.is_finished_guess_density == True) and
(force == False)):
logger.info('guess_density has already been calculated.')
return
self.cd_work_dir('guess_density')
guess_density_matrix_path = 'guess.density.{}.mat'.format(run_type)
# remove any existing data
if os.path.exists(guess_density_matrix_path):
os.remove(guess_density_matrix_path)
pdfsim = pdf.PdfSim()
pdfsim.setup()
for frg_name, frg in self.fragments():
logger.info('fragment name={}: {} atoms'.format(frg_name,
frg.get_number_of_all_atoms()))
if frg.parent == None:
logger.warn('guess_density(): parent == None. frg_name={}'.format(frg_name))
frg.set_command_alias(self._cmds)
frg_guess_density_matrix_path = frg.prepare_guess_density_matrix(run_type)
logger.debug('guess_density() [{}@{}] ext: {} from {}'.format(
frg_name,
frg.parent.name,
guess_density_matrix_path,
frg_guess_density_matrix_path))
if os.path.exists(frg_guess_density_matrix_path):
pdf.run_pdf([self._cmds['mat-extend'], '-d',
guess_density_matrix_path,
frg_guess_density_matrix_path,
guess_density_matrix_path])
else:
logger.warn('not found: frg.guess.dens.mat={}'.format(frg_guess_density_matrix_path))
self.pdfparam.guess = 'density_matrix'
logger.info('initial guess (density matrix) created at {}'.format(guess_density_matrix_path))
# check
self._check_path(guess_density_matrix_path)
self.is_finished_guess_density = True
self.save()
self.restore_cwd()
def guess_QCLO(self, run_type='rks',
force = False,
isCalcOrthogonalize = False):
"""create guess by using QCLO method
"""
if ((self.is_finished_guess_QCLO == True) and
(force == False)):
logger.info('guess_QCLO has already been calculated.')
return
self.cd_work_dir('guess_QCLO')
guess_QCLO_matrix_path = 'guess.QCLO.{}.mat'.format(run_type)
if os.path.exists(guess_QCLO_matrix_path):
os.remove(guess_QCLO_matrix_path)
num_of_AOs = 0
for frg_name, frg in self.fragments():
logger.info('guess QCLO: frg_name={}, parent={}'.format(frg_name, frg.parent.name))
frg.set_command_alias(self._cmds)
frg_QCLO_matrix_path = frg.prepare_guess_QCLO_matrix(run_type, self, force=force)
if os.path.exists(frg_QCLO_matrix_path):
pdf.run_pdf([self._cmds['mat-extend'], '-c',
guess_QCLO_matrix_path,
frg_QCLO_matrix_path,
guess_QCLO_matrix_path])
else:
logger.warn('The QCLO of the subgroup, {}, was not created.'.format(frg_name))
# orthogonalize
guess_path = 'guess.lcao.{}.mat'.format(run_type)
if isCalcOrthogonalize:
if self.is_finished_prescf != True:
self.calc_preSCF()
logger.info('orthogonalize')
Xinv_path = self.pdfparam.get_Xinv_mat_path()
self._check_path(guess_QCLO_matrix_path)
pdf.run_pdf([self._cmds['mat-mul'], '-v',
Xinv_path,
guess_QCLO_matrix_path,
guess_path])
else:
shutil.copy(guess_QCLO_matrix_path, guess_path)
self.pdfparam.guess = 'lcao'
logger.info('guess LCAO matrix created: {}'.format(guess_path))
# check
self._check_path(guess_QCLO_matrix_path)
self.is_finished_guess_QCLO = True
self.save()
self.restore_cwd()
# create occ file
self._create_occupation_file(run_type)
def _create_occupation_file(self, run_type='rks'):
self.cd_work_dir('create occ')
self._setup_pdf()
occ_level = -1
electrons_per_orb = 0.0
run_type = run_type.upper()
if run_type == 'RKS':
occ_level = int((self.pdfparam.num_of_electrons / 2.0))
electrons_per_orb = 2.0
else:
logger.critical("{header} NOT supported. run_type={run_type}".format(
header=self.header, run_type=run_type))
# num_of_MOs = self.pdfparam.num_of_MOs
# occ_vtr = pdf.Vector(num_of_MOs)
occ_vtr = pdf.Vector(occ_level)
for i in range(occ_level):
occ_vtr.set(i, electrons_per_orb)
occ_vtr_path = "guess.occ.{}.vtr".format(run_type.lower())
occ_vtr.save(occ_vtr_path)
self._check_path(occ_vtr_path)
self.save()
self.restore_cwd()
# ==================================================================
# CALC
# ==================================================================
def _setup_pdf(self):
logger.info("{header} setup ProteinDF condition".format(header=self.header))
for frg_name, frg in self.fragments():
frg.set_basisset(self.pdfparam)
self.pdfparam.molecule = self.frame_molecule
# num_of_electrons
num_of_electrons = self.frame_molecule.sum_of_atomic_number() # calc from the molecule data
logger.info("{header} the number of electrons = {elec}".format(
header=self.header, elec=num_of_electrons))
if self.charge != 0:
logger.info('specify the charge => {}'.format(self.charge))
num_of_electrons -= self.charge # note: the electron count (-) and the charge (+) have opposite signs
self.pdfparam.num_of_electrons = num_of_electrons
logger.info("{header} update the number of electrons => {elec}".format(
header=self.header,
elec=self.pdfparam.num_of_electrons))
if self.pdfparam.num_of_electrons % 2 != 0:
logger.warning("{header} the number of electrons is not even.".format(header=self.header))
# ------------------------------------------------------------------
def calc_preSCF(self, dry_run=False):
'''
Run the ProteinDF pre-SCF step (integral calculation).
'''
if self.is_finished_prescf:
logger.info('preSCF has already been calculated.')
return
self.cd_work_dir('calc preSCF')
self.check_bump_of_atoms()
self._setup_pdf()
self.pdfparam.step_control = 'integral'
self.save()
pdfsim = pdf.PdfSim()
pdfsim.sp(self.pdfparam,
workdir = self.work_dir,
db_path = self.db_path,
dry_run = dry_run)
self._cache.pop('pdfparam')
self.is_finished_prescf = True
self.save()
self.restore_cwd()
# sp ---------------------------------------------------------------
def calc_sp(self, dry_run=False):
'''
calculate single point energy
'''
if self.is_finished_scf:
logger.info('SP has already been calculated.')
self._grouping_fragments()
self._switch_fragments()
return
if self.is_finished_prescf != True:
self.calc_preSCF(dry_run)
self.cd_work_dir('calc SP')
self.check_bump_of_atoms()
self._setup_pdf()
#self.output_xyz("{}/model.xyz".format(self.name))
self.pdfparam.step_control = 'guess scf'
self.save()
pdfsim = pdf.PdfSim()
pdfsim.sp(self.pdfparam,
workdir = self.work_dir,
db_path = self.db_path,
dry_run = dry_run)
self._cache.pop('pdfparam')
self.is_finished_scf = True
self._grouping_fragments()
self._switch_fragments()
self.save()
self.restore_cwd()
# force ------------------------------------------------------------
def calc_force(self, dry_run=False):
'''
calculate force (energy gradient)
absolute: force -> gradient
'''
if self.is_finished_force:
logger.info('force has already been calculated.')
return
if self.is_finished_scf != True:
self.calc_sp(dry_run)
self.cd_work_dir('calc force')
self._setup_pdf()
self.pdfparam.step_control = 'force'
self.save()
pdfsim = pdf.PdfSim()
# for frg_name, frg in self.fragments():
# frg.set_basisset(self.pdfparam)
# self.pdfparam.molecule = self.frame_molecule
#
# # num_of_electrons
# num_of_electrons = self.pdfparam.num_of_electrons # calc from the molecule data
# logger.info('the number of electrons = {}'.format(num_of_electrons))
# if self.charge != 0:
# logger.info('specify the charge => {}'.format(self.charge))
# num_of_electrons -= self.charge # 電子(-)数と電荷(+)の正負が逆なことに注意
# self.pdfparam.num_of_electrons = num_of_electrons
# logger.info('update the number of electrons => {}'.format(self.pdfparam.num_of_electrons))
pdfsim.sp(self.pdfparam,
workdir = self.work_dir,
db_path = self.db_path,
dry_run = dry_run)
self._cache.pop('pdfparam')
self.is_finished_force = True
self.save()
self.restore_cwd()
# summary ------------------------------------------------------------------
def summary(self, dry_run=False, format_str=None, filepath=None):
'''
Format:
{NUM_OF_ATOMS}: number of atoms
{NUM_OF_AO}: number of AOs
{NUM_OF_MO}: number of MOs
{METHOD}: method
{IS_CONVERGED}: Whether the SCF is converged or not
{ITERATION}: iteration
{TOTAL_ENERGY}: total energy
{GRADIENT_RMS}: gradient RMS
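Example (hypothetical values):
summary(format_str='E={TOTAL_ENERGY} after {ITERATION} iterations', filepath='summary.txt')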
'''
if self.is_finished_scf != True:
self.calc_sp(dry_run)
self.cd_work_dir('summary')
values = {}
pdfarc = self.get_pdfarchive()
values['NUM_OF_ATOMS'] = pdfarc.num_of_atoms
values['NUM_OF_AO'] = pdfarc.num_of_AOs
values['NUM_OF_MO'] = pdfarc.num_of_MOs
values['METHOD'] = pdfarc.method
values['IS_CONVERGED'] = pdfarc.scf_converged
itr = pdfarc.iterations
values['ITERATION'] = itr
values['TOTAL_ENERGY'] = pdfarc.get_total_energy(itr)
values['GRADIENT_RMS'] = pdfarc.get_gradient_rms()
if format_str == None:
format_str = 'total energy: {TOTAL_ENERGY} at {ITERATION}'
output = format_str.format(**values)
if output[-1] != "\n":
output += "\n"
logger.info(output)
if filepath != None:
with open(filepath, 'a') as f:
f.write(output)
self.restore_cwd()
return output
def get_gradient(self):
'''
Return the force (energy gradient) acting on each atom.
'''
self.cd_work_dir('get_gradient')
pdfarc = self.get_pdfarchive()
num_of_atoms = pdfarc.num_of_atoms
grad = [None] * num_of_atoms
for atom_index in range(num_of_atoms):
grad[atom_index] = pdfarc.get_force(atom_index)
self.restore_cwd()
return grad
# pop --------------------------------------------------------------
def pop(self, dry_run = False, iteration = -1):
'''
Run Mulliken population analysis and return the population vector.
'''
if self.is_finished_scf != True:
self.calc_sp(dry_run)
if iteration == -1:
iteration = self.pdfparam.iterations
self._calc_pop(iteration = iteration)
pop_vtr = self.get_pop(iteration)
self.save()
self.restore_cwd()
return pop_vtr
def _calc_pop(self, iteration = -1, dry_run = False):
"""
"""
if iteration == -1:
iteration = self.pdfparam.iterations
self.cd_work_dir('calc pop: iteration={}'.format(iteration))
pdfsim = pdf.PdfSim()
pdfsim.pop(iteration = iteration,
dry_run = dry_run)
self.restore_cwd()
def get_pop(self, iteration = -1):
"""
"""
if iteration == -1:
iteration = self.pdfparam.iterations
self.cd_work_dir('get pop: iteration={}'.format(iteration))
run_type = "rks"
pop_path = self.pdfparam.get_pop_mulliken_path(run_type, iteration = iteration)
pop_vtr = pdf.Vector()
pop_vtr.load(pop_path)
self.restore_cwd()
return pop_vtr
# ------------------------------------------------------------------
# ==================================================================
# PICKUP
# ==================================================================
# pickup density matrix --------------------------------------------
def pickup_density_matrix(self, runtype ='rks'):
'''
Distribute the density matrix to each fragment.
'''
if self.is_finished_pickup_density_matrix:
logger.info("{header} pickup density matrix has done.".format(header=self.header))
return
self.cd_work_dir('pickup density matrix')
# post-SCF
self._grouping_fragments()
self._switch_fragments()
dens_mat_path = self.pdfparam.get_density_matrix_path(runtype=runtype)
logger.info("{header} reference density matrix: {path}".format(
header=self.header,
path=dens_mat_path))
global_dim = 0
for frg_name, frg in self.fragments():
dim = frg.get_number_of_AOs()
if dim > 0:
frg_dens_mat_path = 'Ppq.{}.{}.mat'.format(runtype, frg_name)
logger.info("{header} select [{start}:{end}] for {fragment}".format(
header=self.header,
fragment=frg_name,
start=global_dim,
end=global_dim +dim -1))
# cut out the block corresponding to this fragment
pdf.run_pdf([self._cmds['mat-select'],
'-t', global_dim,
'-l', global_dim,
'-b', global_dim +dim -1,
'-r', global_dim +dim -1,
dens_mat_path,
frg_dens_mat_path])
# convert the selected matrix into a symmetric matrix
pdf.run_pdf([self._cmds['mat-symmetrize'],
frg_dens_mat_path,
frg_dens_mat_path])
logger.debug("{header} density matrix for {fragment} was saved as {path}".format(
header=self.header,
fragment=frg_name,
path=frg_dens_mat_path))
is_loadable = pdf.SymmetricMatrix.is_loadable(frg_dens_mat_path)
assert(is_loadable == True)
(row, col) = pdf.SymmetricMatrix.get_size(frg_dens_mat_path)
assert(row == dim)
assert(row == col)
# register the symmetric matrix path with the fragment
frg.set_density_matrix(frg_dens_mat_path)
global_dim += dim
self.is_finished_pickup_density_matrix = True
self.save()
self.restore_cwd()
# ------------------------------------------------------------------
def calc_lo(self, run_type, dry_run=False):
if self.is_finished_LO:
logger.info('LO has already been done.')
return
if self.is_finished_scf != True:
self.calc_sp(dry_run=dry_run)
self.cd_work_dir('calc lo')
logger.info('start lo calculation.')
pdf.run_pdf('lo')
self.is_finished_LO = True
self.save()
self.restore_cwd()
# ------------------------------------------------------------------
def pickup_QCLO_matrix(self, run_type='rks', force=False):
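# Outline: (1) run the 'component' tool to build the CSC matrix (S x C_LO
# weights per AO), (2) assign every occupied LO to the fragment carrying
# the largest weight, (3) collect the assigned columns into a per-fragment
# C_LO matrix, (4) convert each C_LO into a QCLO via _trans_LO2QCLO().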
if ((self.is_finished_pickup_LO == True) and
(force == False)):
logger.info('pickup LO has been finished.')
return
self.calc_lo(run_type)
self.cd_work_dir('pickup lo')
# post-SCF
self._grouping_fragments()
self._switch_fragments()
# debug
pdfarc = self.get_pdfarchive()
num_of_AOs = pdfarc.num_of_AOs
num_of_MOs = pdfarc.num_of_MOs
HOMO_level = pdfarc.get_HOMO_level('rks') # option base 0
logger.info('num of AOs: {}'.format(num_of_AOs))
logger.info('num of MOs: {}'.format(num_of_MOs))
logger.info('HOMO level: {}'.format(HOMO_level +1))
logger.info('fragment information:')
for frg_name, frg in self.fragments():
frg_AOs = frg.get_number_of_AOs()
logger.info('fragment name:[{}] AOs={}'.format(frg_name, frg_AOs))
logger.info('')
# calc S*C
if 'pdfparam' in self._cache:
self._cache.pop('pdfparam')
lo_satisfied = self.pdfparam.lo_satisfied
if lo_satisfied != True:
logger.warn('lo_satisfied: {}'.format(lo_satisfied))
lo_iterations = self.pdfparam.lo_num_of_iterations
logger.info('lo iterations: {}'.format(lo_iterations))
logger.info('calc S*C')
CSC_path = 'CSC.mat'
Clo_path = self.pdfparam.get_clo_mat_path()
pdf.run_pdf(['component',
'-v',
'-S', 'CSC.mat',
'-c', Clo_path])
# load CSC
CSC = pdf.Matrix()
CSC.load(CSC_path)
logger.info("{header} make AO v.s. fragment table".format(header=self.header))
AO_frg_tbl = self._get_AO_fragment_table(num_of_AOs)
# pickup
logger.info('{header} assign fragment: start: HOMO={homo}'.format(header=self.header, homo=HOMO_level))
MO_fragment_assigned = {}
for mo in range(HOMO_level +1):
frg_name = self._define_lo_fragment(mo, num_of_AOs, AO_frg_tbl, CSC)
logger.info("{header} #{mo} MO -> fragment: '{frg_name}'".format(
header=self.header, mo=mo, frg_name=frg_name))
MO_fragment_assigned.setdefault(frg_name, [])
MO_fragment_assigned[frg_name].append(mo)
logger.info("{header} assign fragment: end".format(header=self.header))
# assign report
logger.info('==== assign report ====')
for k, MOs in MO_fragment_assigned.items():
logger.info("{header} fragment '{frag_name}' has {mo} MO(s)".format(
header=self.header, frag_name=k, mo=len(MOs)))
# build the C_LO matrix for each fragment
logger.info("{header} create C_LO: start".format(header=self.header))
Clo = pdf.Matrix()
Clo.load(Clo_path)
assert(num_of_AOs == Clo.rows)
for frg_name, frg in self.fragments():
frg_cols = len(MO_fragment_assigned.get(frg_name, []))
logger.info("{header} fragment '{frg_name}': col={col}".format(header=self.header, frg_name=frg_name, col=frg_cols))
if frg_cols == 0:
logger.warning("{header} fragment '{frg_name}' has no colomns.".format(header=self.header, frg_name=frg_name))
# continue
Clo_frg = pdf.Matrix(num_of_AOs, frg_cols)
if frg_name in MO_fragment_assigned:
for col, ref_col in enumerate(MO_fragment_assigned[frg_name]):
for row in range(num_of_AOs):
v = Clo.get(row, ref_col)
Clo_frg.set(row, col, v)
Clo_frg_path = 'Clo_{}.mat'.format(frg_name)
logger.debug("{header} fragment C_LO save: {path}".format(header=self.header, path=Clo_frg_path))
Clo_frg.save(Clo_frg_path)
frg.set_LO_matrix(Clo_frg_path, run_type)
logger.info("{header} create C_LO: end".format(header=self.header))
# trans C_LO to QCLO
self._trans_LO2QCLO()
# finish
self.is_finished_pickup_LO = True
self.save()
self.restore_cwd()
def _get_AO_fragment_table(self, num_of_AOs):
'''
Return a table mapping each AO index to its fragment name.
'''
frg_table = [ None for x in range(num_of_AOs) ]
AO_index = 0
for frg_name, frg in self.fragments():
frg_num_of_AOs = frg.get_number_of_AOs()
for i in range(AO_index, AO_index + frg_num_of_AOs):
frg_table[i] = frg_name
AO_index += frg_num_of_AOs
return frg_table
def _define_lo_fragment(self, mo, num_of_AOs, AO_frg_tbl, CSC):
judge = {}
total = 0.0
for ao in range(num_of_AOs):
frg_name = AO_frg_tbl[ao]
v = math.fabs(CSC.get(ao, mo))
total += v
judge.setdefault(frg_name, 0.0)
judge[frg_name] += v
for frg_name in judge.keys():
judge[frg_name] /= total
ranked_judge = sorted(judge.items(), key=lambda x:x[1], reverse=True)
for rank, (k, v) in enumerate(ranked_judge):
logger.info("{header} [{rank}] name:{name}, score:{score:.3f}".format(
header=self.header,
rank=rank +1,
name=k,
score=v))
high_score = ranked_judge[0][1]
if high_score < 0.5:
logger.warning("{header} 1st score is too small: {score}".format(header=self.header, score=high_score))
return ranked_judge[0][0]
def _trans_LO2QCLO(self):
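# For each fragment: F' = (C_LO)^T * F * C_LO, diagonalize F' to obtain C',
# then back-transform C_QCLO = C_LO * C'.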
logger.info('trans LO at {}'.format(os.getcwd()))
run_type = 'rks'
F_path = self.pdfparam.get_f_mat_path(run_type)
logger.info('F matrix: {}'.format(F_path))
for frg_name, frg in self.fragments():
C_QCLO_path = 'C_QCLO.{}.mat'.format(frg_name) # output for each fragment
frg_AO = frg.get_number_of_AOs()
logger.info("{header} fragment '{name}' has {ao} AO(s)".format(
header=self.header, name=frg_name, ao=frg_AO))
if frg.get_number_of_AOs() != 0:
Clo_path = frg.get_LO_matrix_path(run_type)
assert(Clo_path != None)
# calc (C_LO)dagger * F * C_LO => F'
F_Clo_path = 'F_Clo.{}.mat'.format(frg_name)
pdf.run_pdf([self._cmds['mat-mul'], '-v',
F_path,
Clo_path,
F_Clo_path])
Clo_dagger_path = 'Clo_dagger.{}.mat'.format(frg_name)
pdf.run_pdf([self._cmds['mat-transpose'], '-v',
Clo_path,
Clo_dagger_path])
F_prime_path = 'Fprime.{}.mat'.format(frg_name)
pdf.run_pdf([self._cmds['mat-mul'], '-v',
Clo_dagger_path,
F_Clo_path,
F_prime_path])
pdf.run_pdf([self._cmds['mat-symmetrize'],
F_prime_path,
F_prime_path])
# diagonal F'
eigval_path = 'QCLO_eigval.{}.vtr'.format(frg_name)
Cprime_path = 'Cprime.{}.mat'.format(frg_name)
logger.info("diagonal F'")
pdf.run_pdf([self._cmds['mat-diagonal'], '-v',
'-l', eigval_path,
'-x', Cprime_path,
F_prime_path])
# transform back to the AO basis
pdf.run_pdf([self._cmds['mat-mul'], '-v',
Clo_path,
Cprime_path,
C_QCLO_path])
else:
logger.info("{header} create empty QCLO matrix.".format(header=self.header))
empty_mat = pdf.Matrix()
empty_mat.save(C_QCLO_path)
frg.set_QCLO_matrix(C_QCLO_path)
logger.info('C_QCLO saved: {}'.format(C_QCLO_path))
# =================================================================
# for fragments
# =================================================================
def fragments(self):
'''
Iterator that yields (fragment name, fragment object) pairs.
'''
for k in self._fragments.keys():
yield(k, self._fragments[k])
def has_fragment(self, fragment_name):
'''
Return True if this frame has the named fragment.
'''
fragment_name = bridge.Utils.to_unicode(fragment_name)
return fragment_name in self._fragments.keys()
# operator[] -------------------------------------------------------
def __getitem__(self, fragment_name):
'''
operator[] for reading fragments
'''
fragment_name = bridge.Utils.to_unicode(fragment_name)
return self._fragments.get(fragment_name, None)
def __setitem__(self, fragment_name, obj):
'''
operator[] for assignment.
Assignment is allowed before the calculation (i.e. during modeling),
but not after the calculation has finished.
'''
if self.is_finished_scf:
logger.debug("rearrangement of fragments is prohibited after calculation.")
return
if 'frame_molecule' in self._cache:
self._cache.pop('frame_molecule')
fragment_name = bridge.Utils.to_unicode(fragment_name)
if isinstance(obj, QcFragment):
fragment = QcFragment(obj)
fragment.parent = self
fragment.name = fragment_name
logger.debug('[{my_name}] add fragment: name={fragment_name}'.format(
my_name=self.name,
fragment_name=fragment_name))
self._fragments[fragment_name] = fragment
elif isinstance(obj, QcFrame):
logger.info('begin to register frame molecule: for {}'.format(fragment_name))
fragment = QcFragment()
fragment.parent = self
fragment.name = fragment_name
for k, f in obj.fragments():
if not f.margin:
logger.warn('add fragment: fragment={} for {}'.format(k, fragment_name))
fragment.set_group(k, f)
else:
logger.warn('pass fragment: fragment={} is margin'.format(k))
self._fragments[fragment_name] = fragment
logger.info('end of registration frame molecule: for {}'.format(fragment_name))
else:
raise TypeError('unsupported fragment type: {}'.format(type(obj)))
# rearangement -----------------------------------------------------
def _switch_fragments(self):
'''
Switch the fragments from input use to output use.
What this does:
- set each fragment's parent to this frame (self)
'''
logger.info("{header} switch fragment".format(header=self.header))
output_fragments = OrderedDict()
for frg_name, frg in self.fragments():
logger.info("{header} fragment_name: {name}".format(
header=self.header, name=frg_name))
new_frg = QcFragment(frg, parent=self)
assert(new_frg.parent.name == self.name)
output_fragments[frg_name] = new_frg
self._fragments = output_fragments
#logger.info('merge subgroups')
#for key, frg in self.fragments():
# frg.merge_subgroups()
logger.info("{header} ---> switch".format(header=self.header))
for frg_name, frg in self.fragments():
logger.info("{header} {frg_name}: parent={parent_name}".format(
header=self.header,
frg_name=frg_name,
parent_name=frg.parent.name))
logger.info("{header} <---".format(header=self.header))
def _grouping_fragments(self):
logger.info("{header} grouping fragments".format(header=self.header))
for frg_name, frg in self.fragments():
frg.grouping_subfragments()
# ==================================================================
# coordinates
# ==================================================================
# outout XYZ -------------------------------------------------------
def output_xyz(self, file_path):
xyz = bridge.Xyz(self.frame_molecule)
xyz.save(file_path)
def check_bump_of_atoms(self):
logger.info("{header} check bump of atoms".format(header=self.header))
atom_list = self.frame_molecule.get_atom_list()
num_of_atoms = len(atom_list)
for i in range(num_of_atoms):
xyz1 = atom_list[i].xyz
for j in range(i):
d = xyz1.distance_from(atom_list[j].xyz)
if d < self.TOO_SMALL:
logger.warning("{header} atom[{i}][{atom_i}]({atom_i_path}) is near by atom[{j}][{atom_j}]({atom_j_path})".format(
header=self.header,
i=i, atom_i=str(atom_list[i]), atom_i_path=atom_list[i].path,
j=j, atom_j=str(atom_list[j]), atom_j_path=atom_list[j].path))
logger.debug("{header} check_bump of atoms: end".format(header=self.header))
# ==================================================================
# orbital table
# ==================================================================
def get_orbital_info(self):
'''
Return the list of QcOrbitalData entries for the AOs.
'''
orbinfo = []
for k, frg in self.fragments():
orbinfo.extend(frg.get_orbital_info())
return orbinfo
# ==================================================================
# operators
# ==================================================================
# operator == ------------------------------------------------------
def __eq__(self, rhs):
if rhs == None:
return False
return (self.name == rhs.name)
def __ne__(self, rhs):
return not self.__eq__(rhs)
# operator str -----------------------------------------------------
def __str__(self):
answer = ""
answer = 'frame name={}\n'.format(self.name)
for key, fragment in self.fragments():
answer += '>> fragment: {}\n'.format(key)
answer += str(fragment)
answer += '\n'
return answer
# ==================================================================
# debug
# ==================================================================
def _get_logger_header(self):
"""return header string for logger
"""
header = "{name}>".format(name=self.name)
return header
header=property(_get_logger_header)
| gpl-3.0 | -4,512,768,778,004,133,400 | 35.072173 | 134 | 0.497177 | false |
JamesMura/sentry | src/sentry/web/api.py | 1 | 21527 | from __future__ import absolute_import, print_function
import base64
import logging
import six
import traceback
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
from django.utils.encoding import force_bytes
from django.views.decorators.cache import never_cache, cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View as BaseView
from functools import wraps
from raven.contrib.django.models import client as Raven
from sentry import quotas, tsdb
from sentry.coreapi import (
APIError, APIForbidden, APIRateLimited, ClientApiHelper, CspApiHelper,
LazyData
)
from sentry.models import Project, OrganizationOption, Organization
from sentry.signals import (
event_accepted, event_dropped, event_filtered, event_received, api_called
)
from sentry.quotas.base import RateLimit
from sentry.utils import json, metrics
from sentry.utils.data_scrubber import SensitiveDataFilter
from sentry.utils.http import (
is_valid_origin, get_origins, is_same_domain,
)
from sentry.utils.safe import safe_execute
from sentry.web.helpers import render_to_response
logger = logging.getLogger('sentry')
# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = base64.b64decode('R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')
PROTOCOL_VERSIONS = frozenset(('2.0', '3', '4', '5', '6', '7'))
def api(func):
@wraps(func)
def wrapped(request, *args, **kwargs):
data = func(request, *args, **kwargs)
if request.is_ajax():
response = HttpResponse(data)
response['Content-Type'] = 'application/json'
else:
ref = request.META.get('HTTP_REFERER')
if ref is None or not is_same_domain(ref, request.build_absolute_uri()):
ref = reverse('sentry')
return HttpResponseRedirect(ref)
return response
return wrapped
class APIView(BaseView):
helper_cls = ClientApiHelper
def _get_project_from_id(self, project_id):
if not project_id:
return
if not project_id.isdigit():
raise APIError('Invalid project_id: %r' % project_id)
try:
return Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
raise APIError('Invalid project_id: %r' % project_id)
def _parse_header(self, request, helper, project):
auth = helper.auth_from_request(request)
if auth.version not in PROTOCOL_VERSIONS:
raise APIError('Client using unsupported server protocol version (%r)' % six.text_type(auth.version or ''))
if not auth.client:
raise APIError("Client did not send 'client' identifier")
return auth
@csrf_exempt
@never_cache
def dispatch(self, request, project_id=None, *args, **kwargs):
helper = self.helper_cls(
agent=request.META.get('HTTP_USER_AGENT'),
project_id=project_id,
ip_address=request.META['REMOTE_ADDR'],
)
origin = None
try:
origin = helper.origin_from_request(request)
response = self._dispatch(request, helper, project_id=project_id,
origin=origin,
*args, **kwargs)
except APIError as e:
context = {
'error': force_bytes(e.msg, errors='replace'),
}
if e.name:
context['error_name'] = e.name
response = HttpResponse(json.dumps(context),
content_type='application/json',
status=e.http_status)
# Set X-Sentry-Error as in many cases it is easier to inspect the headers
response['X-Sentry-Error'] = context['error']
if isinstance(e, APIRateLimited) and e.retry_after is not None:
response['Retry-After'] = six.text_type(e.retry_after)
except Exception as e:
# TODO(dcramer): test failures are not outputting the log message
# here
if settings.DEBUG:
content = traceback.format_exc()
else:
content = ''
logger.exception(e)
response = HttpResponse(content,
content_type='text/plain',
status=500)
# TODO(dcramer): it'd be nice if we had an incr_multi method so
# tsdb could optimize this
metrics.incr('client-api.all-versions.requests')
metrics.incr('client-api.all-versions.responses.%s' % (
response.status_code,
))
metrics.incr('client-api.all-versions.responses.%sxx' % (
six.text_type(response.status_code)[0],
))
if helper.context.version:
metrics.incr('client-api.v%s.requests' % (
helper.context.version,
))
metrics.incr('client-api.v%s.responses.%s' % (
helper.context.version, response.status_code
))
metrics.incr('client-api.v%s.responses.%sxx' % (
helper.context.version, six.text_type(response.status_code)[0]
))
if response.status_code != 200 and origin:
# We allow all origins on errors
response['Access-Control-Allow-Origin'] = '*'
if origin:
response['Access-Control-Allow-Headers'] = \
'X-Sentry-Auth, X-Requested-With, Origin, Accept, ' \
'Content-Type, Authentication'
response['Access-Control-Allow-Methods'] = \
', '.join(self._allowed_methods())
response['Access-Control-Expose-Headers'] = \
'X-Sentry-Error, Retry-After'
return response
def _dispatch(self, request, helper, project_id=None, origin=None,
*args, **kwargs):
request.user = AnonymousUser()
project = self._get_project_from_id(project_id)
if project:
helper.context.bind_project(project)
Raven.tags_context(helper.context.get_tags_context())
if origin is not None:
# This check is specific for clients who need CORS support
if not project:
raise APIError('Client must be upgraded for CORS support')
if not is_valid_origin(origin, project):
raise APIForbidden('Invalid origin: %s' % (origin,))
# XXX: It seems that the OPTIONS call does not always include custom headers
if request.method == 'OPTIONS':
response = self.options(request, project)
else:
auth = self._parse_header(request, helper, project)
project_id = helper.project_id_from_auth(auth)
# Legacy API was /api/store/ and the project ID was only available elsewhere
if not project:
project = Project.objects.get_from_cache(id=project_id)
helper.context.bind_project(project)
elif project_id != project.id:
raise APIError('Two different projects were specified')
helper.context.bind_auth(auth)
Raven.tags_context(helper.context.get_tags_context())
# Explicitly bind Organization so we don't implicitly query it later
# this just allows us to comfortably assure that `project.organization` is safe.
# This also allows us to pull the object from cache, instead of being
# implicitly fetched from database.
project.organization = Organization.objects.get_from_cache(id=project.organization_id)
if auth.version != '2.0':
if not auth.secret_key:
# If we're missing a secret_key, check if we are allowed
# to do a CORS request.
# If we're missing an Origin/Referrer header entirely,
# we only want to support this on GET requests. By allowing
# un-authenticated CORS checks for POST, we basically
# are obsoleting our need for a secret key entirely.
if origin is None and request.method != 'GET':
raise APIForbidden('Missing required attribute in authentication header: sentry_secret')
if not is_valid_origin(origin, project):
raise APIForbidden('Missing required Origin or Referer header')
response = super(APIView, self).dispatch(
request=request,
project=project,
auth=auth,
helper=helper,
**kwargs
)
if origin:
if origin == 'null':
# If an Origin is `null`, but we got this far, that means
# we've gotten past our CORS check for some reason. But the
# problem is that we can't return "null" as a valid response
# to `Access-Control-Allow-Origin` and we don't have another
# value to work with, so just allow '*' since they've gotten
# this far.
response['Access-Control-Allow-Origin'] = '*'
else:
response['Access-Control-Allow-Origin'] = origin
api_called.send(project=project, sender=self)
return response
# XXX: backported from Django 1.5
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
def options(self, request, *args, **kwargs):
response = HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
class StoreView(APIView):
"""
The primary endpoint for storing new events.
This will validate the client's authentication and data, and if
successful pass on the payload to the internal database handler.
Authentication works in three flavors:
1. Explicit signed requests
These are implemented using the documented signed request protocol, and
require an authentication header which is signed using with the project
member's secret key.
2. CORS Secured Requests
Generally used for communications with client-side platforms (such as
JavaScript in the browser), they require a standard header, excluding
the signature and timestamp requirements, and must be listed in the
origins for the given project (or the global origins).
3. Implicit trusted requests
Used by the Sentry core, they are only available from same-domain requests
and do not require any authentication information. They only require that
the user be authenticated, and a project_id be sent in the GET variables.
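An illustrative signed-request header (all values are placeholders):
X-Sentry-Auth: Sentry sentry_version=7, sentry_key=<public key>,
sentry_secret=<secret key>, sentry_client=<client>/<version>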
"""
def post(self, request, **kwargs):
try:
data = request.body
except Exception as e:
logger.exception(e)
# We were unable to read the body.
# This would happen if a request were submitted
# as a multipart form for example, where reading
# body yields an Exception. There's also not a more
# sane exception to catch here. This will ultimately
# bubble up as an APIError.
data = None
response_or_event_id = self.process(request, data=data, **kwargs)
if isinstance(response_or_event_id, HttpResponse):
return response_or_event_id
return HttpResponse(json.dumps({
'id': response_or_event_id,
}), content_type='application/json')
def get(self, request, **kwargs):
data = request.GET.get('sentry_data', '')
response_or_event_id = self.process(request, data=data, **kwargs)
# Return a simple 1x1 gif for browser so they don't throw a warning
response = HttpResponse(PIXEL, 'image/gif')
if not isinstance(response_or_event_id, HttpResponse):
response['X-Sentry-ID'] = response_or_event_id
return response
def process(self, request, project, auth, helper, data, **kwargs):
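# Pipeline: decode the payload -> inbound filters -> rate limit / quota
# checks -> data scrubbing (sensitive fields, IP address) -> duplicate
# event_id check -> insert into the database queue.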
metrics.incr('events.total')
if not data:
raise APIError('No JSON data was found')
remote_addr = request.META['REMOTE_ADDR']
data = LazyData(
data=data,
content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
helper=helper,
project=project,
auth=auth,
client_ip=remote_addr,
)
event_received.send_robust(
ip=remote_addr,
project=project,
sender=type(self),
)
if helper.should_filter(project, data, ip_address=remote_addr):
tsdb.incr_multi([
(tsdb.models.project_total_received, project.id),
(tsdb.models.project_total_blacklisted, project.id),
(tsdb.models.organization_total_received, project.organization_id),
(tsdb.models.organization_total_blacklisted, project.organization_id),
])
metrics.incr('events.blacklisted')
event_filtered.send_robust(
ip=remote_addr,
project=project,
sender=type(self),
)
raise APIForbidden('Event dropped due to filter')
# TODO: improve this API (e.g. make RateLimit act on __ne__)
rate_limit = safe_execute(quotas.is_rate_limited, project=project,
_with_transaction=False)
if isinstance(rate_limit, bool):
rate_limit = RateLimit(is_limited=rate_limit, retry_after=None)
# XXX(dcramer): when the rate limiter fails we drop events to ensure
# it cannot cascade
if rate_limit is None or rate_limit.is_limited:
if rate_limit is None:
helper.log.debug('Dropped event due to error with rate limiter')
tsdb.incr_multi([
(tsdb.models.project_total_received, project.id),
(tsdb.models.project_total_rejected, project.id),
(tsdb.models.organization_total_received, project.organization_id),
(tsdb.models.organization_total_rejected, project.organization_id),
])
metrics.incr('events.dropped', tags={
'reason': rate_limit.reason_code if rate_limit else 'unknown',
})
event_dropped.send_robust(
ip=remote_addr,
project=project,
sender=type(self),
reason_code=rate_limit.reason_code if rate_limit else None,
)
if rate_limit is not None:
raise APIRateLimited(rate_limit.retry_after)
else:
tsdb.incr_multi([
(tsdb.models.project_total_received, project.id),
(tsdb.models.organization_total_received, project.organization_id),
])
org_options = OrganizationOption.objects.get_all_values(project.organization_id)
if org_options.get('sentry:require_scrub_ip_address', False):
scrub_ip_address = True
else:
scrub_ip_address = project.get_option('sentry:scrub_ip_address', False)
event_id = data['event_id']
# TODO(dcramer): ideally we'd only validate this if the event_id was
# supplied by the user
cache_key = 'ev:%s:%s' % (project.id, event_id,)
if cache.get(cache_key) is not None:
raise APIForbidden('An event with the same ID already exists (%s)' % (event_id,))
if org_options.get('sentry:require_scrub_data', False):
scrub_data = True
else:
scrub_data = project.get_option('sentry:scrub_data', True)
if scrub_data:
# We filter data immediately before it ever gets into the queue
sensitive_fields_key = 'sentry:sensitive_fields'
sensitive_fields = (
org_options.get(sensitive_fields_key, []) +
project.get_option(sensitive_fields_key, [])
)
exclude_fields_key = 'sentry:safe_fields'
exclude_fields = (
org_options.get(exclude_fields_key, []) +
project.get_option(exclude_fields_key, [])
)
if org_options.get('sentry:require_scrub_defaults', False):
scrub_defaults = True
else:
scrub_defaults = project.get_option('sentry:scrub_defaults', True)
inst = SensitiveDataFilter(
fields=sensitive_fields,
include_defaults=scrub_defaults,
exclude_fields=exclude_fields,
)
inst.apply(data)
if scrub_ip_address:
# We filter data immediately before it ever gets into the queue
helper.ensure_does_not_have_ip(data)
# mutates data (strips a lot of context if not queued)
helper.insert_data_to_database(data)
cache.set(cache_key, '', 60 * 5)
helper.log.debug('New event received (%s)', event_id)
event_accepted.send_robust(
ip=remote_addr,
data=data,
project=project,
sender=type(self),
)
return event_id
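# Illustrative note (added for orientation; the field values are placeholders):
# the view below consumes the JSON document that browsers POST for CSP
# violations, which looks roughly like
#
#     {"csp-report": {"document-uri": "http://example.com/page",
#                     "violated-directive": "script-src 'self'",
#                     "blocked-uri": "http://evil.example.net/x.js"}}
#
# Only the "csp-report" key and its "document-uri" entry are relied upon
# here; the remaining fields are passed through to process() unchanged.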
class CspReportView(StoreView):
helper_cls = CspApiHelper
content_types = ('application/csp-report', 'application/json')
def _dispatch(self, request, helper, project_id=None, origin=None,
*args, **kwargs):
# A CSP report is sent as a POST request with no Origin or Referer
# header. What we're left with is a 'document-uri' key which is
# inside of the JSON body of the request. This 'document-uri' value
# should be treated as an origin check since it refers to the page
# that triggered the report. The Content-Type is supposed to be
        # `application/csp-report`, but Firefox sends it as `application/json`.
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
if request.META.get('CONTENT_TYPE') not in self.content_types:
raise APIError('Invalid Content-Type')
request.user = AnonymousUser()
project = self._get_project_from_id(project_id)
helper.context.bind_project(project)
Raven.tags_context(helper.context.get_tags_context())
# This is yanking the auth from the querystring since it's not
# in the POST body. This means we expect a `sentry_key` and
# `sentry_version` to be set in querystring
auth = helper.auth_from_request(request)
project_id = helper.project_id_from_auth(auth)
if project_id != project.id:
raise APIError('Two different projects were specified')
helper.context.bind_auth(auth)
Raven.tags_context(helper.context.get_tags_context())
return super(APIView, self).dispatch(
request=request,
project=project,
auth=auth,
helper=helper,
**kwargs
)
def post(self, request, project, auth, helper, **kwargs):
data = helper.safely_load_json_string(request.body)
# Do origin check based on the `document-uri` key as explained
# in `_dispatch`.
try:
report = data['csp-report']
except KeyError:
raise APIError('Missing csp-report')
origin = report.get('document-uri')
# No idea, but this is garbage
if origin == 'about:blank':
raise APIForbidden('Invalid document-uri')
if not is_valid_origin(origin, project):
raise APIForbidden('Invalid document-uri')
        # Attach collected metadata. This data isn't part of the CSP spec,
        # but we need to append Sentry-specific things to the report.
report['_meta'] = {
'release': request.GET.get('sentry_release'),
}
response_or_event_id = self.process(
request,
project=project,
auth=auth,
helper=helper,
data=report,
**kwargs
)
if isinstance(response_or_event_id, HttpResponse):
return response_or_event_id
return HttpResponse(status=201)
@cache_control(max_age=3600, public=True)
def robots_txt(request):
return HttpResponse("User-agent: *\nDisallow: /\n", content_type='text/plain')
@cache_control(max_age=3600, public=True)
def crossdomain_xml_index(request):
response = render_to_response('sentry/crossdomain_index.xml')
response['Content-Type'] = 'application/xml'
return response
@cache_control(max_age=60)
def crossdomain_xml(request, project_id):
if not project_id.isdigit():
return HttpResponse(status=404)
try:
project = Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
return HttpResponse(status=404)
origin_list = get_origins(project)
response = render_to_response('sentry/crossdomain.xml', {
'origin_list': origin_list
})
response['Content-Type'] = 'application/xml'
return response
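# Sketch of the rendered policy (illustration only; the actual markup lives in
# the 'sentry/crossdomain.xml' template and may differ):
#
#     <?xml version="1.0"?>
#     <cross-domain-policy>
#         <allow-access-from domain="example.com"/>
#     </cross-domain-policy>
#
# with one allow-access-from entry per origin in origin_list.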
| bsd-3-clause | 2,529,860,356,974,594,600 | 37.100885 | 119 | 0.599898 | false |
jk1/intellij-community | python/testData/inspections/PyDataclassInspection/comparisonForUnorderedAttrs.py | 18 | 1056 | import attr
@attr.s(cmp=False, auto_attribs=True)
class A1:
x: int = 0
@attr.s(cmp=False, auto_attribs=True)
class A2:
y: int = 0
print(A1(1) <error descr="'__lt__' not supported between instances of 'A1'"><</error> A1(2))
print(A1(1) <error descr="'__le__' not supported between instances of 'A1'"><=</error> A1(2))
print(A1(1) <error descr="'__gt__' not supported between instances of 'A1'">></error> A1(2))
print(A1(1) <error descr="'__ge__' not supported between instances of 'A1'">>=</error> A1(2))
print(A1(1) <error descr="'__lt__' not supported between instances of 'A1' and 'A2'"><</error> A2(2))
print(A1(1) <error descr="'__le__' not supported between instances of 'A1' and 'A2'"><=</error> A2(2))
print(A1(1) <error descr="'__gt__' not supported between instances of 'A1' and 'A2'">></error> A2(2))
print(A1(1) <error descr="'__ge__' not supported between instances of 'A1' and 'A2'">>=</error> A2(2))
print(A1 < A1)
print(A1 <= A1)
print(A1 > A1)
print(A1 >= A1)
print(A1 < A2)
print(A1 <= A2)
print(A1 > A2)
print(A1 >= A2) | apache-2.0 | -2,806,690,299,931,262,500 | 30.088235 | 102 | 0.626894 | false |
kingctan/oppia | core/storage/email/gae_models_test.py | 19 | 6610 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import types
from core.platform import models
from core.tests import test_utils
import feconf
(email_models,) = models.Registry.import_models([models.NAMES.email])
class SentEmailModelUnitTests(test_utils.GenericTestBase):
"""Test the SentEmailModel class."""
def setUp(self):
super(SentEmailModelUnitTests, self).setUp()
# pylint: disable=unused-argument
def _generate_hash_for_tests(
cls, recipient_id, email_subject, email_body):
return 'Email Hash'
self.generate_constant_hash_ctx = self.swap(
email_models.SentEmailModel, '_generate_hash',
types.MethodType(_generate_hash_for_tests,
email_models.SentEmailModel))
def test_saved_model_can_be_retrieved_with_same_hash(self):
with self.generate_constant_hash_ctx:
email_models.SentEmailModel.create(
'recipient_id', '[email protected]', 'sender_id',
'[email protected]', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
query = email_models.SentEmailModel.query()
query = query.filter(
email_models.SentEmailModel.email_hash == 'Email Hash')
results = query.fetch(2)
self.assertEqual(len(results), 1)
query = email_models.SentEmailModel.query()
query = query.filter(
email_models.SentEmailModel.email_hash == 'Bad Email Hash')
results = query.fetch(2)
self.assertEqual(len(results), 0)
def test_get_by_hash_works_correctly(self):
with self.generate_constant_hash_ctx:
email_models.SentEmailModel.create(
'recipient_id', '[email protected]', 'sender_id',
'[email protected]', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
results = email_models.SentEmailModel.get_by_hash('Email Hash')
self.assertEqual(len(results), 1)
results = email_models.SentEmailModel.get_by_hash('Bad Email Hash')
self.assertEqual(len(results), 0)
def test_get_by_hash_returns_multiple_models_with_same_hash(self):
with self.generate_constant_hash_ctx:
email_models.SentEmailModel.create(
'recipient_id', '[email protected]', 'sender_id',
'[email protected]', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
email_models.SentEmailModel.create(
'recipient_id', '[email protected]', 'sender_id',
'[email protected]', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
results = email_models.SentEmailModel.get_by_hash('Email Hash')
self.assertEqual(len(results), 2)
def test_get_by_hash_behavior_with_sent_datetime_lower_bound(self):
with self.generate_constant_hash_ctx:
time_now = datetime.datetime.utcnow()
email_models.SentEmailModel.create(
'recipient_id', '[email protected]', 'sender_id',
'[email protected]', feconf.EMAIL_INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
results = email_models.SentEmailModel.get_by_hash(
'Email Hash', sent_datetime_lower_bound=time_now)
self.assertEqual(len(results), 1)
time_now1 = datetime.datetime.utcnow()
results = email_models.SentEmailModel.get_by_hash(
'Email Hash', sent_datetime_lower_bound=time_now1)
self.assertEqual(len(results), 0)
time_before = (datetime.datetime.utcnow() -
datetime.timedelta(minutes=10))
results = email_models.SentEmailModel.get_by_hash(
'Email Hash', sent_datetime_lower_bound=time_before)
self.assertEqual(len(results), 1)
# Check that it accepts only DateTime objects.
with self.assertRaises(Exception):
results = email_models.SentEmailModel.get_by_hash(
'Email Hash',
sent_datetime_lower_bound='Not a datetime object')
class GenerateHashTests(test_utils.GenericTestBase):
"""Test that generating hash functionality works as expected."""
def test_same_inputs_always_gives_same_hashes(self):
# pylint: disable=protected-access
email_hash1 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body')
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body')
self.assertEqual(email_hash1, email_hash2)
# pylint: enable=protected-access
def test_different_inputs_give_different_hashes(self):
# pylint: disable=protected-access
email_hash1 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body')
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject', 'email_html_body2')
self.assertNotEqual(email_hash1, email_hash2)
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id2', 'email_subject', 'email_html_body')
self.assertNotEqual(email_hash1, email_hash2)
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id', 'email_subject2', 'email_html_body')
self.assertNotEqual(email_hash1, email_hash2)
email_hash2 = email_models.SentEmailModel._generate_hash(
'recipient_id2', 'email_subject2', 'email_html_body2')
self.assertNotEqual(email_hash1, email_hash2)
# pylint: enable=protected-access
| apache-2.0 | -7,345,519,225,533,441,000 | 39.552147 | 79 | 0.634191 | false |
trishnaguha/ansible | test/units/modules/network/nxos/test_nxos_config.py | 20 | 8365 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch, MagicMock
from ansible.modules.network.nxos import nxos_config
from ansible.plugins.cliconf.nxos import Cliconf
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosConfigModule(TestNxosModule):
module = nxos_config
def setUp(self):
super(TestNxosConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_save_config = patch('ansible.modules.network.nxos.nxos_config.save_config')
self.save_config = self.mock_save_config.start()
self.mock_get_connection = patch('ansible.modules.network.nxos.nxos_config.get_connection')
self.get_connection = self.mock_get_connection.start()
self.conn = self.get_connection()
self.conn.edit_config = MagicMock()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_config.run_commands')
self.run_commands = self.mock_run_commands.start()
self.cliconf_obj = Cliconf(MagicMock())
self.running_config = load_fixture('nxos_config', 'config.cfg')
def tearDown(self):
super(TestNxosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
self.mock_get_connection.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_config', 'config.cfg')
self.load_config.return_value = None
def test_nxos_config_no_change(self):
lines = ['hostname localhost']
args = dict(lines=lines)
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
set_module_args(args)
result = self.execute_module()
def test_nxos_config_src(self):
src = load_fixture('nxos_config', 'candidate.cfg')
args = dict(src=src)
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(src, self.running_config))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01', 'interface Ethernet1',
'description test interface', 'no shutdown', 'ip routing']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_replace_src(self):
set_module_args(dict(replace_src='bootflash:config', replace='config'))
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(self.running_config, self.running_config, diff_replace='config'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['config replace bootflash:config'])
def test_nxos_config_lines(self):
lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
args = dict(lines=lines)
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_before(self):
lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
args = dict(lines=lines,
before=['before command'])
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['before command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('before command', result['commands'][0])
def test_nxos_config_after(self):
lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
args = dict(lines=lines,
after=['after command'])
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['after command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('after command', result['commands'][-1])
def test_nxos_config_parents(self):
lines = ['ip address 1.2.3.4/5', 'no shutdown']
parents = ['interface Ethernet10']
args = dict(lines=lines, parents=parents)
self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(parents + lines), self.running_config, path=parents))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
self.assertEqual(config, result['commands'], result['commands'])
def test_nxos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_src_and_parents_fails(self):
args = dict(src='foo', parents='foo')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_exact_requires_lines(self):
args = dict(match='exact')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_strict_requires_lines(self):
args = dict(match='strict')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_block_requires_lines(self):
args = dict(replace='block')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_config_requires_src(self):
args = dict(replace='config')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_backup_returns__backup__(self):
args = dict(backup=True)
set_module_args(args)
result = self.execute_module()
self.assertIn('__backup__', result)
def test_nxos_config_save_always(self):
args = dict(save_when='always')
set_module_args(args)
self.execute_module()
self.assertEqual(self.save_config.call_count, 1)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
def test_nxos_config_save_changed_true(self):
args = dict(save_when='changed', lines=['hostname foo', 'interface GigabitEthernet0/0', 'no ip address'])
set_module_args(args)
self.execute_module(changed=True)
self.assertEqual(self.save_config.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
def test_nxos_config_save_changed_false(self):
args = dict(save_when='changed')
set_module_args(args)
self.execute_module()
self.assertEqual(self.save_config.call_count, 0)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
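# For orientation only (not part of the test suite; the task name is an
# assumption): a playbook task exercising the same options as
# test_nxos_config_parents above could look like
#
#   - name: configure interface addressing
#     nxos_config:
#       lines:
#         - ip address 1.2.3.4/5
#         - no shutdown
#       parents:
#         - interface Ethernet10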
| gpl-3.0 | 3,923,950,364,714,460,700 | 40.825 | 143 | 0.664077 | false |
vidartf/hyperspy | hyperspy/io_plugins/mrc.py | 1 | 7681 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# The details of the format were taken from
# http://www.biochem.mpg.de/doc_tom/TOM_Release_2008/IOfun/tom_mrcread.html
# and http://ami.scripps.edu/software/mrctools/mrc_specification.php
import os
import logging
import numpy as np
from traits.api import Undefined
from hyperspy.misc.array_tools import sarray2dict
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'MRC'
description = ''
full_support = False
# Recognised file extension
file_extensions = ['mrc', 'MRC', 'ALI', 'ali']
default_extension = 0
# Writing capabilities
writes = False
def get_std_dtype_list(endianess='<'):
end = endianess
dtype_list = \
[
('NX', end + 'u4'),
('NY', end + 'u4'),
('NZ', end + 'u4'),
('MODE', end + 'u4'),
('NXSTART', end + 'u4'),
('NYSTART', end + 'u4'),
('NZSTART', end + 'u4'),
('MX', end + 'u4'),
('MY', end + 'u4'),
('MZ', end + 'u4'),
('Xlen', end + 'f4'),
('Ylen', end + 'f4'),
('Zlen', end + 'f4'),
('ALPHA', end + 'f4'),
('BETA', end + 'f4'),
('GAMMA', end + 'f4'),
('MAPC', end + 'u4'),
('MAPR', end + 'u4'),
('MAPS', end + 'u4'),
('AMIN', end + 'f4'),
('AMAX', end + 'f4'),
('AMEAN', end + 'f4'),
('ISPG', end + 'u2'),
('NSYMBT', end + 'u2'),
('NEXT', end + 'u4'),
('CREATID', end + 'u2'),
('EXTRA', (np.void, 30)),
('NINT', end + 'u2'),
('NREAL', end + 'u2'),
('EXTRA2', (np.void, 28)),
('IDTYPE', end + 'u2'),
('LENS', end + 'u2'),
('ND1', end + 'u2'),
('ND2', end + 'u2'),
('VD1', end + 'u2'),
('VD2', end + 'u2'),
('TILTANGLES', (np.float32, 6)),
('XORIGIN', end + 'f4'),
('YORIGIN', end + 'f4'),
('ZORIGIN', end + 'f4'),
('CMAP', (bytes, 4)),
('STAMP', (bytes, 4)),
('RMS', end + 'f4'),
('NLABL', end + 'u4'),
('LABELS', (bytes, 800)),
]
return dtype_list
def get_fei_dtype_list(endianess='<'):
end = endianess
dtype_list = [
('a_tilt', end + 'f4'), # Alpha tilt (deg)
('b_tilt', end + 'f4'), # Beta tilt (deg)
# Stage x position (Unit=m. But if value>1, unit=???m)
('x_stage', end + 'f4'),
# Stage y position (Unit=m. But if value>1, unit=???m)
('y_stage', end + 'f4'),
# Stage z position (Unit=m. But if value>1, unit=???m)
('z_stage', end + 'f4'),
# Signal2D shift x (Unit=m. But if value>1, unit=???m)
('x_shift', end + 'f4'),
# Signal2D shift y (Unit=m. But if value>1, unit=???m)
('y_shift', end + 'f4'),
('defocus', end + 'f4'), # Defocus Unit=m. But if value>1, unit=???m)
('exp_time', end + 'f4'), # Exposure time (s)
('mean_int', end + 'f4'), # Mean value of image
('tilt_axis', end + 'f4'), # Tilt axis (deg)
('pixel_size', end + 'f4'), # Pixel size of image (m)
('magnification', end + 'f4'), # Magnification used
# Not used (filling up to 128 bytes)
('empty', (np.void, 128 - 13 * 4)),
]
return dtype_list
def get_data_type(index, endianess='<'):
end = endianess
data_type = [
end + 'u2', # 0 = Signal2D unsigned bytes
end + 'i2', # 1 = Signal2D signed short integer (16 bits)
end + 'f4', # 2 = Signal2D float
(end + 'i2', 2), # 3 = Complex short*2
end + 'c8', # 4 = Complex float*2
]
return data_type[index]
def file_reader(filename, endianess='<', **kwds):
metadata = {}
f = open(filename, 'rb')
std_header = np.fromfile(f, dtype=get_std_dtype_list(endianess),
count=1)
fei_header = None
if std_header['NEXT'] / 1024 == 128:
_logger.info("%s seems to contain an extended FEI header", filename)
fei_header = np.fromfile(f, dtype=get_fei_dtype_list(endianess),
count=1024)
if f.tell() == 1024 + std_header['NEXT']:
_logger.debug("The FEI header was correctly loaded")
else:
_logger.warn("There was a problem reading the extended header")
f.seek(1024 + std_header['NEXT'])
fei_header = None
NX, NY, NZ = std_header['NX'], std_header['NY'], std_header['NZ']
data = np.memmap(f, mode='c', offset=f.tell(),
dtype=get_data_type(std_header['MODE'], endianess)
).squeeze().reshape((NX, NY, NZ), order='F').T
original_metadata = {'std_header': sarray2dict(std_header)}
# Convert bytes to unicode
for key in ["CMAP", "STAMP", "LABELS"]:
original_metadata["std_header"][key] = \
original_metadata["std_header"][key].decode()
if fei_header is not None:
fei_dict = sarray2dict(fei_header,)
del fei_dict['empty']
original_metadata['fei_header'] = fei_dict
dim = len(data.shape)
if fei_header is None:
        # The scale is in Angstroms; we convert it to nm
scales = [10 * float(std_header['Zlen'] / std_header['MZ'])
if float(std_header['MZ']) != 0 else 1,
10 * float(std_header['Ylen'] / std_header['MY'])
if float(std_header['MY']) != 0 else 1,
10 * float(std_header['Xlen'] / std_header['MX'])
if float(std_header['MX']) != 0 else 1, ]
offsets = [10 * float(std_header['ZORIGIN']),
10 * float(std_header['YORIGIN']),
10 * float(std_header['XORIGIN']), ]
else:
# FEI does not use the standard header to store the scale
# It does store the spatial scale in pixel_size, one per angle in
# meters
scales = [1, ] + [fei_header['pixel_size'][0] * 10 ** 9, ] * 2
offsets = [0, ] * 3
units = [Undefined, 'nm', 'nm']
names = ['z', 'y', 'x']
metadata = {'General': {'original_filename': os.path.split(filename)[1]},
"Signal": {'signal_type': "",
'record_by': 'image', },
}
# create the axis objects for each axis
axes = [
{
'size': data.shape[i],
'index_in_array': i,
'name': names[i + 3 - dim],
'scale': scales[i + 3 - dim],
'offset': offsets[i + 3 - dim],
'units': units[i + 3 - dim], }
for i in range(dim)]
dictionary = {'data': data,
'axes': axes,
'metadata': metadata,
'original_metadata': original_metadata, }
return [dictionary, ]
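# Minimal usage sketch (illustration only; the filename is a placeholder and
# the import path is inferred from this module's location):
#
#     from hyperspy.io_plugins.mrc import file_reader
#     stack = file_reader('tomo_stack.mrc')[0]
#     stack['data']   # numpy memmap with axes ordered (z, y, x)
#     stack['axes']   # axis dictionaries built above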
| gpl-3.0 | 6,899,910,529,376,483,000 | 35.061033 | 78 | 0.498633 | false |
guschmue/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py | 11 | 13962 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linear_operator_diag
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _powerset(x):
s = list(x)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
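# Example (illustrative): _powerset([1, 2]) yields (), (1,), (2,), (1, 2).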
def _matrix_diag(d):
"""Batch version of np.diag."""
orig_shape = d.shape
d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
diag_list = []
for i in range(d.shape[0]):
diag_list.append(np.diag(d[i, ...]))
return np.reshape(diag_list, orig_shape + (d.shape[-1],))
def _make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None):
if scale_tril is not None:
scale_tril = np.tril(scale_tril)
if scale_diag is not None:
scale_tril += _matrix_diag(np.array(scale_diag, dtype=np.float32))
if scale_identity_multiplier is not None:
scale_tril += (
scale_identity_multiplier * _matrix_diag(np.ones(
[scale_tril.shape[-1]], dtype=np.float32)))
return scale_tril
return _make_diag_scale(
loc, scale_diag, scale_identity_multiplier, shape_hint)
def _make_diag_scale(
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None):
if scale_diag is not None:
scale_diag = np.asarray(scale_diag)
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier
return _matrix_diag(scale_diag)
if loc is None and shape_hint is None:
return None
if shape_hint is None:
shape_hint = loc.shape[-1]
if scale_identity_multiplier is None:
scale_identity_multiplier = 1.
return scale_identity_multiplier * np.diag(np.ones(shape_hint))
class MakeTrilScaleTest(test.TestCase):
def _testLegalInputs(
self, loc=None, shape_hint=None, scale_params=None):
for args in _powerset(scale_params.items()):
with self.test_session():
args = dict(args)
scale_args = dict({
"loc": loc,
"shape_hint": shape_hint}, **args)
expected_scale = _make_tril_scale(**scale_args)
if expected_scale is None:
# Not enough shape information was specified.
with self.assertRaisesRegexp(ValueError, ("is specified.")):
scale = distribution_util.make_tril_scale(**scale_args)
scale.to_dense().eval()
else:
scale = distribution_util.make_tril_scale(**scale_args)
self.assertAllClose(expected_scale, scale.to_dense().eval())
def testLegalInputs(self):
self._testLegalInputs(
loc=np.array([-1., -1.], dtype=np.float32),
shape_hint=2,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.],
"scale_tril": [[1., 0.],
[-3., 3.]],
})
def testLegalInputsMultidimensional(self):
self._testLegalInputs(
loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
shape_hint=3,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3., 4.], [3., 4., 5.]]],
"scale_tril": [[[[1., 0., 0.],
[-3., 3., 0.],
[1., -2., 1.]],
[[2., 1., 0.],
[-4., 7., 0.],
[1., -1., 1.]]]]
})
def testZeroTriU(self):
with self.test_session():
scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]])
self.assertAllClose([[1., 0], [1., 1.]], scale.to_dense().eval())
def testValidateArgs(self):
with self.test_session():
with self.assertRaisesOpError("diagonal part must be non-zero"):
scale = distribution_util.make_tril_scale(
scale_tril=[[0., 1], [1., 1.]], validate_args=True)
scale.to_dense().eval()
def testAssertPositive(self):
with self.test_session():
with self.assertRaisesOpError("diagonal part must be positive"):
scale = distribution_util.make_tril_scale(
scale_tril=[[-1., 1], [1., 1.]],
validate_args=True,
assert_positive=True)
scale.to_dense().eval()
class MakeDiagScaleTest(test.TestCase):
def _testLegalInputs(
self, loc=None, shape_hint=None, scale_params=None):
for args in _powerset(scale_params.items()):
with self.test_session():
args = dict(args)
scale_args = dict({
"loc": loc,
"shape_hint": shape_hint}, **args)
expected_scale = _make_diag_scale(**scale_args)
if expected_scale is None:
# Not enough shape information was specified.
with self.assertRaisesRegexp(ValueError, ("is specified.")):
scale = distribution_util.make_diag_scale(**scale_args)
scale.to_dense().eval()
else:
scale = distribution_util.make_diag_scale(**scale_args)
self.assertAllClose(expected_scale, scale.to_dense().eval())
def testLegalInputs(self):
self._testLegalInputs(
loc=np.array([-1., -1.], dtype=np.float32),
shape_hint=2,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.]
})
def testLegalInputsMultidimensional(self):
self._testLegalInputs(
loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
shape_hint=3,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3., 4.], [3., 4., 5.]]]
})
def testValidateArgs(self):
with self.test_session():
with self.assertRaisesOpError("diagonal part must be non-zero"):
scale = distribution_util.make_diag_scale(
scale_diag=[[0., 1], [1., 1.]], validate_args=True)
scale.to_dense().eval()
def testAssertPositive(self):
with self.test_session():
with self.assertRaisesOpError("diagonal part must be positive"):
scale = distribution_util.make_diag_scale(
scale_diag=[[-1., 1], [1., 1.]],
validate_args=True,
assert_positive=True)
scale.to_dense().eval()
class ShapesFromLocAndScaleTest(test.TestCase):
def test_static_loc_static_scale_non_matching_event_size_raises(self):
loc = constant_op.constant(np.zeros((2, 4)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
distribution_util.shapes_from_loc_and_scale(loc, scale)
def test_static_loc_static_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 2]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_static_loc_dynamic_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_static_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = constant_op.constant(np.ones((5, 2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session():
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
# batch_shape depends on both args, and so is dynamic. Since loc did not
# have static shape, we inferred event shape entirely from scale, and this
# is available statically.
self.assertAllEqual(
[5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_dynamic_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_none_loc_static_scale(self):
loc = None
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_none_loc_dynamic_scale(self):
loc = None
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 1], batch_shape)
self.assertAllEqual([3], event_shape)
class GetBroadcastShapeTest(test.TestCase):
def test_all_static_shapes_work(self):
x = array_ops.ones((2, 1, 3))
y = array_ops.ones((1, 5, 3))
z = array_ops.ones(())
self.assertAllEqual([2, 5, 3],
distribution_util.get_broadcast_shape(x, y, z))
def test_with_some_dynamic_shapes_works(self):
x = array_ops.ones((2, 1, 3))
y = array_ops.placeholder(x.dtype)
z = array_ops.ones(())
with self.test_session() as sess:
bcast_shape = sess.run(
distribution_util.get_broadcast_shape(x, y, z),
feed_dict={y: np.ones((1, 5, 3)).astype(np.float32)})
self.assertAllEqual([2, 5, 3], bcast_shape)
class TridiagTest(test.TestCase):
def testWorksCorrectlyNoBatches(self):
with self.test_session():
self.assertAllEqual(
[[4., 8., 0., 0.],
[1., 5., 9., 0.],
[0., 2., 6., 10.],
[0., 0., 3, 7.]],
distribution_util.tridiag(
[1., 2., 3.],
[4., 5., 6., 7.],
[8., 9., 10.]).eval())
def testWorksCorrectlyBatches(self):
with self.test_session():
self.assertAllClose(
[[[4., 8., 0., 0.],
[1., 5., 9., 0.],
[0., 2., 6., 10.],
[0., 0., 3, 7.]],
[[0.7, 0.1, 0.0, 0.0],
[0.8, 0.6, 0.2, 0.0],
[0.0, 0.9, 0.5, 0.3],
[0.0, 0.0, 1.0, 0.4]]],
distribution_util.tridiag(
[[1., 2., 3.],
[0.8, 0.9, 1.]],
[[4., 5., 6., 7.],
[0.7, 0.6, 0.5, 0.4]],
[[8., 9., 10.],
[0.1, 0.2, 0.3]]).eval(),
rtol=1e-5, atol=0.)
def testHandlesNone(self):
with self.test_session():
self.assertAllClose(
[[[4., 0., 0., 0.],
[0., 5., 0., 0.],
[0., 0., 6., 0.],
[0., 0., 0, 7.]],
[[0.7, 0.0, 0.0, 0.0],
[0.0, 0.6, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 0.0, 0.4]]],
distribution_util.tridiag(
diag=[[4., 5., 6., 7.],
[0.7, 0.6, 0.5, 0.4]]).eval(),
rtol=1e-5, atol=0.)
class MixtureStddevTest(test.TestCase):
def test_mixture_dev(self):
mixture_weights = np.array([
[1.0/3, 1.0/3, 1.0/3],
[0.750, 0.250, 0.000]
])
component_means = np.array([
[1.0, 1.0, 1.0],
[-5, 0, 1.25]
])
component_devs = np.array([
[1.0, 1.0, 1.0],
[0.01, 2.0, 0.1]
])
# The first case should trivially have a standard deviation of 1.0 because
# all components are identical and have that standard deviation.
# The second case was computed by hand.
expected_devs = np.array([
1.0,
2.3848637277
])
weights_tf = array_ops.constant(mixture_weights)
means_tf = array_ops.constant(component_means)
sigmas_tf = array_ops.constant(component_devs)
mix_dev = distribution_util.mixture_stddev(weights_tf,
means_tf,
sigmas_tf)
with self.test_session() as sess:
actual_devs = sess.run(mix_dev)
self.assertAllClose(actual_devs, expected_devs)
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,220,895,612,693,028,000 | 33.992481 | 80 | 0.584587 | false |
mimimitm/Responder | servers/SMB.py | 10 | 13579 | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import settings
from random import randrange
from packets import SMBHeader, SMBNegoAnsLM, SMBNegoAns, SMBNegoKerbAns, SMBSession1Data, SMBSession2Accept, SMBSessEmpty, SMBTreeData
from SocketServer import BaseRequestHandler
from utils import *
# Detect if SMB auth was Anonymous
def Is_Anonymous(data):
SecBlobLen = struct.unpack('<H',data[51:53])[0]
if SecBlobLen < 260:
LMhashLen = struct.unpack('<H',data[89:91])[0]
return True if LMhashLen == 0 or LMhashLen == 1 else False
if SecBlobLen > 260:
LMhashLen = struct.unpack('<H',data[93:95])[0]
return True if LMhashLen == 0 or LMhashLen == 1 else False
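# Note added for clarity (general NTLM behaviour): anonymous/null-session
# logins carry an empty LM/NTLM response, so a security-buffer length of 0
# (or the single null byte some clients send, i.e. length 1) is treated as
# anonymous and no credentials are captured.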
def Is_LMNT_Anonymous(data):
LMhashLen = struct.unpack('<H',data[51:53])[0]
return True if LMhashLen == 0 or LMhashLen == 1 else False
#Function used to know which dialect number to return for NT LM 0.12
def Parse_Nego_Dialect(data):
    # The Negotiate Protocol request carries a list of null-terminated dialect
    # strings, each prefixed with \x02; the response must echo back the index
    # of "NT LM 0.12" as a little-endian 16-bit word.
    Dialect = tuple([e.replace('\x00', '') for e in data[40:].split('\x02')[:16]])
    for i in range(len(Dialect)):
        if Dialect[i] == "NT LM 0.12":
            return chr(i) + "\x00"
#Set MID SMB Header field.
def midcalc(data):
pack=data[34:36]
return pack
#Set UID SMB Header field.
def uidcalc(data):
pack=data[32:34]
return pack
#Set PID SMB Header field.
def pidcalc(data):
pack=data[30:32]
return pack
#Set TID SMB Header field.
def tidcalc(data):
pack=data[28:30]
return pack
def ParseShare(data):
packet = data[:]
a = re.search('(\\x5c\\x00\\x5c.*.\\x00\\x00\\x00)', packet)
if a:
print text("[SMB] Requested Share : %s" % a.group(0).replace('\x00', ''))
#Parse SMB NTLMSSP v1/v2
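# Layout note (standard NTLMSSP, added for clarity): the AUTHENTICATE message
# embedded in the Session Setup AndX request describes each field (LM response,
# NT response, domain, user) with a security buffer of 2-byte length, 2-byte
# max length and 4-byte offset relative to the start of the blob. The fixed
# indices below assume the usual packet layout produced by Windows clients;
# a 24-byte NT response is NTLMv1, while longer responses are NTLMv2
# (16-byte HMAC followed by the blob).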
def ParseSMBHash(data,client):
SecBlobLen = struct.unpack('<H',data[51:53])[0]
BccLen = struct.unpack('<H',data[61:63])[0]
if SecBlobLen < 260:
SSPIStart = data[75:]
LMhashLen = struct.unpack('<H',data[89:91])[0]
LMhashOffset = struct.unpack('<H',data[91:93])[0]
LMHash = SSPIStart[LMhashOffset:LMhashOffset+LMhashLen].encode("hex").upper()
NthashLen = struct.unpack('<H',data[97:99])[0]
NthashOffset = struct.unpack('<H',data[99:101])[0]
else:
SSPIStart = data[79:]
LMhashLen = struct.unpack('<H',data[93:95])[0]
LMhashOffset = struct.unpack('<H',data[95:97])[0]
LMHash = SSPIStart[LMhashOffset:LMhashOffset+LMhashLen].encode("hex").upper()
NthashLen = struct.unpack('<H',data[101:103])[0]
NthashOffset = struct.unpack('<H',data[103:105])[0]
if NthashLen == 24:
SMBHash = SSPIStart[NthashOffset:NthashOffset+NthashLen].encode("hex").upper()
DomainLen = struct.unpack('<H',data[105:107])[0]
DomainOffset = struct.unpack('<H',data[107:109])[0]
Domain = SSPIStart[DomainOffset:DomainOffset+DomainLen].replace('\x00','')
UserLen = struct.unpack('<H',data[113:115])[0]
UserOffset = struct.unpack('<H',data[115:117])[0]
Username = SSPIStart[UserOffset:UserOffset+UserLen].replace('\x00','')
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, LMHash, SMBHash, settings.Config.NumChal)
SaveToDb({
'module': 'SMB',
'type': 'NTLMv1-SSP',
'client': client,
'user': Domain+'\\'+Username,
'hash': SMBHash,
'fullhash': WriteHash,
})
if NthashLen > 60:
SMBHash = SSPIStart[NthashOffset:NthashOffset+NthashLen].encode("hex").upper()
DomainLen = struct.unpack('<H',data[109:111])[0]
DomainOffset = struct.unpack('<H',data[111:113])[0]
Domain = SSPIStart[DomainOffset:DomainOffset+DomainLen].replace('\x00','')
UserLen = struct.unpack('<H',data[117:119])[0]
UserOffset = struct.unpack('<H',data[119:121])[0]
Username = SSPIStart[UserOffset:UserOffset+UserLen].replace('\x00','')
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, settings.Config.NumChal, SMBHash[:32], SMBHash[32:])
SaveToDb({
'module': 'SMB',
'type': 'NTLMv2-SSP',
'client': client,
'user': Domain+'\\'+Username,
'hash': SMBHash,
'fullhash': WriteHash,
})
# Parse SMB NTLMv1/v2
def ParseLMNTHash(data, client):
LMhashLen = struct.unpack('<H',data[51:53])[0]
NthashLen = struct.unpack('<H',data[53:55])[0]
Bcc = struct.unpack('<H',data[63:65])[0]
Username, Domain = tuple([e.replace('\x00','') for e in data[89+NthashLen:Bcc+60].split('\x00\x00\x00')[:2]])
if NthashLen > 25:
FullHash = data[65+LMhashLen:65+LMhashLen+NthashLen].encode('hex')
LmHash = FullHash[:32].upper()
NtHash = FullHash[32:].upper()
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, settings.Config.NumChal, LmHash, NtHash)
SaveToDb({
'module': 'SMB',
'type': 'NTLMv2',
'client': client,
'user': Domain+'\\'+Username,
'hash': NtHash,
'fullhash': WriteHash,
})
if NthashLen == 24:
NtHash = data[65+LMhashLen:65+LMhashLen+NthashLen].encode('hex').upper()
LmHash = data[65:65+LMhashLen].encode('hex').upper()
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, LmHash, NtHash, settings.Config.NumChal)
SaveToDb({
'module': 'SMB',
'type': 'NTLMv1',
'client': client,
'user': Domain+'\\'+Username,
'hash': NtHash,
'fullhash': WriteHash,
})
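# The hash lines written above follow the usual NetNTLM cracker formats
# (tool names given for reference only):
#   NTLMv1: user::domain:lm_hex:nt_hex:challenge   (hashcat -m 5500, john netntlm)
#   NTLMv2: user::domain:challenge:hmac_hex:blob_hex (hashcat -m 5600, john netntlmv2)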
def IsNT4ClearTxt(data, client):
HeadLen = 36
if data[14:16] == "\x03\x80":
SmbData = data[HeadLen+14:]
WordCount = data[HeadLen]
ChainedCmdOffset = data[HeadLen+1]
if ChainedCmdOffset == "\x75":
PassLen = struct.unpack('<H',data[HeadLen+15:HeadLen+17])[0]
if PassLen > 2:
Password = data[HeadLen+30:HeadLen+30+PassLen].replace("\x00","")
User = ''.join(tuple(data[HeadLen+30+PassLen:].split('\x00\x00\x00'))[:1]).replace("\x00","")
print text("[SMB] Clear Text Credentials: %s:%s" % (User,Password))
WriteData(settings.Config.SMBClearLog % client, User+":"+Password, User+":"+Password)
# SMB Server class, NTLMSSP
class SMB1(BaseRequestHandler):
def handle(self):
try:
while True:
data = self.request.recv(1024)
self.request.settimeout(1)
if len(data) < 1:
break
##session request 139
if data[0] == "\x81":
Buffer = "\x82\x00\x00\x00"
try:
self.request.send(Buffer)
data = self.request.recv(1024)
except:
pass
# Negociate Protocol Response
if data[8:10] == "\x72\x00":
# \x72 == Negociate Protocol Response
Header = SMBHeader(cmd="\x72",flag1="\x88", flag2="\x01\xc8", pid=pidcalc(data),mid=midcalc(data))
Body = SMBNegoKerbAns(Dialect=Parse_Nego_Dialect(data))
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
# Session Setup AndX Request
if data[8:10] == "\x73\x00":
IsNT4ClearTxt(data, self.client_address[0])
# STATUS_MORE_PROCESSING_REQUIRED
Header = SMBHeader(cmd="\x73",flag1="\x88", flag2="\x01\xc8", errorcode="\x16\x00\x00\xc0", uid=chr(randrange(256))+chr(randrange(256)),pid=pidcalc(data),tid="\x00\x00",mid=midcalc(data))
Body = SMBSession1Data(NTLMSSPNtServerChallenge=settings.Config.Challenge)
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(4096)
# STATUS_SUCCESS
if data[8:10] == "\x73\x00":
if Is_Anonymous(data):
Header = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid="\x00\x00",uid=uidcalc(data),mid=midcalc(data))###should always send errorcode="\x72\x00\x00\xc0" account disabled for anonymous logins.
Body = SMBSessEmpty()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
else:
# Parse NTLMSSP_AUTH packet
ParseSMBHash(data,self.client_address[0])
# Send STATUS_SUCCESS
Header = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8", errorcode="\x00\x00\x00\x00",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = SMBSession2Accept()
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
# Tree Connect AndX Request
if data[8:10] == "\x75\x00":
ParseShare(data)
# Tree Connect AndX Response
Header = SMBHeader(cmd="\x75",flag1="\x88", flag2="\x01\xc8", errorcode="\x00\x00\x00\x00", pid=pidcalc(data), tid=chr(randrange(256))+chr(randrange(256)), uid=uidcalc(data), mid=midcalc(data))
Body = SMBTreeData()
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
##Tree Disconnect.
if data[8:10] == "\x71\x00":
Header = SMBHeader(cmd="\x71",flag1="\x98", flag2="\x07\xc8", errorcode="\x00\x00\x00\x00",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
##NT_CREATE Access Denied.
if data[8:10] == "\xa2\x00":
Header = SMBHeader(cmd="\xa2",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
##Trans2 Access Denied.
if data[8:10] == "\x25\x00":
Header = SMBHeader(cmd="\x25",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
##LogOff.
if data[8:10] == "\x74\x00":
Header = SMBHeader(cmd="\x74",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x02\xff\x00\x27\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
except socket.timeout:
pass
# SMB Server class, old version
class SMB1LM(BaseRequestHandler):
def handle(self):
try:
self.request.settimeout(0.5)
data = self.request.recv(1024)
##session request 139
if data[0] == "\x81":
Buffer = "\x82\x00\x00\x00"
self.request.send(Buffer)
data = self.request.recv(1024)
##Negotiate proto answer.
if data[8:10] == "\x72\x00":
head = SMBHeader(cmd="\x72",flag1="\x80", flag2="\x00\x00",pid=pidcalc(data),mid=midcalc(data))
Body = SMBNegoAnsLM(Dialect=Parse_Nego_Dialect(data),Domain="",Key=settings.Config.Challenge)
Body.calculate()
Packet = str(head)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
##Session Setup AndX Request
if data[8:10] == "\x73\x00":
if Is_LMNT_Anonymous(data):
head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x53\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Packet = str(head)+str(SMBSessEmpty())
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
else:
ParseLMNTHash(data,self.client_address[0])
head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x53\xc8",errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Packet = str(head)+str(SMBSessEmpty())
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
except Exception:
self.request.close()
pass
| gpl-3.0 | -8,775,220,457,988,083,000 | 33.290404 | 255 | 0.643199 | false |
DoWhatILove/turtle | game/alien invasion/game_functions.py | 1 | 7001 | import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, ship, bullets):
'''response to keydown presses'''
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_q:
sys.exit()
def check_keyup_events(event, ship):
'''response to keyup event'''
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
'''response to keypresses and mouse events'''
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, sb,
play_button, ship, aliens, bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):
'''start a new game when the player clicks Play'''
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
ai_settings.initialize_dynamic_settings()
pygame.mouse.set_visible(False)
stats.game_active = True
stats.reset_stats()
sb.prep_all()
aliens.empty()
bullets.empty()
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button):
'''update image on the screen and flip to the new screen'''
screen.fill(ai_settings.bg_color)
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
aliens.draw(screen)
sb.show_score()
if not stats.game_active:
play_button.draw_button()
pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, bullets, aliens):
'''update position of bullets and get rid of old bullets'''
bullets.update()
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
check_bullet_alien_collisions(
ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
'''respond to bullet-alien collision'''
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
        for hit_aliens in collisions.values():
            # each value is the list of aliens destroyed by a single bullet;
            # a distinct name keeps the 'aliens' group checked below unshadowed
            stats.score += ai_settings.alien_points * len(hit_aliens)
sb.prep_score()
check_high_score(stats, sb)
if len(aliens) == 0:
bullets.empty()
ai_settings.increase_speed_and_alien_point()
# increse level
stats.level += 1
sb.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def fire_bullet(ai_settings, screen, ship, bullets):
'''fire a bullet if limit not reached yet'''
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def create_fleet(ai_settings, screen, ship, aliens):
    '''create a full fleet of aliens'''
alien = Alien(ai_settings, screen)
number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
number_rows = get_number_rows(
ai_settings, ship.rect.height, alien.rect.height)
for row_number in range(number_rows):
for alien_number in range(number_aliens_x):
create_alien(ai_settings, screen, aliens, alien_number, row_number)
def get_number_aliens_x(ai_settings, alien_width):
'''determine the number of aliens that fit in a row'''
available_space_x = ai_settings.screen_width - \
ai_settings.space_x_in_alien * alien_width
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
'''create an alien and place it in the row'''
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def get_number_rows(ai_settings, ship_height, alien_height):
    '''determine the number of rows of aliens that fit on the screen'''
available_space_y = ai_settings.screen_height - \
ai_settings.space_y_in_alien * alien_height - ship_height
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
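# Worked example (settings values made up for illustration): with
# screen_height=800, space_y_in_alien=3, alien_height=50 and ship_height=60,
# available_space_y = 800 - 3*50 - 60 = 590 and
# number_rows = int(590 / (2*50)) = 5.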
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
'''update the position of all aliens in the fleet'''
check_fleet_edges(ai_settings, aliens)
aliens.update()
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
'''respond to ship being hit by alien'''
if stats.ships_left > 0:
stats.ships_left -= 1
sb.prep_ships()
aliens.empty()
bullets.empty()
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
sleep(1)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
'''check if any alien have reached the bottom of the screen'''
screen_rect = screen.get_rect()
for alien in aliens:
if alien.rect.bottom >= screen_rect.bottom:
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
break
def check_fleet_edges(ai_settings, aliens):
    '''respond if any alien has reached an edge'''
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
'''drop the entire fleet and change the fleet's direction'''
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def check_high_score(stats, sb):
'''check to see if there's a new high score'''
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
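# Sketch of how these helpers are typically wired together from a main module
# (the surrounding objects such as Settings, Ship, GameStats, Scoreboard and
# Button are assumptions, not defined in this file):
#
#     while True:
#         gf.check_events(ai_settings, screen, stats, sb, play_button,
#                         ship, aliens, bullets)
#         if stats.game_active:
#             ship.update()
#             gf.update_bullets(ai_settings, screen, stats, sb, ship,
#                               bullets, aliens)
#             gf.update_aliens(ai_settings, screen, stats, sb, ship,
#                              aliens, bullets)
#         gf.update_screen(ai_settings, screen, stats, sb, ship, aliens,
#                          bullets, play_button)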
| mit | -1,700,123,483,793,267,700 | 33.318627 | 108 | 0.647479 | false |
psteinb/vigra | vigranumpy/lib/__init__.py | 2 | 62874 | #######################################################################
#
# Copyright 2009-2010 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# [email protected] or
# [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
import sys, os
_vigra_path = os.path.abspath(os.path.dirname(__file__))
_vigra_doc_path = _vigra_path + '/doc/vigranumpy/index.html'
if sys.platform.startswith('win'):
# On Windows, add subdirectory 'dlls' to the PATH in order to find
# the DLLs vigranumpy depends upon. Since this directory appears
# at the end of PATH, already installed DLLs are always preferred.
_vigra_dll_path = _vigra_path + '/dlls'
if os.path.exists(_vigra_dll_path):
os.putenv('PATH', os.getenv('PATH') + os.pathsep + _vigra_dll_path)
def _fallbackModule(moduleName, message):
'''This function installs a fallback module with the given 'moduleName'.
All function calls into this module raise an ImportError with the
given 'message' that hopefully tells the user why the real module
was not available.
'''
import sys
moduleClass = vigranumpycore.__class__
class FallbackModule(moduleClass):
def __init__(self, name):
moduleClass.__init__(self, name)
self.__name__ = name
def __getattr__(self, name):
if name.startswith('__'):
return moduleClass.__getattribute__(self, name)
try:
return moduleClass.__getattribute__(self, name)
except AttributeError:
raise ImportError("""%s.%s: %s""" % (self.__name__, name, self.__doc__))
module = FallbackModule(moduleName)
sys.modules[moduleName] = module
module.__doc__ = """Import of module '%s' failed.\n%s""" % (moduleName, message)
if not os.path.exists(_vigra_doc_path):
_vigra_doc_path = "http://hci.iwr.uni-heidelberg.de/vigra/doc/vigranumpy/index.html"
__doc__ = '''VIGRA Computer Vision Library
HTML documentation is available in
%s
Help on individual functions can be obtained via their doc strings
as usual.
The following sub-modules group related functionality:
* arraytypes (VigraArray and axistags, automatically imported into 'vigra')
* ufunc (improved array arithmetic, automatically used by VigraArray)
* impex (image and array I/O)
* colors (color space transformations)
* filters (spatial filtering, e.g. smoothing)
* sampling (image and array re-sampling and interpolation)
* fourier (Fourier transform and Fourier domain filters)
* analysis (image analysis and segmentation)
* learning (machine learning and classification)
* noise (noise estimation and normalization)
* geometry (geometric algorithms, e.g. convex hull)
* histogram (histograms and channel representation)
* graphs (grid graphs / graphs / graph algorithms)
* utilities (priority queues)
''' % _vigra_doc_path
from __version__ import version
import vigranumpycore
import arraytypes
import impex
import sampling
import filters
import analysis
import learning
import colors
import noise
import geometry
import optimization
import histogram
import graphs
import utilities
sampling.ImagePyramid = arraytypes.ImagePyramid
try:
import fourier
except Exception, e:
_fallbackModule('vigra.fourier',
'''
%s
Make sure that the fftw3 libraries are found during compilation and import.
They may be downloaded at http://www.fftw.org/.''' % str(e))
import fourier
# import most frequently used functions
from arraytypes import *
standardArrayType = arraytypes.VigraArray
defaultAxistags = arraytypes.VigraArray.defaultAxistags
from vigranumpycore import ChunkedArrayFull, ChunkedArrayLazy, ChunkedArrayCompressed, ChunkedArrayTmpFile, Compression
try:
from vigranumpycore import ChunkedArrayHDF5, HDF5Mode
except:
pass
from impex import readImage, readVolume
def readHDF5(filenameOrGroup, pathInFile, order=None):
'''Read an array from an HDF5 file.
'filenameOrGroup' can contain a filename or a group object
referring to an already open HDF5 file. 'pathInFile' is the name
of the dataset to be read, including intermediate groups. If the
first argument is a group object, the path is relative to this
group, otherwise it is relative to the file's root group.
If the dataset has an attribute 'axistags', the returned array
will have type :class:`~vigra.VigraArray` and will be transposed
into the given 'order' ('vigra.VigraArray.defaultOrder'
will be used if no order is given). Otherwise, the returned
array is a plain 'numpy.ndarray'. In this case, order='F' will
return the array transposed into Fortran order.
Requirements: the 'h5py' module must be installed.
'''
import h5py
if isinstance(filenameOrGroup, h5py.highlevel.Group):
file = None
group = filenameOrGroup
else:
file = h5py.File(filenameOrGroup, 'r')
group = file['/']
try:
dataset = group[pathInFile]
if not isinstance(dataset, h5py.highlevel.Dataset):
raise IOError("readHDF5(): '%s' is not a dataset" % pathInFile)
data = dataset.value
axistags = dataset.attrs.get('axistags', None)
if axistags is not None:
data = data.view(arraytypes.VigraArray)
data.axistags = arraytypes.AxisTags.fromJSON(axistags)
if order is None:
order = arraytypes.VigraArray.defaultOrder
data = data.transposeToOrder(order)
else:
if order == 'F':
data = data.transpose()
elif order not in [None, 'C', 'A']:
raise IOError("readHDF5(): unsupported order '%s'" % order)
finally:
if file is not None:
file.close()
return data
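# Minimal usage sketch for readHDF5 (hypothetical file and dataset names; assumes
# h5py is installed and the file exists, e.g. written by writeHDF5 below):
#
#   vol = readHDF5('data.h5', 'volume')               # VigraArray if 'axistags' stored
#   raw = readHDF5('data.h5', 'raw/stack', order='F') # plain ndarray in Fortran order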
def writeHDF5(data, filenameOrGroup, pathInFile, compression=None):
'''Write an array to an HDF5 file.
'filenameOrGroup' can contain a filename or a group object
referring to an already open HDF5 file. 'pathInFile' is the name of the
dataset to be written, including intermediate groups. If the first
argument is a group object, the path is relative to this group,
otherwise it is relative to the file's root group. If the dataset already
exists, it will be replaced without warning.
If 'data' has an attribute 'axistags', the array is transposed to
numpy order before writing. Moreover, the axistags will be
stored along with the data in an attribute 'axistags'.
'compression' can be set to 'gzip', 'szip' or 'lzf'
gzip (standard compression),
szip (available if HDF5 is compiled with szip. Faster compression, limited types),
lzf (very fast compression, all types).
The 'lzf' compression filter is many times faster than 'gzip'
at the cost of a lower compression ratio.
Requirements: the 'h5py' module must be installed.
'''
import h5py
if isinstance(filenameOrGroup, h5py.highlevel.Group):
file = None
group = filenameOrGroup
else:
file = h5py.File(filenameOrGroup)
group = file['/']
try:
levels = pathInFile.split('/')
for groupname in levels[:-1]:
if groupname == '':
continue
g = group.get(groupname, default=None)
if g is None:
group = group.create_group(groupname)
elif not isinstance(g, h5py.highlevel.Group):
raise IOError("writeHDF5(): invalid path '%s'" % pathInFile)
else:
group = g
dataset = group.get(levels[-1], default=None)
if dataset is not None:
if isinstance(dataset, h5py.highlevel.Dataset):
del group[levels[-1]]
else:
raise IOError("writeHDF5(): cannot replace '%s' because it is not a dataset" % pathInFile)
try:
data = data.transposeToNumpyOrder()
except:
pass
dataset = group.create_dataset(levels[-1], data=data, compression=compression)
if hasattr(data, 'axistags'):
dataset.attrs['axistags'] = data.axistags.toJSON()
finally:
if file is not None:
file.close()
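# Minimal round-trip sketch (hypothetical file and dataset names; assumes h5py is
# installed; the stored 'axistags' attribute is what lets readHDF5 return a
# VigraArray on the way back):
#
#   img = taggedView(numpy.zeros((100, 200, 3), dtype=numpy.float32), 'xyc')
#   writeHDF5(img, 'data.h5', 'images/example', compression='gzip')
#   img2 = readHDF5('data.h5', 'images/example')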
impex.readHDF5 = readHDF5
readHDF5.__module__ = 'vigra.impex'
impex.writeHDF5 = writeHDF5
writeHDF5.__module__ = 'vigra.impex'
from filters import convolve, gaussianSmoothing
from sampling import resize
# import enums
CLOCKWISE = sampling.RotationDirection.CLOCKWISE
COUNTER_CLOCKWISE = sampling.RotationDirection.COUNTER_CLOCKWISE
UPSIDE_DOWN = sampling.RotationDirection.UPSIDE_DOWN
CompleteGrow = analysis.SRGType.CompleteGrow
KeepContours = analysis.SRGType.KeepContours
StopAtThreshold = analysis.SRGType.StopAtThreshold
_selfdict = globals()
def searchfor(searchstring):
'''Scan all vigra modules to find classes and functions containing
'searchstring' in their name.
'''
for attr in _selfdict.keys():
contents = dir(_selfdict[attr])
for cont in contents:
if ( cont.upper().find(searchstring.upper()) ) >= 0:
print attr+"."+cont
# FIXME: use axistags here
def imshow(image,show=True):
'''Display a scalar or RGB image by means of matplotlib.
If the image does not have one or three channels, an exception is raised.
The image will be automatically scaled to the range 0...255 when its dtype
is not already 'uint8'.
'''
import matplotlib.pylab
if not hasattr(image, 'axistags'):
return matplotlib.pyplot.imshow(image)
image = image.transposeToNumpyOrder()
if image.channels == 1:
image = image.dropChannelAxis().view(numpy.ndarray)
plot = matplotlib.pyplot.imshow(image, cmap=matplotlib.cm.gray, \
norm=matplotlib.cm.colors.Normalize())
if show:
matplotlib.pylab.show()
return plot
elif image.channels == 3:
if image.dtype != numpy.uint8:
out = image.__class__(image.shape, dtype=numpy.uint8, axistags=image.axistags)
image = colors.linearRangeMapping(image, newRange=(0.0, 255.0), out=out)
plot = matplotlib.pyplot.imshow(image.view(numpy.ndarray))
if show:
matplotlib.pylab.show()
return plot
else:
raise RuntimeError("vigra.imshow(): Image must have 1 or 3 channels.")
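# Minimal usage sketch (hypothetical file name; needs matplotlib):
#
#   img = readImage('example.png')   # scalar or RGB image
#   imshow(img)                      # scaled to 0...255 unless dtype is already uint8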
def segShow(img,labels,edgeColor=(0,0,0),alpha=0.3,show=False,returnImg=False):
labels = numpy.squeeze(labels)
crackedEdges = analysis.regionImageToCrackEdgeImage(labels).squeeze()
print "cracked shape",crackedEdges.shape
whereEdge = numpy.where(crackedEdges==0)
imgToDisplay = resize(img,numpy.squeeze(crackedEdges).shape)
imgToDisplay-=imgToDisplay.min()
imgToDisplay/=imgToDisplay.max()
for c in range(3):
ic = imgToDisplay[:,:,c]
ic[whereEdge]=(1.0-alpha)*edgeColor[c] + alpha*ic[whereEdge]
if returnImg:
return imgToDisplay
return imshow(imgToDisplay,show=show)
def nestedSegShow(img,labels,edgeColors=None,scale=1,show=False,returnImg=False):
shape=(labels.shape[0]*scale,labels.shape[1]*scale)
if scale!=1:
img=resize(img,shape)
assert numpy.squeeze(labels).ndim==3
nSegs = labels.shape[2]
if edgeColors is None :
edgeColors=numpy.ones([nSegs,4])
a =numpy.array([0,0,0.0,0.6],dtype=numpy.float32)
b =numpy.array([1,0,0,0.4],dtype=numpy.float32)
for s in range(nSegs):
f=float(s)/float(nSegs-1)
edgeColors[s,:]=f*b + (1.0-f)*a
tShape=(img.shape[0]*2-1,img.shape[1]*2-1)
imgToDisplay = resize(img,tShape)
imgToDisplay-=imgToDisplay.min()
imgToDisplay/=imgToDisplay.max()
imgIn = imgToDisplay.copy()
for si in range(nSegs):
l = labels[:,:,si].copy()
if scale!=1:
l=resize(l.astype(numpy.float32),shape,order=0).astype(numpy.uint32)
crackedEdges = analysis.regionImageToCrackEdgeImage(l)
whereEdge = numpy.where(crackedEdges==0)
if len(edgeColors[si])<4:
alpha = 0.0
else:
alpha = edgeColors[si,3]
for c in range(3):
icI = imgIn[:,:,c]
ic = imgToDisplay[:,:,c]
ic[whereEdge]=(1.0-alpha) * edgeColors[si,c] + alpha*icI[whereEdge]
if returnImg:
return imgToDisplay
return imshow(imgToDisplay,show=show)
def show():
import matplotlib.pylab
matplotlib.pylab.show()
# auto-generate code for additional Kernel generators:
def _genKernelFactories(name):
for oldName in dir(eval('filters.'+name)):
if not oldName.startswith('init'):
continue
#remove init from beginning and start with lower case character
newPrefix = oldName[4].lower() + oldName[5:]
if newPrefix == "explicitly":
newPrefix = "explict"
newName = newPrefix + 'Kernel'
if name == 'Kernel2D':
newName += '2D'
code = '''def %(newName)s(*args):
k = filters.%(name)s()
k.%(oldName)s(*args)
return k
%(newName)s.__doc__ = filters.%(name)s.%(oldName)s.__doc__
filters.%(newName)s=%(newName)s
''' % {'oldName': oldName, 'newName': newName, 'name': name}
exec code
_genKernelFactories('Kernel1D')
_genKernelFactories('Kernel2D')
del _genKernelFactories
# define watershedsUnionFind()
def _genWatershedsUnionFind():
def watershedsUnionFind(image, neighborhood=None, out = None):
'''Compute watersheds of an image using the union find algorithm.
If 'neighborhood' is 'None', it defaults to 8-neighborhood for 2D inputs
and 6-neighborhood for 3D inputs.
Calls :func:`watersheds` with parameters::\n\n
watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
'''
if neighborhood is None:
neighborhood = 8 if image.spatialDimensions == 2 else 6
return analysis.watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
watershedsUnionFind.__module__ = 'vigra.analysis'
analysis.watershedsUnionFind = watershedsUnionFind
_genWatershedsUnionFind()
del _genWatershedsUnionFind
# define watershedsReoptimization()
def _genWatershedsReoptimization():
def watershedsReoptimization(labels,edgeIndicator,shrinkN,out=None,visu=False):
# do unseeding
#if visu :
# import matplotlib,numpy
# import pylab
# # A random colormap for matplotlib
# cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
# pylab.imshow ( numpy.swapaxes(labels,0,1) , cmap = cmap)
# pylab.show()
seeds=analysis.segToSeeds(labels,long(shrinkN))
if visu :
import matplotlib,numpy
import pylab
# A random colormap for matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
pylab.imshow ( numpy.swapaxes(seeds,0,1) , cmap = cmap)
pylab.show()
#if seeds.ndim==2:
# seeds=analysis.labelImageWithBackground(seeds)
#elif seeds.ndim==3:
# seeds=analysis.labelVolumeWithBackground(seeds)
#else :
# raise RuntimeError("only implemented for 2d and 3d")
if visu :
import matplotlib,numpy
import pylab
# A random colormap for matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
pylab.imshow ( numpy.swapaxes(seeds,0,1) , cmap = cmap)
pylab.show()
return analysis.watersheds(edgeIndicator,seeds=seeds,out=out)
watershedsReoptimization.__module__ = 'vigra.analysis'
analysis.watershedsReoptimization = watershedsReoptimization
_genWatershedsReoptimization()
del _genWatershedsReoptimization
# define tensor convenience functions
def _genTensorConvenienceFunctions():
def hessianOfGaussianEigenvalues(image, scale, out=None,
sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
'''Compute the eigenvalues of the Hessian of Gaussian at the given scale
for a scalar image or volume.
Calls :func:`hessianOfGaussian` and :func:`tensorEigenvalues`.
'''
hessian = filters.hessianOfGaussian(image, scale,
sigma_d=sigma_d, step_size=step_size,
window_size=window_size, roi=roi)
return filters.tensorEigenvalues(hessian, out=out)
hessianOfGaussianEigenvalues.__module__ = 'vigra.filters'
filters.hessianOfGaussianEigenvalues = hessianOfGaussianEigenvalues
def structureTensorEigenvalues(image, innerScale, outerScale, out=None,
sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
'''Compute the eigenvalues of the structure tensor at the given scales
for a scalar or multi-channel image or volume.
Calls :func:`structureTensor` and :func:`tensorEigenvalues`.
'''
st = filters.structureTensor(image, innerScale, outerScale,
sigma_d=sigma_d, step_size=step_size,
window_size=window_size, roi=roi)
return filters.tensorEigenvalues(st, out=out)
structureTensorEigenvalues.__module__ = 'vigra.filters'
filters.structureTensorEigenvalues = structureTensorEigenvalues
_genTensorConvenienceFunctions()
del _genTensorConvenienceFunctions
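# Minimal usage sketch for the tensor convenience filters (hypothetical input;
# 'img' is assumed to be a scalar 2D image):
#
#   img  = readImage('example.png')[..., 0]
#   hev  = filters.hessianOfGaussianEigenvalues(img, scale=2.0)
#   stev = filters.structureTensorEigenvalues(img, innerScale=1.0, outerScale=2.0)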
# define feature convenience functions
def _genFeaturConvenienceFunctions():
def supportedFeatures(array):
'''Return a list of feature names that are available for the given array. These feature
names are the valid inputs to a call of :func:`extractFeatures`. E.g., to compute
just the first two features in the list, use::
f = vigra.analysis.supportedFeatures(array)
print "Computing features:", f[:2]
r = vigra.analysis.extractFeatures(array, features=f[:2])
'''
return analysis.extractFeatures(array, None).supportedFeatures()
supportedFeatures.__module__ = 'vigra.analysis'
analysis.supportedFeatures = supportedFeatures
def supportedRegionFeatures(array, labels):
'''Return a list of feature names that are available for the given array and label array.
These feature names are the valid inputs to a call of
:func:`extractRegionFeatures`. E.g., to compute just the first two features in the
list, use::
f = vigra.analysis.supportedRegionFeatures(array, labels)
print "Computing features:", f[:2]
r = vigra.analysis.extractRegionFeatures(array, labels, features=f[:2])
'''
return analysis.extractRegionFeatures(array, labels, None).supportedFeatures()
supportedRegionFeatures.__module__ = 'vigra.analysis'
analysis.supportedRegionFeatures = supportedRegionFeatures
# implement the read-only part of the 'dict' API in FeatureAccumulator and RegionFeatureAccumulator
def __len__(self):
return len(self.keys())
def __iter__(self):
return self.keys().__iter__()
def has_key(self, key):
try:
return self.isActive(key)
except:
return False
def values(self):
return [self[k] for k in self.keys()]
def items(self):
return [(k, self[k]) for k in self.keys()]
def iterkeys(self):
return self.keys().__iter__()
def itervalues(self):
for k in self.keys():
yield self[k]
def iteritems(self):
for k in self.keys():
yield (k, self[k])
for k in ['__len__', '__iter__', 'has_key', 'values', 'items', 'iterkeys', 'itervalues', 'iteritems']:
setattr(analysis.FeatureAccumulator, k, eval(k))
setattr(analysis.RegionFeatureAccumulator, k, eval(k))
_genFeaturConvenienceFunctions()
del _genFeaturConvenienceFunctions
MetricType = graphs.MetricType
# define grid graph convenience functions
# and extend grid graph classes
def _genGridGraphConvenienceFunctions():
def gridGraph(shape,directNeighborhood=True):
'''Return a grid graph with certain shape.
Parameters:
- shape -- shape of the image
- directNeighborhood -- use 4 (True) or 8 (False) neighborhood (default: True)
Returns:
- grid graph
use::
>>> # 4-connected
>>> g = vigra.graphs.gridGraph(shape=[10,20])
>>> g.nodeNum
200
>>> # 8-connected
>>> g = vigra.graphs.gridGraph(shape=[10,20],directNeighborhood=False)
'''
if(len(shape)==2):
return graphs.GridGraphUndirected2d(shape,directNeighborhood)
elif(len(shape)==3):
return graphs.GridGraphUndirected3d(shape,directNeighborhood)
else:
raise RuntimeError("GridGraph is only implemented for 2d and 3d grids")
gridGraph.__module__ = 'vigra.graphs'
graphs.gridGraph = gridGraph
# extend grid graph via meta classes
for cls in [graphs.GridGraphUndirected2d, graphs.GridGraphUndirected3d] :
metaCls = cls.__class__
class gridGraphInjector(object):
class __metaclass__(metaCls):
def __init__(self, name, bases, dict):
for b in bases:
if type(b) not in (self, type):
for k,v in dict.items():
setattr(b,k,v)
return type.__init__(self, name, bases, dict)
##inject some methods in the point foo
class moreGridGraph(gridGraphInjector, cls):
@property
def shape(self):
""" shape of grid graph"""
return self.intrinsicNodeMapShape()
def nodeSize(self):
""" node map filled with 1.0"""
size = graphs.graphMap(self,item='node',dtype=numpy.float32)
size[:]=1
return size
def edgeLengths(self):
""" node map filled with 1.0"""
size = graphs.graphMap(self,item='edge',dtype=numpy.float32)
size[:]=1
return size
def mergeGraph(self):
if len(self.shape)==2:
mg = graphs.GridGraphUndirected2dMergeGraph(self)
else:
mg = graphs.GridGraphUndirected3dMergeGraph(self)
return mg
def isGridGraph(obj):
""" check if obj is gridGraph"""
return isinstance(obj,(graphs.GridGraphUndirected2d , graphs.GridGraphUndirected3d))
isGridGraph.__module__ = 'vigra.graphs'
graphs.isGridGraph = isGridGraph
_genGridGraphConvenienceFunctions()
del _genGridGraphConvenienceFunctions
def _genGraphConvenienceFunctions():
def listGraph(nodes=0,edges=0):
''' Return an empty directed graph
Parameters :
- nodes : number of nodes to reserve
- edges : number of edges to reserve
Returns :
- graph
'''
return graphs.AdjacencyListGraph(nodes,edges)
listGraph.__module__ = 'vigra.graphs'
graphs.listGraph = listGraph
def intrinsicGraphMapShape(graph,item):
""" Intrinsic shape of node/edge/arc-map for a given graph.
Node, edge and arc maps are stored in numpy arrays by default.
The intrinsic shape must not be confused with the number
of nodes/edges/arcs. The intrinsic shape is used to
allocate a numpy array which can store data for the nodes/arcs/edges
of a given graph.
Parameters:
- graph : input graph to get the shape for
- item : item must be ``'node'`` , ``'edge'`` or ``'arc'``
Returns:
- shape as tuple
"""
if item=='edge':
return graph.intrinsicEdgeMapShape()
elif item=='node':
return graph.intrinsicNodeMapShape()
elif item=='arc':
return graph.intrinsicArcMapShape()
else :
raise RuntimeError("%s is not valid,must be 'edge','node' or 'arc' "%item)
intrinsicGraphMapShape.__module__ = 'vigra.graphs'
graphs.intrinsicGraphMapShape = intrinsicGraphMapShape
def graphMap(graph,item,dtype=numpy.float32,channels=1,addChannelDim=False):
""" Return a graph map for a given graph item (``'node'`` , ``'edge'`` or ``'arc'``).
Parameters:
- graph : graph to get a graph map for
- item : ``'node'`` , ``'edge'`` or ``'arc'``
- dtype : desired dtype
- channels : number of channels (default: 1)
- addChannelDim -- add an explicit channelDim :(default: False)
only useful if channels == 1
Returns:
- graphmap as numpy.ndarray / VigraArray
"""
s = intrinsicGraphMapShape(graph,item)
intrDim = len(s)
if(channels==1) and addChannelDim==False:
a=numpy.zeros(shape=s,dtype=dtype)
if intrDim == 1:
return taggedView(a,'x')
elif intrDim == 2:
return taggedView(a,'xy')
elif intrDim == 3:
return taggedView(a,'xyz')
elif intrDim == 4:
return taggedView(a,'xyzt')
else :
raise RuntimeError("graphs with intrisic dimension >4 are not supported")
else:
s = s+(channels,)
a=numpy.zeros(shape=s,dtype=dtype)
if intrDim == 1:
return taggedView(a,'xc')
elif intrDim == 2:
return taggedView(a,'xyc')
elif intrDim == 3:
return taggedView(a,'xyzc')
elif intrDim == 4:
return taggedView(a,'xyztc')
else :
raise RuntimeError("graphs with intrisic dimension >4 are not supported")
def graphMap2(graph,item,dtype=numpy.float32,channels=1,addChannelDim=False):
""" Return a graph map for a given graph item (``'node'`` , ``'edge'`` or ``'arc'``).
Parameters:
- graph : graph to get a graph map for
- item : ``'node'`` , ``'edge'`` or ``'arc'``
- dtype : desired dtype
- channels : number of channels (default: 1)
- addChannelDim -- add an explicit channelDim :(default: False)
only useful if channels == 1
Returns:
- graphmap as numpy.ndarray / VigraArray
"""
s = intrinsicGraphMapShape(graph,item)
intrDim = len(s)
if(channels==1) and addChannelDim==False:
a=numpy.zeros(shape=s,dtype=dtype)
if intrDim == 1:
return taggedView(a,'x')
elif intrDim == 2:
return taggedView(a,'xy')
elif intrDim == 3:
return taggedView(a,'xyz')
elif intrDim == 4:
return taggedView(a,'xyzt')
else :
raise RuntimeError("graphs with intrisic dimension >4 are not supported")
else:
s = s+(channels,)
a=numpy.zeros(shape=s,dtype=dtype)
if intrDim == 1:
return taggedView(a,'xc')
elif intrDim == 2:
return taggedView(a,'xyc')
elif intrDim == 3:
return taggedView(a,'xyzc')
elif intrDim == 4:
return taggedView(a,'xyztc')
else :
raise RuntimeError("graphs with intrisic dimension >4 are not supported")
graphMap.__module__ = 'vigra.graphs'
graphs.graphMap = graphMap
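# Minimal usage sketch (assuming a grid graph created via graphs.gridGraph;
# names are illustrative):
#
#   gg      = graphs.gridGraph([100, 200])
#   nodeMap = graphMap(gg, 'node', dtype=numpy.float32)  # one value per pixel
#   edgeMap = graphMap(gg, 'edge', dtype=numpy.float32)  # intrinsic edge shape
#   rgbMap  = graphMap(gg, 'node', channels=3)           # adds an explicit channel axis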
def mergeGraph(graph):
""" get a merge graph from input graph.
A merge graph might be useful for hierarchical clustering
"""
#mg = graph.mergeGraph()
mg = graphs.__mergeGraph(graph)
#mg.__base_graph__=graph
return mg
mergeGraph.__module__ = 'vigra.graphs'
graphs.mergeGraph = mergeGraph
INVALID = graphs.Invalid()
graphs.INVALID = INVALID
class ShortestPathPathDijkstra(object):
def __init__(self,graph):
""" shortest path computer
Keyword Arguments:
- graph : input graph
"""
self.pathFinder = graphs._shortestPathDijkstra(graph)
self.graph=graph
self.source = None
self.target = None
def run(self,weights,source,target=None):
""" run shortest path search
Keyword Arguments:
- weights : edge weights encoding distance from two adjacent nodes
- source : source node
- target : target node (default: None)
If target node is None, the shortest path
to all nodes!=source is computed
"""
self.source = source
self.target = target
if target is None:
self.pathFinder.run(weights,source)
else:
self.pathFinder.run(weights,source,target)
return self
def runIgnoreLargeWeights(self,weights,source,val):
""" run shortest path search, nodes with all edge weights larger than val will be ignored
Keyword Arguments:
- weights : edge weights encoding distance from two adjacent nodes
- source : source node
- val : upper bound
"""
self.source = source
self.target = None
self.pathFinder.runIgnoreLargeWeights(weights,source,val)
return self
def path(self,target=None,pathType='coordinates'):
""" get the shortest path from source to target
Keyword Arguments:
- target : target node (default: None)
If target node is None, the target specified
by 'run' is used.
- pathType : 'coordinates' or 'ids' path (default: 'coordinates')
"""
if target is None:
assert self.target is not None
target=self.target
if pathType=='coordinates':
return self.pathFinder.nodeCoordinatePath(target)
elif pathType == 'ids':
return self.pathFinder.nodeIdPath(target)
def distance(self,target=None):
""" get distance from source to target
Keyword Arguments:
- target : target node (default: None)
If target node is None, the target specified
by 'run' is used.
"""
if target is None:
assert self.target is not None
target=self.target
return self.pathFinder.distance(target)
def distances(self,out=None):
""" return the full distance map"""
return self.pathFinder.distances(out)
def predecessors(self,out=None):
""" return the full predecessors map"""
return self.pathFinder.predecessors(out)
ShortestPathPathDijkstra.__module__ = 'vigra.graphs'
graphs.ShortestPathPathDijkstra = ShortestPathPathDijkstra
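# Minimal usage sketch (hypothetical weights; 'source' and 'target' are node
# descriptors of the graph, however they were obtained):
#
#   gg = graphs.gridGraph([100, 100])
#   weights = graphs.graphMap(gg, 'edge', dtype=numpy.float32)
#   weights[:] = 1
#   pathFinder = ShortestPathPathDijkstra(gg)
#   pathFinder.run(weights, source, target)
#   coords = pathFinder.path(pathType='coordinates')   # node coordinates along the path
#   dist   = pathFinder.distance()                     # accumulated distance to target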
_genGraphConvenienceFunctions()
del _genGraphConvenienceFunctions
def _genRegionAdjacencyGraphConvenienceFunctions():
class RegionAdjacencyGraph(graphs.AdjacencyListGraph):
def __init__(self,graph,labels,ignoreLabel=None,reserveEdges=0):
""" Region adjacency graph
Keyword Arguments :
- graph : the base graph, the region adjacency graph should be based on
- labels : label map for the graph
- ignoreLabel : ignore a label in the labels map (default: None)
- reserveEdges : reserve a certain number of Edges
Attributes:
- labels : labels passed in constructor
- ignoreLabel : ignoreLabel passed in constructor
- baseGraphLabels : labels passed in constructor
(fixme: duplicated attribute, see labels)
- baseGraph : baseGraph is the graph passed in constructor
- affiliatedEdges : for each edge in the region adjacency graph,
a vector of edges of the baseGraph is stored in affiliatedEdges
"""
super(RegionAdjacencyGraph,self).__init__(long(labels.max()+1),long(reserveEdges))
if ignoreLabel is None:
ignoreLabel=-1
self.labels = labels
self.ignoreLabel = ignoreLabel
self.baseGraphLabels = labels
self.baseGraph = graph
# set up rag
self.affiliatedEdges = graphs._regionAdjacencyGraph(graph,labels,self,self.ignoreLabel)
def mergeGraph(self):
return graphs.AdjacencyListGraphMergeGraph(self)
def accumulateEdgeFeatures(self,edgeFeatures,acc='mean',out=None):
""" accumulate edge features from base graphs edges features
Keyword Argument:
- edgeFeatures : edge features of baseGraph
- acc : used accumulator (default: 'mean')
Currently only 'mean' and 'sum' are implemented
- out : preallocated edge map
Returns :
accumulated edge features
"""
if self.edgeNum == 0 :
raise RuntimeError("self.edgeNum == 0 => cannot accumulate edge features")
graph = self.baseGraph
affiliatedEdges = self.affiliatedEdges
if acc == 'mean':
weights = self.baseGraph.edgeLengths()
else :
weights = graphs.graphMap(self.baseGraph,'edge',dtype=numpy.float32)
weights[:]=1
return graphs._ragEdgeFeatures(self,graph,affiliatedEdges,edgeFeatures,weights,acc,out)
def accumulateNodeFeatures(self,nodeFeatures,acc='mean',out=None):
""" accumulate edge features from base graphs edges features
Keyword Argument:
- nodeFeatures : node features of baseGraph
- acc : used accumulator (default: 'mean')
Currently only 'mean' and 'sum' are implemented
- out : preallocated node map (default: None)
Returns :
accumulated node features
"""
if self.edgeNum == 0 :
raise RuntimeError("self.edgeNum == 0 => cannot accumulate edge features")
graph = self.baseGraph
labels = self.baseGraphLabels
ignoreLabel = self.ignoreLabel
if acc == 'mean':
weights = self.baseGraph.nodeSize()
else :
weights = graphs.graphMap(self.baseGraph,'node',dtype=numpy.float32)
weights[:]=1
return graphs._ragNodeFeatures(self,graph,labels,nodeFeatures,weights,acc,ignoreLabel,out)
def projectNodeFeatureToBaseGraph(self,features,out=None):
""" project node features from this graph, to the base graph of this graph.
Keyword Arguments:
- features : node features for this graph
- out : preallocated node map of baseGraph (default: None)
Returns :
projected node features of base graph
"""
out=graphs._ragProjectNodeFeaturesToBaseGraph(
rag=self,
baseGraph=self.baseGraph,
baseGraphLabels=numpy.squeeze(self.baseGraphLabels),
ragNodeFeatures=features,
ignoreLabel=self.ignoreLabel,
out=out
)
#print "out",out.shape,out.dtype
return out
def projectLabelsBack(self,steps,labels=None,_current=0):
""" project labels from current graph to baseGraph and repeat this recursively
Keyword Arguments:
- steps : how often should the labels be projected back
- labels : labels for the current graph (default: None)
If labels is None, each node gets its own label
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
if steps == _current :
return labels
else :
labels = self.projectLabelsToBaseGraph(labels)
return self.baseGraph.projectLabelsBack(steps,labels,_current+1)
def projectLabelsToBaseGraph(self,labels=None):
""" project node labels from this graph, to the base graph of this graph.
Keyword Arguments:
- labels : node labels for this graph (default: None)
If labels is None, each node gets its own label
- out : preallocated node map of baseGraph (default: None)
Returns :
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
return self.projectNodeFeatureToBaseGraph(features=labels)
def projectBaseGraphGt(self, baseGraphGt, gt=None, gtQuality=None):
gt, gtQuality = graphs._ragProjectGroundTruth(rag=self, graph=self.baseGraph,
labels=self.baseGraphLabels, gt=baseGraphGt,
ragGt=gt, ragGtQuality=gtQuality)
return gt, gtQuality
RegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.RegionAdjacencyGraph = RegionAdjacencyGraph
class GridRegionAdjacencyGraph(graphs.RegionAdjacencyGraph):
def __init__(self,graph,labels,ignoreLabel=None,reserveEdges=0):
""" Grid Region adjacency graph
A region adjacency graph, where the base graph should be
a grid graph or a GridRegionAdjacencyGraph.
Keyword Arguments :
- graph : the base graph, the region adjacency graph should be based on
- labels : label map for the graph
- ignoreLabel : ignore a label in the labels map (default: None)
- reserveEdges : reserve a certain number of Edges
Attributes :
- labels : labels passed in constructor
- ignoreLabel : ignoreLabel passed in constructor
- baseGraphLabels : labels passed in constructor
(fixme: duplicated attribute, see labels)
- baseGraph : baseGraph is the graph passed in constructor
- affiliatedEdges : for each edge in the region adjacency graph,
a vector of edges of the baseGraph is stored in affiliatedEdges
- shape : shape of the grid graph which is a base graph in the
complete graph chain.
"""
if not (graphs.isGridGraph(graph) or isinstance(graph,GridRegionAdjacencyGraph)):
raise RuntimeError("graph must be a GridGraph or a GridRegionAdjacencyGraph")
super(GridRegionAdjacencyGraph,self).__init__(graph,labels,ignoreLabel,reserveEdges)
@property
def shape(self):
""" shape of the underlying grid graph"""
return self.baseGraph.shape
def projectLabelsToGridGraph(self,labels=None):
"""project labels of this graph to the underlying grid graph.
Keyword Arguments :
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
Returns :
grid graph labeling
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
if graphs.isGridGraph(self.baseGraph):
return self.projectLabelsToBaseGraph(labels)
else :
labels = self.projectLabelsToBaseGraph(labels)
return self.baseGraph.projectLabelsToGridGraph(labels)
def projectNodeFeaturesToGridGraph(self,features):
""" project features of this graph to the underlying grid graph.
Therefore project the features to an image.
Keyword Arguments :
- features : nodeFeatures of the current graph
Returns :
grid graph labeling
"""
if graphs.isGridGraph(self.baseGraph):
return self.projectNodeFeatureToBaseGraph(features)
else :
features = self.projectNodeFeatureToBaseGraph(features)
return self.baseGraph.projectNodeFeaturesToGridGraph(features)
def showNested(self,img,labels=None,returnImg=False):
""" show the complet graph chain / hierarchy given an RGB image
Keyword Arguments:
- img : RGB image
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
"""
ll=[]
if labels is not None:
ll.append( self.projectLabelsToGridGraph(labels) )
ll.append( self.projectLabelsToGridGraph() )
g=self.baseGraph
while graphs.isGridGraph(g)==False:
ll.append( g.projectLabelsToGridGraph() )
g=g.baseGraph
ll.reverse()
gridLabels = [l[...,numpy.newaxis] for l in ll ]
gridLabels = numpy.concatenate(gridLabels,axis=2)
return nestedSegShow(img,gridLabels,returnImg=returnImg)
def show(self,img,labels=None,edgeColor=(0,0,0),alpha=0.3,returnImg=False):
""" show the graph given an RGB image
Keyword Arguments:
- img : RGB image
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
- edgeColor : RGB tuple of edge color (default: (0,0,0) ).
Do not use values bigger than 1 in edgeColor.
- alpha : make edges semi transparent (default: 0.3).
0 means no transparency,1 means full transparency.
"""
pLabels = self.projectLabelsToGridGraph(labels)
return segShow(img,numpy.squeeze(pLabels),edgeColor=edgeColor,alpha=alpha,returnImg=returnImg)
def nodeSize(self):
""" get the geometric size of the nodes """
baseNodeSizes = self.baseGraph.nodeSize()
return self.accumulateNodeFeatures(baseNodeSizes,acc='sum')
def edgeLengths(self):
""" get the geometric length of the edges"""
baseNodeSizes = self.baseGraph.edgeLengths()
return self.accumulateEdgeFeatures(baseNodeSizes,acc='sum')
GridRegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.GridRegionAdjacencyGraph = GridRegionAdjacencyGraph
def regionAdjacencyGraph(graph,labels,ignoreLabel=None,reserveEdges=0):
""" Return a region adjacency graph for a labeld graph.
Parameters:
- graph -- input graph
- lables -- node-map with labels for each nodeSumWeights
- ignoreLabel -- label to ingnore (default: None)
- reserveEdges -- reverse a certain number of edges (default: 0)
Returns:
- rag -- instance of RegionAdjacencyGraph or GridRegionAdjacencyGraph
If graph is a GridGraph or a GridRegionAdjacencyGraph, a GridRegionAdjacencyGraph
will be returned.
Otherwise a RegionAdjacencyGraph will be returned
"""
if isinstance(graph , graphs.GridRegionAdjacencyGraph) or graphs.isGridGraph(graph):
return GridRegionAdjacencyGraph(graph=graph,labels=labels,ignoreLabel=ignoreLabel,reserveEdges=reserveEdges)
else:
return RegionAdjacencyGraph(graph=graph,labels=labels,ignoreLabel=ignoreLabel,reserveEdges=reserveEdges)
regionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.regionAdjacencyGraph = regionAdjacencyGraph
def gridRegionAdjacencyGraph(labels,ignoreLabel=None,reserveEdges=0):
""" get a region adjacency graph and a grid graph from a labeling.
This function will call 'graphs.gridGraph' and 'graphs.regionAdjacencyGraph'
Keyword Arguments:
- labels : label image
- ignoreLabel : label to ignore (default: None)
- reserveEdges : reserve a number of edges (default: 0)
"""
_gridGraph=graphs.gridGraph(numpy.squeeze(labels).shape)
rag=graphs.regionAdjacencyGraph(_gridGraph,labels,ignoreLabel,reserveEdges)
return _gridGraph,rag
gridRegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.gridRegionAdjacencyGraph = gridRegionAdjacencyGraph
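# Minimal usage sketch (assuming 'labels' is a 2D uint32 over-segmentation, e.g.
# from a watershed, and 'gradient'/'rgb' are matching grid-graph maps; all names
# are illustrative):
#
#   gg, rag = gridRegionAdjacencyGraph(labels)
#   ragEdgeWeights = rag.accumulateEdgeFeatures(gradient)   # mean gradient per RAG edge
#   ragMeanColor   = rag.accumulateNodeFeatures(rgb)        # mean color per region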
_genRegionAdjacencyGraphConvenienceFunctions()
del _genRegionAdjacencyGraphConvenienceFunctions
def _genGraphSegmentationFunctions():
def getNodeSizes(graph):
""" get size of nodes:
This function will try to call 'graph.nodeSize()'.
If this fails, a node map filled with 1.0 will be
returned
Keyword Arguments:
- graph : input graph
"""
try:
return graph.nodeSize()
except:
size = graphs.graphMap(graph,'node',dtype=numpy.float32)
size[:]=1
return size
getNodeSizes.__module__ = 'vigra.graphs'
graphs.getNodeSizes = getNodeSizes
def getEdgeLengths(graph):
""" get lengths/sizes of edges:
This function will try to call 'graph.edgeLengths()'.
If this fails, an edge map filled with 1.0 will be
returned
Keyword Arguments:
- graph : input graph
"""
try:
return graph.edgeLengths()
except:
size = graphs.graphMap(graph,'edge',dtype=numpy.float32)
size[:]=1
return size
getEdgeLengths.__module__ = 'vigra.graphs'
graphs.getEdgeLengths = getEdgeLengths
def felzenszwalbSegmentation(graph,edgeWeights,nodeSizes=None,k=1.0,nodeNumStop=None,out=None):
""" felzenszwalbs segmentation method
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weights / indicators
- nodeSizes : size of each node (default: None)
If nodeSizes is None, 'getNodeSizes' will be called
- k : free parameter of Felzenszwalb's algorithm (default : 1.0)
(larger k favors larger segments)
- nodeNumStop : stop the agglomeration at a given nodeNum (default : None)
If nodeNumStop is None, the resulting number of nodes depends on k.
"""
if nodeNumStop is None :
nodeNumStop=-1
if nodeSizes is None :
nodeSizes=graphs.getNodeSizes(graph)
return graphs._felzenszwalbSegmentation(graph=graph,edgeWeights=edgeWeights,nodeSizes=nodeSizes,
k=k,nodeNumStop=nodeNumStop,out=out)
felzenszwalbSegmentation.__module__ = 'vigra.graphs'
graphs.felzenszwalbSegmentation = felzenszwalbSegmentation
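# Minimal usage sketch (hypothetical grid graph and edge indicator map):
#
#   gg = graphs.gridGraph([100, 200])
#   edgeInd = graphs.graphMap(gg, 'edge', dtype=numpy.float32)  # e.g. gradient magnitude
#   labels  = felzenszwalbSegmentation(gg, edgeInd, k=10.0)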
def edgeWeightedWatersheds(graph,edgeWeights,seeds,backgroundLabel=None,backgroundBias=None,out=None):
""" edge weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- edgeWeights : evaluation weights
- seeds : node map with seeds .
For at least one node, seeds must be nonzero
- backgroundLabel : a specific backgroundLabel (default : None)
- backgroundBias : backgroundBias (default : None)
"""
if backgroundLabel is None and backgroundBias is None:
return graphs._edgeWeightedWatershedsSegmentation(graph=graph,edgeWeights=edgeWeights,seeds=seeds,
out=out)
else :
if backgroundLabel is None or backgroundBias is None:
raise RuntimeError("if backgoundLabel or backgroundBias is not None, the other must also be not None")
return graphs._carvingSegmentation(graph=graph,edgeWeights=edgeWeights,seeds=seeds,
backgroundLabel=backgroundLabel,backgroundBias=backgroundBias,out=out)
edgeWeightedWatersheds.__module__ = 'vigra.graphs'
graphs.edgeWeightedWatersheds = edgeWeightedWatersheds
def nodeWeightedWatershedsSeeds(graph,nodeWeights,out=None):
""" generate watersheds seeds
Keyword Arguments :
- graph : input graph
- nodeWeights : node height map
- out : seed map
"""
return graphs._nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights,out=out)
nodeWeightedWatershedsSeeds.__module__ = 'vigra.graphs'
graphs.nodeWeightedWatershedsSeeds = nodeWeightedWatershedsSeeds
def nodeWeightedWatersheds(graph,nodeWeights,seeds=None,method='regionGrowing',out=None):
""" node weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- nodeWeights : node height map / evaluation weights
- seeds : node map with seeds (default: None)
If seeds are None, 'nodeWeightedWatershedsSeeds' will be called
"""
if seeds is None:
seeds = graphs.nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights)
if method!='regionGrowing':
raise RuntimeError("currently only 'regionGrowing' is supported")
return graphs._nodeWeightedWatershedsSegmentation(graph=graph,nodeWeights=nodeWeights,seeds=seeds,method=method,out=out)
nodeWeightedWatersheds.__module__ = 'vigra.graphs'
graphs.nodeWeightedWatersheds = nodeWeightedWatersheds
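# Minimal usage sketch (hypothetical grid graph and per-pixel height map):
#
#   gg = graphs.gridGraph([100, 200])
#   heightMap = graphs.graphMap(gg, 'node', dtype=numpy.float32)  # e.g. gradient image
#   labels = nodeWeightedWatersheds(gg, heightMap)   # seeds generated automatically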
def agglomerativeClustering(graph,edgeWeights=None,edgeLengths=None,nodeFeatures=None,nodeSizes=None,
nodeNumStop=None,beta=0.5,metric='l1',wardness=1.0,out=None):
""" agglomerative hierarchicalClustering
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weights / indicators (default : None)
- edgeLengths : length / weight of each edge (default : None)
Since we do weighted mean agglomeration, a length/weight
is needed for each edge to merge 2 edges w.r.t. weighted mean.
If no edgeLengths is given, 'getEdgeLengths' is called.
- nodeFeatures : a feature vector for each node (default: None)
A feature vector as RGB values,or a histogram for each node.
Within the agglomeration, an additional edge weight will be
computed from the "difference" between the features of two adjacent nodes.
The metric specified in the keyword 'metric' is used to compute this
difference
- nodeSizes : size / weight of each node (default : None)
Since we do weighted mean agglomeration, a size / weight
is needed for each node to merge 2 edges w.r.t. weighted mean.
If no nodeSizes is given, 'getNodeSizes' is called.
- nodeNumStop : stop the agglomeration at a given nodeNum (default : graph.nodeNum/2)
- beta : weight between edgeWeights and nodeFeatures based edgeWeights (default:0.5) :
0.0 means only edgeWeights (from keyword edge weights) and 1.0 means only edgeWeights
from nodeFeatures differences
- metric : metric used to compute node feature difference (default : 'l1')
- wardness : 0 means do not apply Ward's criterion, 1.0 means fully apply Ward's criterion (default : 1.0)
- out : preallocated nodeMap for the resulting labeling (default : None)
Returns:
A node label map encoding the segmentation
"""
assert edgeWeights is not None or nodeFeatures is not None
if nodeNumStop is None:
nodeNumStop = max(graph.nodeNum/2,min(graph.nodeNum,2))
if edgeLengths is None :
edgeLengths = graphs.getEdgeLengths(graph)
if nodeSizes is None:
nodeSizes = graphs.getNodeSizes(graph)
if edgeWeights is None :
edgeWeights = graphs.graphMap(graph,'edge')
edgeWeights[:]=0
if nodeFeatures is None :
nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
nodeFeatures[:]=0
#import sys
#print "graph refcout", sys.getrefcount(graph)
mg = graphs.mergeGraph(graph)
#print "graph refcout", sys.getrefcount(graph)
#mg = []
#del mg
#import gc
#gc.collect()
#print "graph refcout", sys.getrefcount(graph)
#sys.exit(0)
clusterOp = graphs.minEdgeWeightNodeDist(mg,edgeWeights=edgeWeights,edgeLengths=edgeLengths,
nodeFeatures=nodeFeatures,nodeSizes=nodeSizes,
beta=float(beta),metric=metric,wardness=wardness)
#clusterOp.mg=mg
#clusterOp.nodeSizes = nodeSizes
#clusterOp.edgeLengths = edgeLengths
#clusterOp.nodeFeatures = nodeFeatures
#clusterOp.edgeWeights = edgeWeights
hc = graphs.hierarchicalClustering(clusterOp,nodeNumStopCond=nodeNumStop)
hc.cluster()
labels = hc.resultLabels(out=out)
del hc
del clusterOp
del mg
return labels
agglomerativeClustering.__module__ = 'vigra.graphs'
graphs.agglomerativeClustering = agglomerativeClustering
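# Minimal usage sketch (hypothetical region adjacency graph 'rag' with edge
# weights and per-region feature vectors accumulated beforehand, e.g. as in the
# gridRegionAdjacencyGraph sketch above):
#
#   labels = agglomerativeClustering(rag, edgeWeights=ragEdgeWeights,
#                                    nodeFeatures=ragMeanColor,
#                                    nodeNumStop=50, beta=0.5, wardness=1.0)
#   segmentation = rag.projectLabelsToGridGraph(labels)   # back to pixel level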
def minEdgeWeightNodeDist(mergeGraph,edgeWeights=None,edgeLengths=None,nodeFeatures=None,nodeSizes=None,outWeight=None,
beta=0.5,metric='squaredNorm',wardness=1.0):
graph=mergeGraph.graph()
assert edgeWeights is not None or nodeFeatures is not None
if edgeLengths is None :
edgeLengths = graphs.getEdgeLengths(graph)
if nodeSizes is None:
nodeSizes = graphs.getNodeSizes(graph)
if edgeWeights is None :
edgeWeights = graphs.graphMap(graph,'edge',addChannelDim=True)
edgeWeights[:]=0
if nodeFeatures is None :
nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
nodeFeatures[:]=0
if outWeight is None:
outWeight=graphs.graphMap(graph,item='edge',dtype=numpy.float32)
if metric=='squaredNorm':
nd=graphs.MetricType.squaredNorm
elif metric=='norm':
nd=graphs.MetricType.norm
elif metric=='chiSquared':
nd=graphs.MetricType.chiSquared
elif metric in ('l1','manhattan'):
nd=graphs.MetricType.manhattan
elif isinstance(metric,graphs.MetricType):
nd=metric
else :
raise RuntimeError("'%s' is not a supported distance type"%str(metric))
# call unsafe c++ function and wrap it safely
op = graphs.__minEdgeWeightNodeDistOperator(mergeGraph,edgeWeights,edgeLengths,nodeFeatures,nodeSizes,outWeight,
float(beta),nd,float(wardness))
op.__base_object__=mergeGraph
op.__outWeightArray__=outWeight
op.edgeLengths=edgeLengths
op.nodeSizes=nodeSizes
op.edgeWeights=edgeWeights
op.nodeFeatures=nodeFeatures
return op
minEdgeWeightNodeDist.__module__ = 'vigra.graphs'
graphs.minEdgeWeightNodeDist = minEdgeWeightNodeDist
def pythonClusterOperator(mergeGraph,opertator,useMergeNodeCallback=True,useMergeEdgesCallback=True,useEraseEdgeCallback=True):
# call unsafe function and wrap it safely
op = graphs.__pythonClusterOperator(mergeGraph,opertator,useMergeNodeCallback,useMergeEdgesCallback,useEraseEdgeCallback)
#op.__dict__['__base_object__']=mergeGraph
#op.__base_object__=mergeGraph
return op
pythonClusterOperator.__module__ = 'vigra.graphs'
graphs.pythonClusterOperator = pythonClusterOperator
def hierarchicalClustering(clusterOperator,nodeNumStopCond):
# call unsafe c++ function and wrap it safely
hc = graphs.__hierarchicalClustering(clusterOperator,long(nodeNumStopCond))
#hc.__dict__['__base_object__']=clusterOperator
hc.__base_object__ = clusterOperator
return hc
hierarchicalClustering.__module__ = 'vigra.graphs'
graphs.hierarchicalClustering = hierarchicalClustering
_genGraphSegmentationFunctions()
del _genGraphSegmentationFunctions
def _genGraphSmoothingFunctions():
def recursiveGraphSmoothing( graph,nodeFeatures,edgeIndicator,gamma,
edgeThreshold,scale=1.0,iterations=1,out=None):
""" recursive graph smoothing to smooth node features.
Each node feature is smoothed with the features of neighbor nodes.
The strength of the smoothing is computed from:
"edgeIndicator > edgeThreshold ? 0 : exp(-1.0*gamma*edgeIndicator)*scale"
Therefore this filter is edge preserving.
Keyword Arguments :
- graph : input graph
- nodeFeatures : node features which should be smoothed
- edgeIndicator : edge indicator
- gamma : scale edgeIndicator by gamma before taking the negative exponent
- edgeThreshold : edges with an indicator above this threshold block the smoothing
- scale : how much should a node be mixed with its neighbours per iteration
- iterations : how often recursiveGraphSmoothing should be applied recursively
Returns :
smoothed nodeFeatures
"""
return graphs._recursiveGraphSmoothing(graph=graph,nodeFeatures=nodeFeatures,edgeIndicator=edgeIndicator,
gamma=gamma,edgeThreshold=edgeThreshold,scale=scale,iterations=iterations,out=out)
recursiveGraphSmoothing.__module__ = 'vigra.graphs'
graphs.recursiveGraphSmoothing = recursiveGraphSmoothing
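# Minimal usage sketch (hypothetical RAG with per-region colors and an edge
# indicator; all names are illustrative):
#
#   smoothed = recursiveGraphSmoothing(rag, nodeFeatures=ragMeanColor,
#                                      edgeIndicator=ragEdgeWeights,
#                                      gamma=0.1, edgeThreshold=0.5,
#                                      scale=1.0, iterations=10)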
_genGraphSmoothingFunctions()
del _genGraphSmoothingFunctions
def _genGraphMiscFunctions():
def nodeFeaturesToEdgeWeights(graph,nodeFeatures,metric='l1',out=None):
""" compute an edge indicator from node features .
Keyword Arguments :
- graph : input graph
- nodeFeatures : node map with feature vector for each node
- metric : metric / distance used to convert 2 node features to
an edge weight
Returns :
edge indicator
"""
return graphs._nodeFeatureDistToEdgeWeight(graph=graph,nodeFeatures=nodeFeatures,metric=metric,out=out)
nodeFeaturesToEdgeWeights.__module__ = 'vigra.graphs'
graphs.nodeFeaturesToEdgeWeights = nodeFeaturesToEdgeWeights
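# Minimal usage sketch (hypothetical grid graph and per-pixel feature map):
#
#   gg = graphs.gridGraph([100, 200])
#   nodeFeat = graphs.graphMap(gg, 'node', channels=3)   # e.g. RGB per pixel
#   edgeInd  = nodeFeaturesToEdgeWeights(gg, nodeFeat, metric='l1')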
_genGraphMiscFunctions()
del _genGraphMiscFunctions
def loadBSDGt(filename):
import scipy.io as sio
matContents = sio.loadmat(filename)
ngt = len(matContents['groundTruth'][0])
print "ngts",ngt
gts = []
for gti in range(ngt):
gt = matContents['groundTruth'][0][gti][0]['Segmentation'][0]
gt = numpy.swapaxes(gt,0,1)
gt = gt.astype(numpy.uint32)
print gt.min(),gt.max()
gts.append(gt[:,:,None])
gtArray = numpy.concatenate(gts,axis=2)
print gtArray.shape
return gtArray
| mit | 9,014,505,248,478,581,000 | 35.941246 | 131 | 0.604701 | false |
mancoast/CPythonPyc_test | fail/312_test_exceptions.py | 1 | 24203 | # Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle
import weakref
from test.support import (TESTFN, unlink, run_unittest, captured_output,
gc_collect)
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def raise_catch(self, exc, excname):
try:
raise exc("spam")
except exc as err:
buf1 = str(err)
try:
raise exc("spam")
except exc as err:
buf2 = str(err)
self.assertEquals(buf1, buf2)
self.assertEquals(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
import marshal
marshal.loads('')
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(IOError, "IOError")
self.assertRaises(IOError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec('/\n')
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n",
'<string>', 'exec')
except TabError: pass
else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 17<<16)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1/0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1/0
except Exception as e: pass
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError as e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''while 1:
try:
pass
finally:
continue'''
if not sys.platform.startswith('java'):
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException(Exception):
def __init__(self_):
raise RuntimeError("can't instantiate BadException")
class InvalidException:
pass
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEquals(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEquals(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEquals(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
def test_capi3():
import _testcapi
self.assertRaises(SystemError, _testcapi.raise_exception,
InvalidException, 1)
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
test_capi3()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.assertEqual(str(WindowsError(1001)),
"1001")
self.assertEqual(str(WindowsError(1001, "message")),
"[Error 1001] message")
self.assertEqual(WindowsError(1001, "message").errno, 22)
self.assertEqual(WindowsError(1001, "message").winerror, 1001)
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'args' : ()}),
(BaseException, (1, ), {'args' : (1,)}),
(BaseException, ('foo',),
{'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'args' : ('foo',), 'code' : 'foo'}),
(IOError, ('foo',),
{'args' : ('foo',), 'filename' : None,
'errno' : None, 'strerror' : None}),
(IOError, ('foo', 'bar'),
{'args' : ('foo', 'bar'), 'filename' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz'),
{'args' : ('foo', 'bar'), 'filename' : 'baz',
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz', 'quux'),
{'args' : ('foo', 'bar', 'baz', 'quux')}),
(EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
(SyntaxError, (), {'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'args' : (),}),
(UnicodeEncodeError, ('ascii', 'a', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', 'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : 'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
{'args' : ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
{'args' : ('\u3042', 0, 1, 'ouch'),
'object' : '\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
]
try:
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : 1,
'errno' : 22, 'filename' : 'filenameStr'})
)
except NameError:
pass
for exc, args, expected in exceptionList:
try:
e = exc(*args)
except:
print("\nexc=%r, args=%r" % (exc, args), file=sys.stderr)
raise
else:
# Verify module name
self.assertEquals(type(e).__module__, 'builtins')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
value = getattr(e, checkArgName)
self.assertEquals(repr(value),
repr(expected[checkArgName]),
'%r.%s == %r, expected %r' % (
e, checkArgName,
value, expected[checkArgName]))
# test for pickling support
for p in [pickle]:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
s = p.dumps(e, protocol)
new = p.loads(s)
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEquals(got, want,
'pickled "%r", attribute "%s' %
(e, checkArgName))
def testWithTraceback(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = BaseException().with_traceback(tb)
self.assertTrue(isinstance(e, BaseException))
self.assertEqual(e.__traceback__, tb)
e = IndexError(5).with_traceback(tb)
self.assertTrue(isinstance(e, IndexError))
self.assertEqual(e.__traceback__, tb)
class MyException(Exception):
pass
e = MyException().with_traceback(tb)
self.assertTrue(isinstance(e, MyException))
self.assertEqual(e.__traceback__, tb)
def testInvalidTraceback(self):
try:
Exception().__traceback__ = 5
except TypeError as e:
self.assertTrue("__traceback__ must be a traceback" in str(e))
else:
self.fail("No exception raised")
def testInvalidAttrs(self):
self.assertRaises(TypeError, setattr, Exception(), '__cause__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__cause__')
self.assertRaises(TypeError, setattr, Exception(), '__context__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__context__')
def testNoneClearsTracebackAttr(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = Exception()
e.__traceback__ = tb
e.__traceback__ = None
self.assertEqual(e.__traceback__, None)
def testChainingAttrs(self):
e = Exception()
self.assertEqual(e.__context__, None)
self.assertEqual(e.__cause__, None)
e = TypeError()
self.assertEqual(e.__context__, None)
self.assertEqual(e.__cause__, None)
class MyException(EnvironmentError):
pass
e = MyException()
self.assertEqual(e.__context__, None)
self.assertEqual(e.__cause__, None)
def testKeywordArgs(self):
        # test that builtin exceptions don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEquals(x.fancy_arg, 42)
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RuntimeError, f)
def g():
try:
return g()
except ValueError:
return -1
self.assertRaises(RuntimeError, g)
def test_str(self):
# Make sure both instances and classes have a str representation.
self.assertTrue(str(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(str(Exception('a', 'b')))
def testExceptionCleanupNames(self):
# Make sure the local variable bound to the exception instance by
# an "except" statement is only visible inside the except block.
try:
raise Exception()
except Exception as e:
self.assertTrue(e)
del e
self.assertFalse('e' in locals())
def testExceptionCleanupState(self):
# Make sure exception state is cleaned up as soon as the except
# block is left. See #2507
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def inner_raising_func():
# Create some references in exception value and traceback
local_ref = obj
raise MyException(obj)
# Qualified "except" with "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException as e:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Qualified "except" without "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Bare "except"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# "except" with premature block leave
obj = MyObj()
wr = weakref.ref(obj)
for i in [0]:
try:
inner_raising_func()
except:
break
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# "except" block raising another exception
obj = MyObj()
wr = weakref.ref(obj)
try:
try:
inner_raising_func()
except:
raise KeyError
except KeyError as e:
# We want to test that the except block above got rid of
# the exception raised in inner_raising_func(), but it
# also ends up in the __context__ of the KeyError, so we
# must clear the latter manually for our test to succeed.
e.__context__ = None
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Some complicated construct
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
try:
try:
raise
finally:
raise
except MyException:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Inside an exception-silencing "with" block
class Context:
def __enter__(self):
return self
def __exit__ (self, exc_type, exc_value, exc_tb):
return True
obj = MyObj()
wr = weakref.ref(obj)
with Context():
inner_raising_func()
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
def test_generator_leaking(self):
# Test that generator exception state doesn't leak into the calling
# frame
def yield_raise():
try:
raise KeyError("caught")
except KeyError:
yield sys.exc_info()[0]
yield sys.exc_info()[0]
yield sys.exc_info()[0]
g = yield_raise()
self.assertEquals(next(g), KeyError)
self.assertEquals(sys.exc_info()[0], None)
self.assertEquals(next(g), KeyError)
self.assertEquals(sys.exc_info()[0], None)
self.assertEquals(next(g), None)
# Same test, but inside an exception handler
try:
raise TypeError("foo")
except TypeError:
g = yield_raise()
self.assertEquals(next(g), KeyError)
self.assertEquals(sys.exc_info()[0], TypeError)
self.assertEquals(next(g), KeyError)
self.assertEquals(sys.exc_info()[0], TypeError)
self.assertEquals(next(g), TypeError)
del g
self.assertEquals(sys.exc_info()[0], TypeError)
def test_generator_finalizing_and_exc_info(self):
# See #7173
def simple_gen():
yield 1
def run_gen():
gen = simple_gen()
try:
raise RuntimeError
except RuntimeError:
return next(gen)
run_gen()
gc_collect()
self.assertEqual(sys.exc_info(), (None, None, None))
def test_3114(self):
# Bug #3114: in its destructor, MyObject retrieves a pointer to
# obsolete and/or deallocated objects.
class MyObject:
def __del__(self):
nonlocal e
e = sys.exc_info()
e = ()
try:
raise Exception(MyObject())
except:
pass
self.assertEquals(e, (None, None, None))
def testUnicodeChangeAttributes(self):
# See issue 7309. This was a crasher.
u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't encode character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")
u = UnicodeDecodeError('baz', b'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")
u = UnicodeTranslateError('xxxx', 1, 5, 'foo')
self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "can't translate character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception, metaclass=Meta):
pass
with captured_output("stderr") as stderr:
try:
raise KeyError()
except MyException as e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
def g():
try:
return g()
except RuntimeError:
return sys.exc_info()
e, v, tb = g()
self.assertTrue(isinstance(v, RuntimeError), type(v))
self.assertTrue("maximum recursion depth exceeded" in str(v), str(v))
def test_MemoryError(self):
# PyErr_NoMemory always raises the same exception instance.
# Check that the traceback is not doubled.
import traceback
from _testcapi import raise_memoryerror
def raiseMemError():
try:
raise_memoryerror()
except MemoryError as e:
tb = e.__traceback__
else:
self.fail("Should have raises a MemoryError")
return traceback.format_tb(tb)
tb1 = raiseMemError()
tb2 = raiseMemError()
self.assertEqual(tb1, tb2)
def test_main():
run_unittest(ExceptionTests)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 569,357,798,893,516,600 | 34.803254 | 114 | 0.511011 | false |
matmutant/sl4a | python/src/Demo/tkinter/matt/rubber-line-demo-1.py | 47 | 1919 | from Tkinter import *
class Test(Frame):
def printit(self):
print "hi"
def createWidgets(self):
self.QUIT = Button(self, text='QUIT',
background='red',
foreground='white',
height=3,
command=self.quit)
self.QUIT.pack(side=BOTTOM, fill=BOTH)
self.canvasObject = Canvas(self, width="5i", height="5i")
self.canvasObject.pack(side=LEFT)
def mouseDown(self, event):
# canvas x and y take the screen coords from the event and translate
# them into the coordinate system of the canvas object
self.startx = self.canvasObject.canvasx(event.x)
self.starty = self.canvasObject.canvasy(event.y)
def mouseMotion(self, event):
# canvas x and y take the screen coords from the event and translate
# them into the coordinate system of the canvas object
x = self.canvasObject.canvasx(event.x)
y = self.canvasObject.canvasy(event.y)
if (self.startx != event.x) and (self.starty != event.y) :
self.canvasObject.delete(self.rubberbandLine)
self.rubberbandLine = self.canvasObject.create_line(
self.startx, self.starty, x, y)
# this flushes the output, making sure that
# the rectangle makes it to the screen
# before the next event is handled
self.update_idletasks()
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
# this is a "tagOrId" for the rectangle we draw on the canvas
self.rubberbandLine = None
Widget.bind(self.canvasObject, "<Button-1>", self.mouseDown)
Widget.bind(self.canvasObject, "<Button1-Motion>", self.mouseMotion)
test = Test()
test.mainloop()
| apache-2.0 | -5,728,300,051,880,559,000 | 36.627451 | 76 | 0.592496 | false |
royalharsh/grpc | src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py | 13 | 8661 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of grpc_reflection.v1alpha.reflection."""
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from grpc_reflection.v1alpha import reflection
from grpc_reflection.v1alpha import reflection_pb2
from grpc_reflection.v1alpha import reflection_pb2_grpc
from google.protobuf import descriptor_pool
from google.protobuf import descriptor_pb2
from src.proto.grpc.testing import empty_pb2
#empty2_pb2 is imported for import-consequent side-effects.
from src.proto.grpc.testing.proto2 import empty2_pb2 # pylint: disable=unused-import
from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
from tests.unit.framework.common import test_constants
_EMPTY_PROTO_FILE_NAME = 'src/proto/grpc/testing/empty.proto'
_EMPTY_PROTO_SYMBOL_NAME = 'grpc.testing.Empty'
_SERVICE_NAMES = ('Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman',
'Galilei')
_EMPTY_EXTENSIONS_SYMBOL_NAME = 'grpc.testing.proto2.EmptyWithExtensions'
_EMPTY_EXTENSIONS_NUMBERS = (124, 125, 126, 127, 128,)
def _file_descriptor_to_proto(descriptor):
proto = descriptor_pb2.FileDescriptorProto()
descriptor.CopyToProto(proto)
return proto.SerializeToString()
class ReflectionServicerTest(unittest.TestCase):
def setUp(self):
servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES)
server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(server_pool)
port = self._server.add_insecure_port('[::]:0')
reflection_pb2_grpc.add_ServerReflectionServicer_to_server(servicer,
self._server)
self._server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
self._stub = reflection_pb2_grpc.ServerReflectionStub(channel)
def testFileByName(self):
requests = (reflection_pb2.ServerReflectionRequest(
file_by_filename=_EMPTY_PROTO_FILE_NAME),
reflection_pb2.ServerReflectionRequest(
file_by_filename='i-donut-exist'),)
responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
expected_responses = (
reflection_pb2.ServerReflectionResponse(
valid_host='',
file_descriptor_response=reflection_pb2.FileDescriptorResponse(
file_descriptor_proto=(
_file_descriptor_to_proto(empty_pb2.DESCRIPTOR),))),
reflection_pb2.ServerReflectionResponse(
valid_host='',
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.NOT_FOUND.value[0],
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
)),)
self.assertSequenceEqual(expected_responses, responses)
def testFileBySymbol(self):
requests = (reflection_pb2.ServerReflectionRequest(
file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME
), reflection_pb2.ServerReflectionRequest(
file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo'),)
responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
expected_responses = (
reflection_pb2.ServerReflectionResponse(
valid_host='',
file_descriptor_response=reflection_pb2.FileDescriptorResponse(
file_descriptor_proto=(
_file_descriptor_to_proto(empty_pb2.DESCRIPTOR),))),
reflection_pb2.ServerReflectionResponse(
valid_host='',
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.NOT_FOUND.value[0],
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
)),)
self.assertSequenceEqual(expected_responses, responses)
@unittest.skip(
'TODO(mmx): enable when (pure) python protobuf issue is fixed'
'(see https://github.com/google/protobuf/issues/2882)')
def testFileContainingExtension(self):
requests = (reflection_pb2.ServerReflectionRequest(
file_containing_extension=reflection_pb2.ExtensionRequest(
containing_type=_EMPTY_EXTENSIONS_SYMBOL_NAME,
extension_number=125,),
), reflection_pb2.ServerReflectionRequest(
file_containing_extension=reflection_pb2.ExtensionRequest(
containing_type='i.donut.exist.co.uk.org.net.me.name.foo',
extension_number=55,),),)
responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
expected_responses = (
reflection_pb2.ServerReflectionResponse(
valid_host='',
file_descriptor_response=reflection_pb2.FileDescriptorResponse(
file_descriptor_proto=(_file_descriptor_to_proto(
empty2_extensions_pb2.DESCRIPTOR),))),
reflection_pb2.ServerReflectionResponse(
valid_host='',
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.NOT_FOUND.value[0],
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
)),)
self.assertSequenceEqual(expected_responses, responses)
def testExtensionNumbersOfType(self):
requests = (reflection_pb2.ServerReflectionRequest(
all_extension_numbers_of_type=_EMPTY_EXTENSIONS_SYMBOL_NAME
), reflection_pb2.ServerReflectionRequest(
all_extension_numbers_of_type='i.donut.exist.co.uk.net.name.foo'),)
responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
expected_responses = (
reflection_pb2.ServerReflectionResponse(
valid_host='',
all_extension_numbers_response=reflection_pb2.
ExtensionNumberResponse(
base_type_name=_EMPTY_EXTENSIONS_SYMBOL_NAME,
extension_number=_EMPTY_EXTENSIONS_NUMBERS)),
reflection_pb2.ServerReflectionResponse(
valid_host='',
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.NOT_FOUND.value[0],
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
)),)
self.assertSequenceEqual(expected_responses, responses)
def testListServices(self):
requests = (reflection_pb2.ServerReflectionRequest(
list_services='',),)
responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
expected_responses = (reflection_pb2.ServerReflectionResponse(
valid_host='',
list_services_response=reflection_pb2.ListServiceResponse(
service=tuple(
reflection_pb2.ServiceResponse(name=name)
for name in _SERVICE_NAMES))),)
self.assertSequenceEqual(expected_responses, responses)
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | 2,338,188,847,022,997,500 | 47.385475 | 85 | 0.665281 | false |
jjmachan/activityPointsApp | activitypoints/lib/python3.5/site-packages/django/template/defaultfilters.py | 18 | 28032 | """Default variable filters."""
from __future__ import unicode_literals
import random as random_module
import re
import warnings
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from operator import itemgetter
from pprint import pformat
from django.utils import formats, six
from django.utils.dateformat import format, time_format
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs, linebreaks,
strip_tags, urlize as _urlize,
)
from django.utils.http import urlquote
from django.utils.safestring import SafeData, mark_for_escaping, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
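# Minimal usage sketch (illustrative only, not part of Django): a filter
# decorated with @stringfilter can assume its first argument has already been
# coerced to text, and SafeData input stays marked safe when the filter is
# registered with is_safe=True. The filter name below is hypothetical.
#
#     @register.filter(is_safe=True)
#     @stringfilter
#     def first_char(value):
#         return value[:1]   # value is text here, even for {{ 42|first_char }}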
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = repr(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1])
units += -tupl[2] if m else tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
for documentation of Python string formatting.
"""
if isinstance(value, tuple):
value = six.text_type(value)
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncates HTML after a certain number of chars.
Argument: Number of chars to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Converts URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
Marks the value as a string that should be auto-escaped.
"""
with warnings.catch_warnings():
# Ignore mark_for_escaping deprecation -- this will use
# conditional_escape() in Django 2.0.
warnings.simplefilter('ignore', category=RemovedInDjango20Warning)
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
def _property_resolver(arg):
"""
When arg is convertible to float, behave like operator.itemgetter(arg)
Otherwise, behave like Variable(arg).resolve
>>> _property_resolver(1)('abc')
'b'
>>> _property_resolver('1')('abc')
Traceback (most recent call last):
...
TypeError: string indices must be integers
>>> class Foo:
... a = 42
... b = 3.14
... c = 'Hey!'
>>> _property_resolver('b')(Foo())
3.14
"""
try:
float(arg)
except ValueError:
return Variable(arg).resolve
else:
return itemgetter(arg)
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=_property_resolver(arg))
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=_property_resolver(arg), reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://www.diveintopython3.net/native-datatypes.html#slicinglists
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
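# Behaviour sketch (illustrative): the argument follows Python slice syntax,
# so {{ "abcdefg"|slice:":3" }} gives "abc" and {{ some_list|slice:"1:5:2" }}
# gives some_list[1:5:2]; a malformed argument returns the value unchanged.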
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
def escaper(x):
return x
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if not isinstance(next_item, six.string_types):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = '\t' * tabs
output = []
for item, children in walk_items(item_list):
sublist = ''
if children:
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
indent, list_formatter(children, tabs + 1), indent, indent)
output.append('%s<li>%s%s</li>' % (
indent, escaper(force_text(item)), sublist))
return '\n'.join(output)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
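# Worked examples (illustrative): {{ 123456789|get_digit:"2" }} -> 8 (the
# second-right-most digit); {{ 123456789|get_digit:"0" }} -> 123456789, since
# an argument below 1 returns the value unchanged, as does non-integer input.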
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if value in (None, ''):
return ''
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
try:
return formats.time_format(value, arg)
except (AttributeError, TypeError):
try:
return time_format(value, arg)
except (AttributeError, TypeError):
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is divisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
                                        if no mapping for None is given.)
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes_):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc.).
"""
try:
bytes_ = float(bytes_)
except (TypeError, ValueError, UnicodeDecodeError):
value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
def filesize_number_format(value):
return formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
negative = bytes_ < 0
if negative:
bytes_ = -bytes_ # Allow formatting of negative numbers.
if bytes_ < KB:
value = ungettext("%(size)d byte", "%(size)d bytes", bytes_) % {'size': bytes_}
elif bytes_ < MB:
value = ugettext("%s KB") % filesize_number_format(bytes_ / KB)
elif bytes_ < GB:
value = ugettext("%s MB") % filesize_number_format(bytes_ / MB)
elif bytes_ < TB:
value = ugettext("%s GB") % filesize_number_format(bytes_ / GB)
elif bytes_ < PB:
value = ugettext("%s TB") % filesize_number_format(bytes_ / TB)
else:
value = ugettext("%s PB") % filesize_number_format(bytes_ / PB)
if negative:
value = "-%s" % value
return avoid_wrapping(value)
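# Rough expectations (illustrative, assuming the default "en" number
# formatting; the separating space is actually a non-breaking space inserted
# by avoid_wrapping()):
#   {{ 1023|filesizeformat }}     ->  "1023 bytes"
#   {{ 1048576|filesizeformat }}  ->  "1.0 MB"
#   {{ -1048576|filesizeformat }} ->  "-1.0 MB"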
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if float(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, force_text(e, errors="replace"))
| mit | 7,703,748,176,919,867,000 | 27.869207 | 102 | 0.612443 | false |
mrry/tensorflow | tensorflow/python/kernel_tests/sparse_add_op_test.py | 20 | 7877 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
import tensorflow as tf
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), len(x_values)
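# Quick illustration (not exercised by the tests below): for
#   x = np.array([[0.9, 0.1], [0.2, 0.7]])
# _sparsify(x) zeroes the entries below the 0.5 threshold and returns a
# SparseTensor with indices [[0, 0], [1, 1]], values [0.9, 0.7], dense shape
# (2, 2), together with the number of kept values (2).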
class SparseAddTest(tf.test.TestCase):
def _randomTensor(self, size, np_dtype, sparse=True):
n, m = size
x = np.random.randn(n, m).astype(np_dtype)
return _sparsify(x) if sparse else x
def _SparseTensorValue_3x3(self, negate=False):
# [ 1]
# [2 ]
# [3 4]
# ...or its cwise negation, if `negate`
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, 2, 3, 4])
if negate:
val = -np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return tf.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32),
np.array(shape, np.int64))
def _SparseTensor_3x3(self, negate=False):
return tf.SparseTensor.from_value(self._SparseTensorValue_3x3(negate))
def _SparseTensor_3x3_v2(self):
# [ 1]
# [-1.9 ]
# [ 3 -4.2]
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1, -1.9, 3, -4.2])
shape = np.array([3, 3])
return tf.SparseTensor(
tf.constant(ind, tf.int64),
tf.constant(val, tf.float32),
tf.constant(shape, tf.int64))
def testAddSelf(self):
with self.test_session(use_gpu=False) as sess:
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
sp_sum = tf.sparse_add(sp_a, sp_b)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.shape.get_shape(), [2])
self.assertAllEqual(
sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]])
self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
self.assertAllEqual(sum_out.shape, [3, 3])
def testAddSelfAndNegation(self):
with self.test_session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3(negate=True)
sp_sum = tf.sparse_add(sp_a, sp_b, 0.1)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
self.assertAllEqual(sum_out.values, [])
self.assertAllEqual(sum_out.shape, [3, 3])
def testSmallValuesShouldVanish(self):
with self.test_session(use_gpu=False) as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x3_v2()
# sum:
# [ 2]
# [.1 ]
# [ 6 -.2]
# two values should vanish: |.1| < .21, and |-.2| < .21
sp_sum = tf.sparse_add(sp_a, sp_b, thresh=0.21)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
self.assertAllEqual(sum_out.values, [2, 6])
self.assertAllEqual(sum_out.shape, [3, 3])
# only .1 vanishes
sp_sum = tf.sparse_add(sp_a, sp_b, thresh=0.11)
sum_out = sess.run(sp_sum)
self.assertEqual(sp_sum.shape.get_shape(), [2])
self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
self.assertAllClose(sum_out.values, [2, 6, -.2])
self.assertAllEqual(sum_out.shape, [3, 3])
def testGradients(self):
np.random.seed(1618) # Make it reproducible.
with self.test_session(use_gpu=False):
for n in [10, 31]:
for m in [4, 17]:
sp_a, nnz_a = self._randomTensor([n, m], np.float32)
sp_b, nnz_b = self._randomTensor([n, m], np.float32)
sp_sum = tf.sparse_add(sp_a, sp_b)
nnz_sum = len(sp_sum.values.eval())
err = tf.test.compute_gradient_error([sp_a.values, sp_b.values],
[(nnz_a,), (nnz_b,)],
sp_sum.values, (nnz_sum,))
self.assertLess(err, 1e-3)
def testAddSparseDense(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
for dtype in [np.float32, np.float64, np.int64, np.complex64]:
for index_dtype in [np.int32, np.int64]:
rand_vals_np = np.random.randn(n, m).astype(dtype)
dense_np = np.random.randn(n, m).astype(dtype)
with self.test_session(use_gpu=False):
sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
s = tf.sparse_add(sparse, tf.constant(dense_np)).eval()
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
# check commutativity
s = tf.sparse_add(tf.constant(dense_np), sparse).eval()
self.assertAllEqual(dense_np + rand_vals_np, s)
self.assertTrue(s.dtype == dtype)
def testSparseTensorDenseAddGradients(self):
np.random.seed(1618) # Make it reproducible.
n, m = np.random.randint(30, size=2)
rand_vals_np = np.random.randn(n, m).astype(np.float32)
dense_np = np.random.randn(n, m).astype(np.float32)
with self.test_session(use_gpu=False):
sparse, nnz = _sparsify(rand_vals_np)
dense = tf.constant(dense_np, dtype=tf.float32)
s = tf.sparse_add(sparse, dense)
err = tf.test.compute_gradient_error(
[sparse.values, dense], [(nnz,), (n, m)], s, (n, m))
self.assertLess(err, 1e-3)
######################## Benchmarking code
def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50):
np.random.seed(1618)
with tf.Session(graph=tf.Graph()) as sess:
sp_vals = np.random.rand(n, m).astype(np.float32)
sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32)
vals = np.random.rand(n, m).astype(np.float32)
s2d = tf.add(tf.sparse_tensor_to_dense(sp_t), tf.constant(vals))
sa = tf.sparse_add(sp_t, tf.constant(vals))
timeit.timeit(lambda: sess.run(s2d), number=3)
timeit.timeit(lambda: sess.run(sa), number=3)
s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters)
sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters)
# per-iter latency; secs to millis
return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters
class SparseAddBenchmark(tf.test.Benchmark):
def benchmarkSparseAddDense(self):
print("SparseAddDense: add with sparse_to_dense vs. sparse_add")
print("%nnz \t n \t m \t millis(s2d) \t millis(sparse_add) \t speedup")
for sparsity in [0.99, 0.5, 0.01]:
for n in [1, 256, 50000]:
for m in [100, 1000]:
s2d_dt, sa_dt = _s2d_add_vs_sparse_add(sparsity, n, m)
print("%.2f \t %d \t %d \t %.4f \t %.4f \t %.2f" % (sparsity, n, m,
s2d_dt, sa_dt,
s2d_dt / sa_dt))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 5,942,183,108,475,274,000 | 34.642534 | 80 | 0.588422 | false |
JaeGyu/PythonEx_1 | flask_corsEx.py | 1 | 3683 | from flask import Flask, request, jsonify
from flask_cors import CORS
import json
import base64
import numpy as np
from PIL import Image
import tensorflow as tf
import array
app = Flask(__name__)
CORS(app)
@app.route("/")
def helloWorld():
return "Hello, cross-origin-world!"
# @app.after_request
# def add_header(r):
# """
# Add headers to both force latest IE rendering engine or Chrome Frame,
# and also to cache the rendered page for 10 minutes.
# """
# r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
# r.headers["Pragma"] = "no-cache"
# r.headers["Expires"] = "0"
# r.headers['Cache-Control'] = 'public, max-age=0'
# return r
@app.route("/api/image3", methods=['POST'])
def imagePro():
req_data = request.data.decode('utf-8')
json_obj = json.loads(req_data)
img_str = json_obj['img'].split(',')[-1]
decode_str = base64.b64decode(img_str)
print(decode_str)
with open('temp.png','wb') as temp:
temp.write(decode_str)
im = Image.open("./temp.png")
print("image size : ",im.size)
img2 = im.resize((28,28))
img2.save("./temp28x28.png")
return 'ok'
def convert_to_alpha_list(decode_str):
img = Image.frombytes(data=decode_str, size=(250,250), mode='RGBA')
# pixels = Image.open("./temp.png").resize((28,28)).tobytes("raw","A")
pixels = img.resize((28,28)).tobytes("raw","A")
return np.array([pixel / 255 for pixel in pixels]).reshape(1,784)
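# Sketch of the expected data flow (assumes the browser sends a 250x250 RGBA
# canvas): the raw bytes are rebuilt into an image, downscaled to 28x28, only
# the alpha channel is kept, and each pixel is scaled to [0, 1], so the
# returned array has shape (1, 784) -- the input the MNIST-style model below
# expects.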
def predict_to_number(img_arr):
X = tf.placeholder(dtype=tf.float32, shape=[None, 784])
Y = tf.placeholder(dtype=tf.float32, shape=[None, 10])
W1 = tf.Variable(tf.random_normal(shape=[784, 256], stddev=0.01), name="w1val")
L1 = tf.nn.relu(tf.matmul(X, W1))
W2 = tf.Variable(tf.random_normal(shape=[256, 256], stddev=0.01), name="w2val")
L2 = tf.nn.relu(tf.matmul(L1, W2))
W3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01), name="w3val")
model = tf.matmul(L2, W3)
saver = tf.train.Saver({"w1val":W1, "w2val":W2, "w3val":W3})
with tf.Session() as sess:
saver.restore(sess, "./number_model/mnist")
predict = sess.run([ model ], feed_dict = {X : img_arr})
predict = np.array(predict)
result = np.argmax(predict[0], axis=1)
print("result : ", result)
return result
"""
1) Convert the image sent from the client to 28*28.
2) Extract only the alpha values from the converted image into a list.
3) Scale the extracted alpha values to the range 0~1.
4) reshape(1,784) the values and query the TensorFlow model.
"""
@app.route("/api/predict", methods=['POST'])
def get_number():
req_data = request.data.decode('utf-8')
json_obj = json.loads(req_data)
img_str = json_obj['img']
print("넘어온 배열의 길이 : ",len(img_str))
bt_str = array.array('B', img_str).tostring()
ll = convert_to_alpha_list(bt_str)
result = predict_to_number(ll)
print("예측한 결과는 : ", result[0])
return str(result[0])
@app.route("/api/image2", methods=['POST'])
def imagePross():
req_data = request.data.decode('utf-8')
json_obj = json.loads(req_data)
img_str = json_obj['img'].split(',')[-1]
decode_str = base64.b64decode(img_str)
l = [int(i) for i in decode_str]
print(l)
return jsonify(
img = l
)
@app.route("/api/image", methods=['POST'])
def imagePros():
req_data = request.data.decode('utf-8')
json_obj = json.loads(req_data)
list_img_str = json_obj['img'].split(',')
print(list_img_str[0:10])
return "ok"
if __name__ == "__main__":
app.run(debug=True) | mit | 327,149,010,969,620,000 | 27.836066 | 83 | 0.613022 | false |
openstack/ironic | ironic/tests/unit/objects/test_bios.py | 1 | 12384 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import types
from unittest import mock
from ironic.common import context
from ironic.db import api as dbapi
from ironic import objects
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
class TestBIOSSettingObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
def setUp(self):
super(TestBIOSSettingObject, self).setUp()
self.ctxt = context.get_admin_context()
self.bios_setting = db_utils.get_test_bios_setting()
self.node_id = self.bios_setting['node_id']
@mock.patch.object(dbapi.IMPL, 'get_bios_setting', autospec=True)
def test_get(self, mock_get_setting):
mock_get_setting.return_value = self.bios_setting
bios_obj = objects.BIOSSetting.get(self.context, self.node_id,
self.bios_setting['name'])
mock_get_setting.assert_called_once_with(self.node_id,
self.bios_setting['name'])
self.assertEqual(self.context, bios_obj._context)
self.assertEqual(self.bios_setting['node_id'], bios_obj.node_id)
self.assertEqual(self.bios_setting['name'], bios_obj.name)
self.assertEqual(self.bios_setting['value'], bios_obj.value)
self.assertEqual(self.bios_setting['attribute_type'],
bios_obj.attribute_type)
self.assertEqual(self.bios_setting['allowable_values'],
bios_obj.allowable_values)
self.assertEqual(self.bios_setting['reset_required'],
bios_obj.reset_required)
self.assertEqual(self.bios_setting['read_only'],
bios_obj.read_only)
self.assertEqual(self.bios_setting['unique'], bios_obj.unique)
@mock.patch.object(dbapi.IMPL, 'get_bios_setting_list', autospec=True)
def test_get_by_node_id(self, mock_get_setting_list):
bios_setting2 = db_utils.get_test_bios_setting(name='hyperthread',
value='enabled')
mock_get_setting_list.return_value = [self.bios_setting, bios_setting2]
bios_obj_list = objects.BIOSSettingList.get_by_node_id(
self.context, self.node_id)
mock_get_setting_list.assert_called_once_with(self.node_id)
self.assertEqual(self.context, bios_obj_list._context)
self.assertEqual(2, len(bios_obj_list))
self.assertEqual(self.bios_setting['node_id'],
bios_obj_list[0].node_id)
self.assertEqual(self.bios_setting['name'], bios_obj_list[0].name)
self.assertEqual(self.bios_setting['value'], bios_obj_list[0].value)
self.assertEqual(bios_setting2['node_id'], bios_obj_list[1].node_id)
self.assertEqual(bios_setting2['name'], bios_obj_list[1].name)
self.assertEqual(bios_setting2['value'], bios_obj_list[1].value)
@mock.patch.object(dbapi.IMPL, 'create_bios_setting_list', autospec=True)
def test_create(self, mock_create_list):
fake_call_args = {'node_id': self.bios_setting['node_id'],
'name': self.bios_setting['name'],
'value': self.bios_setting['value'],
'attribute_type':
self.bios_setting['attribute_type'],
'allowable_values':
self.bios_setting['allowable_values'],
'read_only': self.bios_setting['read_only'],
'reset_required':
self.bios_setting['reset_required'],
'unique': self.bios_setting['unique'],
'version': self.bios_setting['version']}
setting = [{'name': 'virtualization', 'value': 'on', 'attribute_type':
'Enumeration', 'allowable_values': ['on', 'off'],
'lower_bound': None, 'max_length': None,
'min_length': None, 'read_only': False,
'reset_required': True, 'unique': False,
'upper_bound': None}]
bios_obj = objects.BIOSSetting(context=self.context,
**fake_call_args)
mock_create_list.return_value = [self.bios_setting]
mock_create_list.call_args
bios_obj.create()
mock_create_list.assert_called_once_with(self.bios_setting['node_id'],
setting,
self.bios_setting['version'])
self.assertEqual(self.bios_setting['node_id'], bios_obj.node_id)
self.assertEqual(self.bios_setting['name'], bios_obj.name)
self.assertEqual(self.bios_setting['value'], bios_obj.value)
self.assertEqual(self.bios_setting['attribute_type'],
bios_obj.attribute_type)
self.assertEqual(self.bios_setting['allowable_values'],
bios_obj.allowable_values)
self.assertEqual(self.bios_setting['read_only'],
bios_obj.read_only)
self.assertEqual(self.bios_setting['reset_required'],
bios_obj.reset_required)
self.assertEqual(self.bios_setting['unique'], bios_obj.unique)
@mock.patch.object(dbapi.IMPL, 'update_bios_setting_list', autospec=True)
def test_save(self, mock_update_list):
fake_call_args = {'node_id': self.bios_setting['node_id'],
'name': self.bios_setting['name'],
'value': self.bios_setting['value'],
'version': self.bios_setting['version']}
setting = [{'name': self.bios_setting['name'],
'value': self.bios_setting['value'],
'attribute_type': None, 'allowable_values': None,
'lower_bound': None, 'max_length': None,
'min_length': None, 'read_only': None,
'reset_required': None, 'unique': None,
'upper_bound': None}]
bios_obj = objects.BIOSSetting(context=self.context,
**fake_call_args)
mock_update_list.return_value = [self.bios_setting]
bios_obj.save()
mock_update_list.assert_called_once_with(self.bios_setting['node_id'],
setting,
self.bios_setting['version'])
self.assertEqual(self.bios_setting['node_id'], bios_obj.node_id)
self.assertEqual(self.bios_setting['name'], bios_obj.name)
self.assertEqual(self.bios_setting['value'], bios_obj.value)
@mock.patch.object(dbapi.IMPL, 'create_bios_setting_list', autospec=True)
def test_list_create(self, mock_create_list):
bios_setting2 = db_utils.get_test_bios_setting(name='hyperthread',
value='enabled')
settings = db_utils.get_test_bios_setting_setting_list()[:-1]
mock_create_list.return_value = [self.bios_setting, bios_setting2]
bios_obj_list = objects.BIOSSettingList.create(
self.context, self.node_id, settings)
mock_create_list.assert_called_once_with(self.node_id, settings, '1.1')
self.assertEqual(self.context, bios_obj_list._context)
self.assertEqual(2, len(bios_obj_list))
self.assertEqual(self.bios_setting['node_id'],
bios_obj_list[0].node_id)
self.assertEqual(self.bios_setting['name'], bios_obj_list[0].name)
self.assertEqual(self.bios_setting['value'], bios_obj_list[0].value)
self.assertEqual(bios_setting2['node_id'], bios_obj_list[1].node_id)
self.assertEqual(bios_setting2['name'], bios_obj_list[1].name)
@mock.patch.object(dbapi.IMPL, 'update_bios_setting_list', autospec=True)
def test_list_save(self, mock_update_list):
bios_setting2 = db_utils.get_test_bios_setting(name='hyperthread',
value='enabled')
settings = db_utils.get_test_bios_setting_setting_list()[:-1]
mock_update_list.return_value = [self.bios_setting, bios_setting2]
bios_obj_list = objects.BIOSSettingList.save(
self.context, self.node_id, settings)
mock_update_list.assert_called_once_with(self.node_id, settings, '1.1')
self.assertEqual(self.context, bios_obj_list._context)
self.assertEqual(2, len(bios_obj_list))
self.assertEqual(self.bios_setting['node_id'],
bios_obj_list[0].node_id)
self.assertEqual(self.bios_setting['name'], bios_obj_list[0].name)
self.assertEqual(self.bios_setting['value'], bios_obj_list[0].value)
self.assertEqual(bios_setting2['node_id'], bios_obj_list[1].node_id)
self.assertEqual(bios_setting2['name'], bios_obj_list[1].name)
self.assertEqual(bios_setting2['value'], bios_obj_list[1].value)
@mock.patch.object(dbapi.IMPL, 'delete_bios_setting_list', autospec=True)
def test_delete(self, mock_delete):
objects.BIOSSetting.delete(self.context, self.node_id,
self.bios_setting['name'])
mock_delete.assert_called_once_with(self.node_id,
[self.bios_setting['name']])
@mock.patch.object(dbapi.IMPL, 'delete_bios_setting_list', autospec=True)
def test_list_delete(self, mock_delete):
bios_setting2 = db_utils.get_test_bios_setting(name='hyperthread')
name_list = [self.bios_setting['name'], bios_setting2['name']]
objects.BIOSSettingList.delete(self.context, self.node_id, name_list)
mock_delete.assert_called_once_with(self.node_id, name_list)
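    # The two sync_node_setting tests below rely on the fact that
    # BIOSSettingList.sync_node_setting returns four lists of settings:
    # (create, update, delete, nochange), i.e. what has to be created,
    # updated, deleted or left untouched for the given node.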
@mock.patch('ironic.objects.bios.BIOSSettingList.get_by_node_id',
spec_set=types.FunctionType)
def test_sync_node_setting_create_and_update(self, mock_get):
node = obj_utils.create_test_node(self.ctxt)
bios_obj = [obj_utils.create_test_bios_setting(
self.ctxt, node_id=node.id)]
mock_get.return_value = bios_obj
settings = db_utils.get_test_bios_setting_setting_list()
settings[0]['value'] = 'off'
create, update, delete, nochange = (
objects.BIOSSettingList.sync_node_setting(self.ctxt, node.id,
settings))
self.assertEqual(create, settings[1:])
self.assertEqual(update, [settings[0]])
self.assertEqual(delete, [])
self.assertEqual(nochange, [])
@mock.patch('ironic.objects.bios.BIOSSettingList.get_by_node_id',
spec_set=types.FunctionType)
def test_sync_node_setting_delete_nochange(self, mock_get):
node = obj_utils.create_test_node(self.ctxt)
bios_obj_1 = obj_utils.create_test_bios_setting(
self.ctxt, node_id=node.id)
bios_obj_2 = obj_utils.create_test_bios_setting(
self.ctxt, node_id=node.id, name='numlock', value='off')
mock_get.return_value = [bios_obj_1, bios_obj_2]
settings = db_utils.get_test_bios_setting_setting_list()
settings[0]['name'] = 'fake-bios-option'
create, update, delete, nochange = (
objects.BIOSSettingList.sync_node_setting(self.ctxt, node.id,
settings))
expected_delete = [{'name': 'virtualization', 'value': 'on'}]
self.assertEqual(create, settings[:2])
self.assertEqual(update, [])
self.assertEqual(delete, expected_delete)
self.assertEqual(nochange, [settings[2]])
| apache-2.0 | 6,987,378,050,527,866,000 | 52.61039 | 79 | 0.591085 | false |
yagop/AGClient | AGClient.py | 1 | 4126 | #!/usr/bin/python
# -*- coding: utf-8 -*-
''' UC3M Moodle (AulaGlobal)
Only allow three webservice methods:
core_webservice_get_site_info
core_enrol_get_users_courses
core_course_get_contents '''
import urllib2
import json
import os
import argparse
import xml.etree.ElementTree as et
domain = 'aulaglobal.uc3m.es'
webservice = '/webservice/rest/server.php'
service = 'ag_mobile'
# First we need the token
def get_token(user, passwd):
url_token = 'https://' + domain + '/login/token.php?username=' \
+ user + '&password=' + passwd + '&service=' + service
req = urllib2.Request(url_token)
resp = urllib2.urlopen(req).read()
# JSON :)
data = json.loads(resp.decode('utf8'))
token = data.get('token')
# Error, password / username wrong?
if token is None:
print data.get('error')
exit()
return token
# Get the userid necessary for get user courses
def get_user_info(token):
url_info = 'https://' + domain + webservice + '?wstoken=' + token \
+ '&wsfunction=core_webservice_get_site_info'
req = urllib2.Request(url_info)
resp = urllib2.urlopen(req).read()
    # This response comes back as XML
root = et.fromstring(resp)
name = root.find("SINGLE/KEY[@name='fullname']/VALUE") # Who am i
user_id = root.find("SINGLE/KEY[@name='userid']/VALUE").text
print 'User ID: ' + user_id + ', ' + name.text
return user_id
# Return a list of course ids
def get_courses(token, userid):
url_courses = 'https://' + domain + webservice + '?wstoken=' \
+ token + '&wsfunction=core_enrol_get_users_courses&userid=' \
+ userid
req = urllib2.Request(url_courses)
resp = urllib2.urlopen(req).read()
# print url_courses
root = et.fromstring(resp)
ids = root.findall("MULTIPLE/SINGLE/KEY[@name='id']/VALUE") # This is a list
return ids
# Get the course contents (files urls)
def get_course_content(token, course_id):
url_course = 'https://' + domain + webservice + '?wstoken=' + token \
+ '&wsfunction=core_course_get_contents&courseid=' + course_id
req = urllib2.Request(url_course)
resp = urllib2.urlopen(req).read()
root = et.fromstring(resp)
xml_modules = "MULTIPLE/SINGLE/KEY[@name='modules']/MULTIPLE/"
xml_contents = "SINGLE/KEY[@name='contents']/MULTIPLE/SINGLE"
    file_contents = root.findall(xml_modules + xml_contents)
files = []
for file_content in file_contents:
file_url = file_content.find("KEY[@name='fileurl']/VALUE").text
        file_name = file_content.find("KEY[@name='filename']/VALUE").text
file_type = file_content.find("KEY[@name='type']/VALUE").text
if file_type == 'file':
moodle_file = {}
# print 'File: ' + file_name
moodle_file['file_name'] = file_name
moodle_file['file_url'] = file_url
files.append(moodle_file)
return files
def save_files(token, course_id, files):
path = 'cursos/' + course_id
if not os.path.exists(path):
os.makedirs(path)
for moodle_file in files:
print 'Downloading: ' + moodle_file['file_name']
url = moodle_file['file_url'] + '&token=' + token
        file_path = os.path.join(path, moodle_file['file_name'])
        response = urllib2.urlopen(url)
        fh = open(file_path, 'wb')
fh.write(response.read())
fh.close()
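# Example invocation (hypothetical NIA and password), run from a shell:
#   python AGClient.py -u 100123456 -p mypassword
# This fetches a token, lists the enrolled courses and downloads every course
# file into cursos/<course_id>/ below the current directory.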
def main():
parser = \
argparse.ArgumentParser(description='Aula Global from Command Line'
)
parser.add_argument('-u', metavar='User (NIA)', action='store',
required=True)
parser.add_argument('-p', metavar='Password', action='store',
required=True)
args = parser.parse_args()
token = get_token(args.u, args.p)
userid = get_user_info(token)
ids = get_courses(token, userid)
for course_id in ids:
print 'Course ID: ' + course_id.text
files_url = get_course_content(token, course_id.text)
save_files(token, course_id.text, files_url)
if __name__ == '__main__':
main()
| mit | -6,753,358,924,567,978,000 | 28.898551 | 81 | 0.605914 | false |
xinwu/horizon | openstack_dashboard/dashboards/project/network_topology/tests.py | 17 | 8874 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
import django.test
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
JSON_URL = reverse('horizon:project:network_topology:json')
INDEX_URL = reverse('horizon:project:network_topology:index')
class NetworkTopologyTests(test.TestCase):
@test.create_stubs({api.nova: ('server_list',),
api.neutron: ('network_list_for_tenant',
'network_list',
'router_list',
'port_list')})
def test_json_view(self):
self._test_json_view()
@django.test.utils.override_settings(
OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
@test.create_stubs({api.nova: ('server_list',),
api.neutron: ('network_list_for_tenant',
'port_list')})
def test_json_view_router_disabled(self):
self._test_json_view(router_enable=False)
def _test_json_view(self, router_enable=True):
api.nova.server_list(
IsA(http.HttpRequest)).AndReturn([self.servers.list(), False])
tenant_networks = [net for net in self.networks.list()
if not net['router:external']]
external_networks = [net for net in self.networks.list()
if net['router:external']]
api.neutron.network_list_for_tenant(
IsA(http.HttpRequest),
self.tenant.id).AndReturn(tenant_networks)
if router_enable:
api.neutron.network_list(
IsA(http.HttpRequest),
**{'router:external': True}).AndReturn(external_networks)
# router1 : gateway port not in the port list
# router2 : no gateway port
# router3 : gateway port included in port list
routers = self.routers.list() + self.routers_with_rules.list()
if router_enable:
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(routers)
api.neutron.port_list(
IsA(http.HttpRequest)).AndReturn(self.ports.list())
self.mox.ReplayAll()
res = self.client.get(JSON_URL)
self.assertEqual('text/json', res['Content-Type'])
data = json.loads(res.content)
# servers
# result_server_urls = [(server['id'], server['url'])
# for server in data['servers']]
expect_server_urls = [
{'id': server.id,
'name': server.name,
'status': server.status,
'task': None,
'url': '/project/instances/%s/' % server.id}
for server in self.servers.list()]
self.assertEqual(expect_server_urls, data['servers'])
        # routers
# result_router_urls = [(router['id'], router['url'])
# for router in data['routers']]
if router_enable:
expect_router_urls = [
{'id': router.id,
'external_gateway_info':
router.external_gateway_info,
'name': router.name,
'status': router.status,
'url': '/project/routers/%s/' % router.id}
for router in routers]
self.assertEqual(expect_router_urls, data['routers'])
else:
self.assertFalse(data['routers'])
# networks
expect_net_urls = []
if router_enable:
expect_net_urls += [{'id': net.id,
'url': None,
'name': net.name,
'router:external': net.router__external,
'subnets': [{'cidr': subnet.cidr}
for subnet in net.subnets]}
for net in external_networks]
expect_net_urls += [{'id': net.id,
'url': '/project/networks/%s/detail' % net.id,
'name': net.name,
'router:external': net.router__external,
'subnets': [{'cidr': subnet.cidr}
for subnet in net.subnets]}
for net in tenant_networks]
for exp_net in expect_net_urls:
if exp_net['url'] is None:
del exp_net['url']
self.assertEqual(expect_net_urls, data['networks'])
# ports
expect_port_urls = [
{'id': port.id,
'device_id': port.device_id,
'device_owner': port.device_owner,
'fixed_ips': port.fixed_ips,
'network_id': port.network_id,
'status': port.status,
'url': '/project/networks/ports/%s/detail' % port.id}
for port in self.ports.list()]
if router_enable:
# fake port for router1 gateway (router1 on ext_net)
router1 = routers[0]
ext_net = external_networks[0]
expect_port_urls.append(
{'id': 'gateway%s' % ext_net.id,
'device_id': router1.id,
'network_id': ext_net.id,
'fixed_ips': []})
self.assertEqual(expect_port_urls, data['ports'])
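# The tests below stub out the tenant quota usage data and check that the
# "Create Network", "Create Router" and "Launch Instance" buttons on the
# network topology page are rendered as disabled once the corresponding
# quota is exhausted.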
class NetworkTopologyCreateTests(test.TestCase):
def _test_new_button_disabled_when_quota_exceeded(
self, expected_string, networks_quota=10,
routers_quota=10, instances_quota=10):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = networks_quota
quota_data['routers']['available'] = routers_quota
quota_data['instances']['available'] = instances_quota
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/network_topology/index.html')
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({quotas: ('tenant_quota_usages',)})
def test_create_network_button_disabled_when_quota_exceeded(self):
url = reverse('horizon:project:network_topology:createnetwork')
classes = 'btn btn-default btn-sm ajax-modal'
link_name = "Create Network (Quota exceeded)"
expected_string = "<a href='%s' class='%s disabled' "\
"id='networks__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, classes, link_name)
self._test_new_button_disabled_when_quota_exceeded(
expected_string, networks_quota=0)
@test.create_stubs({quotas: ('tenant_quota_usages',)})
def test_create_router_button_disabled_when_quota_exceeded(self):
url = reverse('horizon:project:network_topology:createrouter')
classes = 'btn btn-default btn-sm ajax-modal'
link_name = "Create Router (Quota exceeded)"
expected_string = "<a href='%s' class='%s disabled' "\
"id='Routers__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, classes, link_name)
self._test_new_button_disabled_when_quota_exceeded(
expected_string, routers_quota=0)
@test.create_stubs({quotas: ('tenant_quota_usages',)})
def test_launch_instance_button_disabled_when_quota_exceeded(self):
url = reverse('horizon:project:network_topology:launchinstance')
classes = 'btn btn-default btn-sm btn-launch ajax-modal'
link_name = "Launch Instance (Quota exceeded)"
expected_string = "<a href='%s' class='%s disabled' "\
"id='instances__action_launch'>" \
"<span class='fa fa-cloud-upload'></span>%s</a>" \
% (url, classes, link_name)
self._test_new_button_disabled_when_quota_exceeded(
expected_string, instances_quota=0)
| apache-2.0 | 8,837,398,736,079,561,000 | 40.858491 | 78 | 0.557809 | false |
razziel89/ManipulateAggregates | ManipulateAggregates/bin/manipagg.py | 1 | 54452 | # -*- coding: utf-8 -*-
"""This is the executable for manipulating aggregates.
"""
# This file is part of ManipulateAggregates.
#
# Copyright (C) 2016 by Torsten Sachse
#
# ManipulateAggregates is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ManipulateAggregates is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ManipulateAggregates. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import io
import copy
import re
import ManipulateAggregates as ma
from maagbel import pybel
global use_np
try:
import numpy
use_np = True
except ImportError:
use_np = False
# default process name
PROCNAME = "ManipAgg"
try:
from FireDeamon import set_procname
set_procname(PROCNAME)
except ImportError:
set_procname = lambda s: None
global FUNCTIONDICT, VSDICT, CPDICT, AGGREGATES, CURRENTAGG, FF, CONFORMER, FORMAT, ENVIRONMENTS, SET, TAGGING, PART
AGGREGATES = []
CURRENTAGG = None
ENVIRONMENTS = []
SET = True
TAGGING = False
global HELPTEXT, RENDERHELPTEXT, POTHELPTEXT, VISHELPTEXT, MANIPHELPTEXT, AUXHELPTEXT
global CLOSERHELPTEXT
# the short help text message
HELPTEXT = """This script manipulates internal degrees of freedom of a molecule or aggregate.
The custom reduced version of OpenBabel (https://github.com/razziel89/MaAg-bel) is
required. Supports all filetypes supported by that reduced version (you can run the
command `manipagg --list formats` to get a list). The default input filetype is guessed
from the extension. The output filetype is the input filetype by default. Much
functionality requires libFireDeamon (https://github.com/razziel89/libFireDeamon).
Usage (switches are positional, i.e., affect everything behind them):
manipagg [GEOMETRYFILE] [SWITCHES]
Some switches require a GEOMETRYFILE, some others don't.
Mandatory options for long versions are mandatory for short versions, too. You can
separate switch and value by either space or the equals sign. A "#" following a switch
means it requires a number of parameters separated by spaces. E.g., --switch #2 means
that this switch requires 2 arguments which must be separated by any number of spaces >0.
The symbol "[#X]" means that X parameters are optional but if the first is given, all the
others have to be given, too.
Please note that parameters starting with dashes are fine unless they are the same as an
option. In that case, you cannot use them.
Command line switches:
~~~~~~~~~~~~~~~~~~~~~~
--help|-h Print this help and exit
--render-help See more detailed help about automatically rendering a visualization
--pot-help See more detailed help about how to define an electrostatic potential
and how to obtain it
--vis-help See more detailed help about how to modify visualizations
--manip-help See more detailed help about how to manipulate a geometry
--aux-help          See information about some auxiliary switches.
--full-help See all help texts (useful for grep'ing for switches)
--ff #1 Declare force field to use ("None" is also possible, switching
off everything that requires one) (default: mmff94)
--intype|-i #1 Set the type of the input file (default: guess from filename)
--infile|-I #1 Give the name of the input file (not required if the input file is the
first argument)
--outtype|-o #1 Set the type of the output file (default: guess from filename)
--outfile|-O #1 Set the name of the output file (default: do not output anything)
--conf #1 Declare which conformer from a file that can contain
multiple conformers (such as the xyz-format) you wish to load
--list [#1] List supported plugin options. To get a list of plugins, pass 'plugins'
as argument to this switch or pass no argument
--example-vdw Run an example visualization of the electrostatic potential on a
                    molecule's van-der-Waals surface as published in the paper "Introducing
double polar heads to highly fluorescent Thiazoles: Influence on
supramolecular structures and photonic properties" by Kaufmann et al,
accessible at https://doi.org/10.1016/j.jcis.2018.04.105
--example-iso Run an example visualization of the same molecule as --example-vdw on an
isosurface of the molecule's electron density. This will put some files
in your current directory.
"""
## help message for molecule manipulation
MANIPHELPTEXT = """More information about geometry manipulation switches:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--bond|-b #1 Set the length of the bond defined by 2 atom indices to the desired
                    value in Angstroms, e.g., set the bond between atoms 1 and 2 to 5.5
                    Angstroms: 1,2=5.5 . If one atom is marked with a star (i.e. a
                    preceding `*` without the tics) it will be kept fixed. Otherwise,
                    both atoms will be moved halfway.
--angle|-a #1 Set an angle defined by 3 atom indices to a desired value in degrees,
e.g., 1,2,3=90.5 .
--dihedral|-d Set the dihedral angle defined by 4 atom indices to the desired value
in degrees. A cis configuration corresponds to 0 degrees, a trans
configuration to 180 degrees, e.g., 1,2,3,4=90
--get|-g Instead of setting the desired internal degrees of freedom the script
outputs them (ignores all angle or bondlength values given, e.g.
--bond 1,2=5 would result in the bondlength defined by atoms 1 and 2
to be output and the 5 will be ignored.)
--set|-s Unset --get for everything following --set
--write Do an intermediate write out of the output file to the specified file.
Very handy if things should be done in succession
--tag UNDOCUMENTED
--app #1 Append the given second molecule to the current one. Uses the current
input file type. If options are given after --app and before --end,
they will be applied to the to-be-appended molecule.
--dup Like --app, but it does not take an argument. Instead, the
current molecule is being duplicated. This switch also
requires a matching --end.
--end|--licate      Declare that no more actions are to be applied to the to-be-appended
molecule. Both can be used with --app and --dup.
--gl #1 Glue the given second molecule to the one given. Behaves just as
--app with respect to --ue instead of --end.
--ue #2             Declare that no more actions are to be applied to the to-be-glued
molecule. As arguments, give two pairs of indices as i1,i2 and
m1,m2. Those pairs declare pairs of atoms the bonds between which
are to be cut. Then, the molecules will be glued together and i1 and
m1 will remain in the molecule and i2 and m2 (and all atoms connected
to them) will be cleaved.
--translate|-t #1 Translate the molecule by the given vector, e.g., --translate=1,0,0
would translate the molecule by 1 Angstrom in the x-direction
--rotate|-r #1      Rotate the molecule around the given axis by the given angle, e.g.
--rotate=1,0,0=90 would rotate the molecule by 90 degrees around the
x-axis
--rotate-main #1 Rotate the molecule around the given main axis, e.g.,
--rotate-main=1=90 would rotate the molecule by 90 degrees around its
first main axis
--mirror #2 Mirror the molecule at a plane or a point (inversion). Declare the
normal vector first and a point in the plane next. If the former is
0,0,0, point inversion will be performed. Example: 1,0,0 0,0,0 would
mirror at the yz-plane and 0,0,0 0,0,0 would invert at the origin.
--mirror-center #2 The same as --mirror, but perform all these actions after centering
                    to 0,0,0. It will be moved back to the original center afterwards.
--align #3 Align the molecule with its center to a given point and align the
third and second main axes to the two axes given e.g. --align 0,0,0
1,0,0 0,1,0 would align the molecule's center to the origin and its
third/second main axis with the x/y-axis The third main axis is
usually the longest extent.
--part [#1]         Apply all subsequent manipulations (translation, rotation, alignment,
mirroring) only to the specified covalently bound subunit (usually
a molecule). Counting starts at 0. Leave out options to switch back to
                    treating everything together.
--cleave #1 Cleave one part of a molecule by cutting a bond in half. That bond is
specified as an argument consisting of two indices separated by a
comma.
--optimize #1 Perform a force-field optimization with the given number of steps
(default: 500)
--closer #2 [#1] Move two parts of an aggregate closer together with respect to their
                    centers until a vdW-clash occurs. Give as --closer p1,p2
                    s (f,a). See --closer-help for additional information.
--closer-vec #3 [#1]
Move two parts of an aggregate closer together in the direction of
the vector given until a vdW-clash occurs or the distance between the
                    centers increases. Give as --closer-vec p1,p2 v1,v2,v3 s (f,a). See
--closer-help for additional information.
"""
## help message for molecule visualization
VISHELPTEXT = """More information about visualization switches:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--visualize-pot|--vp #1 [#1] [#1]
Visualize the molecule and the potential on a van-der-Waals surface
using OpenGL, Python, libFireDeamon and CGAL. It will automatically
be aligned with its longest axis from right to left.
You specify: Z [R] [F1,F2]
- Z is a zoom factor (float)
- R is the number of refinement steps (int).
- F1 and F2 (floats) are the scaling factor and the shrink factor
for the skin-surface generation, respectively.
--visualize-iso|--vi #2 [#1] [#1]
Like --visualize-pot, but plot the electrostatic potential on an isosurface
instead of a vdW surface.
You specify: Z I [L] [C]
- Z is a zoom factor (float)
- I is the iso value (default: 0.005)
- L is a comma-separated list of atom indices (ints) around which
the isosurface shall be built. Special values: all, noH, auto
(auto is the default)
- C is P1,P2,A,R:
- P1 is the first of CGAL's surface mesh precisions (float)
- P2 is the second of CGAL's surface mesh precisions (float)
- A is the minimum angle between surface facets (float)
- R is the relative surface mesh generation precision (float)
--visualize-simple|--vs #1 [#1]
Visualize the molecule as spheres with corresponding
vdW-radii using OpenGL and Python.
You specify: Z S
- Z is a zoom factor (float)
- S is a scaling factor for the vdW spheres (float)
--window-title|--title #1
Set the title of the visualization window, which is also the prefix
for images saved to disk.
--window-resolution|--resolution|--res #1
Set the resolution of the visualization window as x,y (two ints).
--hide Do not show the OpenGL window (useful for rendering a renderpath)
--swap-align Usually, the molecule's third main axis is aligned perpendicular to
the visualization plane, its second main axis is aligned to the
                    left-right direction and its center of mass is moved to the center
                    of the screen. This command suspends and re-enables this alignment
                    procedure (first occurrence disables, second occurrence enables, etc.)
--contrast #1 By supplying "high" as a parameter (default), the color scale is:
blue (negative) - black (vanishing) - red (positive) for the
visualization of the electrostatic potential.
By supplying "low" as a parameter, the color scale is: red (negative)
- yellow (less negative) - green (vanishing) - turquoise (less
positive) - blue (positive) for the visualization of the
electrostatic potential.
--invert Invert potential data no matter where it has been obtained from
--svgscale #1 Save an SVG file with the given name that shows the color scale.
--save-vis #2 When visualizing, save the visualization state. You specify: W F
- W is an arbitrary combination of the words start and end
stating whether you want the visualization state saved at the
beginning or the end, respectively. "None" turns that off.
                    - F is the name of the file to which to save the state.
A prefix might be added.
Press comma during visualization to save additional visualization
states. Does not work for --visualize-simple.
--load-vis #1 Load visualization data from the given file. Will also initiate
visualization.
--povray #1 Declare an integer. If this integer is >0, the resolution of an image
rendered using PoVRay will have this times the resolution of the
OpenGL window. If the integer is <=0, support for PoVRay is switched
off (default: 1, <=0 not recommended).
--povlight #1 [#1]
Declare an axis (three comma-separated floats) and an angle
(in degrees) that define a rotation for all normal vectors
prior to PoVRay visualization. If the axis is "frontal",
"front" or "straight", illumination will happen directly
from the front. The default is to slightly rotate all
normal vectors as this looks nicer.
--refscale #2 [#n] You provide: R D1 [D2] [...]
- R is a Python regular expression that will be used to match
against files in the given directories
- D1 is a directory (as are all other DN)
The color scale of the potential plot will be adjusted so that all
scales, defined in the save files whose names match the regular
expression in the given directory, fall within the same overall scale
(to make them comparable). Incompatible with --colorscale.
--colorscale #1 [#1]
You provide C1 [C2]:
- C1 is a special keyword (see below) or the float value used as
the lower end of the color scale
- C2 is the float value used as the upper end of the color scale
(ignored if C1 is a special keyword)
Special values are: "auto" (default), or "independent" (only first
letter checked), which causes the use of independent color scales for
positive and negative values. The special value "dependent" (same as
independent) causes the use of the same color scale for positive and
negative values.
"""
## auxiliary help message
AUXHELPTEXT = """Auxilliary switches that only output information:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--dipole-moment #1 Output the molecule's dipole moment as obtained from the
specified charge method. See "manipagg --list charges" for a list of
available methods
--energy Output the energy of the molecule according to the current force
field
--rmsd #1 Output RMSD between the current molecule and the given one as
well as the maximum difference in a single coordinate and for an
atom. This will use the currently defined intype and perform an
alignment beforehand to exclude influences from translation or
rotation.
--vdw-check Output "True" if no two atoms from different molecules are closer
together than the sum of their vdW-radii and "False" otherwise
--spinmultiplicity Output the molecule's spinmultiplicity
--pbond|--pb #2 Output the length of the bond defined by 2 atom indices in Angstroms
                    projected onto a given vector (2nd argument).
--hlb               Quick estimation of a molecule's HLB value
--repickle          If a visualization state cannot be loaded, that might be because the
state was saved in Python 2 and you try to load it using Python 3.
This will try to convert the state to a more compatible representation.
WARNING: the original file will be overwritten!
--pgroup|--pg Print the point group of the given structure.
"""
## help message for moving molecules closer together
CLOSERHELPTEXT = """Help text for the --closer command and the --closer-vec command:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
WARNING: if one geometry file that was read in contains multiple geometries, that has to
be considered!
--closer #2 [#1] Move two parts of an aggregate closer together with respect to their
                    centers until a vdW-clash occurs. Give as --closer p1,p2
s (f,a)
--closer-vec #3 [#1]
Move two parts of an aggregate closer together in the direction of
the vector given until a vdW-clash occurs or the distance between the
                    centers increases. Give as --closer-vec p1,p2 v1,v2,v3 s (f,a)
p1 and p2:
indices, starting at 0, indicating the molecules in the aggregate that shall
be moved closer together.
v1,v2,v3:
components of the vector in which the first molecule shall be moved (will be
inverted for the second one).
s: stepsize for movement (good value: 0.2).
f: factor by which all vdW-radii will be multiplied (default: 0.9).
a: value that is added to all vdW-radii (default: 0.0).
"""
## help message for rendering a visualization path
RENDERHELPTEXT = """Help text for the --renderpath command:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The --renderpath command takes one argument.
A renderpath tells the script how to manipulate the visualization of the given molecule.
All images that are rendered can be saved to disk.
A simple trajectory string looks as follows (spaces only to emphasize logical groups):
chain_of_commands | chain_of_values_separated_by_dashes / number_of_frames
The above can be repeated as often as desired if separated by commas. Apart from the
first command, each chain of commands has to follow a comma. You have to declare as many
values (separated by dashes) as you have declared commands.
Commands that can be chained:
r1+: rotate positively around first axis
r1-: rotate negatively around first axis
r2+: rotate positively around second axis
r2-: rotate negatively around second axis
r3+: rotate positively around third axis
r3-: rotate negatively around third axis
t1+: translate positively along first axis
t1-: translate negatively along first axis
t2+: translate positively along second axis
t2-: translate negatively along second axis
t3+: translate positively along third axis
t3-: translate negatively along third axis
z+: increase zoom level (default zoom level: 10)
z-: decrease zoom level (default zoom level: 10)
Special commands that do not take values or a number of frames and have to be the last
ones in the trajectory:
n: Do not save OpenGL images to disk
p: Render every image via PoVRay
d: Drop to an interactive view first where the user can rotate
the molecule using the keybord. After a press of ESC, the
path will be followed
s: At each image, save the visualization state to disk. Requires --save-vis to
be set
Values:
rotation: angles in degrees
translation: lengths in the unit given in the geometry file (usually Angstroms)
zoom: change in zoom level
Number of frames:
The number of frames during which the given change in visualization will
be performed. A linear mapping from change to frame number is applied.
Example:
`r1+r2-t3+z-|180-90-2-5/100,t1-z+|1-2/200,n,d`
First, the user will see the molecule and have the opportunity to change the view by
using the keyboard. After pressing ESC, the trajectory will start: In the first 100
frames, rotate around the first axis by 180° whilst rotating negatively around the second
axis by 90°, translating the molecule by 2 along the third axis and reducing the zoom
level by 5. In the next 200 frames, traslate negatively along the first axis by 1 whilst
increasing the zoom level by 2. None of the frames will be rendered to disk.
"""
## help message for obtaining an electrostatic potential or charges
POTHELPTEXT = """More information about switches regarding potentials, charges and densities:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Please note that --density only works together with --orbitals.
--charges #1 Compute the potential from existing volumetric charge data
--empirical #1 Compute the electrostatic potential from charges obtained via an
empirical method. You specify the method (see "obabel -L charges").
Default is "mmff94".
--inter #1 [#3] Compute the potential by interpolating existing data.
You specify: M [C1 C2 C3]
- M is the interpolation method ("distance" or "nearest" for inverse
distance weighting or nearest neighbour interpolation (default))
                    - C1 is the exponent
- C2 is the root
- C3 is the cutoff (negative value switches this off)
                    C1, C2 and C3 are only used for inverse distance weighting.
--orbitals Compute the density or potential from molecular orbital data
--absolute Charges at the atomic sites are absolute ones (opposite of --partial)
--partial Charges at the atomic sites are partial ones (default)
--cube #1 Specify a CUBE file as the input file for volumetric data
--cube-vis #1 Specify a CUBE file as the input file for volumetric data (for isosurface
generation, overwrites a previous --cube for this purpose)
--dx #1 Specify a DX file as the input file for volumetric data
--dx-vis #1 Specify a DX file as the input file for volumetric data (for isosurface
generation, overwrites a previous --dx for this purpose)
--molden #1 Specify a Molden file as the input file for orbital data
--xyz #1 Specify a XYZ file as the input file for volumetric data. This type
of file has Cartesian coordinates in the first 3 columns followed by the
value at that point in space.
--density The property to compute is the electron density. Only useful with --grid.
--potential The property to compute is the electrostatic potential (default).
--grid [#1] [#1]
Compute the specified property on a grid. You specify: P F
- P is the number of points in each of the 3 Cartesian directions for
the regular grid (default: 100)
- F is the file of type DX to which the data shall be saved (default:
potential.dx or density.dx depending on the property)
"""
## help message for default key bindings for visualization
KEYSHELPTEXT = """Key bindings for the visualization window:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Key bindings:
ESC : quit
= : zoom in
- : zoom out
w : move molecule up
s : move molecule down
a : move molecule left
d : move molecule right
q : move molecule to front
e : move molecule to back
i : rotate molecule positively around 1st axis
k : rotate molecule negatively around 1st axis
j : rotate molecule positively around 2nd axis
l : rotate molecule negatively around 2nd axis
u : rotate molecule positively around 3rd axis
o : rotate molecule negatively around 3rd axis
. : save OpenGL snapshot of current view
, : save current visualization for later restore
p : render approximation of current view via PoVRay
"""
def _le(arg, l):
if len(arg) != l:
raise ValueError("Wrong format for argument.")
def _nn(arg):
return arg is not None
def _vs():
if CURRENTAGG is None:
return VSDICT.__setitem__
else:
return CURRENTAGG.set_vs
def _gvs():
if CURRENTAGG is None:
return VSDICT.__getitem__
else:
return CURRENTAGG.get_vs
def _cp():
if CURRENTAGG is None:
return CPDICT.__setitem__
else:
return CURRENTAGG.set_cp
def _gcp():
if CURRENTAGG is None:
return CPDICT.__getitem__
else:
return CURRENTAGG.get_cp
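# The four helpers above decide where an option ends up: as long as no aggregate
# has been loaded they read from and write to the module-level VSDICT/CPDICT;
# once __infile() has created CURRENTAGG (which absorbs both dictionaries), they
# operate on the aggregate's own settings instead.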
def __help():
print(HELPTEXT)
sys.exit(0)
def __hlb():
hlb, discard1, discard2 = ma.aggregate.hlb.compute(CURRENTAGG)
print(hlb)
def __aux_help():
print(AUXHELPTEXT)
sys.exit(0)
def __closer_help():
print(CLOSERHELPTEXT)
sys.exit(0)
def __full_help():
print(HELPTEXT)
print(MANIPHELPTEXT)
print(POTHELPTEXT)
print(VISHELPTEXT)
print(AUXHELPTEXT)
print(RENDERHELPTEXT)
print(CLOSERHELPTEXT)
print(KEYSHELPTEXT)
sys.exit(0)
def __manip_help():
print(MANIPHELPTEXT)
sys.exit(0)
def __pot_help():
print(POTHELPTEXT)
sys.exit(0)
def __render_help():
print(RENDERHELPTEXT)
sys.exit(0)
def __renderpath(path):
_vs()("renderpath", path)
def __vis_help():
print(VISHELPTEXT)
print(KEYSHELPTEXT)
sys.exit(0)
def __absolute():
_cp()("partial", False)
def __add_h():
CURRENTAGG.obmol.AddHydrogens()
def __align(center, axis3, axis2): # 3
center = list(map(float, center.split(",")))
axis3 = list(map(float, axis3.split(",")))
axis2 = list(map(float, axis2.split(",")))
CURRENTAGG.align(center, axis3, axis2, part=PART)
def __angle(arg): # 1
if SET:
temp = arg.split("=")
_le(temp, 2)
atoms, angle = temp
angle = float(angle)
atoms = list(map(int, atoms.split(",")))
_le(atoms, 3)
CURRENTAGG.set_angle(atoms[0], atoms[1], atoms[2], angle)
else:
temp = arg.split("=")
atoms = temp[0]
atoms = list(map(int, atoms.split(",")))
_le(atoms, 3)
print(CURRENTAGG.get_angle(atoms[0], atoms[1], atoms[2]))
def __app(filename): # 1
global ENVIRONMENTS, AGGREGATES, CURRENTAGG
ENVIRONMENTS.append("append")
AGGREGATES.append(CURRENTAGG)
CURRENTAGG = None
__infile(filename)
def __bond(arg): # 1
if SET:
temp = arg.split("=")
_le(temp, 2)
atoms, angle = temp
angle = float(angle)
atoms = list(map(int, atoms.split(",")))
_le(atoms, 2)
CURRENTAGG.set_bondlength(atoms[0], atoms[1], angle)
else:
temp = arg.split("=")
atoms = temp[0]
atoms = list(map(int, atoms.split(",")))
_le(atoms, 2)
print(CURRENTAGG.get_bondlength(atoms[0], atoms[1]))
def __charges(arg): # 1
_cp()("type", "charges")
def __cleave(arg): # 1
atoms = list(map(int, arg.split(",")))
_le(atoms, 2)
CURRENTAGG.cleave(atoms[0], atoms[1])
def __closer(parts, stepsize, config=None): # 2 [#1]
parts = list(map(int, parts.split(",")))
_le(parts, 2)
step = float(stepsize)
if _nn(config):
config = list(map(float, config.split(",")))
_le(config, 2)
CURRENTAGG.move_closer(
parts[0],
parts[1],
stepsize=step,
vdw_factor=config[0],
vdw_added=config[1],
vec=None,
)
else:
CURRENTAGG.move_closer(parts[0], parts[1], stepsize=step)
def __closer_vec(parts, vector, stepsize, config=None): # 3 [#1]
parts = list(map(int, parts.split(",")))
_le(parts, 2)
step = float(stepsize)
vector = list(map(float, vector.split(",")))
_le(vector, 3)
if _nn(config):
config = list(map(float, config.split(",")))
_le(config, 2)
CURRENTAGG.move_closer(
parts[0],
parts[1],
stepsize=step,
vdw_factor=config[0],
vdw_added=config[1],
vec=vector,
)
else:
CURRENTAGG.move_closer(parts[0], parts[1], stepsize=step, vec=vector)
def __colorscale(start, end=None): # 1 [#1]
try:
lowerscale = float(start)
except ValueError:
if start.startswith("d"):
start = "dependent"
elif start.startswith("i"):
start = "independent"
if start != "auto":
_vs()("colorscale", start)
return
if _nn(end):
upperscale = float(end)
try:
import cPickle as p
except ImportError:
import pickle as p
scalefile = "SCALE_%f_%f" % (lowerscale, upperscale)
d = {"face_colourscale": (lowerscale, upperscale)}
f = io.open(scalefile, "wb")
p.dump(d, f, 2)
f.close()
_vs()("colorscale", ("^%s$" % (scalefile), "."))
else:
raise TypeError(
"The first argument is no special keyword but no second argument given."
)
def __conf(conf): # 1
global CONFORMER
if conf in ("first", "last"):
CONFORMER = conf
else:
CONFORMER = int(conf)
def __contrast(c): # 1
if c.lower().startswith("h"):
_vs()("high_contrast", True)
elif c.lower().startswith("l"):
_vs()("high_contrast", False)
else:
raise ValueError("--contrast only accepts 'high' or 'low'.")
def __cube(filename): # 1
_cp()("chargefiletype", "cube")
_cp()("potfiletype", "cube")
_vs()("isofiletype", "cube")
_cp()("chargefiletype", "cube")
_cp()("chargefile", filename)
_cp()("potfile", filename)
_vs()("isofile", filename)
_cp()("chargefile", filename)
def __cube_vis(filename): # 1
_vs()("isofiletype", "cube")
_vs()("isofile", filename)
def __density():
_cp()("property", "density")
def __dihedral(arg):
if SET:
temp = arg.split("=")
_le(temp, 2)
atoms, angle = temp
angle = float(angle)
atoms = list(map(int, atoms.split(",")))
_le(atoms, 4)
CURRENTAGG.set_dihedral(atoms[0], atoms[1], atoms[2], atoms[3], angle)
else:
temp = arg.split("=")
atoms = temp[0]
atoms = list(map(int, atoms.split(",")))
_le(atoms, 4)
print(CURRENTAGG.get_dihedral(atoms[0], atoms[1], atoms[2], atoms[3]))
def __dipole_moment(method): # 1
oldmethod = CURRENTAGG.get_cp("method")
_cp()("method", method)
print(CURRENTAGG.get_dipole_moment())
_cp()("method", oldmethod)
def __dup(): # 1
global ENVIRONMENTS, AGGREGATES, CURRENTAGG
ENVIRONMENTS.append("append")
AGGREGATES.append(CURRENTAGG)
CURRENTAGG = CURRENTAGG.duplicate()
def __dx(filename): # 1
_cp()("chargefiletype", "dx")
_cp()("potfiletype", "dx")
_vs()("isofiletype", "dx")
_cp()("chargefiletype", "dx")
_cp()("chargefile", filename)
_cp()("potfile", filename)
_vs()("isofile", filename)
_cp()("chargefile", filename)
def __dx_vis(filename): # 1
_vs()("isofiletype", "dx")
_vs()("isofile", filename)
def __empirical(method): # 1
_cp()("type", "empirical")
_cp()("method", method)
def __end():
global ENVIRONMENTS, AGGREGATES, CURRENTAGG
if not ENVIRONMENTS.pop() == "append":
raise ValueError("The switch --end has to follow --app.")
newagg = CURRENTAGG
CURRENTAGG = AGGREGATES.pop()
CURRENTAGG.append(newagg)
def __energy():
print(CURRENTAGG.get_energy())
def __ff(ff): # 1
global FF
FF = ff
def __get():
global SET
SET = False
def __gl(filename): # 1
global ENVIRONMENTS, AGGREGATES, CURRENTAGG
ENVIRONMENTS.append("glue")
AGGREGATES.append(CURRENTAGG)
CURRENTAGG = None
__infile(filename)
def __grid(points="100", filename=None): # [#1] [#1]
if not use_np:
raise RuntimeError("Could not import numpy, cannot perform a grid computation.")
points = int(points)
coordinates = numpy.array(CURRENTAGG.get_coordinates())
min_corner = numpy.amin(coordinates, axis=0) - 10.0
max_corner = numpy.amax(coordinates, axis=0) + 10.0
counts_xyz = numpy.array([points, points, points])
org_xyz = min_corner
# grid creation copied from energyscan.scan but slightly altered
space = [
numpy.linspace(s, e, num=c)
for s, e, c in zip(min_corner, max_corner, counts_xyz)
]
# just take the difference between the first elements in every direction to get the stepsize
delta_x = numpy.array([space[0][1] - space[0][0], 0.0, 0.0])
delta_y = numpy.array([0.0, space[1][1] - space[1][0], 0.0])
delta_z = numpy.array([0.0, 0.0, space[2][1] - space[2][0]])
a1 = numpy.array(
[(x,) for x in space[0] for y in space[1] for z in space[2]], dtype=float
)
a2 = numpy.array(
[(y,) for x in space[0] for y in space[1] for z in space[2]], dtype=float
)
a3 = numpy.array(
[(z,) for x in space[0] for y in space[1] for z in space[2]], dtype=float
)
# a1,a2,a3 = numpy.array(numpy.meshgrid(*space,indexing="ij"))
# a1.shape = (-1,1)
# a2.shape = (-1,1)
# a3.shape = (-1,1)
grid = numpy.concatenate((a1, a2, a3), axis=1)
if _gcp()("property") == "density":
if not _nn(filename):
filename = "density.dx"
prop = CURRENTAGG.get_density(grid)
else:
if not _nn(filename):
filename = "potential.dx"
prop = CURRENTAGG.get_potential(grid)
ma.collection.write.print_dx_file(
filename, counts_xyz, org_xyz, delta_x, delta_y, delta_z, prop
)
def __hide():
_vs()("hide", True)
def __infile(filename): # 1
global CURRENTAGG, VSDICT
if CURRENTAGG is None:
CURRENTAGG = ma.aggregate.read_from_file(
filename, fileformat=FORMAT, conf_nr=CONFORMER, ff=FF
)
for k in VSDICT:
CURRENTAGG.set_vs(k, VSDICT[k])
VSDICT.clear()
for k in CPDICT:
CURRENTAGG.set_cp(k, CPDICT[k])
CPDICT.clear()
else:
raise ValueError("You already specified a primary input file.")
return
def __inter(method, exp=None, root=None, cutoff=None): # 1 [#3]
_cp()("type", "interpolation")
_cp()("interpolation", method)
if _nn(exp):
_cp()("int_exponent", exp)
if _nn(root):
_cp()("int_root", root)
if _nn(cutoff):
_cp()("cutoff", cutoff)
def __intype(informat): # 1
global FORMAT
FORMAT = informat
def __invert():
_cp()("invert_potential", True)
def __list(type="plugins"): # 1
configs = pybel.getpluginconfigs(type)
print("Configs for plugin '{}': {}".format(type, ", ".join(configs)))
def __load_vis(filename): # 1
_vs()("savestart", False)
_vs()("saveend", False)
ma.aggregate.visualize.RenderExtern(filename, agg=CURRENTAGG, dictionary=VSDICT),
def __mirror(point, normal): # 2
point = list(map(float, point.split(",")))
normal = list(map(float, normal.split(",")))
_le(point, 3)
_le(normal, 3)
CURRENTAGG.mirror(normal, point, center_it=False, part=PART)
def __mirror_center(point, normal): # 2
point = list(map(float, point.split(",")))
normal = list(map(float, normal.split(",")))
_le(point, 3)
_le(normal, 3)
CURRENTAGG.mirror(normal, point, center_it=True, part=PART)
def __molden(filename): # 1
_cp()("orbfiletype", "molden")
_cp()("orbfile", filename)
def __optimize(steps): # 1
CURRENTAGG.optimize(int(steps))
def __orbitals():
_cp()("type", "orbitals")
def __outfile(filename): # 1
CURRENTAGG.write(filename)
def __outtype(outtype): # 1
CURRENTAGG.info["outformat"] = outtype
def __part(part=None):
global PART
if part is not None:
PART = int(part)
else:
PART = part
def __partial():
_cp()("partial", True)
def __pbond(bond, vector): # 2
atoms = list(map(int, bond.split(",")))
_le(atoms, 2)
vector = list(map(float, vector.split(",")))
_le(vector, 3)
print(CURRENTAGG.get_bondlength(atoms[0], atoms[1], projection=vector))
def __pgroup(): # 0
print(CURRENTAGG.get_pointgroup())
def __potential():
_cp()("property", "potential")
def __povray(scale): # 1
_vs()("povray", int(scale))
def __povlight(axis, angle=None): # 2
if axis.lower() in ("frontal", "front", "straight"):
axis, angle = ([1.0, 0.0, 0.0], 0.0)
else:
axis = axis.split(",")
_le(axis, 3)
axis = list(map(float, axis))
angle = float(angle)
_vs()("visrotmat", (axis, angle))
def __refscale(regex, dir1, *dirs): # 2 [#n]
dirs = [dir1] + list(dirs)
_vs()("colorscale", (regex, "|".join(dirs)))
def __repickle(filename): # 1
try:
import cPickle as p
except ImportError:
import pickle as p
print("WARNING: cPickle module should be present in Python 2.", file=sys.stderr)
print(
" Are you sure you are running this using Python 2?",
file=sys.stderr,
)
f = io.open(filename, "rb")
try:
obj = p.load(f)
except UnicodeDecodeError as e:
print(
"ERROR during unpickling. Maybe you did not use the same Python version as for pickling?",
file=sys.stderr,
)
raise e
f.close()
for key in (
"povray_data",
"faces",
):
if key not in obj:
raise ValueError(
"The loaded file %s is most likely no saved visualization state."
% (filename)
)
try:
obj[key] = [a.tolist() for a in obj[key]]
except AttributeError:
raise ValueError(
"The loaded file %s has most likely already been repickled."
% (filename)
)
f = io.open(filename, "wb")
# protocol version 2 stays compatible with Python 2 (but is slower than more recent versions)
p.dump(obj, f, 2)
f.close()
def __rmsd(filename): # 1
global AGGREGATES, CURRENTAGG
AGGREGATES.append(CURRENTAGG)
CURRENTAGG = None
__infile(filename)
newagg = CURRENTAGG
CURRENTAGG = AGGREGATES.pop()
CURRENTAGG.rmsd(newagg, True)
def __rotate(arg): # 1
temp = arg.split("=")
_le(temp, 2)
axis, angle = temp
angle = float(angle)
axis = list(map(float, axis.split(",")))
_le(axis, 3)
CURRENTAGG.rotate(axis, angle, part=PART)
def __rotate_center(arg): # 1
if PART is not None:
center = CURRENTAGG.obmol.GetCenterPart(PART)
center = [-center.GetX(), -center.GetY(), -center.GetZ()]
else:
center = [-c for c in CURRENTAGG.get_center()]
CURRENTAGG.translate(center, part=PART)
__rotate(arg)
CURRENTAGG.translate([-c for c in center], part=PART)
def __rotate_main(arg): # 1
temp = arg.split("=")
_le(temp, 2)
axis, angle = temp
angle = float(angle)
axis = int(axis)
CURRENTAGG.rotate_main(axis, angle, part=PART)
def __rotate_main_center(arg): # 1
if PART is not None:
center = CURRENTAGG.obmol.GetCenterPart(PART)
center = [-center.GetX(), -center.GetY(), -center.GetZ()]
else:
center = [-c for c in CURRENTAGG.get_center()]
CURRENTAGG.translate(center, part=PART)
__rotate_main(arg)
CURRENTAGG.translate([-c for c in center], part=PART)
def __save_vis(words, filename=None): # 2
if words.startswith("start") or words.endswith("start"):
_vs()("savestart", True)
if words.startswith("end") or words.endswith("end"):
_vs()("saveend", True)
if words.lower() in ("none", "never", "no"):
_vs()("savestart", False)
_vs()("saveend", False)
if filename is not None:
_vs()("savefile", filename)
else:
if filename is None:
raise TypeError(
"__save_vis() takes exactly 2 arguments when the first is not 'none'."
)
_vs()("savefile", filename)
def __svgscale(filename):
_vs()("svgscale", filename)
def __set():
global SET
SET = True
def __spinmultiplicity():
print(CURRENTAGG.obmol.GetTotalSpinMultiplicity())
def __swap_align():
_vs()("align", not (_gvs()("align")))
def __tag(*args):
global TAGGING
if len(args) == 0:
if TAGGING:
CURRENTAGG.tag_parts(-1)
else:
CURRENTAGG.tag_parts(1)
TAGGING = not (TAGGING)
else:
args = list(map(int, args))
CURRENTAGG.tag_parts(args)
TAGGING = True
def __translate(arg): # 1
arg = list(map(float, arg.split(",")))
_le(arg, 3)
CURRENTAGG.translate(arg, part=PART)
def __ue(pair1, pair2): # 2
global ENVIRONMENTS, AGGREGATES, CURRENTAGG
pair1 = list(map(int, pair1.split(",")))
pair2 = list(map(int, pair2.split(",")))
if not ENVIRONMENTS.pop() == "glue":
raise ValueError("The switch --ue has to follow --gl.")
newagg = CURRENTAGG
CURRENTAGG = AGGREGATES.pop()
CURRENTAGG.glue(newagg, pair1[0], pair1[1], pair2[0], pair2[1])
def __vdw_check(scale=None):
if _nn(scale):
print(CURRENTAGG.vdw_check(scale))
else:
print(CURRENTAGG.vdw_check())
def __visualize_iso(zoom, iso=None, atoms=None, config=None): # 2 [#1] [#1]
_vs()("type", "iso")
_vs()("zoom", float(zoom))
if _nn(iso):
_vs()("isovalue", float(iso))
if _nn(atoms):
temp = atoms.split(",")
if len(temp) == 1:
try:
int(temp[0])
_vs()("iso_atoms", int(temp[0]))
except ValueError:
_vs()("iso_atoms", temp[0])
else:
_vs()("iso_atoms", list(map(int, temp)))
if _nn(config):
temp = config.split(",")
_le(temp, 4)
_vs()("mesh_criteria", list(map(float, [temp[1], temp[2], temp[0]])))
_vs()("rel_precision", float(temp[3]))
CURRENTAGG.visualize()
def __visualize_pot(zoom, refine=None, factors=None): # 1 [#1] [#1]
_vs()("type", "vdw")
_vs()("zoom", float(zoom))
if _nn(refine):
_vs()("refine", int(refine))
if _nn(factors):
temp = factors.split(",")
_le(temp, 2)
_vs()("vdw_scale", float(temp[0]))
_vs()("shrink_factor", float(temp[1]))
CURRENTAGG.visualize()
def __visualize_simple(zoom, scale=None): # 1 [#1]
_vs()("type", "simple")
_vs()("zoom", float(zoom))
if _nn(scale):
_vs()("vdw_scale", float(scale))
CURRENTAGG.visualize()
def __window_resolution(res): # 1
    # RESOLUTIONS.get(key, default) would evaluate the fallback eagerly and raise a
    # ValueError for named resolutions like "fhd", so check for a keyword first.
    res = res.lower()
    if res in RESOLUTIONS:
        res = list(RESOLUTIONS[res])
    else:
        res = list(map(int, res.split(",")))
_le(res, 2)
_vs()("resolution", res)
def __window_title(title): # 1
_vs()("title", title)
def __write(filename):
CURRENTAGG.write(filename)
def __xyz(filename): # 1
_cp()("chargefiletype", "xyz")
_cp()("potfiletype", "xyz")
_vs()("isofiletype", "xyz")
_cp()("chargefiletype", "xyz")
_cp()("chargefile", filename)
_cp()("potfile", filename)
_vs()("isofile", filename)
_cp()("chargefile", filename)
def __example_iso():
# Determine files to use for this example
data_dir = ma.get_data_dir()
molden_file = os.path.join(data_dir, "dye5.molden")
print(
r"""Running example visualization on an electron density iso-surface.
This will effectively execute the following command:
manipagg -I $FILE --orbitals --density --molden $FILE --grid 100 dens.dx \
--dx-vis dens.dx --potential --save-vis start vis_iso.masave \
--visualize-iso 1.0 0.001
After this computation finishes, run the following command to visualize the already
computed visualization without recomputation:
manipagg --load-vis start_vis_iso.masave
Remember that you can press "p" to render your current view via PoVRay if you opted to
install it. Unfortunately, the OpenGL view cannot be translated perfectly to PoVRay,
which means you should zoom out a bit to avoid clipping the sides.
For $FILE, we use:
"""
)
print(" " + molden_file + "\n\nKeybindings follow.\n\n")
print(KEYSHELPTEXT)
# Execute commands in order
__infile(molden_file)
__orbitals()
__density()
__molden(molden_file)
__grid(points=100, filename="dens.dx")
__dx_vis("dens.dx")
__potential()
__save_vis("start", filename="vis_iso.masave")
__visualize_iso(1.0, iso=0.001)
def __example_vdw():
# Determine files to use for this example
data_dir = ma.get_data_dir()
molden_file = os.path.join(data_dir, "dye5.molden")
print(
r"""Running example visualization on a van-der-Waals surface.
This will effectively execute the following command:
manipagg -I $FILE --orbitals --molden $FILE \
--potential --save-vis start vis_vdw.masave \
--visualize-pot 1.0
After this computation finishes, run the following command to visualize the already
computed visualization without recomputation:
manipagg --load-vis start_vis_vdw.masave
Remember that you can press "p" to render your current view via PoVRay if you opted to
install it. Unfortunately, the OpenGL view cannot be translated perfectly to PoVRay,
which means you should zoom out a bit to avoid clipping the sides.
For $FILE, we use:
"""
)
print(" " + molden_file + "\n\nKeybindings follow.\n\n")
print(KEYSHELPTEXT)
# Execute commands in order
__infile(molden_file)
__orbitals()
__molden(molden_file)
__potential()
__save_vis("start", filename="vis_vdw.masave")
__visualize_pot(1.0)
global RESOLUTIONS
## special resolution keywords accepted by this script and their resolutions
RESOLUTIONS = {
"vga": (640, 480),
"svga": (800, 600),
"qhd": (960, 540),
"wsvga": (1024, 600),
"xga": (1024, 768),
"xga+": (1152, 864),
"wxga": (1280, 720),
"wxga": (1280, 768),
"wxga": (1280, 800),
"sxga": (1280, 1024),
"wxga": (1440, 900),
"uxga": (1600, 1200),
"wsxga+": (1680, 1050),
"fhd": (1920, 1080),
"hd": (1920, 1080),
"wuxga": (1920, 1200),
"wqhd": (2560, 1440),
"wqxga": (2560, 1600),
}
## this dictionary associates each switch with a function that performs an operation
FUNCTIONDICT = {
"--absolute": __absolute,
"--add-h": __add_h,
"-h": __add_h,
"--align": __align,
"--angle": __angle,
"-a": __angle,
"--app": __app,
"--aux-help": __aux_help,
"--bond": __bond,
"-b": __bond,
"--charges": __charges,
"--cleave": __cleave,
"--closer": __closer,
"--closer-help": __closer_help,
"--closer-vec": __closer_vec,
"--colorscale": __colorscale,
"--conf": __conf,
"--contrast": __contrast,
"--cube": __cube,
"--cube-vis": __cube_vis,
"--density": __density,
"--dihedral": __dihedral,
"-d": __dihedral,
"--dipole-moment": __dipole_moment,
"--dup": __dup,
"--dx": __dx,
"--dx-vis": __dx_vis,
"--empirical": __empirical,
"--end": __end,
"--energy": __energy,
"--example-vdw": __example_vdw,
"--example-iso": __example_iso,
"--ff": __ff,
"--full-help": __full_help,
"--get": __get,
"-g": __get,
"--gl": __gl,
"--grid": __grid,
"--help": __help,
"-h": __help,
"--hlb": __hlb,
"--hide": __hide,
"--infile": __infile,
"-I": __infile,
"--inter": __inter,
"--intype": __intype,
"-i": __intype,
"--invert": __invert,
"--licate": __end,
"--list": __list,
"--load-vis": __load_vis,
"--manip-help": __manip_help,
"--mirror": __mirror,
"--mirror-center": __mirror_center,
"--molden": __molden,
"--optimize": __optimize,
"--orbitals": __orbitals,
"--outfile": __outfile,
"-O": __outfile,
"--outtype": __outtype,
"-o": __outtype,
"--part": __part,
"--partial": __partial,
"--pbond": __pbond,
"--pb": __pbond,
"--pgroup": __pgroup,
"--pg": __pgroup,
"--potential": __potential,
"--pot-help": __pot_help,
"--povray": __povray,
"--povlight": __povlight,
"--refscale": __refscale,
"--render-help": __render_help,
"--renderpath": __renderpath,
"--repickle": __repickle,
"--rmsd": __rmsd,
"--rotate-main": __rotate_main,
"--rotate-main-center": __rotate_main_center,
"--rotate": __rotate,
"--rotate-center": __rotate_center,
"-r": __rotate,
"--save-vis": __save_vis,
"--svgscale": __svgscale,
"--set": __set,
"-s": __set,
"--spinmultiplicity": __spinmultiplicity,
"--swap-align": __swap_align,
"--tag": __tag,
"--translate": __translate,
"-t": __translate,
"--ue": __ue,
"--vdw-check": __vdw_check,
"--vis-help": __vis_help,
"--visualize-iso": __visualize_iso,
"--vi": __visualize_iso,
"--visualize-pot": __visualize_pot,
"--vp": __visualize_pot,
"--visualize-simple": __visualize_simple,
"--vs": __visualize_simple,
"--window-resolution": __window_resolution,
"--resolution": __window_resolution,
"--res": __window_resolution,
"--window-title": __window_title,
"--title": __window_title,
"--write": __write,
"--xyz": __xyz,
}
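# _parse_commandline (defined below) looks every switch up in FUNCTIONDICT,
# which is why several aliases can share one handler, e.g. "--res",
# "--resolution" and "--window-resolution" all dispatch to __window_resolution.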
# the default conformer to be read in
CONFORMER = 1
# the default force field to use
FF = "mmff94"
# the default file format (None means: guess)
FORMAT = None
# store all visualization options until an input molecule was specified
VSDICT = {}
# store all potential options until an input molecule was specified
CPDICT = {}
# which covalently bound entity shall be treated
PART = None
def _expand_tilde(filename):
if filename.startswith("~"):
homedir = filename.split(os.sep)[0]
if homedir == "~":
filename = re.sub("^~", os.environ["HOME"], filename)
else:
username = homedir.split("~")[1]
homedir = os.sep.join(os.environ["HOME"].split(os.sep)[:-1])
filename = re.sub("^~", homedir + os.sep, filename)
return filename
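# Sketch of what _expand_tilde does, assuming HOME=/home/alice:
#     _expand_tilde("~/data/mol.xyz")    -> "/home/alice/data/mol.xyz"
#     _expand_tilde("~bob/data/mol.xyz") -> "/home/bob/data/mol.xyz"
# The "~user" form only works when all home directories share the parent
# directory of $HOME.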
def _parse_commandline(argv):
while len(argv) > 0:
temp = argv.pop().split("=")
switch = temp[0]
if switch.startswith("--"):
switch = switch.lower()
try:
func = FUNCTIONDICT[switch]
except KeyError as e:
raise ValueError("Switch %s not known, aborting." % (switch))
if len(temp) == 1:
sargs = ""
fargs = []
else:
sargs = "=".join(temp[1:])
fargs = ["=".join(temp[1:])]
while len(argv) > 0 and not argv[-1].split("=")[0] in FUNCTIONDICT:
temp = argv.pop()
sargs += " %s" % (temp)
fargs.append(temp)
fargs = [_expand_tilde(f) for f in fargs]
yield (switch, sargs, func, fargs)
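# _parse_commandline yields one (switch, sargs, func, fargs) tuple per switch:
# "sargs" is the raw argument string and "fargs" is the tilde-expanded argument
# list that is passed to the handler. Arguments may be attached with "=" or
# given as separate tokens, so "--title=benzene" and "--title benzene" are
# treated the same way.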
def entrypoint():
argv = sys.argv
argv.reverse()
argv.pop()
if len(argv) == 0:
raise ValueError("No arguments provided.")
# treat special case of geometry file being the first argument
if not argv[-1] in FUNCTIONDICT and os.path.isfile(argv[-1]):
argv.append("--infile")
for switch, sargs, func, fargs in _parse_commandline(argv):
try:
func(*fargs)
except TypeError as e:
print(
"You probably specified a wrong number of arguments for the switch %s. Arguments: %s. Stacktrace follows."
% (switch, sargs),
file=sys.stderr,
)
raise e
except AttributeError as e:
print(
"You probably did not specify a primary input file, but switch %s requires one. Stacktrace follows."
% (switch),
file=sys.stderr,
)
raise e
except IndexError as e:
print(
"You probably specified --end or --ue without specifying --app or --gl beforehand. Your switch was: %s. Stacktrace follows."
% (switch),
file=sys.stderr,
)
raise e
if __name__ == "__main__":
entrypoint()
| gpl-3.0 | -4,710,836,400,271,232,000 | 34.541775 | 140 | 0.597815 | false |
e-koch/VLA_Lband | 16B/pipeline4.7.1_custom/EVLA_pipe_fake_flagall.py | 1 | 2655 |
'''
On mixed setups, flagall is already run. This defines the variables set
during that script so it doesn't need to be run multiple times.
BUT, if we're re-running the pipeline, the script will check for manual
flagging scripts saved in the appropriate project folder for the track.
'''
import os
import glob
import numpy as np
from paths import a_path
logprint("Starting EVLA_pipe_fake_flagall.py",
logfileout='logs/flagall.log')
time_list = runtiming('flagall', 'start')
QA2_flagall = 'Pass'
logprint("These value are based on the current flags on the MS. All"
" deterministic was performed prior to splitting into separate line"
" and continuum MSs.")
# Path to the manual flagging scripts
# Do any of them match?
folder_name = os.getcwd().split("/")[-1]
proj_code = "16B-242" if "16B-242" in folder_name else "16B-236"
flagging_path = os.path.join(a_path, proj_code, "track_flagging")
track_flags = glob.glob(flagging_path + "/*.py")
hits = np.where([os.getcwd().split("/")[-1] in track for
track in track_flags])[0]
if len(hits) == 1:
track_flag_script = track_flags[hits[0]]
logprint("Found a manual flagging script to run: "
"{}".format(track_flag_script))
execfile(track_flag_script)
elif len(hits) > 1:
from warnings import warn
import sys
logprint("Multiple script hits? Something is wrong!")
sys.exit()
else:
logprint("No manual flagging script found.")
# report initial statistics
default('flagdata')
vis=ms_active
mode='summary'
spwchan=True
spwcorr=True
basecnt=True
action='calculate'
savepars=False
myinitialflags = flagdata()
#clearstat()
logprint ("Initial flags summary", logfileout='logs/flagall.log')
start_total = myinitialflags['total']
start_flagged = myinitialflags['flagged']
logprint("Initial flagged fraction = {}".format(start_flagged / start_total),
logfileout='logs/flagall.log')
init_on_source_vis = start_total
afterzero_total = start_total
afterzero_flagged = start_flagged
zero_flagged = 0.0
aftershadow_total = start_total
aftershadow_flagged = start_flagged
shadow_flagged = 0.0
flagdata_list = []
cmdreason_list = []
frac_flagged_on_source1 = 1.0 - ((start_total - start_flagged) / init_on_source_vis)
if (frac_flagged_on_source1 >= 0.3):
logprint("Initial flagging appears extensive. Check pipeline results to "
"ensure usable solution are attained.")
QA2_flagall='Fail'
logprint("Finished EVLA_pipe_fake_flagall.py", logfileout='logs/flagall.log')
logprint("QA2 score: "+QA2_flagall, logfileout='logs/flagall.log')
time_list = runtiming('flagall', 'end')
pipeline_save()
| mit | -7,545,904,781,648,430,000 | 27.244681 | 85 | 0.707721 | false |
mikewiebe-ansible/ansible | lib/ansible/modules/network/fortios/fortios_firewall_schedule_onetime.py | 13 | 11113 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_schedule_onetime
short_description: Onetime schedule configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_schedule feature and onetime category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_schedule_onetime:
description:
- Onetime schedule configuration.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
type: int
end:
description:
- "Schedule end date and time, format hh:mm yyyy/mm/dd."
type: str
expiration_days:
description:
- Write an event log message this many days before the schedule expires.
type: int
name:
description:
- Onetime schedule name.
required: true
type: str
start:
description:
- "Schedule start date and time, format hh:mm yyyy/mm/dd."
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Onetime schedule configuration.
fortios_firewall_schedule_onetime:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_schedule_onetime:
color: "3"
end: "<your_own_value>"
expiration_days: "5"
name: "default_name_6"
start: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_schedule_onetime_data(json):
option_list = ['color', 'end', 'expiration_days',
'name', 'start']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
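# Example of the filtering above (hypothetical input): a parameter dictionary
# such as {'name': 'lunch', 'start': '12:00 2019/01/01', 'color': None}
# is reduced to {'name': 'lunch', 'start': '12:00 2019/01/01'}; keys that are
# not in option_list or whose value is None are dropped before the request.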
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
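# underscore_to_hyphen rewrites dictionary keys, e.g. {'expiration_days': 3}
# becomes {'expiration-days': 3}; for the flat option dictionary used by this
# module that is the only conversion needed here.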
def firewall_schedule_onetime(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_schedule_onetime'] and data['firewall_schedule_onetime']:
state = data['firewall_schedule_onetime']['state']
else:
state = True
firewall_schedule_onetime_data = data['firewall_schedule_onetime']
filtered_data = underscore_to_hyphen(filter_firewall_schedule_onetime_data(firewall_schedule_onetime_data))
if state == "present":
return fos.set('firewall.schedule',
'onetime',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.schedule',
'onetime',
mkey=filtered_data['name'],
vdom=vdom)
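# With state == "present" the filtered options are pushed via fos.set(); with
# state == "absent" the schedule is deleted via fos.delete(), keyed on "name".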
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_schedule(data, fos):
if data['firewall_schedule_onetime']:
resp = firewall_schedule_onetime(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_schedule_onetime": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"end": {"required": False, "type": "str"},
"expiration_days": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"start": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,339,620,378,644,035,000 | 30.216292 | 111 | 0.591919 | false |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Tools/bgen/bgen/bgenModule.py | 48 | 2748 | from bgenOutput import *
from bgenGeneratorGroup import GeneratorGroup
class Module(GeneratorGroup):
def __init__(self, name, prefix = None,
includestuff = None,
finalstuff = None,
initstuff = None,
variablestuff = None,
longname = None):
GeneratorGroup.__init__(self, prefix or name)
self.name = name
if longname:
self.longname = longname
else:
self.longname = name
self.includestuff = includestuff
self.initstuff = initstuff
self.finalstuff = finalstuff
self.variablestuff = variablestuff
self.typeobjects = []
def addobject(self, od):
self.generators.append(od)
self.typeobjects.append(od)
od.setmodulename(self.longname)
def generate(self):
OutHeader1("Module " + self.name)
Output("#include \"Python.h\"")
Output()
if self.includestuff:
Output()
Output("%s", self.includestuff)
self.declareModuleVariables()
GeneratorGroup.generate(self)
if self.finalstuff:
Output()
Output("%s", self.finalstuff)
Output()
Output("void init%s(void)", self.name)
OutLbrace()
Output("PyObject *m;")
Output("PyObject *d;")
Output()
if self.initstuff:
Output("%s", self.initstuff)
Output()
Output("m = Py_InitModule(\"%s\", %s_methods);",
self.name, self.prefix)
Output("d = PyModule_GetDict(m);")
self.createModuleVariables()
OutRbrace()
OutHeader1("End module " + self.name)
def declareModuleVariables(self):
self.errorname = self.prefix + "_Error"
Output("static PyObject *%s;", self.errorname)
def createModuleVariables(self):
Output("""%s = %s;""", self.errorname, self.exceptionInitializer())
Output("""if (%s == NULL ||""", self.errorname)
Output(""" PyDict_SetItemString(d, "Error", %s) != 0)""",
self.errorname)
IndentLevel()
Output("""return;""")
DedentLevel()
for tp in self.typeobjects:
tp.outputTypeObjectInitializer()
if self.variablestuff:
Output("%s", self.variablestuff)
Output()
def exceptionInitializer(self):
return """PyErr_NewException("%s.Error", NULL, NULL)""" % self.name
def _test():
from bgenGenerator import FunctionGenerator
m = Module("spam", "", "#include <stdio.h>")
g = FunctionGenerator(None, "bacon")
m.add(g)
m.generate()
if __name__ == "__main__":
_test()
| gpl-2.0 | 72,715,581,016,451,730 | 28.234043 | 75 | 0.550582 | false |
MagazinnikIvan/pywinauto | pywinauto/actionlogger.py | 1 | 3306 | # GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from logger import logger
foundLogger = True
except ImportError:
foundLogger = False
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
def set_level(level):
"""Set pywinauto logging level for default logger.
Use logging.WARNING (30) or higher to disable pywinauto logging."""
logger = logging.getLogger(__package__)
logger.level = level
def reset_level():
"""Reset pywinauto logging level to default one (logging.NOTSET)"""
logger = logging.getLogger(__package__)
logger.level = logging.NOTSET
def disable():
"""Disable logging pywinauto actions"""
set_level(logging.WARNING)
def enable():
"""Enable logging pywinauto actions"""
reset_level()
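# Typical usage from client code (assuming pywinauto is importable as a package):
#     import logging
#     from pywinauto import actionlogger
#     actionlogger.enable()                    # log pywinauto actions
#     actionlogger.set_level(logging.WARNING)  # or silence them again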
class CustomLogger(object):
def __init__(self, logFilePath = None):
self.logger = logger.Logger(logFilePath)
def log(self, *args):
for msg in args:
self.logger.message(msg)
def logSectionStart(self, msg):
self.logger.sectionStart(msg)
def logSectionEnd(self):
self.logger.sectionEnd()
class StandardLogger(object):
def __init__(self, logFilePath = None):
self.logFilePath = logFilePath
self.logger = logging.getLogger(__package__)
def log(self, *args):
self.logger.info(*args)
def logSectionStart(self, msg):
pass
def logSectionEnd(self):
pass
if foundLogger:
ActionLogger = CustomLogger
else:
ActionLogger = StandardLogger
disable() # disable standard logging by default | bsd-3-clause | -6,132,762,158,328,259,000 | 32.744898 | 80 | 0.727465 | false |
victordion/cusp-library | performance/spmv/scripts/benchmark.py | 12 | 3818 | #!/usr/bin/env python
import os,csv
device_id = '0' # index of the device to use
binary_filename = '../spmv' # command used to run the tests
output_file = 'benchmark_output.log' # file where results are stored
# The unstructured matrices are available online:
# http://www.nvidia.com/content/NV_Research/matrices.zip
mats = []
unstructured_path = '~/scratch/Matrices/williams/mm/'
unstructured_mats = [('Dense','dense2.mtx'),
('Protein','pdb1HYS.mtx'),
('FEM/Spheres','consph.mtx'),
('FEM/Cantilever','cant.mtx'),
('Wind Tunnel','pwtk.mtx'),
('FEM/Harbor','rma10.mtx'),
('QCD','qcd5_4.mtx'),
('FEM/Ship','shipsec1.mtx'),
('Economics','mac_econ_fwd500.mtx'),
('Epidemiology','mc2depi.mtx'),
('FEM/Accelerator','cop20k_A.mtx'),
('Circuit','scircuit.mtx'),
('Webbase','webbase-1M.mtx'),
('LP','rail4284.mtx') ]
unstructured_mats = [ mat + (unstructured_path,) for mat in unstructured_mats]
structured_path = '~/scratch/Matrices/stencil/'
structured_mats = [('Laplacian_3pt_stencil', '3pt_1000000.mtx'),
('Laplacian_5pt_stencil', '5pt_1000x1000.mtx'),
('Laplacian_7pt_stencil', '7pt_100x100x100.mtx'),
('Laplacian_9pt_stencil', '9pt_1000x1000.mtx'),
('Laplacian_27pt_stencil', '27pt_100x100x100.mtx')]
structured_mats = [ mat + (structured_path,) for mat in structured_mats]
# assemble suite of matrices
trials = unstructured_mats + structured_mats
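# Each trial is a (label, matrix filename, directory) tuple; run_tests() joins
# the last two parts into a path and hands it to the spmv binary, e.g.
#     ../spmv ~/scratch/Matrices/williams/mm/pdb1HYS.mtx --device=0 --value_type=float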
def run_tests(value_type):
# remove previous result (if present)
open(output_file,'w').close()
# run benchmark for each file
for matrix,filename,path in trials:
matrix_filename = path + filename
# setup the command to execute
cmd = binary_filename
cmd += ' ' + matrix_filename # e.g. pwtk.mtx
cmd += ' --device=' + device_id # e.g. 0 or 1
cmd += ' --value_type=' + value_type # e.g. float or double
# execute the benchmark on this file
os.system(cmd)
# process output_file
matrices = {}
results = {}
kernels = set()
#
fid = open(output_file)
for line in fid.readlines():
tokens = dict( [tuple(part.split('=')) for part in line.split()] )
if 'file' in tokens:
file = os.path.split(tokens['file'])[1]
matrices[file] = tokens
results[file] = {}
else:
kernel = tokens['kernel']
results[file][kernel] = tokens
kernels.add(tokens['kernel'])
## put CPU results before GPU results
#kernels = ['csr_serial'] + sorted(kernels - set(['csr_serial']))
kernels = sorted(kernels)
# write out CSV formatted results
def write_csv(field):
fid = open('bench_' + value_type + '_' + field + '.csv','w')
writer = csv.writer(fid)
writer.writerow(['matrix','file','rows','cols','nonzeros'] + kernels)
for (matrix,file,path) in trials:
line = [matrix, file, matrices[file]['rows'], matrices[file]['cols'], matrices[file]['nonzeros']]
matrix_results = results[file]
for kernel in kernels:
if kernel in matrix_results:
line.append( matrix_results[kernel][field] )
else:
line.append(' ')
writer.writerow( line )
fid.close()
write_csv('gflops') #GFLOP/s
write_csv('gbytes') #GBytes/s
run_tests('float')
run_tests('double')
| apache-2.0 | 1,764,975,447,773,099,500 | 35.018868 | 109 | 0.535097 | false |