code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1) | license (stringclasses 15) | size (int32 2-1.05M)
---|---|---|---|---|---
# -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_alert_list_layout",
#"cap_gis_location_xml_post_parse",
#"cap_gis_location_xml_post_render",
)
import datetime
import os # Needed for building the stylesheet path in CAPImportFeed
import sys # Needed for exception reporting in CAPImportFeed
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except ImportError:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_warning_priority",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geophysical (inc. landslide)")),
("Met", T("Meteorological (inc. flood)")),
("Safety", T("General emergency and public safety")),
("Security", T("Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue and recovery")),
("Fire", T("Fire suppression and rescue")),
("Health", T("Medical and public health")),
("Env", T("Pollution and other environmental")),
("Transport", T("Public and private transportation")),
("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
# @todo: can not be empty in alerts (validator!)
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
requires = IS_IN_SET(cap_alert_status_code_opts),
),
Field("msg_type",
label = T("Message Type"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
),
Field("source",
label = T("Source"),
default = self.generate_source,
),
Field("scope",
label = T("Scope"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "text",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
Field("note", "text",
label = T("Note"),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(),
),
# approved_on field for recording when the alert was approved
s3_datetime("approved_on",
readable = False,
writable = False,
),
*s3_meta_fields())
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event_type_id",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
filter_widgets = filter_widgets,
list_layout = cap_alert_list_layout,
list_orderby = "cap_info.expires desc",
onvalidation = self.cap_alert_form_validation,
# update the approved_on field on approve of the alert
onapprove = self.cap_alert_approve,
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
ADD_ALERT = T("Create Alert")
crud_strings[tablename] = Storage(
label_create = ADD_ALERT,
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# ---------------------------------------------------------------------
# Warning Priorities for CAP
tablename = "cap_warning_priority"
define_table(tablename,
Field("priority_rank", "integer",
label = T("Priority Rank"),
length = 2,
),
Field("event_code",
label = T("Event Code"),
),
Field("name", notnull = True, length = 64,
label = T("Name"),
),
Field("event_type",
label = T("Event Type"),
),
Field("urgency",
label = T("Urgency"),
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
label = T("Severity"),
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
label = T("Certainty"),
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("color_code",
label = T("Color Code"),
),
*s3_meta_fields())
priority_represent = S3Represent(lookup = tablename)
crud_strings[tablename] = Storage(
label_create = T("Create Warning Priority"),
title_display = T("Warning Priority Details"),
title_list = T("Warning Priorities"),
title_update = T("Edit Warning Priority"),
title_upload = T("Import Warning Priorities"),
label_list_button = T("List Warning Priorities"),
label_delete_button = T("Delete Warning Priority"),
msg_record_created = T("Warning Priority added"),
msg_record_modified = T("Warning Priority updated"),
msg_record_deleted = T("Warning Priority removed"),
msg_list_empty = T("No Warning Priorities currently registered")
)
# ---------------------------------------------------------------------
# CAP info priority
# @ToDo: i18n: Need label=T("")
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en",
requires = IS_EMPTY_OR(
IS_IN_SET(settings.get_cap_languages())
),
),
Field("category", "list:string",
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
required = True,
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(),
), # 1 or more allowed
self.event_type_id(empty = False,
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
Field("response_type", "list:string",
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(),
), # 0 or more allowed
Field("priority",
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(
db, "cap_warning_priority.id",
priority_represent
),
),
),
Field("urgency",
required = True,
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
required = True,
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
required = True,
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("audience", "text"),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
s3_datetime("effective",
default = "now",
),
s3_datetime("onset"),
s3_datetime("expires",
past = 0,
),
Field("sender_name"),
Field("headline"),
Field("description", "text"),
Field("instruction", "text"),
Field("contact", "text"),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
*s3_meta_fields())
# @ToDo: Move labels into main define_table (can then be lazy & performs better anyway)
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
onaccept = self.info_onaccept,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
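# Illustrative CAP 1.2 <resource> element (invented values, not taken from
# this codebase) showing the fields the cap_resource table mirrors:
#   <resource>
#     <resourceDesc>Map of the affected area</resourceDesc>
#     <mimeType>image/png</mimeType>
#     <size>123456</size>
#     <uri>http://example.org/map.png</uri>
#     <digest>da39a3ee5e6b4b0d3255bfef95601890afd80709</digest>
#   </resource>
#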
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
),
Field("size", "integer",
writable = False,
),
Field("uri",
# needs a special validation
writable = False,
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
),
Field("digest",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm(#"name",
"info_id",
"resource_desc",
S3SQLInlineComponent("image",
label=T("Image"),
fields=["file",
],
),
S3SQLInlineComponent("document",
label=T("Document"),
fields=["file",
],
),
)
configure(tablename,
super_entity = "doc_entity",
crud_form = crud_form,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
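#
# Illustrative CAP 1.2 <area> element (invented values, not taken from this
# codebase) showing how the pieces described above map onto our tables:
#   <area>
#     <areaDesc>Coastal zone north of the harbour</areaDesc>
#     <polygon>38.0,-120.0 38.0,-119.0 39.0,-119.0 38.0,-120.0</polygon>
#     <geocode><valueName>SAME</valueName><value>006109</value></geocode>
#     <altitude>100</altitude>
#     <ceiling>500</ceiling>
#   </area>
# The polygon becomes a gis_location linked via cap_area_location, the geocode
# becomes a cap_area_tag record, and altitude/ceiling fill the explicit fields.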
tablename = "cap_area"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("name",
label = T("Area description"),
required = True,
),
Field("altitude", "integer"), # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
Field("ceiling", "integer"), # Feet above Sea-level in WGS84 (Maximum)
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("name",
"info_id",
# Not yet working with default formstyle or multiple=True
#S3SQLInlineComponent("location",
# name = "location",
# label = "",
# multiple = False,
# fields = [("", "location_id")],
# ),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
),
"altitude",
"ceiling",
)
area_represent = S3Represent(lookup=tablename)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
catalog_layers = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
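# Illustrative example (invented values): a geocode held in this table would
# be a row with tag = "SAME" and value = "006109", i.e. the geocoding system
# name in "tag" and the code assigned within that system in "value".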
tablename = "cap_area_tag"
define_table(tablename,
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
#configure(tablename,
# deduplicate = self.cap_area_tag_deduplicate,
# )
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y%m%d")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-time+-timezone+sequence-suffix
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
oid = settings.get_cap_identifier_oid()
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%s-%03d%s%s" % \
(prefix, oid, _time, next_id, ["", "-"][bool(suffix)], suffix)
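# Illustrative example (assumed settings, not actual output): with prefix
# "www.example.org", OID "2.49.0.0.1.0", no suffix, the highest existing
# alert id being 4, and a sent date of 2015-01-01, this returns
# "www.example.org-2.49.0.0.1.0-20150101-005".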
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def generate_source():
"""
Generate a source for CAP alert
"""
return "%s@%s" % (current.xml.domain,
current.deployment_settings.get_base_public_url())
# -------------------------------------------------------------------------
@staticmethod
def template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_form_validation(form):
"""
On Validation for CAP alert form
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
return
# -------------------------------------------------------------------------
@staticmethod
def info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
if alert_id and cap_alert_is_template(alert_id):
db(itable.id == info_id).update(is_template = True)
return True
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_approve(record=None):
"""
Update the approved_on field when alert gets approved
"""
if not record:
return
alert_id = record["id"]
# Update approved_on at the time the alert is approved
if alert_id:
db = current.db
approved_on = record["approved_on"]
db(db.cap_alert.id == alert_id).update(approved_on = current.request.utcnow)
# =============================================================================
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event_type_id=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
# =============================================================================
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
r = current.db(query).select(table.is_template,
limitby=(0, 1)).first()
return r and r.is_template
# =============================================================================
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
record_id = record.id
table = s3db.cap_info
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Template"), None),
(T("Information template"), "info"),
#(T("Area"), "area"),
#(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record_id, record),
_href=URL(c="cap", f="template",
args=[record_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % record_id]),
_target="_blank",
)
auth = current.auth
# Display 'Submit for Approval' based on permission
# and deployment settings
if not r.record.approved_by and \
current.deployment_settings.get_cap_authorisation() and \
auth.s3_has_permission("update", "cap_alert", record_id=r.id):
# Get the user ids for the role alert_approver
db = current.db
agtable = db.auth_group
rows = db(agtable.role == "Alert Approver")._select(agtable.id)
group_rows = db(agtable.id.belongs(rows)).select(agtable.id)
if group_rows:
for group_row in group_rows:
group_id = group_row.id
user_ids = auth.s3_group_members(group_id) # List of user_ids
pe_ids = [] # List of pe_ids
for user_id in user_ids:
pe_ids.append(auth.s3_user_pe_id(int(user_id)))
submit_btn = A(T("Submit for Approval"),
_href = URL(f = "compose",
vars = {"cap_alert.id": record.id,
"pe_ids": pe_ids,
},
),
_class = "action-btn"
)
else:
submit_btn = None
else:
submit_btn = None
table = s3db.cap_area
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
# We have an Area, so we can add Locations
location_tab = (T("Location"), "location")
else:
location_tab = ""
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
location_tab,
(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record_id, record),
_href=URL(c="cap", f="alert",
args=[record_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
if submit_btn:
rheader.insert(1, TR(submit_btn))
elif tablename == "cap_area":
# Shouldn't ever be called
tabs = [(T("Area"), None),
(T("Locations"), "location"),
#(T("Geocodes"), "tag"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_area_location":
# Shouldn't ever be called
# We need the rheader only for the link back to the area.
rheader = DIV(TABLE(TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.area_id),
_href=URL(c="cap", f="area",
args=[record.area_id, "update"]))),
),
))
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
# =============================================================================
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
if tablename == "cap_area_location":
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
db(table.id == _id).update(alert_id = alert_id)
return func
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
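# Worked example (invented coordinates): the CAP polygon text
#   "38.0,-120.0 38.0,-119.0 39.0,-119.0 38.0,-120.0"
# is converted by the code below into the WKT string
#   "POLYGON ((-120.0 38.0, -119.0 38.0, -119.0 39.0, -120.0 38.0))"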
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
if start >=0 and end >=0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
if lon_min and lon_max and lat_min and lat_max and \
(lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN, lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
if not lat or not lon:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
if start >=0 and end >=0:
point_text = wkt[start + 1 : end] # skip the single opening parenthesis
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
if lat and lon:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
# =============================================================================
def cap_alert_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for CAP Alerts on the Home page.
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cap_alert.id"]
item_class = "thumbnail"
#raw = record._row
headline = record["cap_info.headline"]
location = record["cap_area.name"]
description = record["cap_info.description"]
sender = record["cap_info.sender_name"]
headline = A(headline,
# @ToDo: Link to nicely-formatted version of Display page
_href = URL(c="cap", f="alert", args=record_id),
)
headline = DIV(headline,
current.T("in %(location)s") % dict(location=location)
)
item = DIV(headline,
P(description),
P(sender, style="bold"),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
# @ToDo:
username = form_vars.get("user", None)
password = form_vars.get("password", None)
try:
file = fetch(url)
except urllib2.URLError:
response.error = str(sys.exc_info()[1])
return output
except urllib2.HTTPError:
response.error = str(sys.exc_info()[1])
return output
File = StringIO(file)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
| michaelhowden/eden | modules/s3db/cap.py | Python | mit | 86,498 |
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
# Create your views here.
from django.views.generic import ListView, DetailView
from .models import Category, Product
from cart.forms import CartAddProductForm
def category_list(request):
return render(request, "shop/category_list.html",
{'nodes': Category.objects.all()})
'''
class CategoryList(ListView):
model = Category
template_name = "category_list.html"
'''
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
return render(request, "shop/product_list.html",
{'category': category,
'nodes': categories,
'products': products,})
'''
class ProductList(ListView):
model = DesignProduct
template_name = "shop/product_list.html"
'''
def product_detail(request, id, slug):
categories = Category.objects.all()
product = get_object_or_404(Product,
id=id,
slug=slug,
available=True)
cart_product_form = CartAddProductForm()
return render(request,
'shop/product_detail.html',
{'product': product,
'nodes': categories,
'cart_product_form': cart_product_form}) | sunlaiqi/fundiy | src/shop/views.py | Python | mit | 1,636 |
# 005_cleaner.py
#####################################################################
##################################
# Import modules and add the working path for relative imports
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder
##################################
# Initialize paths and file names
missionName = '005'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the source file
raw_file = 'src'
##################################
# retrieving the raw string
raw_string_with_tabs = open(work_dir + raw_file , 'r').read()
# replacing tabs with carriage return
raw_string_with_cr = raw_string_with_tabs.replace( '\t', '\n' )
# turning the string into a list
raw_list = raw_string_with_cr.splitlines()
# going through oddities finder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( raw_list )
# going through string formatter
ref_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter')
for line in list_without_oddities:
ref_list.append( StringFormatter( line ) )
##################################
# Save the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms masculins italiens')
| sighill/shade_app | apis/raw/005_raw/005_cleaner.py | Python | mit | 1,625 |
#!/usr/bin/python
"""
title : testtermopi.py
description     : This program runs termopi.py
: Displays the status of the resources (cpu load and memory usage) consumed by a Raspberry Pi
computer and the resources consumed by one or more containers instantiated in the Pi.
source :
author : Carlos Molina-Jimenez ([email protected])
date : 27 Mar 2017
institution : Computer Laboratory, University of Cambridge
version : 1.0
usage :
notes :
compile and run : % python termopi.py
: It imports pidict.py, dockerctl.py and picheck.py which are found in
: ./modules.
: You need to include "./modules" in the PYTHONPATH environment variable to
: indicate python where to find the pidict.py, dockerctl.py and picheck.py.
: For example, in a bash shell, you need to include the following lines
                : in your .bash_profile file located in your home directory (you can see it with
                : # ls -la).
:
: PYTHONPATH="./modules"
: export PYTHONPATH
python_version : Python 2.7.12
====================================================
"""
from modules.tools.termopi import termopi # class with dictionary data structure
# Threshold of cpu exhaustion
cpuUsageThreshold= 50
cpuLoadThreshold= 3
termo= termopi()
termo.prt_pi_resources()
termo.create_jsonfile_with_pi_status()
#termo.check_pi_resource_status(cpuUsageThreshold)
| AdL1398/PiCasso | source/modules/tester/testtermopi.py | Python | mit | 1,603 |
from . import packet
class Packet5(packet.Packet):
def __init__(self, player, slot):
super(Packet5, self).__init__(0x5)
self.add_data(player.playerID)
self.add_data(slot)
self.add_structured_data("<h", 0) # Stack
self.add_data(0) # Prefix
self.add_structured_data("<h", 0) # ItemID
| flammified/terrabot | terrabot/packets/packet5.py | Python | mit | 340 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('managers', '0011_auto_20150422_2018'),
]
operations = [
migrations.AlterField(
model_name='managerprofile',
name='picture',
field=models.ImageField(default=b'/static/assets/admin/layout/img/avatar.jpg', upload_to=b'profiles'),
preserve_default=True,
),
]
| ritashugisha/ASUEvents | ASUEvents/managers/migrations/0012_auto_20150422_2019.py | Python | mit | 511 |
from distutils.core import setup
version = '1.1.1'
setup(name='CacheGenerator',
version=version,
description="CacheGenerator for Django",
author="Ricardo Santos",
author_email="[email protected]",
url="http://github.com/ricardovice/CacheGenerator/",
packages = ['cachegenerator']
)
| ricardovice/CacheGenerator | setup.py | Python | mit | 329 |
"""Shared pytest fixtures and test data."""
import copy
import uuid
import pytest
from django.contrib.auth import get_user_model
from onfido.models import Applicant, Check, Event, Report
APPLICANT_ID = str(uuid.uuid4())
CHECK_ID = str(uuid.uuid4())
IDENTITY_REPORT_ID = str(uuid.uuid4())
DOCUMENT_REPORT_ID = str(uuid.uuid4())
DOCUMENT_ID = str(uuid.uuid4())
User = get_user_model()
@pytest.fixture
def user():
return User.objects.create_user(
"fred", first_name="Fred", last_name="Flinstone", email="[email protected]"
)
@pytest.fixture
def applicant(user):
data = copy.deepcopy(TEST_APPLICANT)
return Applicant.objects.create_applicant(user=user, raw=data)
@pytest.fixture
def check(applicant):
data = copy.deepcopy(TEST_CHECK)
return Check.objects.create_check(applicant, raw=data)
@pytest.fixture
def identity_report(check):
data = copy.deepcopy(TEST_REPORT_IDENTITY_ENHANCED)
return Report.objects.create_report(check, raw=data)
@pytest.fixture
def document_report(check):
data = copy.deepcopy(TEST_REPORT_DOCUMENT)
return Report.objects.create_report(check, raw=data)
@pytest.fixture
def report(identity_report):
return identity_report
@pytest.fixture
def event(check):
data = copy.deepcopy(TEST_EVENT)
return Event().parse(data)
# Test data taken from Onfido v3 API docs.
# https://documentation.onfido.com/#applicant-object
TEST_APPLICANT = {
"id": APPLICANT_ID,
"created_at": "2019-10-09T16:52:42Z",
"sandbox": True,
"first_name": "Jane",
"last_name": "Doe",
"email": None,
"dob": "1990-01-01",
"delete_at": None,
"href": f"/v3/applicants/{APPLICANT_ID}",
"id_numbers": [],
"address": {
"flat_number": None,
"building_number": None,
"building_name": None,
"street": "Second Street",
"sub_street": None,
"town": "London",
"state": None,
"postcode": "S2 2DF",
"country": "GBR",
"line1": None,
"line2": None,
"line3": None,
},
}
# https://documentation.onfido.com/#check-object
TEST_CHECK = {
"id": CHECK_ID,
"created_at": "2019-10-09T17:01:59Z",
"status": "in_progress",
"redirect_uri": None,
"result": None,
"sandbox": True,
"tags": [],
"results_uri": f"https://onfido.com/checks/{CHECK_ID}/reports",
"form_uri": None,
"paused": False,
"version": "3.0",
"report_ids": [IDENTITY_REPORT_ID],
"href": f"/v3/checks/{CHECK_ID}",
"applicant_id": APPLICANT_ID,
"applicant_provides_data": False,
}
# https://documentation.onfido.com/#identity-enhanced-report
TEST_REPORT_IDENTITY_ENHANCED = {
"created_at": "2019-10-03T15:54:20Z",
"href": f"/v3/reports/{IDENTITY_REPORT_ID}",
"id": IDENTITY_REPORT_ID,
"name": "identity_enhanced",
"properties": {
"matched_address": 19099121,
"matched_addresses": [
{"id": 19099121, "match_types": ["credit_agencies", "voting_register"]}
],
},
"result": "clear",
"status": "complete",
"sub_result": None,
"breakdown": {
"sources": {
"result": "clear",
"breakdown": {
"total_sources": {
"result": "clear",
"properties": {"total_number_of_sources": "3"},
}
},
},
"address": {
"result": "clear",
"breakdown": {
"credit_agencies": {
"result": "clear",
"properties": {"number_of_matches": "1"},
},
"telephone_database": {"result": "clear", "properties": {}},
"voting_register": {"result": "clear", "properties": {}},
},
},
"date_of_birth": {
"result": "clear",
"breakdown": {
"credit_agencies": {"result": "clear", "properties": {}},
"voting_register": {"result": "clear", "properties": {}},
},
},
"mortality": {"result": "clear"},
},
"check_id": CHECK_ID,
"documents": [],
}
TEST_REPORT_DOCUMENT = {
"created_at": "2019-10-03T14:05:48Z",
"documents": [{"id": DOCUMENT_ID}],
"href": f"/v3/reports/{DOCUMENT_REPORT_ID}",
"id": DOCUMENT_REPORT_ID,
"name": "document",
"properties": {
"nationality": "",
"last_name": "Names",
"issuing_country": "GBR",
"gender": "",
"first_name": "Report",
"document_type": "passport",
"document_numbers": [{"value": "123456789", "type": "document_number"}],
"date_of_expiry": "2030-01-01",
"date_of_birth": "1990-01-01",
},
"result": "clear",
"status": "complete",
"sub_result": "clear",
"breakdown": {
"data_comparison": {
"result": "clear",
"breakdown": {
"issuing_country": {"result": "clear", "properties": {}},
"gender": {"result": "clear", "properties": {}},
"date_of_expiry": {"result": "clear", "properties": {}},
"last_name": {"result": "clear", "properties": {}},
"document_type": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"first_name": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
},
},
"data_validation": {
"result": "clear",
"breakdown": {
"gender": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"document_expiration": {"result": "clear", "properties": {}},
"expiry_date": {"result": "clear", "properties": {}},
"mrz": {"result": "clear", "properties": {}},
},
},
"age_validation": {
"result": "clear",
"breakdown": {
"minimum_accepted_age": {"result": "clear", "properties": {}}
},
},
"image_integrity": {
"result": "clear",
"breakdown": {
"image_quality": {"result": "clear", "properties": {}},
"conclusive_document_quality": {"result": "clear", "properties": {}},
"supported_document": {"result": "clear", "properties": {}},
"colour_picture": {"result": "clear", "properties": {}},
},
},
"visual_authenticity": {
"result": "clear",
"breakdown": {
"fonts": {"result": "clear", "properties": {}},
"picture_face_integrity": {"result": "clear", "properties": {}},
"template": {"result": "clear", "properties": {}},
"security_features": {"result": "clear", "properties": {}},
"original_document_present": {"result": "clear", "properties": {}},
"digital_tampering": {"result": "clear", "properties": {}},
"other": {"result": "clear", "properties": {}},
"face_detection": {"result": "clear", "properties": {}},
},
},
"data_consistency": {
"result": "clear",
"breakdown": {
"date_of_expiry": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"issuing_country": {"result": "clear", "properties": {}},
"document_type": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
"gender": {"result": "clear", "properties": {}},
"first_name": {"result": "clear", "properties": {}},
"last_name": {"result": "clear", "properties": {}},
"nationality": {"result": "clear", "properties": {}},
},
},
"police_record": {"result": "clear"},
"compromised_document": {"result": "clear"},
},
"check_id": CHECK_ID,
}
TEST_EVENT = {
"payload": {
"resource_type": "check",
"action": "check.form_opened",
"object": {
"id": CHECK_ID,
"status": "complete",
"completed_at_iso8601": "2019-10-28T15:00:39Z",
"href": f"https://api.onfido.com/v3/checks/{CHECK_ID}",
},
}
}
| yunojuno/django-onfido | tests/conftest.py | Python | mit | 8,575 |
#!/usr/bin/env python3
# JN 2015-07-29
"""
Log file parser for Cheetah by Johannes Niediek
This script reads out the reference settings
by sequentially following all crs, rbs, and gbd commands.
Please keep in mind that the following scenario is possible with Cheetah:
Start the recording
Stop the recording
Change the reference settings
Start the recording
If you do this, there will be .ncs files whose reference changes
at some point during the recording.
In most cases, this is probably not what you want,
so this script displays a warning message if you did it.
Cheetah ATLAS:
There is an undocumented channel number 32000038.
I reverse-engineered its use, but that might depend on the exact version
of ATLAS etc.
This script partially mirrors the system of variable definitions
in Cheetah. For complex arithmetic with variables, the script might fail.
Please check the GitHub repository (github.com/jniediek/combinato.git)
for updates and manual.
Contact me ([email protected]) for access to the repository.
"""
from __future__ import print_function, division
import os
import re
from collections import defaultdict
import datetime
from csv import writer as csv_writer
DATE_FNAME = 'start_stop_datetime.txt'
def parse_times(setting):
"""
read out the date and times of a recording
"""
def timestr2timeobj(time_str):
"""
convert a time string with milliseconds to a datetime object
"""
time, milli = time_str.split('.')
time = datetime.datetime.strptime(time, '%H:%M:%S')
time += datetime.timedelta(seconds=int(milli)/1000)
return time
tstart, tstop = [timestr2timeobj(rec[1])
for rec in setting.start_rec, setting.stop_rec]
if setting.folder is None:
folder_date_obj = None
else:
date_str = date_pattern.match(setting.folder).groups()[0]
folder_date_obj = datetime.datetime.strptime(date_str,
r'%Y-%m-%d_%H-%M-%S')
tstart = datetime.datetime.combine(folder_date_obj, tstart.time())
tstop = datetime.datetime.combine(folder_date_obj, tstop.time())
# by default assume that recording is stopped once every day
if tstop < tstart:
tstop += datetime.timedelta(days=1)
return folder_date_obj, tstart, tstop
class Setting(object):
"""
simple class that stores reference settings
"""
def __init__(self):
self.num2name = None
self.name2num = None
self.lrefs = None
self.grefs = None
self.crefs = None
self.start_rec = None
self.stop_rec = None
self.start_timestamp = None
self.stop_timestamp = None
self.folder = None
DEBUG = False
# The following are the interesting commands
# You can still trick the parser, e.g. by sending -SetChannelNumber commands
# via NetCom.
# But it's very easy to adapt the parser to such situations
set_drs_strings = ('Processing line: -SetDRS', # old systems
'Processing line: -SetAcqEntReference') # new systems
set_channel_pattern = re.compile(r'Processing line:\s*-SetChannelNumber')
channel_number_pattern = re.compile(r'.*\"(.*)\" (\d.*)')
channel_number_pattern_var = re.compile(r'.* (.*) (.*)')
drs_command_pattern = re.compile(r'DRS Command\(b(\w) (\w*)\s{1,2}'
r'(\d*)\s{0,2}(\d*)')
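# A hypothetical example of a log fragment matched by drs_command_pattern
# (made up for illustration, not taken from a real logfile):
#   "DRS Command(b1 crs 12  3" -> groups ('1', 'crs', '12', '3'),
# i.e. board 1, command 'crs', arg1 12, arg2 3.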
variable_pattern = re.compile(r'.*(%\w*) = \"?(\w*)\"?')
date_pattern = re.compile(r'.*(\d{4}-\d{1,2}-\d{1,2}_'
'\d{1,2}-\d{1,2}-\d{1,2}).*')
def board_num_to_chan(board, num):
return (board - 1) * 16 + num
def chan_to_board_num(chan):
return 2 * int(chan/32) + 1, chan % 32
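# A quick worked example for the two helpers above (the code appears to
# assume boards are numbered 1, 3, 5, ... with 32 channels each, so the
# two functions invert each other within that numbering):
#   board_num_to_chan(3, 5) -> (3 - 1) * 16 + 5 = 37
#   chan_to_board_num(37)   -> (2 * int(37 / 32) + 1, 37 % 32) = (3, 5)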
def parser(fname):
"""
transform logfile into header, log, and ignored lines
"""
with open(fname, 'r') as fid:
lines = fid.readlines()
fid.close()
in_header = True
is_notice = False
ignored_lines = []
protocol = []
header = {}
for line in lines:
if line[:13] == '-* NOTICE *-':
is_notice = True
else:
is_notice = False
if in_header:
# this means header is over
if is_notice:
in_header = False
else:
if len(line) > 3:
key, value = line.split(':', 1)
header[key] = value.strip()
else:
if is_notice:
fields = line[15:].split(' - ', 4)
time = fields[0]
stamp = int(fields[1])
msg = fields[2].strip().replace('\r', '')
if len(fields) == 4:
msg2 = fields[3].strip().replace('\r', '')
else:
msg2 = ''
protocol.append((time, stamp, msg, msg2))
elif line.startswith('Log file successfully moved to'):
target = line.split()[-1]
# this indicates a log file move
# mov is our key
protocol.append((0, 0, 'mov', target))
else:
ignored_lines.append(line.strip())
try:
bn = 'Cheetah ' + header['Cheetah Build Number']
except KeyError:
bn = 'ATLAS ' + header['Cheetah ATLAS Build Number']
print(bn)
return header, protocol, ignored_lines
def all_defined_check(chnum2name, crefs):
"""
check if a reference has been defined for all existing channels
"""
# print(chnum2name)
for chnum in chnum2name:
board, lnum = chan_to_board_num(chnum)
try:
ref = crefs[chnum]
if DEBUG:
print('Channel {} (board {} channel {}) - {}'.
format(chnum, board, lnum, ref))
except KeyError:
print('No reference defined for channel {} ({})'.
format(chnum, chnum2name[chnum][0]))
def print_refs(lrefs, grefs):
"""
    overview of local and global references
"""
sorted_keys = sorted(lrefs.keys())
for board, ref in sorted_keys:
lref = lrefs[(board, ref)]
if lref in grefs:
gboard = grefs[lref]
stri = 'global, board {}'.format(gboard)
else:
stri = 'local'
print('board {} ref {} - {} ({})'.
format(board, ref, lrefs[(board, ref)], stri))
def analyze_drs(protocol):
"""
go through a protocol and analyze all drs settings
"""
# for each board, store the 8 local refs
# 32..35 are the 4 local reference wires
# 36, 37 are subject ground, patient ground
# 38 seems to be specific to ATLAS
# this is a (board, ref) -> local_num dict
local_refs = dict()
# 8 ref numbers can be driven globally
# this is a ref_num -> board dict
global_refs = dict()
# each channel has a reference which
    # refers to its board's local references
# this is a ch_num -> ref_num dict
channel_refs = dict()
# name2num is unique
ch_name2num = dict()
# num2name is *not* unique, values are lists
ch_num2name = defaultdict(list)
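    # A sketch of the shapes these containers take (values are hypothetical):
    #   local_refs   = {(1, 2): 35}   # board 1 stores channel 35 as local ref 2
    #   global_refs  = {2: 1}         # local ref 2 is globally driven by board 1
    #   channel_refs = {37: 2}        # global channel 37 points at local ref 2
    #   ch_name2num  = {'CSC38': 37}; ch_num2name = {37: ['CSC38']}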
# save the settings
all_settings = []
variables = dict()
temp_setting = None
for line in protocol:
time, timestamp, msg1, msg2 = line
if temp_setting is None:
temp_setting = Setting()
if msg1 == 'mov':
temp_setting.folder = msg2
elif '::SendDRSCommand()' in msg1:
# log all reference settings (command file and GUI interaction)
board, cmd, arg1, arg2 = drs_command_pattern.match(msg2).groups()
arg1 = int(arg1)
board = int(board, 16)
if cmd != 'hsp':
arg2 = int(arg2)
else:
arg2 = ''
if cmd == 'gbd':
# this is the global drive
# if a reference is driven globally, it overrides
# the local settings of that reference
if arg2 == 1:
global_refs[arg1] = board
print('{} is now driven by board {}'.format(arg1, board))
elif arg2 == 0:
if arg1 in global_refs:
del global_refs[arg1]
if cmd == 'rbs':
# each board stores 8 references
# arg1 is the stored number
# arg2 is the channel it points to
if (board, arg1) in local_refs:
if DEBUG:
print('board {} ref {} was {}, is now {}'.
format(board, arg1,
local_refs[(board, arg1)], arg2))
local_refs[(board, arg1)] = arg2
elif cmd == 'crs':
# each channel is indexed by board and local number
# arg1 is the local channel number
# arg2 is the local reference it points to
# try:
# local_ref = local_refs[(board, arg2)]
# except KeyError:
# print(msg2)
# raise Warning('Using undefined reference!')
chnum = board_num_to_chan(board, arg1)
channel_refs[chnum] = arg2
# print(cmd, board, arg1, chnum, local_ref)
elif 'StartRecording' in msg1:
temp_setting.num2name = ch_num2name.copy()
temp_setting.name2num = ch_name2num.copy()
temp_setting.lrefs = local_refs.copy()
temp_setting.grefs = global_refs.copy()
temp_setting.crefs = channel_refs.copy()
temp_setting.start_rec = (msg1, time)
temp_setting.start_timestamp = timestamp
elif 'StopRecording' in msg1:
# here, the setting is definite and has to be saved
temp_setting.stop_rec = (msg1, time)
temp_setting.stop_timestamp = timestamp
all_settings.append(temp_setting)
temp_setting = None
elif ' = ' in msg2:
# assigning a variable
var, val = variable_pattern.match(msg2).groups()
variables[var] = val
elif '%currentADChannel += 1' in msg2:
# this is a hack, but it seems to work well
print('Applying hack for += 1 syntax, check results!')
var, val = msg2.split('+=')
variables['%currentADChannel'] = str(int(variables['%currentADChannel']) + 1)
if set_channel_pattern.match(msg2):
# log channel numbers
if '%' in msg2:
var, ch_num = channel_number_pattern_var.match(msg2).groups()
var = var.strip()
ch_num = ch_num.strip()
try:
ch_name = variables[var]
except KeyError:
print('{}, but something is wrong with setting channel'
                          ' numbers. Check for errors'
' in the Cheetah logfile itself.'.format(msg2))
continue
if '%' in ch_num:
ch_num = variables[ch_num]
else:
result = channel_number_pattern.match(msg2)
if result is not None:
ch_name, ch_num = result.groups()
else:
print('Parser skipped the following line: ' + msg2)
continue
ch_num = int(ch_num)
if ch_name in ch_name2num:
raise Warning('channel number reset')
ch_name2num[ch_name] = ch_num
ch_num2name[ch_num].append(ch_name)
elif msg2.startswith(set_drs_strings):
# if needed, insert code to
# log reference settings from command file
pass
return all_settings
def create_rep(num2name, name2num, crefs, lrefs, grefs):
"""
create a human readable representation of the referencing
"""
all_defined_check(num2name, crefs)
if DEBUG:
print_refs(lrefs, grefs)
chnames = []
for num in sorted(num2name.keys()):
chnames += num2name[num]
out_str = []
for name in chnames:
try:
chan = name2num[name]
except KeyError:
print('Processing {}, but no channel number was '
'assigned. Check results carefully!'.format(name))
continue
ch_board, ch_board_num = chan_to_board_num(chan)
local_ref_num = crefs[chan] # gives the local ref number
# this is now a local number, so it's in 0..7
maybe_global = False
if local_ref_num in grefs:
ref_board = grefs[local_ref_num]
if ref_board != ch_board:
maybe_global = True
# here, I have to check whether the
# driving channel is the same number on my local board
# i.e., if b1_15 is b1_ref_2 and b1_ref_2 is gd
# and b3_7 has ref_2, then it's global only if b3_15 is b3_ref_2
else:
ref_board = ch_board
ref_num = lrefs[(ref_board, local_ref_num)]
ref_num2 = lrefs[(ch_board, local_ref_num)]
add_str = ''
if maybe_global:
# print('Special case, global ref {}, local ref {}'
# .format(ref_num, lrefs[(ch_board, local_ref_num)]))
if ref_num2 != 38:
add_str = ' ?'
if ref_num != ref_num2:
# print(ref_num, lrefs[(ch_board, local_ref_num)])
ref_board = ch_board
ref_num = ref_num2
else:
add_str = ' ???'
ref_board = ch_board
ref_num = ref_num2
pass
# print('Using channel 38')
if ref_board == ch_board:
board_str = 'local{}'.format(add_str)
else:
board_str = 'global{}'.format(add_str)
if ref_num > 31:
# these are the reference wires
if ref_num == 38:
ref_name = 'board {} Unknown Ground'.format(ref_board)
elif ref_num == 36:
ref_name = 'board {} Patient Ground'.format(ref_board)
else:
tnum = (ref_num - 32) * 8
refchan = board_num_to_chan(ref_board, tnum)
if refchan in num2name:
pref_name = num2name[refchan]
idx = 0
if len(pref_name) == 2:
if pref_name[0][0] == 'u':
idx = 1
ref_name = pref_name[idx][:-1] + ' reference wire'
else:
ref_name = 'board {} head stage {} reference wire'.\
format(ref_board, ref_num - 32)
else:
global_num = board_num_to_chan(ref_board, ref_num)
chlist = num2name[global_num]
if len(chlist):
ref_name = chlist[0]
else:
ref_name = 'UNDEF'
if name == ref_name:
board_str += ' ZERO'
out_str.append(('{:03d}'.format(chan), name, ref_name, board_str))
return out_str
def check_logfile(fname, write_csv=False, nback=0, write_datetime=False):
"""
    run over a Cheetah logfile and analyze reference settings, etc.
"""
_, protocol, _ = parser(fname)
base_name = os.path.splitext(os.path.basename(fname))[0]
all_settings = analyze_drs(protocol)
for i_setting, setting in enumerate(all_settings):
print()
if setting.folder is None:
msg = 'Warning: Recording Stop -> Start without folder change!'
else:
msg = setting.folder
print('Start: {} ({})'.format(setting.start_rec[1],
setting.start_timestamp))
print('Stop: {} ({})'.format(setting.stop_rec[1],
setting.stop_timestamp))
# print('Duration: {} min'.
# format((setting.stop_rec[1] - setting.start_rec[1])))
out_str = create_rep(setting.num2name, setting.name2num,
setting.crefs, setting.lrefs, setting.grefs)
if write_csv:
setting = all_settings[-nback-1]
if setting.folder is None:
msg = 'Warning: Recording Stop -> Start without folder change!'
else:
msg = setting.folder
out_str = create_rep(setting.num2name, setting.name2num,
setting.crefs, setting.lrefs, setting.grefs)
outfname = base_name + '_{:02d}.csv'.\
format(len(all_settings) - nback - 1)
with open(outfname, 'w') as outf:
outf.write('# {} {} {}\n'.format(msg,
setting.start_rec[1],
setting.stop_rec[1]))
csvwriter = csv_writer(outf)
for line in out_str:
csvwriter.writerow(line)
if write_datetime:
setting = all_settings[-nback-1]
date, start, stop = parse_times(setting)
print(date, start, stop)
if date is None:
out = '# Date not guessed because Recording was stopped'\
' and re-started without folder change!\n'
else:
out = '# {}\ncreate_folder {}\n'.\
format(setting.folder, date.strftime('%Y-%m-%d %H:%M:%S'))
start_ts = setting.start_timestamp
stop_ts = setting.stop_timestamp
for name, d, t in (('start', start, start_ts),
('stop', stop, stop_ts)):
out += name + '_recording {} {} {}\n'.\
format(d.date().isoformat(), d.time().isoformat(), t)
diff_time = (stop_ts - start_ts)/1e6 - (stop - start).seconds
out += 'cheetah_ahead: {}\n'.format(diff_time)
if os.path.exists(DATE_FNAME):
print('{} exists, not overwriting!'.format(DATE_FNAME))
else:
with open(DATE_FNAME, 'w') as fid:
fid.write(out)
if __name__ == '__main__':
from argparse import ArgumentParser
aparser = ArgumentParser(epilog='Johannes Niediek ([email protected])')
aparser.add_argument('--write-csv', action='store_true', default=False,
help='Write out to logfile_number.csv')
aparser.add_argument('--write-datetime', action='store_true',
default=False, help='Write start/stop timestamps to'
' file {}'.format(DATE_FNAME))
aparser.add_argument('--logfile', nargs=1,
help='Logfile, default: CheetahLogFile.txt')
aparser.add_argument('--nback', nargs=1, type=int,
help='Save last-n\'th setting', default=[0])
args = aparser.parse_args()
if not args.logfile:
logfile = 'CheetahLogFile.txt'
else:
logfile = args.logfile[0]
check_logfile(logfile, args.write_csv, args.nback[0], args.write_datetime)
| jniediek/combinato | tools/parse_cheetah_logfile.py | Python | mit | 19,113 |
# Simple plotter for Gut books.
# All this does is draw a square for every stat in the input
# and color it based on the score.
#
# Another fine example of how Viz lies to you. You will
# need to fudge the range by adjusting the clipping.
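#
# Expected input: a plain-text file with one "id score" pair per line,
# for example (values are made up):
#   12  310.5
#   13  42.0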
import numpy as np
import matplotlib.pyplot as plt
import sys as sys
if len(sys.argv) <2:
print "Need an input file with many rows of 'id score'\n"
sys.exit(1)
fname = sys.argv[1]
vals = np.loadtxt(fname)
ids = vals[:,0]
score = vals[:,1]
score_max = 400; #max(score)
#score_max = max(score)
score = np.clip(score, 10, score_max)
score = score/score_max
# want 3x1 ratio, so 3n*n= 30824 (max entries), horiz=3n=300
NUM_COLS=300
fig = plt.figure(figsize=(12,9))
ax = fig.add_subplot(111)
ax.set_axis_bgcolor('0.50')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(len(vals)):
#print i, ids[i]
row = int(ids[i]) / NUM_COLS
col = int(ids[i]) % NUM_COLS
cval = score[i] #score[i]*score[i] # Square the values to drop the lower end
cmap = plt.get_cmap('hot')
val = cmap(cval)
ax.add_patch(plt.Rectangle((col,row),1,1,color=val)); #, cmap=plt.cm.autumn))
ax.set_aspect('equal')
print cmap(0.1)
print cmap(0.9)
plt.xlim([0,NUM_COLS])
plt.ylim([0,1+int(max(ids))/NUM_COLS])
plt.show()
| craigulmer/gut-buster | squareplot.py | Python | mit | 1,320 |
from verbs.baseforms import forms
class SuspendForm(forms.VerbForm):
name = "Suspend"
slug = "suspend"
duration_min_time = forms.IntegerField() | Bionetbook/bionetbook | bnbapp/bionetbook/_old/verbs/forms/suspend.py | Python | mit | 159 |
from __future__ import absolute_import
import json
from twisted.internet import defer, error
from twisted.python import failure
from twisted.test import proto_helpers
from twisted.trial import unittest
from txjsonrpc import jsonrpc, jsonrpclib
class TestJSONRPC(unittest.TestCase):
def setUp(self):
self.deferred = defer.Deferred()
exposed = {
"foo" : lambda : setattr(self, "fooFired", True),
"bar" : lambda p : setattr(self, "barResult", p ** 2),
"baz" : lambda p, q : (q, p),
"late" : lambda p : self.deferred,
}
self.factory = jsonrpc.JSONRPCFactory(exposed.get)
self.proto = self.factory.buildProtocol(("127.0.0.1", 0))
self.tr = proto_helpers.StringTransportWithDisconnection()
self.proto.makeConnection(self.tr)
def assertSent(self, expected):
expected["jsonrpc"] = "2.0"
self.assertEqual(json.loads(self.tr.value()[2:]), expected)
def test_notify(self):
"""
notify() sends a valid JSON RPC notification.
"""
self.proto.notify("foo")
self.assertSent({"method" : "foo", "params" : []})
self.tr.clear()
self.proto.notify("bar", [3])
self.assertSent({"method" : "bar", u"params" : [3]})
def test_request(self):
"""
request() sends a valid JSON RPC request and returns a deferred.
"""
d = self.proto.request("foo")
self.assertSent({"id" : "1", "method" : "foo", "params" : []})
d.addCallback(lambda r : self.assertEqual(r, [2, 3, "bar"]))
receive = {"jsonrpc" : "2.0", "id" : "1", "result" : [2, 3, "bar"]}
self.proto.stringReceived(json.dumps(receive))
return d
def test_unhandledError(self):
"""
An unhandled error gets logged and disconnects the transport.
"""
v = failure.Failure(ValueError("Hey a value error"))
self.proto.unhandledError(v)
errors = self.flushLoggedErrors(ValueError)
self.assertEqual(errors, [v])
def test_invalid_json(self):
"""
Invalid JSON causes a JSON RPC ParseError and disconnects.
"""
self.proto.stringReceived("[1,2,")
err = {"id" : None, "error" : jsonrpclib.ParseError().toResponse()}
self.assertSent(err)
errors = self.flushLoggedErrors(jsonrpclib.ParseError)
self.assertEqual(len(errors), 1)
def test_invalid_request(self):
"""
An invalid request causes a JSON RPC InvalidRequest and disconnects.
"""
self.proto.stringReceived(json.dumps({"id" : 12}))
err = jsonrpclib.InvalidRequest({"reason" : "jsonrpc"})
self.assertSent({"id" : None, "error" : err.toResponse()})
errors = self.flushLoggedErrors(jsonrpclib.InvalidRequest)
self.assertEqual(len(errors), 1)
def test_unsolicited_result(self):
"""
An incoming result for an id that does not exist raises an error.
"""
receive = {"jsonrpc" : "2.0", "id" : "1", "result" : [2, 3, "bar"]}
self.proto.stringReceived(json.dumps(receive))
err = jsonrpclib.InternalError({
"exception" : "KeyError", "message" : "u'1'",
})
expect = {"jsonrpc" : "2.0", "id" : None, "error" : err.toResponse()}
sent = json.loads(self.tr.value()[2:])
tb = sent["error"]["data"].pop("traceback")
self.assertEqual(sent, expect)
self.assertTrue(tb)
# TODO: Raises original exception. Do we want InternalError instead?
errors = self.flushLoggedErrors(KeyError)
self.assertEqual(len(errors), 1)
def _errorTest(self, err):
d = self.proto.request("foo").addErrback(lambda f : self.assertEqual(
f.value.toResponse(), err.toResponse()
))
receive = {"jsonrpc" : "2.0", "id" : "1", "error" : {}}
receive["error"] = {"code" : err.code, "message" : err.message}
self.proto.stringReceived(json.dumps(receive))
return d
def test_parse_error(self):
self._errorTest(jsonrpclib.ParseError())
    def test_invalid_request_error(self):
self._errorTest(jsonrpclib.InvalidRequest())
def test_method_not_found(self):
self._errorTest(jsonrpclib.MethodNotFound())
def test_invalid_params(self):
self._errorTest(jsonrpclib.InvalidParams())
def test_internal_error(self):
self._errorTest(jsonrpclib.InternalError())
def test_application_error(self):
self._errorTest(jsonrpclib.ApplicationError(code=2400, message="Go."))
def test_server_error(self):
self._errorTest(jsonrpclib.ServerError(code=-32020))
def test_received_notify(self):
receive = {"jsonrpc" : "2.0", "method" : "foo"}
self.proto.stringReceived(json.dumps(receive))
self.assertTrue(self.fooFired)
receive = {"jsonrpc" : "2.0", "method" : "bar", "params" : [2]}
self.proto.stringReceived(json.dumps(receive))
self.assertEqual(self.barResult, 4)
def test_received_notify_no_method(self):
receive = {"jsonrpc" : "2.0", "method" : "quux"}
self.proto.stringReceived(json.dumps(receive))
errors = self.flushLoggedErrors(jsonrpclib.MethodNotFound)
self.assertEqual(len(errors), 1)
def test_received_notify_wrong_param_type(self):
receive = {"jsonrpc" : "2.0", "method" : "foo", "params" : [1, 2]}
self.proto.stringReceived(json.dumps(receive))
receive = {"jsonrpc" : "2.0", "method" : "bar", "params" : "foo"}
self.proto.stringReceived(json.dumps(receive))
errors = self.flushLoggedErrors(TypeError)
self.assertEqual(len(errors), 2)
def test_received_request(self):
receive = {
"jsonrpc" : "2.0", "id" : "1", "method" : "baz", "params" : [1, 2]
}
self.proto.stringReceived(json.dumps(receive))
self.assertSent({"jsonrpc" : "2.0", "id" : "1", "result" : [2, 1]})
def test_received_request_deferred(self):
receive = {
"jsonrpc" : "2.0", "id" : "3",
"method" : "late", "params" : {"p" : 3}
}
self.proto.stringReceived(json.dumps(receive))
self.deferred.callback(27)
self.assertSent({"jsonrpc" : "2.0", "id" : "3", "result" : 27})
def test_received_request_no_method(self):
receive = {"jsonrpc" : "2.0", "id" : "3", "method" : "quux"}
self.proto.stringReceived(json.dumps(receive))
errors = self.flushLoggedErrors(jsonrpclib.MethodNotFound)
self.assertEqual(len(errors), 1)
sent = json.loads(self.tr.value()[2:])
self.assertIn("error", sent)
self.assertEqual(sent["error"]["code"], jsonrpclib.MethodNotFound.code)
def test_received_request_error(self):
receive = {
"jsonrpc" : "2.0", "id" : "1", "method" : "foo", "params" : [1, 2]
}
self.proto.stringReceived(json.dumps(receive))
response = json.loads(self.tr.value()[2:])
self.assertNotIn("result", response)
self.assertEqual(response["id"], "1")
self.assertEqual(response["error"]["data"]["exception"], "TypeError")
self.assertTrue(response["error"]["data"]["traceback"])
errors = self.flushLoggedErrors(TypeError)
self.assertEqual(len(errors), 1)
errors = self.flushLoggedErrors(error.ConnectionLost)
self.assertEqual(len(errors), 1)
def test_fail_all(self):
d1, d2 = self.proto.request("foo"), self.proto.request("bar", [1, 2])
exc = failure.Failure(ValueError("A ValueError"))
self.proto.failAll(exc)
d3 = self.proto.request("baz", "foo")
for d in d1, d2, d3:
d.addErrback(lambda reason: self.assertIs(reason, exc))
def test_connection_lost(self):
self.proto.connectionLost(failure.Failure(error.ConnectionLost("Bye")))
return self.proto.request("foo").addErrback(
lambda f : self.assertIs(f.type, error.ConnectionLost)
)
| Julian/txjsonrpc-tcp | txjsonrpc/tests/test_jsonrpc.py | Python | mit | 8,101 |
import pytest
from canon.seq.seqreader import SeqReader
from .. import resource
def test_read_seq():
reader = SeqReader(resource('seq/Quartz_500Mpa_.SEQ'))
reader.get_Om()
Z, _, N = reader.get_Zmap('orsnr___')
def test_merge_Zmap():
reader = SeqReader()
reader.read_seq(resource('seq/au30_a1_.SEQ'))
Z1, _, N1 = reader.get_Zmap('orsnr___')
reader.read_seq(resource('seq/au30_m1_.SEQ'))
Z2, _, N2 = reader.get_Zmap('orsnr___')
Z, N = SeqReader.merge_Zmap(Z1, Z2, N1, N2)
if __name__ == '__main__':
pytest.main()
| structrans/Canon | test/seq/test_seqreader.py | Python | mit | 560 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.descriptors import descsum_create
from test_framework.script import OP_NOP, CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import get_key, get_multisig, test_address
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
def test_importmulti(self, req, success, error_code=None,
error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal(
"\n".join(
sorted(warnings)), "\n".join(
sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
# Sync the timestamp to the wallet, so that importmulti works
self.nodes[1].syncwithvalidationinterfacequeue()
node0_address1 = self.nodes[0].getaddressinfo(
self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info(
"Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Unsuccessful labelling for internal addresses"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info(
"Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info(
"Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info(
"Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info(
"Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(
self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
iswatchonly=True,
ismine=False,
solvable=True)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info(
"Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info(
"Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info(
"Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info(
"Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info(
"Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info(
"Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should
# replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of
# watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Test that importing of a P2PKH address via descriptor without
# checksum fails
key = get_key(self.nodes[0])
self.log.info(
"Should fail to import a p2pkh address from descriptor with no checksum")
self.test_importmulti({"desc": "pkh(" + key.pubkey + ")",
"timestamp": "now",
"label": "Descriptor import test"},
success=False,
error_code=-5,
error_message='Missing checksum')
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
# hdkeypath=m/0'/0'/0' and 1'
addresses = [
"ecregtest:prvn9ycvgr5atuyh49sua3mapskh2mnnzg7t9yp6dt",
"ecregtest:pp3n087yx0njv2e5wcvltahfxqst7l66rutz8ceeat"]
# pkh subscripts corresponding to the above addresses
addresses += [
"ecregtest:qqdkxd2xnzftq2p8wr3sqqyw8lntap7tncs546s6pr",
"ecregtest:qpyryy83jfaec5u0gpzldk6teadsuq8zlyqh5l30uq",
]
desc = "sh(pkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info(
"Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info(
"Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
for address in addresses:
test_address(self.nodes[1], address, solvable=True, ismine=True)
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
# Note: in Core's test, this address refers to the sh(wpkh()) address.
# For a sh(pkh()) this does not refer to a key, so we use the subscript
# address instead, which returns the same privkey.
address = "ecregtest:qzh6rch6st3wjvp0h2ud87gn7xnxvf6h8yrk8gcg8t"
desc = "sh(pkh(" + wif_priv + "))"
self.log.info(
"Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
p2pkh_label = "P2PKH descriptor import"
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": p2pkh_label},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info(
"Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info(
"Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info(
"Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info(
"Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc': descsum_create("pkh([" + pub_fpr + pub_keypath[1:] + "]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc': descsum_create("pkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(
wallet_name="noprivkeys",
disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress()
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey
# wallet
self.log.info(
"Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress()
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info(
            'Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('sh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info(
"Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(
result[0]['error']['message'],
"Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'ecregtest:qp0v86h53rc92hjrlpwzpjtdlgzsxu25svv6g40fpl', # m/0'/0'/0
'ecregtest:qqasy0zlkdleqt4pkn8fs4ehm5gnnz6qpgdcpt90fq', # m/0'/0'/1
'ecregtest:qp0sp4wlhctvprqvdt2dgvqcfdjssu04xgey0l3syw', # m/0'/0'/2
'ecregtest:qrhn24tegn04cptfv4ldhtkduxq55zcwrycjfdj9vr', # m/0'/0'/3
'ecregtest:qzpqhett2uwltq803vrxv7zkqhft5vsnmcjeh50v0p', # m/0'/0'/4
]
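        # The trailing '/*' in the descriptor is a wildcard; combined with 'range': [0, 4] it imports the first five derived keys in order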
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range': [0, 4],
}]
)
self.log.info(result)
for i in range(0, 5):
addr = wrpc.getnewaddress('')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
| Bitcoin-ABC/bitcoin-abc | test/functional/wallet_importmulti.py | Python | mit | 37,468 |
import os
import json
from base import BaseController
from nqueens.models import Piece, Panel, Meta
class NQueensController(BaseController):
def __init__(self, view):
        super(NQueensController, self).__init__()
self._piece_data = None
self._piece_cache = None
self.view = view
@classmethod
def get_instance(cls, view):
return cls(view)
def pre_switch(self):
pass
def start(self):
dim = self.view.get_dimension()
# Cached factory, only 1 file read per list
pieces = [Piece.from_file(self._piece_data) for i in range(dim)]
panel = Panel(dim)
self.view.notify({
'func': 'update_panel',
'data': {
'pieces': {},
}
})
res = self.run(panel, pieces, idx=0, ci=0)
if res:
self.view.notify({
'func': 'update_panel',
'data': {
'pieces': panel.pieces,
}
})
else:
self.view.notify({
'func': 'display_error',
'data': {
'message': 'No solution found :(',
}
})
def run(self, panel, pieces, idx, ci):
dim = panel.dimension
# Base case
if idx == len(pieces):
return True
else:
# Ultra-fast because:
            # 1. All the pieces are the same (fewer combinations to try)
# 2. We start from the previous index, this makes the panel smaller
# each time
# 3. Instead of keeping track of the killing positions we do a
# check each time a piece is added in order to avoid a kill
# (which is faster)
# 4. Python dict operations are astonishingly fast
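            # Try the current piece on every remaining cell; if the rest of the
            # pieces cannot be placed, undo the placement and keep searching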
for i in range(ci, dim):
for j in range(dim):
if panel.add_piece(pieces[idx], (i, j)):
if self.run(panel, pieces, idx+1, i):
return True
else:
panel.remove_piece(pieces[idx])
return False
def piece_selected(self, piece_name):
if not self._piece_cache:
self._piece_cache = Meta.get_piece_definitions()
self._piece_data = self._piece_cache.get(piece_name)
if self._piece_data:
self._piece_data = self._piece_data[1]
self.view.notify({
'func': 'enable_run',
'data': {
'enable': bool(self._piece_data),
}
})
@staticmethod
def get_pieces_attr(attr):
candidates = Meta.get_piece_definitions()
if all(attr in candidate[0].keys() for candidate in candidates.values()):
return [candidate[0][attr] for candidate in candidates.values()]
else:
return []
| PereBal/advanced-algorithms | nqueens/controller.py | Python | mit | 2,927 |
#!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""
The International Civil Aviation Organization proposes an alphabet in which
each letter is assigned a word, in order to avoid problems in
understanding critical messages.
To keep a record of the conversations it was decided to transcribe them
according to the following rules:
- each word is written on a single line
- the letters of the alphabet are separated by a comma
The following task has been assigned to you:
Write a program that receives a file containing the raw message
(written using the ICAO alphabet) and generates a file
named icao_intrare that will contain the original message.
Below is a dictionary containing a version of the ICAO alphabet:
"""
ICAO = {
'a': 'alfa', 'b': 'bravo', 'c': 'charlie', 'd': 'delta', 'e': 'echo',
'f': 'foxtrot', 'g': 'golf', 'h': 'hotel', 'i': 'india', 'j': 'juliett',
'k': 'kilo', 'l': 'lima', 'm': 'mike', 'n': 'november', 'o': 'oscar',
'p': 'papa', 'q': 'quebec', 'r': 'romeo', 's': 'sierra', 't': 'tango',
'u': 'uniform', 'v': 'victor', 'w': 'whiskey', 'x': 'x-ray', 'y': 'yankee',
'z': 'zulu'
}
def din_icao(fisier_intrare):
"""Funcția va primi calea către fișierul ce conține mesajul brut și
va genera un fișier numit icao_intrare ce va conține mesajul inițial.
"""
try:
in_file = open(fisier_intrare, 'r')
content = in_file.read()
in_file.close()
except IOError:
print "Error! Could not open file."
return
final_message = ''
for line in content.splitlines():
for word in line.split():
for key, value in ICAO.iteritems():
if value == word:
final_message += key
final_message += ' '
print final_message
if __name__ == "__main__":
din_icao("mesaj.icao")
| iulianbute/labs | python/solutii/iulian_andrei/icao/from_icao.py | Python | mit | 1,888 |
#!/usr/bin/env python3
import os
import random
import unittest
import warnings
from math import exp, pi
import gpytorch
import torch
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.test.utils import least_used_cuda_device
from gpytorch.utils.warnings import NumericalWarning
from torch import optim
# Simple training data: let's try to learn a sine function,
# but with SGPR
# let's use 100 training examples.
def make_data(cuda=False):
train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * (2 * pi))
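    # Corrupt the targets with a small amount of Gaussian observation noise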
train_y.add_(torch.randn_like(train_y), alpha=1e-2)
test_x = torch.rand(51)
test_y = torch.sin(test_x * (2 * pi))
if cuda:
train_x = train_x.cuda()
train_y = train_y.cuda()
test_x = test_x.cuda()
test_y = test_y.cuda()
return train_x, train_y, test_x, test_y
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1)))
self.covar_module = InducingPointKernel(
self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
class TestSGPRRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_sgpr_mean_abs_error(self):
# Suppress numerical warnings
warnings.simplefilter("ignore", NumericalWarning)
train_x, train_y, test_x, test_y = make_data()
likelihood = GaussianLikelihood()
gp_model = GPRegressionModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
# Optimize the model
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
for _ in range(30):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
gp_model.eval()
likelihood.eval()
test_preds = likelihood(gp_model(test_x)).mean
mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
self.assertLess(mean_abs_error.squeeze().item(), 0.05)
def test_sgpr_fast_pred_var(self):
# Suppress numerical warnings
warnings.simplefilter("ignore", NumericalWarning)
train_x, train_y, test_x, test_y = make_data()
likelihood = GaussianLikelihood()
gp_model = GPRegressionModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
# Optimize the model
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
gp_model.eval()
likelihood.eval()
with gpytorch.settings.max_preconditioner_size(5), gpytorch.settings.max_cg_iterations(50):
with gpytorch.settings.fast_pred_var(True):
fast_var = gp_model(test_x).variance
fast_var_cache = gp_model(test_x).variance
self.assertLess(torch.max((fast_var_cache - fast_var).abs()), 1e-3)
with gpytorch.settings.fast_pred_var(False):
slow_var = gp_model(test_x).variance
self.assertLess(torch.max((fast_var_cache - slow_var).abs()), 1e-3)
def test_sgpr_mean_abs_error_cuda(self):
# Suppress numerical warnings
warnings.simplefilter("ignore", NumericalWarning)
if not torch.cuda.is_available():
return
with least_used_cuda_device():
train_x, train_y, test_x, test_y = make_data(cuda=True)
likelihood = GaussianLikelihood().cuda()
gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
# Optimize the model
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
optimizer.n_iter = 0
for _ in range(25):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
gp_model.eval()
likelihood.eval()
test_preds = likelihood(gp_model(test_x)).mean
mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
self.assertLess(mean_abs_error.squeeze().item(), 0.02)
if __name__ == "__main__":
unittest.main()
| jrg365/gpytorch | test/examples/test_sgpr_regression.py | Python | mit | 6,354 |
"""
Django settings for apps project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from local_settings import SECRET_KEY, DATABASES, DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
SECRET_KEY = SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = DEBUG
ALLOWED_HOSTS = [
'learningdjango.in',
'localhost',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'home.apps.HomeConfig',
'polls.apps.PollsConfig',
'blog.apps.BlogConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'apps.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'apps.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = DATABASES
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Media Files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| pattu777/LearningDjango | apps/apps/settings.py | Python | mit | 3,307 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner databases create."""
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import databases
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.spanner import flags
class Create(base.CreateCommand):
"""Cloud Spanner databases create command."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
flags.Instance(positional=False).AddToParser(parser)
flags.Database().AddToParser(parser)
flags.Ddl(help_text='Semi-colon separated DDL (data definition language) '
'statements to run inside the '
'newly created database. If there is an error in any statement, '
'the database is not created. Full DDL specification is at '
'https://cloud.google.com/spanner/docs/data-definition-language'
).AddToParser(parser)
base.ASYNC_FLAG.AddToParser(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
      The long-running operation if --async is given, otherwise the result of
      awaiting the database creation.
"""
op = databases.Create(
args.instance, args.database, flags.FixDdl(args.ddl or []))
if args.async:
return op
return database_operations.Await(op, 'Creating database')
| Sorsly/subtle | google-cloud-sdk/lib/surface/spanner/databases/create.py | Python | mit | 2,377 |
# -*- coding: utf-8 -*-
#
# M2Crypto documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 20 11:15:12 2017.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'M2Crypto'
copyright = u'2017, Matej Cepl <[email protected]>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'M2Cryptodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'M2Crypto.tex', u'M2Crypto Documentation',
u'Matej Cepl \\textless{}[email protected]\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'm2crypto', u'M2Crypto Documentation',
[u'Matej Cepl <[email protected]>'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'M2Crypto', u'M2Crypto Documentation',
u'Matej Cepl <[email protected]>', 'M2Crypto', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'M2Crypto'
epub_author = u'Matej Cepl <[email protected]>'
epub_publisher = u'Matej Cepl <[email protected]>'
epub_copyright = u'2017, Matej Cepl <[email protected]>'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| Edzvu/Edzvu.github.io | M2Crypto-0.35.2/doc/conf.py | Python | mit | 9,150 |
#!/usr/local/bin/python3
from zeroconf import Zeroconf, ServiceInfo
import socket
import configparser
from . import hazc_cmd
# import pdb
class hazc_device:
#Forward constants
NO_PARAM = hazc_cmd.NO_PARAM
BOOL = hazc_cmd.BOOL
FLOAT = hazc_cmd.FLOAT
STRING = hazc_cmd.STRING
INT = hazc_cmd.INT
global running
running = False
def __init__(self, ipaddr):
self.version = "0.1"
self.config = configparser.ConfigParser()
self.config.read('config.ini')
self.MSGLEN = 1024
self.END_OF_MSG = '*'
self.ip = ipaddr
self.buffer = 20
# self.commands = {'version?':self.version_cmd,'commands?':self.commands_cmd,'status?':self.status_cmd}
hcvc = hazc_cmd.hazc_cmd('version?', self.version_cmd, self.NO_PARAM)
hccc = hazc_cmd.hazc_cmd('commands?', self.commands_cmd, self.NO_PARAM)
hcsc = hazc_cmd.hazc_cmd('status?', self.status_cmd, self.STRING)
self.commands = {'version': hcvc, 'commands': hccc, 'status': hcsc}
# probably want to add a debug log status
self.status = {'exec_status': self.exec_status}
#Adds a function - not as preferred as addControl
#Does NOT auto add status
def addFunction(self, name, handler, paramtype):
# pdb.settrace()
#log("This is not the preferred way to add controls, see addControl")
if not('?' in name or '!' in name):
# log("Function name requires a '?' or '!', assuming '!'")
name += '!'
        # Register under the bare command name (without '?'/'!'), since
        # handledata() strips the separator before looking the command up
        self.commands[name.rstrip('?!')] = hazc_cmd.hazc_cmd(name, handler, paramtype)
#Adds a control vector
#controlname should just be a name like 'temp' or 'position' - it'll be the same for the status
def addControl(self, controlname, handler, statushandler, paramtype=NO_PARAM):
cmd_name = 'set-'+controlname
self.commands[cmd_name] = hazc_cmd.hazc_cmd(cmd_name+'?', handler, paramtype)
self.addStatus(controlname, statushandler)
#adds a unique status not already included in control vector. name is just the name, as in 'temp'
def addStatus(self, name, handler):
self.status[name] = handler
def advertise(self):
postfix = self.config['global']['service_prefix']
self.port = int(self.config['global']['port'])
#print(self.config['device']['hostname']+postfix)
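        # Register this device as a zeroconf/mDNS service, then loop accepting control connections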
info = ServiceInfo(postfix, self.config['device']['hostname']+"."+postfix,
socket.inet_aton(self.ip), self.port, 0, 0,
{'info': self.config['device']['description']}, "hazc.local.")
self.bindConnection()
zeroconf = Zeroconf()
zeroconf.register_service(info)
try:
while True:
# try:
print("Ready")
self.conn, self.addr = self.webcontrol.accept()
self.listen()
self.conn.close()
except KeyboardInterrupt:
pass
finally:
print()
print("Unregistering...")
zeroconf.unregister_service(info)
zeroconf.close()
try:
print("Shutting down socket")
self.webcontrol.shutdown(socket.SHUT_RDWR)
except Exception as e:
print(e)
def listen(self):
data = bytes()
rbytes = 0
while rbytes < self.MSGLEN:
d = self.conn.recv(self.buffer)
if not d: break
data += d
rbytes += len(d)
# print data.decode('utf-8')
self.handledata(data)
def handledata(self, data):
command, param = self.cleanandstringdata(data)
print('->' + command + ';' + param)
# replystr = "ERROR"
try:
replystr = self.commands[command].execute(param)
except KeyError:
if(command==''):
command = "(empty string)"
print("ERROR! Unknown command: " + command)
replystr = ""
# replystr = self.commands['version'].execute('')
if(replystr == None):
print("WARNING! " + command + " should return a string to send to the master. Sending 'NO_REPLY'")
replystr = 'NO_REPLY'
print(replystr)
self.reply(replystr)
def reply(self, msg):
longmsg = msg
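        # Pad the reply with END_OF_MSG characters so every message sent is exactly MSGLEN bytes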
while len(longmsg) < self.MSGLEN:
longmsg += self.END_OF_MSG
# print(longmsg)
self.conn.send(longmsg.encode('utf-8'))
def cleanandstringdata(self, data):
dstr = data.decode('utf-8')
full = dstr.strip(self.END_OF_MSG)
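        # Messages look like '<command>?<param>' (query) or '<command>!<param>' (action); split the command from its parameter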
if '?' in full:
li = full.split('?')
param = li[-1]
cmd = li[0]
elif '!' in full:
li = full.split('!')
param = li[-1]
cmd = li[0]
else:
param = ''
cmd = full
return (cmd, param)
def bindConnection(self):
try:
self.webcontrol = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.webcontrol.bind((self.ip, self.port))
self.webcontrol.listen(1)
except OSError as e:
print(e)
quit()
def exec_status(self):
return "Running"
def version_cmd(self):
return self.version
def paramtype_tostring(self, paramnum):
if paramnum == self.BOOL:
return 'BOOL'
elif paramnum == self.FLOAT:
return 'FLOAT'
elif paramnum == self.STRING:
return 'STRING'
elif paramnum == self.INT:
return 'INT'
else:
return 'PARAM_ERROR'
def commands_cmd(self):
rstr = ""
for key in self.commands:
rstr += key
if self.commands[key].paramtype is not self.NO_PARAM:
# pdb.set_trace()
rstr += ':' + self.paramtype_tostring(self.commands[key].paramtype)
rstr += ";"
return rstr
def status_cmd(self, specific_status=''):
str = ''
if len(specific_status) > 0:
str = self.status[specific_status]
else:
for st in self.status:
str += st + ',' + self.status[st]() + ';'
return str[:self.MSGLEN-1]
# Some debugging methods
def debug_cmds(self):
print("Commands: " + str(self.commands))
print("Statuses: " + str(self.status)) | ArcAwe/hazc | hazc_device.py | Python | mit | 6,518 |
from victor.exceptions import (
FieldValidationException,
FieldTypeConversionError,
FieldRequiredError,
VectorInputTypeError
)
class Field(object):
required = True
"""Field is required and an exception will be raised if missing"""
missing_value = None
"""Value to use when field is missing and not required"""
strict = False
"""Field value must pass validation or an exception will be raised"""
cast_cls = None
data = None
def __init__(self, required=True, missing_value=None, strict=False):
self.required = required
self.missing_value = missing_value
self.strict = strict
def _validate(self, value):
return True
def _cast_type(self, value):
return self.cast_cls(value)
def set_data(self, value):
if self.strict:
if not self._validate(value):
raise FieldValidationException('%s does not '
'except this value'
% self.__class__.__name__)
elif self.cast_cls is not None:
value = self._cast_type(value)
self.data = value
class CharField(Field):
pass
class StringField(Field):
cast_cls = str
def _validate(self, value):
if not isinstance(value, (str, unicode)):
return False
return True
class IntField(Field):
cast_cls = int
_cast_fallback_value = 0
def __init__(self, *args, **kwargs):
super(IntField, self).__init__(*args, **kwargs)
if self.missing_value is None:
self.missing_value = self._cast_fallback_value
def _cast_type(self, value):
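        # Attempt the numeric conversion; on failure fall back to missing_value,
        # unless missing_value is False, which makes conversion errors fatal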
try:
return self.cast_cls(value)
except ValueError, exc:
if self.missing_value is False:
raise FieldTypeConversionError('Could not convert '
'data or use missing_value: %s'
% exc)
return self.missing_value
class FloatField(IntField):
    cast_cls = float
_cast_fallback_value = 0.0
class ListField(Field):
cls = None
"""Field class to represent list items"""
def __init__(self, cls, *args, **kwargs):
assert isinstance(cls, Field), 'cls is not a valid Field instance'
self.cls = cls
super(ListField, self).__init__(*args, **kwargs)
def _validate(self, value):
if not isinstance(value, (list, tuple)):
raise FieldValidationException('ListField requires data '
'to be a sequence type')
for x in value:
            self.cls.set_data(x)
self.data = value
return True
class Vector(object):
def __init__(self):
self.input_data = {}
self._fields = {}
self._map = {}
self._required = []
self._setup_fields()
def get_name(self):
return self.__class__.__name__
def __call__(self, data):
return self.input(data)
def input(self, data):
self._map = {}
if not isinstance(data, dict):
raise VectorInputTypeError('Vector input not a dictionary')
self._validate(data)
self._map_attrs(data)
def _setup_fields(self):
self._fields = {}
for a in dir(self):
v = getattr(self, a)
if isinstance(v, Field):
self._fields[a] = v
if v.required:
self._required.append(a)
self._reset_fields()
def _reset_fields(self):
for f in self.get_fields():
setattr(self, f, None)
def _validate(self, input_data):
for f in self._required:
if f not in input_data:
raise FieldRequiredError('Missing field %s is a required field'
% f)
for k, v in input_data.iteritems():
if k in self.get_fields():
f = self.get_field(k)
f.set_data(v)
def _map_attrs(self, input_data):
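        # Combine validated field values with any extra keys from the input,
        # then expose every entry as an attribute on the vector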
self.input_data = input_data
for k, v in self.input_data.iteritems():
if k in self.get_fields():
# setattr(self, k, self.get_field(k).data)
self._map[k] = self.get_field(k).data
else:
# setattr(self, k, v)
self._map[k] = v
for k, v in self._map.iteritems():
setattr(self, k, v)
def get_fields(self):
return self._fields
def get_field(self, name):
return self._fields[name]
@property
def data(self):
return self._map
| alexph/victor | victor/vector.py | Python | mit | 4,717 |
#!/usr/bin/env python
#
# Copyright (c) 2014 Hamilton Kibbe <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" PyAbleton
A library for creating and editing Ableton Live instrument/effect presets in Python.
"""
__author__ = '[email protected]'
__version__ = '1.0'
import presets
| hamiltonkibbe/PyAbleton | pyableton/__init__.py | Python | mit | 1,356 |
"""
Django settings for figexample project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pp&p7ex-&+#n4waijg96v&txz$=y*rh=t$u-!hri@(-s@6^51='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'figexample.urls'
WSGI_APPLICATION = 'figexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db_1',
'PORT': 5432,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| arbiterofcool/fig-seed | template/fig-django/figexample/settings.py | Python | mit | 2,114 |
# -*- coding:utf-8 -*-
import abc
import platform
from UserList import UserList
class Monitor(object):
@abc.abstractmethod
def current(self):
pass
@abc.abstractmethod
def percent(self, range):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def max(self):
pass
@abc.abstractmethod
def min(self):
pass
class Monitors(UserList):
@abc.abstractmethod
def percent(self, range):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def max(self):
pass
@abc.abstractmethod
def min(self):
pass
def get_monitors():
if platform.system() == "Windows":
from .driver_win_wmi import WinWMIMonitors
return WinWMIMonitors()
elif platform.system() == "Darwin":
from .driver_mac import MacMonitors
return MacMonitors()
elif platform.system() == "Linux":
from .driver_linux import LinuxMonitors
return LinuxMonitors()
else:
raise OSError()
| x007007007/pyScreenBrightness | src/pyScreenBrightness/base.py | Python | mit | 1,127 |
# -*- coding: utf-8 -*-
"""
Forms for day forms
"""
from django.conf import settings
from django import forms
from django.utils.translation import ugettext as _
from arrow import Arrow
from datebook.models import DayEntry
from datebook.forms import CrispyFormMixin
from datebook.utils.imports import safe_import_module
DATETIME_FORMATS = {
'input_date_formats': ['%d/%m/%Y'],
'input_time_formats': ['%H:%M'],
'widget': forms.SplitDateTimeWidget(date_format='%d/%m/%Y', time_format='%H:%M'),
}
class DayBaseFormMixin(object):
"""
DayBase form mixin
"""
crispy_form_helper_path = 'datebook.forms.crispies.day_helper'
crispy_form_helper_kwargs = {}
def fill_initial_data(self, *args, **kwargs):
# Pass initial data for start and stop to their SplitDateTimeField clones
if 'start' in kwargs['initial']:
kwargs['initial']['start_datetime'] = kwargs['initial']['start']
if 'stop' in kwargs['initial']:
kwargs['initial']['stop_datetime'] = kwargs['initial']['stop']
# For existing instance (in edit mode) pass the start and stop values to their
        # clone with SplitDateTimeField via initial data
if kwargs.get('instance'):
kwargs['initial']['start_datetime'] = kwargs['instance'].start
kwargs['initial']['stop_datetime'] = kwargs['instance'].stop
return kwargs
def init_fields(self, *args, **kwargs):
self.fields['start_datetime'] = forms.SplitDateTimeField(label=_('start'), **DATETIME_FORMATS)
self.fields['stop_datetime'] = forms.SplitDateTimeField(label=_('stop'), **DATETIME_FORMATS)
# Set the form field for DayEntry.content
field_helper = safe_import_module(settings.DATEBOOK_TEXT_FIELD_HELPER_PATH)
if field_helper is not None:
self.fields['content'] = field_helper(self, **{'label':_('content'), 'required': False})
def clean_content(self):
"""
Text content validation
"""
content = self.cleaned_data.get("content")
validation_helper = safe_import_module(settings.DATEBOOK_TEXT_VALIDATOR_HELPER_PATH)
if validation_helper is not None:
return validation_helper(self, content)
else:
return content
def clean_start_datetime(self):
start = self.cleaned_data['start_datetime']
# Day entry can't start before the targeted day date
if start and start.date() < self.daydate:
raise forms.ValidationError(_("You can't start a day before itself"))
# Day entry can't start after the targeted day date
if start and start.date() > self.daydate:
raise forms.ValidationError(_("You can't start a day after itself"))
return start
def clean_stop_datetime(self):
start = self.cleaned_data.get('start_datetime')
stop = self.cleaned_data['stop_datetime']
# Day entry can't stop before the start
if start and stop and stop <= start:
raise forms.ValidationError(_("Stop time can't be less or equal to start time"))
# Day entry can't stop in more than one futur day from the targeted day date
if stop and stop.date() > Arrow.fromdate(self.daydate).replace(days=1).date():
raise forms.ValidationError(_("Stop time can't be more than the next day"))
return stop
# TODO: overtime must not be more than effective worked time
#def clean_overtime(self):
#overtime = self.cleaned_data.get('overtime')
#return overtime
# TODO
#def clean_pause(self):
#start = self.cleaned_data.get('start_datetime')
#stop = self.cleaned_data.get('stop_datetime')
#pause = self.cleaned_data['pause']
## Pause time can't be more than elapsed time between start and stop
#if start and stop and pause and False:
#raise forms.ValidationError("Pause time is more than the elapsed time")
#return pause
class DayEntryForm(DayBaseFormMixin, CrispyFormMixin, forms.ModelForm):
"""
DayEntry form
"""
def __init__(self, datebook, day, *args, **kwargs):
self.datebook = datebook
self.daydate = datebook.period.replace(day=day)
# Args to give to the form layout method
self.crispy_form_helper_kwargs.update({
'next_day': kwargs.pop('next_day', None),
'day_to_model_url': kwargs.pop('day_to_model_url', None),
'form_action': kwargs.pop('form_action'),
'remove_url': kwargs.pop('remove_url', None),
})
# Fill initial datas
kwargs = self.fill_initial_data(*args, **kwargs)
super(DayEntryForm, self).__init__(*args, **kwargs)
super(forms.ModelForm, self).__init__(*args, **kwargs)
# Init some special fields
kwargs = self.init_fields(*args, **kwargs)
def clean(self):
cleaned_data = super(DayBaseFormMixin, self).clean()
content = cleaned_data.get("content")
vacation = cleaned_data.get("vacation")
# Content text is only required when vacation is not checked
if not vacation and not content:
raise forms.ValidationError(_("Worked days require a content text"))
return cleaned_data
def save(self, *args, **kwargs):
instance = super(DayEntryForm, self).save(commit=False, *args, **kwargs)
instance.start = self.cleaned_data['start_datetime']
instance.stop = self.cleaned_data['stop_datetime']
instance.datebook = self.datebook
instance.activity_date = self.daydate
instance.save()
return instance
class Meta:
model = DayEntry
exclude = ('datebook', 'activity_date', 'start', 'stop')
widgets = {
'pause': forms.TimeInput(format=DATETIME_FORMATS['input_time_formats'][0]),
'overtime': forms.TimeInput(format=DATETIME_FORMATS['input_time_formats'][0]),
}
class DayEntryCreateForm(DayEntryForm):
def clean(self):
cleaned_data = super(DayEntryCreateForm, self).clean()
        # Validate that there is not already a day entry for the same day
try:
obj = DayEntry.objects.get(datebook=self.datebook, activity_date=self.daydate)
except DayEntry.DoesNotExist:
pass
else:
raise forms.ValidationError(_("This day entry has allready been created"))
return cleaned_data
| sveetch/django-datebook | datebook/forms/day.py | Python | mit | 6,638 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.tools.special Special functions.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
# Import the relevant PTS classes and modules
from ...magic.core.frame import Frame
from ..basics.remote import Remote, connected_remotes
from . import time
from . import filesystem as fs
from .logging import log
# -----------------------------------------------------------------
def remote_convolution(image, kernel, host_id):
"""
This function ...
:param image:
:param kernel:
:param host_id:
"""
# Check whether we are already connected to the specified remote host
if host_id in connected_remotes and connected_remotes[host_id] is not None:
remote = connected_remotes[host_id]
else:
# Debugging
log.debug("Logging in to remote host ...")
# Create a remote instance for the specified host ID
remote = Remote()
remote.setup(host_id)
# Debugging
log.debug("Creating temporary directory remotely ...")
# Create a temporary directory to do the convolution
remote_home_directory = remote.home_directory
remote_temp_path = fs.join(remote_home_directory, time.unique_name("convolution"))
remote.create_directory(remote_temp_path)
# Debugging
#log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel FITS file to the remote directory
#remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
#remote.upload(kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a local temporary directory ...")
# Create a temporary directory locally to contain the frames
local_temp_path = fs.join(fs.home(), time.unique_name("convolution"))
fs.create_directory(local_temp_path)
# Debugging
log.debug("Saving the image frames to the temporary directory ...")
# Save the frames
local_frame_paths = []
constant_frames = []
for frame_name in image.frames:
frame_path = fs.join(local_temp_path, frame_name + ".fits")
# Only upload and convolve non-constant frames
if not image.frames[frame_name].is_constant():
image.frames[frame_name].save(frame_path)
local_frame_paths.append(frame_path)
else:
log.debug("The " + frame_name + " frame is constant, so this won't be uploaded and convolved")
constant_frames.append(frame_name)
# Debugging
log.debug("Saving the kernel to the temporary directory ...")
local_kernel_path = fs.join(local_temp_path, "kernel.fits")
kernel.save(local_kernel_path)
# Debugging
log.debug("Uploading the image frames to the remote directory ...")
# Upload the frames
remote_frame_paths = []
for local_frame_path in local_frame_paths:
# Determine the name of the local frame file
frame_file_name = fs.name(local_frame_path)
# Debugging
log.debug("Uploading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Upload the frame file
remote_frame_path = fs.join(remote_temp_path, frame_file_name)
remote.upload(local_frame_path, remote_temp_path, new_name=frame_file_name, compress=True, show_output=True)
remote_frame_paths.append(remote_frame_path)
# Debugging
log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel
remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
remote.upload(local_kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a python script to perform the convolution remotely ...")
# Create a python script that does the convolution
#script_file = tempfile.NamedTemporaryFile()
#local_script_path = script_file.name
local_script_path = fs.join(local_temp_path, "convolve.py")
script_file = open(local_script_path, 'w')
script_file.write("#!/usr/bin/env python\n")
script_file.write("# -*- coding: utf8 -*-\n")
script_file.write("\n")
script_file.write("# Import astronomical modules\n")
script_file.write("from astropy.units import Unit\n")
script_file.write("\n")
script_file.write("# Import the relevant PTS classes and modules\n")
script_file.write("from pts.magic.core.frame import Frame\n")
script_file.write("from pts.magic.core.image import Image\n")
script_file.write("from pts.magic.core.kernel import ConvolutionKernel\n")
script_file.write("from pts.core.tools.logging import log\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the kernel frame ...')\n")
script_file.write("\n")
script_file.write("# Open the kernel\n")
script_file.write("kernel = ConvolutionKernel.from_file('" + remote_kernel_path + "')\n")
script_file.write("\n")
for remote_frame_path in remote_frame_paths:
frame_name = fs.strip_extension(fs.name(remote_frame_path))
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the " + frame_name + " frame ...')\n")
script_file.write("\n")
script_file.write("# Open the frame\n")
script_file.write("frame = Frame.from_file('" + remote_frame_path + "')\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Convolving the " + frame_name + " frame ...')\n")
script_file.write("\n")
script_file.write("# Do the convolution and save the result\n")
script_file.write("frame.convolve(kernel, allow_huge=True)\n")
script_file.write("frame.save('" + remote_frame_path + "')\n") # overwrite the frame
script_file.write("\n")
#script_file.write("# Save the image\n")
#script_file.write("image.save(" + remote_image_path + ")\n")
# Write to disk
#script_file.flush()
script_file.close()
# Debugging
log.debug("Uploading the python script ...")
# Upload the script file
remote_script_path = fs.join(remote_temp_path, "convolve.py")
remote.upload(local_script_path, remote_temp_path, new_name="convolve.py", show_output=True)
# Close the local script (it is automatically removed)
#script_file.close()
# Debugging
log.debug("Executing the script remotely ...")
# Execute the script file remotely
remote.execute("python " + remote_script_path, output=False, show_output=True)
# Debugging
log.debug("Downloading the results ...")
# Download the resulting FITS file (the convolved image)
#local_result_path = self.full_output_path("convolved.fits")
#remote.download(remote_image_path, fs.directory_of(local_result_path), new_name="convolved.fits", compress=True)
for remote_frame_path in remote_frame_paths:
# Determine the name of the local frame file
frame_file_name = fs.name(remote_frame_path)
# Debugging
log.debug("Downloading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Download
remote.download(remote_frame_path, local_temp_path, new_name=frame_file_name, compress=True, show_output=True)
# Remove the temporary directory on the remote's filesystem
remote.remove_directory(remote_temp_path)
# Load the result
#self.image = Image.from_file(local_result_path)
for frame_name in image.frames.keys():
if frame_name in constant_frames: continue # Skip constant frames, these are not convolved
local_frame_path = fs.join(local_temp_path, frame_name + ".fits")
image.frames[frame_name] = Frame.from_file(local_frame_path)
# Remove the local temporary directory
fs.remove_directory(local_temp_path)
# -----------------------------------------------------------------
def remote_convolution_frame(frame, kernel_path, host_id):
    """
    This function ...
    :param frame:
    :param kernel_path:
    :param host_id:
    :return:
    """
    # Check whether the frame is constant. If it is, we don't have to convolve!
    if frame.is_constant(): return frame.copy()
    # Check whether we are already connected to the specified remote host
    if host_id in connected_remotes and connected_remotes[host_id] is not None:
        remote = connected_remotes[host_id]
    else:
        # Debugging
        log.debug("Logging in to remote host ...")
        # Create a remote instance for the specified host ID
        remote = Remote()
        remote.setup(host_id)
    # Debugging
    log.debug("Creating temporary directory remotely ...")
    # Create a temporary directory to do the convolution
    remote_home_directory = remote.home_directory
    remote_temp_path = fs.join(remote_home_directory, time.unique_name("convolution"))
    remote.create_directory(remote_temp_path)
    # Debugging
    log.debug("Creating local temporary directory ...")
    # Create a temporary directory locally to contain the frames
    local_temp_path = fs.join(fs.home(), time.unique_name("convolution"))
    fs.create_directory(local_temp_path)
    # Debugging
    log.debug("Writing the frame to the temporary directory ...")
    # Write the frame
    local_frame_path = fs.join(local_temp_path, frame.name + ".fits")
    frame.save(local_frame_path)
    # Debugging
    #log.debug("Writing the kernel to the temporary directory ...")
    # Write the kernel
    #local_kernel_path = fs.join(local_temp_path, "kernel.fits")
    #kernel.save(local_kernel_path)
    # Debugging
    log.debug("Uploading the frame to the remote directory ...")
    # Upload the frame file
    remote_frame_path = fs.join(remote_temp_path, frame.name)
    remote.upload(local_frame_path, remote_temp_path, new_name=frame.name, compress=True, show_output=True)
    # Debugging
    #log.debug("Uploading the kernel to the remote directory ...")
    # Upload the kernel FITS file to the remote directory
    #remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
    #remote.upload(local_kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
    # Debugging
    log.debug("Uploading the kernel to the remote directory ...")
    # Upload the kernel FITS file to the remote directory
    remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
    remote.upload(kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
    # Debugging
    log.debug("Creating a python script to perform the convolution remotely ...")
    # Create the script
    local_script_path = fs.join(local_temp_path, "convolve.py")
    script_file = open(local_script_path, 'w')
    script_file.write("#!/usr/bin/env python\n")
    script_file.write("# -*- coding: utf8 -*-\n")
    script_file.write("\n")
    script_file.write("# Import the relevant PTS classes and modules\n")
    script_file.write("from pts.magic.core.frame import Frame\n")
    script_file.write("from pts.core.tools.logging import log\n")
    script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Opening the kernel frame ...')\n")
    script_file.write("\n")
    script_file.write("# Open the kernel frame\n")
    script_file.write("kernel = Frame.from_file('" + remote_kernel_path + "')\n")
    script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Opening the frame ...')\n")
    script_file.write("\n")
    script_file.write("# Open the frame\n")
    script_file.write("frame = Frame.from_file('" + remote_frame_path + "')\n")
    script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Convolving the frame ...')\n")
    script_file.write("\n")
    script_file.write("# Do the convolution and save the result\n")
    script_file.write("convolved = frame.convolved(kernel, allow_huge=True)\n")
    script_file.write("convolved.save('" + remote_frame_path + "')\n") # overwrite the frame
    # Write to disk
    script_file.close()
    # Debugging
    log.debug("Uploading the python script ...")
    # Upload the script file
    remote_script_path = fs.join(remote_temp_path, "convolve.py")
    remote.upload(local_script_path, remote_temp_path, new_name="convolve.py", show_output=True)
    # Debugging
    log.debug("Executing the script remotely ...")
    # Execute the script file remotely
    remote.execute("python " + remote_script_path, output=False, show_output=True)
    # Debugging
    log.debug("Downloading the result ...")
    # Determine the name of the local frame file
    frame_file_name = fs.name(remote_frame_path)
    # Debugging
    log.debug("Downloading the " + fs.strip_extension(frame_file_name) + " frame ...")
    # Download
    remote.download(remote_frame_path, local_temp_path, new_name=frame_file_name, compress=True, show_output=True)
    # Remove the temporary directory on the remote's filesystem
    remote.remove_directory(remote_temp_path)
    # Load the convolved frame
    convolved = Frame.from_file(local_frame_path)
    # Remove the local temporary directory
    fs.remove_directory(local_temp_path)
    # Return the convolved frame
    return convolved
# -----------------------------------------------------------------
def remote_filter_convolution_no_pts(host_id, datacube_path, wavelengths, filters):
    """
    This function ...
    :param host_id:
    :param datacube_path:
    :param wavelengths:
    :param filters:
    :return:
    """
    # Check whether we are already connected to the specified remote host
    if host_id in connected_remotes and connected_remotes[host_id] is not None:
        remote = connected_remotes[host_id]
    else:
        # Debugging
        log.debug("Logging in to remote host ...")
        # Create a remote instance for the specified host ID
        remote = Remote()
        remote.setup(host_id)
    # Debugging
    log.debug("Creating temporary directory remotely ...")
    # Create a temporary directory to do the convolution
    remote_home_directory = remote.home_directory
    remote_temp_path = fs.join(remote_home_directory, time.unique_name("filter-convolution"))
    remote.create_directory(remote_temp_path)
    # Debugging
    log.debug("Creating local temporary directory ...")
    # Create a temporary directory locally to contain the frames
    local_temp_path = fs.join(fs.home(), time.unique_name("filter-convolution"))
    fs.create_directory(local_temp_path)
    integrated_transmissions = dict()
    # Loop over the filters
    for fltr in filters:
        # Get the transmission data
        fltr_wavelengths = fltr._Wavelengths
        fltr_transmission = fltr._Transmission
        fltr_integrated_transmission = fltr._IntegratedTransmission
        integrated_transmissions[fltr.name] = fltr_integrated_transmission
        # Save the transmission data
        path = fs.join(local_temp_path, "transmission__" + str(fltr) + ".dat")
        np.savetxt(path, (fltr_wavelengths, fltr_transmission))
    #print(integrated_transmissions)
    #print(local_temp_path)
    integrated_path = fs.join(local_temp_path, "integrated_transmissions.txt")
    with open(integrated_path, 'w') as integrated_trans_file:
        for fltr_name in integrated_transmissions:
            integrated_trans_file.write(fltr_name + ": " + str(integrated_transmissions[fltr_name]) + "\n")
    # NOT FINISHED ...
# -----------------------------------------------------------------
def remote_filter_convolution(host_id, datacube_path, wavelengths, filters, keep_output=False):
    """
    This function ...
    :param host_id:
    :param datacube_path:
    :param wavelengths:
    :param filters:
    :param keep_output:
    :return:
    """
    # Check whether we are already connected to the specified remote host
    if host_id in connected_remotes and connected_remotes[host_id] is not None:
        remote = connected_remotes[host_id]
    else:
        # Debugging
        log.debug("Logging in to remote host ...")
        # Create a remote instance for the specified host ID
        remote = Remote()
        remote.setup(host_id)
    # Debugging
    log.debug("Creating temporary directory remotely ...")
    # Create a temporary directory to do the convolution
    remote_home_directory = remote.home_directory
    remote_temp_path = fs.join(remote_home_directory, time.unique_name("filter-convolution"))
    remote.create_directory(remote_temp_path)
    # Debugging
    log.debug("Creating local temporary directory ...")
    # Create a temporary directory locally to contain the frames
    local_temp_path = fs.join(fs.home(), time.unique_name("filter-convolution"))
    fs.create_directory(local_temp_path)
    # Debugging
    log.debug("Uploading the datacube to the temporary remote directory ...")
    # Upload the frame file
    datacube_name = fs.name(datacube_path)
    remote_datacube_path = fs.join(remote_temp_path, datacube_name)
    remote.upload(datacube_path, remote_temp_path, compress=True, show_output=True)
    # Debugging
    log.debug("Writing the wavelengths to the temporary local directory ...")
    local_wavelengths_path = fs.join(local_temp_path, "wavelengths.txt")
    np.savetxt(local_wavelengths_path, wavelengths)
    # Debugging
    log.debug("Uploading the wavelengths file to the remote directory ...")
    # Upload the kernel FITS file to the remote directory
    remote_wavelengths_path = fs.join(remote_temp_path, "wavelengths.txt")
    remote.upload(local_wavelengths_path, remote_temp_path, compress=True, show_output=True)
    # Debugging
    log.debug("Creating a python script to perform the filter convolution remotely ...")
    # Create the script
    local_script_path = fs.join(local_temp_path, "make_images.py")
    script_file = open(local_script_path, 'w')
    script_file.write("#!/usr/bin/env python\n")
    script_file.write("# -*- coding: utf8 -*-\n")
    script_file.write("\n")
    script_file.write("# Import standard modules\n")
    script_file.write("import numpy as np\n")
    script_file.write("\n")
    script_file.write("# Import the relevant PTS classes and modules\n")
    script_file.write("from pts.magic.core.image import Image\n")
    script_file.write("from pts.magic.core.frame import Frame\n")
    script_file.write("from pts.core.basics.filter import Filter\n")
    script_file.write("from pts.core.tools.logging import log\n")
    script_file.write("from pts.core.tools import filesystem as fs\n")
    script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Loading the datacube ...')\n")
    script_file.write("\n")
    script_file.write("# Open the datacube as an Image\n")
    script_file.write("datacube = Image.from_file('" + remote_datacube_path + "', always_call_first_primary=False)\n")
    script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Loading the wavelengths ...')\n")
    script_file.write("\n")
    script_file.write("# Load the wavelengths from the text file\n")
    script_file.write("wavelengths = np.loadtxt('" + remote_wavelengths_path + "')\n")
    script_file.write("\n")
    script_file.write("# Convert the frames from neutral surface brightness to wavelength surface brightness\n")
    script_file.write("for l in range(len(wavelengths)):\n")
    script_file.write("\n")
    script_file.write("    # Get the wavelength\n")
    script_file.write("    wavelength = wavelengths[l]\n")
    script_file.write("\n")
    script_file.write("    # Determine the name of the frame in the datacube\n")
    script_file.write("    frame_name = 'frame' + str(l)\n")
    script_file.write("\n")
    script_file.write("    # Divide this frame by the wavelength in micron\n")
    script_file.write("    datacube.frames[frame_name] /= wavelength\n")
    script_file.write("\n")
    script_file.write("    # Set the new unit\n")
    script_file.write("    datacube.frames[frame_name].unit = 'W / (m2 * arcsec2 * micron)'\n")
    script_file.write("\n")
    script_file.write("# Convert the datacube to a numpy array where wavelength is the third dimension\n")
    script_file.write("fluxdensities = datacube.asarray()\n")
    script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Creating the filters ...')\n")
    script_file.write("\n")
    script_file.write("filters = dict()\n")
    script_file.write("\n")
    for filter_name in filters:
        fltr = filters[filter_name]
        script_file.write("# Inform the user\n")
        script_file.write("log.info('Creating the " + str(fltr) + " filter')\n")
        script_file.write("\n")
        script_file.write("fltr = Filter.from_string('" + str(fltr) + "')\n")
        script_file.write("filters['" + filter_name + "'] = fltr\n")
        script_file.write("\n")
    script_file.write("# Inform the user\n")
    script_file.write("log.info('Performing the filter convolutions ...')\n")
    script_file.write("\n")
    script_file.write("# Loop over the filters, perform the convolution\n")
    script_file.write("for filter_name in filters:\n")
    script_file.write("\n")
    script_file.write("    fltr = filters[filter_name]\n")
    script_file.write("    log.info('Making the observed image for the ' + str(fltr) + ' filter ...')\n")
    script_file.write("    data = fltr.convolve(wavelengths, fluxdensities)\n")
    script_file.write("    frame = Frame(data)\n")
    script_file.write("    frame.unit = 'W/(m2 * arcsec2 * micron)'\n")
    script_file.write("    path = fs.join('" + remote_temp_path + "', filter_name + '.fits')\n")
    script_file.write("    frame.save(path)\n")
    # Write to disk
    script_file.close()
    # Debugging
    log.debug("Uploading the python script ...")
    # Upload the script file
    remote_script_path = fs.join(remote_temp_path, "make_images.py")
    remote.upload(local_script_path, remote_temp_path, new_name="make_images.py", show_output=True)
    # Debugging
    log.debug("Executing the script remotely ...")
    # Execute the script file remotely
    remote.execute("python " + remote_script_path, output=False, show_output=True)
    # Remove the datacube in the remote directory
    remote.remove_file(remote_datacube_path)
    # Debugging
    log.debug("Downloading the convolved frames ...")
    # Download
    local_downloaded_temp_path = fs.join(fs.home(), fs.name(remote_temp_path))
    fs.create_directory(local_downloaded_temp_path)
    remote.download(remote_temp_path, local_downloaded_temp_path, compress=True, show_output=True)
    # Remove the temporary directory on the remote's filesystem
    remote.remove_directory(remote_temp_path)
    # Remove the local temporary directory
    fs.remove_directory(local_temp_path)
    # Create a dictionary to contain the frames
    frames = dict()
    # Loop over the filters, load the frame
    for filter_name in filters:
        # Determine the path to the resulting FITS file
        path = fs.join(local_downloaded_temp_path, filter_name + ".fits")
        # Check whether the frame exists
        if not fs.is_file(path): raise RuntimeError("The image for filter " + str(filters[filter_name]) + " is missing")
        # Load the FITS file
        frame = Frame.from_file(path)
        # Add the frame to the dictionary
        frames[filter_name] = frame
    # Remove the downloaded temporary directory
    if not keep_output: fs.remove_directory(local_downloaded_temp_path)
    # Return the dictionary of frames
    return frames
# -----------------------------------------------------------------
| Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/tools/special.py | Python | mit | 24,211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
try:
import cPickle as pickle
except:
import pickle
import copy_reg
import gluon.portalocker as portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
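# sentinel default used by List.__call__ below to distinguish "no default supplied" from an explicit None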
DEFAULT = lambda:0
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
Example::
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
"""
__slots__ = ()
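    # attribute access is routed through the dict protocol; using dict.get means
    # missing attributes read as None instead of raising AttributeError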
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getitem__ = dict.get
__getattr__ = dict.get
__getnewargs__ = lambda self: getattr(dict,self).__getnewargs__(self)
__repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
# http://stackoverflow.com/questions/5247250/why-does-pickle-getstate-accept-as-a-return-value-the-very-instance-it-requi
__getstate__ = lambda self: None
__copy__ = lambda self: Storage(self)
def getlist(self, key):
"""
Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, [])
if value is None or isinstance(value, (list, tuple)):
return value
else:
return [value]
def getfirst(self, key, default=None):
"""
Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
def getlast(self, key, default=None):
"""
Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
def pickle_storage(s):
return Storage, (dict(s),)
copy_reg.pickle(Storage, pickle_storage)
PICKABLE = (str, int, long, float, bool, list, dict, tuple, set)
class StorageList(Storage):
"""
    Behaves like Storage but missing elements default to [] instead of None
"""
def __getitem__(self, key):
return self.__getattr__(key)
def __getattr__(self, key):
if key in self:
return self.get(key)
else:
r = []
self[key] = r
return r
def load_storage(filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'rb')
storage = pickle.load(fp)
finally:
if fp:
fp.close()
return Storage(storage)
def save_storage(storage, filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'wb')
pickle.dump(dict(storage), fp)
finally:
if fp:
fp.close()
class Settings(Storage):
def __setattr__(self, key, value):
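        # lock_keys forbids adding new settings and lock_values forbids changing
        # any value; the lock flags themselves remain assignable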
if key != 'lock_keys' and self['lock_keys'] and key not in self:
raise SyntaxError('setting key \'%s\' does not exist' % key)
if key != 'lock_values' and self['lock_values']:
raise SyntaxError('setting value cannot be changed: %s' % key)
self[key] = value
class Messages(Settings):
def __init__(self, T):
Storage.__init__(self, T=T)
def __getattr__(self, key):
value = self[key]
if isinstance(value, str):
return self.T(value)
return value
class FastStorage(dict):
"""
Eventually this should replace class Storage but causes memory leak
because of http://bugs.python.org/issue1469629
>>> s = FastStorage()
>>> s.a = 1
>>> s.a
1
>>> s['a']
1
>>> s.b
>>> s['b']
>>> s['b']=2
>>> s['b']
2
>>> s.b
2
>>> isinstance(s,dict)
True
>>> dict(s)
{'a': 1, 'b': 2}
>>> dict(FastStorage(s))
{'a': 1, 'b': 2}
>>> import pickle
>>> s = pickle.loads(pickle.dumps(s))
>>> dict(s)
{'a': 1, 'b': 2}
>>> del s.b
>>> del s.a
>>> s.a
>>> s.b
>>> s['a']
>>> s['b']
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
def __getattr__(self, key):
return getattr(self, key) if key in self else None
def __getitem__(self, key):
return dict.get(self, key, None)
def copy(self):
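        # temporarily detach the self-referencing __dict__ while building the
        # copy, then restore it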
self.__dict__ = {}
s = FastStorage(self)
self.__dict__ = self
return s
def __repr__(self):
return '<Storage %s>' % dict.__repr__(self)
def __getstate__(self):
return dict(self)
def __setstate__(self, sdict):
dict.__init__(self, sdict)
self.__dict__ = self
def update(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
class List(list):
"""
    Like a regular python list, but a[i] returns None instead of raising
    `IndexError` when i is out of bounds
"""
def __call__(self, i, default=DEFAULT, cast=None, otherwise=None):
"""Allows to use a special syntax for fast-check of `request.args()`
validity
Args:
i: index
default: use this value if arg not found
cast: type cast
otherwise: can be:
- None: results in a 404
- str: redirect to this address
- callable: calls the function (nothing is passed)
Example:
You can use::
request.args(0,default=0,cast=int,otherwise='http://error_url')
request.args(0,default=0,cast=int,otherwise=lambda:...)
"""
n = len(self)
if 0 <= i < n or -n <= i < 0:
value = self[i]
elif default is DEFAULT:
value = None
else:
value, cast = default, False
if cast:
try:
value = cast(value)
except (ValueError, TypeError):
from http import HTTP, redirect
if otherwise is None:
raise HTTP(404)
elif isinstance(otherwise, str):
redirect(otherwise)
elif callable(otherwise):
return otherwise()
else:
raise RuntimeError("invalid otherwise")
return value
if __name__ == '__main__':
import doctest
doctest.testmod()
| shashisp/blumix-webpy | app/gluon/storage.py | Python | mit | 8,573 |
#!/usr/bin/env python
from ..debugging import bacpypes_debugging, ModuleLogger
from ..capability import Capability
from ..basetypes import ErrorType, PropertyIdentifier
from ..primitivedata import Atomic, Null, Unsigned
from ..constructeddata import Any, Array, ArrayOf, List
from ..apdu import \
SimpleAckPDU, ReadPropertyACK, ReadPropertyMultipleACK, \
ReadAccessResult, ReadAccessResultElement, ReadAccessResultElementChoice
from ..errors import ExecutionError
from ..object import PropertyError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# handy reference
ArrayOfPropertyIdentifier = ArrayOf(PropertyIdentifier)
#
# ReadProperty and WriteProperty Services
#
@bacpypes_debugging
class ReadWritePropertyServices(Capability):
def __init__(self):
if _debug: ReadWritePropertyServices._debug("__init__")
Capability.__init__(self)
def do_ReadPropertyRequest(self, apdu):
"""Return the value of some property of one of our objects."""
if _debug: ReadWritePropertyServices._debug("do_ReadPropertyRequest %r", apdu)
# extract the object identifier
objId = apdu.objectIdentifier
# check for wildcard
if (objId == ('device', 4194303)) and self.localDevice is not None:
if _debug: ReadWritePropertyServices._debug(" - wildcard device identifier")
objId = self.localDevice.objectIdentifier
# get the object
obj = self.get_object_id(objId)
if _debug: ReadWritePropertyServices._debug(" - object: %r", obj)
if not obj:
raise ExecutionError(errorClass='object', errorCode='unknownObject')
try:
# get the datatype
datatype = obj.get_datatype(apdu.propertyIdentifier)
if _debug: ReadWritePropertyServices._debug(" - datatype: %r", datatype)
# get the value
value = obj.ReadProperty(apdu.propertyIdentifier, apdu.propertyArrayIndex)
if _debug: ReadWritePropertyServices._debug(" - value: %r", value)
if value is None:
raise PropertyError(apdu.propertyIdentifier)
# change atomic values into something encodeable
if issubclass(datatype, Atomic) or (issubclass(datatype, (Array, List)) and isinstance(value, list)):
value = datatype(value)
elif issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = Unsigned(value)
elif issubclass(datatype.subtype, Atomic):
value = datatype.subtype(value)
elif not isinstance(value, datatype.subtype):
raise TypeError("invalid result datatype, expecting {0} and got {1}" \
.format(datatype.subtype.__name__, type(value).__name__))
elif issubclass(datatype, List):
value = datatype(value)
elif not isinstance(value, datatype):
raise TypeError("invalid result datatype, expecting {0} and got {1}" \
.format(datatype.__name__, type(value).__name__))
if _debug: ReadWritePropertyServices._debug(" - encodeable value: %r", value)
# this is a ReadProperty ack
resp = ReadPropertyACK(context=apdu)
resp.objectIdentifier = objId
resp.propertyIdentifier = apdu.propertyIdentifier
resp.propertyArrayIndex = apdu.propertyArrayIndex
# save the result in the property value
resp.propertyValue = Any()
resp.propertyValue.cast_in(value)
if _debug: ReadWritePropertyServices._debug(" - resp: %r", resp)
except PropertyError:
raise ExecutionError(errorClass='property', errorCode='unknownProperty')
# return the result
self.response(resp)
def do_WritePropertyRequest(self, apdu):
"""Change the value of some property of one of our objects."""
if _debug: ReadWritePropertyServices._debug("do_WritePropertyRequest %r", apdu)
# get the object
obj = self.get_object_id(apdu.objectIdentifier)
if _debug: ReadWritePropertyServices._debug(" - object: %r", obj)
if not obj:
raise ExecutionError(errorClass='object', errorCode='unknownObject')
try:
# check if the property exists
if obj.ReadProperty(apdu.propertyIdentifier, apdu.propertyArrayIndex) is None:
raise PropertyError(apdu.propertyIdentifier)
# get the datatype, special case for null
if apdu.propertyValue.is_application_class_null():
datatype = Null
else:
datatype = obj.get_datatype(apdu.propertyIdentifier)
if _debug: ReadWritePropertyServices._debug(" - datatype: %r", datatype)
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = apdu.propertyValue.cast_out(Unsigned)
else:
value = apdu.propertyValue.cast_out(datatype.subtype)
else:
value = apdu.propertyValue.cast_out(datatype)
if _debug: ReadWritePropertyServices._debug(" - value: %r", value)
# change the value
value = obj.WriteProperty(apdu.propertyIdentifier, value, apdu.propertyArrayIndex, apdu.priority)
# success
resp = SimpleAckPDU(context=apdu)
if _debug: ReadWritePropertyServices._debug(" - resp: %r", resp)
except PropertyError:
raise ExecutionError(errorClass='property', errorCode='unknownProperty')
# return the result
self.response(resp)
#
# read_property_to_any
#
@bacpypes_debugging
def read_property_to_any(obj, propertyIdentifier, propertyArrayIndex=None):
"""Read the specified property of the object, with the optional array index,
and cast the result into an Any object."""
if _debug: read_property_to_any._debug("read_property_to_any %s %r %r", obj, propertyIdentifier, propertyArrayIndex)
# get the datatype
datatype = obj.get_datatype(propertyIdentifier)
if _debug: read_property_to_any._debug(" - datatype: %r", datatype)
if datatype is None:
raise ExecutionError(errorClass='property', errorCode='datatypeNotSupported')
# get the value
value = obj.ReadProperty(propertyIdentifier, propertyArrayIndex)
if _debug: read_property_to_any._debug(" - value: %r", value)
if value is None:
raise ExecutionError(errorClass='property', errorCode='unknownProperty')
# change atomic values into something encodeable
if issubclass(datatype, Atomic) or (issubclass(datatype, (Array, List)) and isinstance(value, list)):
value = datatype(value)
elif issubclass(datatype, Array) and (propertyArrayIndex is not None):
if propertyArrayIndex == 0:
value = Unsigned(value)
elif issubclass(datatype.subtype, Atomic):
value = datatype.subtype(value)
elif not isinstance(value, datatype.subtype):
raise TypeError("invalid result datatype, expecting %s and got %s" \
% (datatype.subtype.__name__, type(value).__name__))
elif not isinstance(value, datatype):
raise TypeError("invalid result datatype, expecting %s and got %s" \
% (datatype.__name__, type(value).__name__))
if _debug: read_property_to_any._debug(" - encodeable value: %r", value)
# encode the value
result = Any()
result.cast_in(value)
if _debug: read_property_to_any._debug(" - result: %r", result)
# return the object
return result
#
# read_property_to_result_element
#
@bacpypes_debugging
def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None):
"""Read the specified property of the object, with the optional array index,
and cast the result into an Any object."""
if _debug: read_property_to_result_element._debug("read_property_to_result_element %s %r %r", obj, propertyIdentifier, propertyArrayIndex)
# save the result in the property value
read_result = ReadAccessResultElementChoice()
try:
if not obj:
raise ExecutionError(errorClass='object', errorCode='unknownObject')
read_result.propertyValue = read_property_to_any(obj, propertyIdentifier, propertyArrayIndex)
if _debug: read_property_to_result_element._debug(" - success")
except PropertyError as error:
if _debug: read_property_to_result_element._debug(" - error: %r", error)
read_result.propertyAccessError = ErrorType(errorClass='property', errorCode='unknownProperty')
except ExecutionError as error:
if _debug: read_property_to_result_element._debug(" - error: %r", error)
read_result.propertyAccessError = ErrorType(errorClass=error.errorClass, errorCode=error.errorCode)
# make an element for this value
read_access_result_element = ReadAccessResultElement(
propertyIdentifier=propertyIdentifier,
propertyArrayIndex=propertyArrayIndex,
readResult=read_result,
)
if _debug: read_property_to_result_element._debug(" - read_access_result_element: %r", read_access_result_element)
# fini
return read_access_result_element
#
# ReadWritePropertyMultipleServices
#
@bacpypes_debugging
class ReadWritePropertyMultipleServices(Capability):
def __init__(self):
if _debug: ReadWritePropertyMultipleServices._debug("__init__")
Capability.__init__(self)
def do_ReadPropertyMultipleRequest(self, apdu):
"""Respond to a ReadPropertyMultiple Request."""
if _debug: ReadWritePropertyMultipleServices._debug("do_ReadPropertyMultipleRequest %r", apdu)
# response is a list of read access results (or an error)
resp = None
read_access_result_list = []
# loop through the request
for read_access_spec in apdu.listOfReadAccessSpecs:
# get the object identifier
objectIdentifier = read_access_spec.objectIdentifier
if _debug: ReadWritePropertyMultipleServices._debug(" - objectIdentifier: %r", objectIdentifier)
# check for wildcard
if (objectIdentifier == ('device', 4194303)) and self.localDevice is not None:
if _debug: ReadWritePropertyMultipleServices._debug(" - wildcard device identifier")
objectIdentifier = self.localDevice.objectIdentifier
# get the object
obj = self.get_object_id(objectIdentifier)
if _debug: ReadWritePropertyMultipleServices._debug(" - object: %r", obj)
# build a list of result elements
read_access_result_element_list = []
# loop through the property references
for prop_reference in read_access_spec.listOfPropertyReferences:
# get the property identifier
propertyIdentifier = prop_reference.propertyIdentifier
if _debug: ReadWritePropertyMultipleServices._debug(" - propertyIdentifier: %r", propertyIdentifier)
# get the array index (optional)
propertyArrayIndex = prop_reference.propertyArrayIndex
if _debug: ReadWritePropertyMultipleServices._debug(" - propertyArrayIndex: %r", propertyArrayIndex)
# check for special property identifiers
if propertyIdentifier in ('all', 'required', 'optional'):
if not obj:
# build a property access error
read_result = ReadAccessResultElementChoice()
read_result.propertyAccessError = ErrorType(errorClass='object', errorCode='unknownObject')
# make an element for this error
read_access_result_element = ReadAccessResultElement(
propertyIdentifier=propertyIdentifier,
propertyArrayIndex=propertyArrayIndex,
readResult=read_result,
)
# add it to the list
read_access_result_element_list.append(read_access_result_element)
else:
for propId, prop in obj._properties.items():
if _debug: ReadWritePropertyMultipleServices._debug(" - checking: %r %r", propId, prop.optional)
# skip propertyList for ReadPropertyMultiple
if (propId == 'propertyList'):
if _debug: ReadWritePropertyMultipleServices._debug(" - ignore propertyList")
continue
if (propertyIdentifier == 'all'):
pass
elif (propertyIdentifier == 'required') and (prop.optional):
if _debug: ReadWritePropertyMultipleServices._debug(" - not a required property")
continue
elif (propertyIdentifier == 'optional') and (not prop.optional):
if _debug: ReadWritePropertyMultipleServices._debug(" - not an optional property")
continue
# read the specific property
read_access_result_element = read_property_to_result_element(obj, propId, propertyArrayIndex)
# check for undefined property
if read_access_result_element.readResult.propertyAccessError \
and read_access_result_element.readResult.propertyAccessError.errorCode == 'unknownProperty':
continue
# add it to the list
read_access_result_element_list.append(read_access_result_element)
else:
# read the specific property
read_access_result_element = read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex)
# add it to the list
read_access_result_element_list.append(read_access_result_element)
# build a read access result
read_access_result = ReadAccessResult(
objectIdentifier=objectIdentifier,
listOfResults=read_access_result_element_list
)
if _debug: ReadWritePropertyMultipleServices._debug(" - read_access_result: %r", read_access_result)
# add it to the list
read_access_result_list.append(read_access_result)
# this is a ReadPropertyMultiple ack
if not resp:
resp = ReadPropertyMultipleACK(context=apdu)
resp.listOfReadAccessResults = read_access_result_list
if _debug: ReadWritePropertyMultipleServices._debug(" - resp: %r", resp)
# return the result
self.response(resp)
# def do_WritePropertyMultipleRequest(self, apdu):
# """Respond to a WritePropertyMultiple Request."""
# if _debug: ReadWritePropertyMultipleServices._debug("do_ReadPropertyMultipleRequest %r", apdu)
#
# raise NotImplementedError()
| JoelBender/bacpypes | py27/bacpypes/service/object.py | Python | mit | 15,660 |
# Copyright (C) 2012-2020 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains data about certain markup, like HTML tags and external links.
When updating this file, please also update the C tokenizer version:
- mwparserfromhell/parser/ctokenizer/definitions.c
- mwparserfromhell/parser/ctokenizer/definitions.h
"""
__all__ = [
"get_html_tag",
"is_parsable",
"is_visible",
"is_single",
"is_single_only",
"is_scheme",
]
URI_SCHEMES = {
# [wikimedia/mediawiki.git]/includes/DefaultSettings.php @ 5c660de5d0
"bitcoin": False,
"ftp": True,
"ftps": True,
"geo": False,
"git": True,
"gopher": True,
"http": True,
"https": True,
"irc": True,
"ircs": True,
"magnet": False,
"mailto": False,
"mms": True,
"news": False,
"nntp": True,
"redis": True,
"sftp": True,
"sip": False,
"sips": False,
"sms": False,
"ssh": True,
"svn": True,
"tel": False,
"telnet": True,
"urn": False,
"worldwind": True,
"xmpp": False,
}
PARSER_BLACKLIST = [
# https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
"categorytree",
"ce",
"chem",
"gallery",
"graph",
"hiero",
"imagemap",
"inputbox",
"math",
"nowiki",
"pre",
"score",
"section",
"source",
"syntaxhighlight",
"templatedata",
"timeline",
]
INVISIBLE_TAGS = [
# https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
"categorytree",
"gallery",
"graph",
"imagemap",
"inputbox",
"math",
"score",
"section",
"templatedata",
"timeline",
]
# [wikimedia/mediawiki.git]/includes/parser/Sanitizer.php @ 95e17ee645
SINGLE_ONLY = ["br", "wbr", "hr", "meta", "link", "img"]
SINGLE = SINGLE_ONLY + ["li", "dt", "dd", "th", "td", "tr"]
MARKUP_TO_HTML = {
"#": "li",
"*": "li",
";": "dt",
":": "dd",
}
def get_html_tag(markup):
"""Return the HTML tag associated with the given wiki-markup."""
return MARKUP_TO_HTML[markup]
def is_parsable(tag):
"""Return if the given *tag*'s contents should be passed to the parser."""
return tag.lower() not in PARSER_BLACKLIST
def is_visible(tag):
"""Return whether or not the given *tag* contains visible text."""
return tag.lower() not in INVISIBLE_TAGS
def is_single(tag):
"""Return whether or not the given *tag* can exist without a close tag."""
return tag.lower() in SINGLE
def is_single_only(tag):
"""Return whether or not the given *tag* must exist without a close tag."""
return tag.lower() in SINGLE_ONLY
def is_scheme(scheme, slashes=True):
"""Return whether *scheme* is valid for external links."""
scheme = scheme.lower()
if slashes:
return scheme in URI_SCHEMES
return scheme in URI_SCHEMES and not URI_SCHEMES[scheme]
| earwig/mwparserfromhell | src/mwparserfromhell/definitions.py | Python | mit | 3,915 |
from __future__ import print_function
from __future__ import division
# python bpzchisq2run.py ACS-Subaru
# PRODUCES ACS-Subaru_bpz.cat
# ADDS A FEW THINGS TO THE BPZ CATALOG
# INCLUDING chisq2 AND LABEL HEADERS
# ~/p/bpzchisq2run.py NOW INCLUDED!
# ~/Tonetti/colorpro/bpzfinalize7a.py
# ~/UDF/Elmegreen/phot8/bpzfinalize7.py
# ~/UDF/bpzfinalize7a.py, 7, 5, 4, 23_djh, 23, 3
# NOW TAKING BPZ OUTPUT w/ 3 REDSHIFT PEAKS
# ALSO USING NEW i-band CATALOG istel.cat -- w/ CORRECT IDs
# python bpzfinalize.py bvizjh_cut_sexseg2_allobjs_newres_offset3_djh_Burst_1M
from builtins import range
from past.utils import old_div
from coetools import *
sum = add.reduce # Just to make sure
##################
# add nf, jhgood, stellarity, x, y
inbpz = capfile(sys.argv[1], 'bpz')
inroot = inbpz[:-4]
infile = loadfile(inbpz)
for line in infile:
if line[:7] == '##INPUT':
incat = line[8:]
break
for line in infile:
if line[:9] == '##N_PEAKS':
npeaks = string.atoi(line[10])
break
#inchisq2 = inroot + '.chisq2'
#outbpz = inroot + '_b.bpz'
outbpz = inroot + '_bpz.cat'
if npeaks == 1:
labels = string.split(
'id zb zbmin zbmax tb odds zml tml chisq')
elif npeaks == 3:
labels = string.split(
'id zb zbmin zbmax tb odds zb2 zb2min zb2max tb2 odds2 zb3 zb3min zb3max tb3 odds3 zml tml chisq')
else:
print('N_PEAKS = %d!?' % npeaks)
sys.exit(1)
labelnicks = {'Z_S': 'zspec', 'M_0': 'M0'}
read = 0
ilabel = 0
for iline in range(len(infile)):
line = infile[iline]
if line[:2] == '##':
if read:
break
else:
read = 1
if read == 1:
ilabel += 1
label = string.split(line)[-1]
if ilabel >= 10:
labels.append(labelnicks.get(label, label))
mybpz = loadvarswithclass(inbpz, labels=labels)
mycat = loadvarswithclass(incat)
#icat = loadvarswithclass('/home/coe/UDF/istel.cat')
#icat = icat.takeids(mycat.id)
#bpzchisq2 = loadvarswithclass(inchisq2)
#################################
# CHISQ2, nfdet, nfobs
if os.path.exists(inroot + '.flux_comparison'):
data = loaddata(inroot + '.flux_comparison+')
#nf = 6
nf = old_div((len(data) - 5), 3)
# id M0 zb tb*3
id = data[0]
ft = data[5:5 + nf] # FLUX (from spectrum for that TYPE)
fo = data[5 + nf:5 + 2 * nf] # FLUX (OBSERVED)
efo = data[5 + 2 * nf:5 + 3 * nf] # FLUX_ERROR (OBSERVED)
# chisq 2
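    # assume a template (model) flux error of ft/15 (~6.7%) and add it in
    # quadrature to the observed flux errors below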
eft = old_div(ft, 15.)
eft = max(eft) # for each galaxy, take max eft among filters
ef = sqrt(efo**2 + eft**2) # (6, 18981) + (18981) done correctly
dfosq = (old_div((ft - fo), ef))**2
dfosqsum = sum(dfosq)
detected = greater(fo, 0)
nfdet = sum(detected)
observed = less(efo, 1)
nfobs = sum(observed)
# DEGREES OF FREEDOM
dof = clip2(nfobs - 3., 1, None) # 3 params (z, t, a)
chisq2clip = old_div(dfosqsum, dof)
sedfrac = divsafe(max(fo - efo), max(ft), -1) # SEDzero
chisq2 = chisq2clip[:]
chisq2 = where(less(sedfrac, 1e-10), 900., chisq2)
chisq2 = where(equal(nfobs, 1), 990., chisq2)
chisq2 = where(equal(nfobs, 0), 999., chisq2)
#################################
#print 'BPZ tb N_PEAKS BUG FIX'
#mybpz.tb = mybpz.tb + 0.667
#mybpz.tb2 = where(greater(mybpz.tb2, 0), mybpz.tb2 + 0.667, -1.)
#mybpz.tb3 = where(greater(mybpz.tb3, 0), mybpz.tb3 + 0.667, -1.)
mybpz.add('chisq2', chisq2)
mybpz.add('nfdet', nfdet)
mybpz.add('nfobs', nfobs)
#mybpz.add('jhgood', jhgood)
if 'stel' in mycat.labels:
mybpz.add('stel', mycat.stel)
elif 'stellarity' in mycat.labels:
mybpz.add('stel', mycat.stellarity)
if 'maxsigisoaper' in mycat.labels:
mybpz.add('sig', mycat.maxsigisoaper)
if 'sig' in mycat.labels:
mybpz.assign('sig', mycat.maxsigisoaper)
#mybpz.add('x', mycat.x)
#mybpz.add('y', mycat.y)
if 'zspec' not in mybpz.labels:
if 'zspec' in mycat.labels:
mybpz.add('zspec', mycat.zspec)
print(mycat.zspec)
if 'zqual' in mycat.labels:
mybpz.add('zqual', mycat.zqual)
print(mybpz.labels)
mybpz.save(outbpz, maxy=None)
##################
# det
# 0 < mag < 99
# dmag > 0
# fo > 0
# efo -> 1.6e-8, e.g.
# undet
# mag = 99
# dmag = -m_1sigma
# fo = 0
# efo = 0 -> 5e13, e.g.
# unobs
# mag = -99
# dmag = 0
# fo = 0
# efo = inf (1e108)
## # original chisq usually matches this:
## dfosq = ((ft - fo) / efo) ** 2
## dfosqsum = sum(dfosq)
## observed = less(efo, 1)
## nfobs = sum(observed)
## chisq = dfosqsum / (nfobs - 1.)
| boada/planckClusters | MOSAICpipe/bpz-1.99.3/bpzfinalize.py | Python | mit | 4,563 |
#==============================================================================
#description     : Solves travelling salesman problem by using Simulated Annealing.
#author : Yakup Cengiz
#date : 20151121
#version : 0.1
#notes :
#python_version : 3.5.0
#Reference : http://www.psychicorigami.com/category/tsp/
#==============================================================================
import math
import sys
import os
import random
CommonPath = os.path.abspath(os.path.join('..', 'Common'))
sys.path.append(CommonPath)
import tsp
def GenerateInitialPath(tour_length):
tour=list(range(tour_length))
random.shuffle(tour)
return tour
MAX_ITERATION = 50000
def reversed_sections(tour):
'''generator to return all possible variations where the section between two cities are swapped'''
for i,j in tsp.AllEdges(len(tour)):
if i != j:
copy=tour[:]
if i < j:
copy[i:j+1]=reversed(tour[i:j+1])
else:
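                # i > j: the chosen section wraps around the end of the tour,
                # so rebuild the copy by reversing across the boundary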
copy[i+1:]=reversed(tour[:j])
copy[:j]=reversed(tour[i+1:])
if copy != tour: # no point returning the same tour
yield copy
def kirkpatrick_cooling(start_temp, alpha):
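    # geometric cooling schedule: yields start_temp, alpha*start_temp, alpha**2*start_temp, ...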
T = start_temp
while True:
yield T
T = alpha * T
def P(prev_score,next_score,temperature):
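    # Metropolis-style acceptance: always accept an improvement, otherwise
    # accept with probability exp(-|delta score| / temperature)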
if next_score > prev_score:
return 1.0
else:
return math.exp( -abs(next_score-prev_score)/temperature )
class ObjectiveFunction:
'''class to wrap an objective function and
keep track of the best solution evaluated'''
def __init__(self,objective_function):
self.objective_function=objective_function
self.best=None
self.best_score=None
def __call__(self,solution):
score=self.objective_function(solution)
if self.best is None or score > self.best_score:
self.best_score=score
self.best=solution
return score
def ApplySimulatedAnnealing(init_function,move_operator,objective_function,max_evaluations,start_temp,alpha):
# wrap the objective function (so we record the best)
objective_function=ObjectiveFunction(objective_function)
current = init_function()
current_score = objective_function(current)
iterationCount = 1
cooling_schedule = kirkpatrick_cooling(start_temp, alpha)
for temperature in cooling_schedule:
done = False
# examine moves around our current position
for next in move_operator(current):
if iterationCount >= max_evaluations:
done=True
break
next_score=objective_function(next)
iterationCount+=1
# probablistically accept this solution always accepting better solutions
p = P(current_score, next_score, temperature)
            # random.random() generates a random float uniformly in [0.0, 1.0);
            # P() returns an acceptance probability in [0.0, 1.0]
if random.random() < p:
current = next
current_score= next_score
break
# see if completely finished
if done: break
best_score = objective_function.best_score
best = objective_function.best
return (iterationCount,best_score,best)
def SolveTSP():
print("Starting to solve travel salesman problem")
coordinates = tsp.ReadCoordinatesFromFile(".\cityCoordinates.csv")
distance_matrix = tsp.ComputeDistanceMatrix(coordinates);
init_function = lambda: GenerateInitialPath(len(coordinates))
objective_function = lambda tour: -tsp.ComputeTourLength(distance_matrix, tour)
start_temp,alpha = 100, 0.995
iterationCount,best_score,shortestPath = ApplySimulatedAnnealing(init_function, reversed_sections, objective_function, MAX_ITERATION,start_temp,alpha)
print(iterationCount, best_score, shortestPath);
tsp.DrawPath(coordinates, shortestPath, "TSP.png");
if __name__ == "__main__":
    SolveTSP();
| yakupc/Artificial-Intelligence | Algorithms/SolveTSPSimulatedAnnealing/SolveTSPSimulatedAnnealing.py | Python | mit | 4,196 |
"""django_todo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^todo/', include('todo.urls')),
url(r'^accounts/', include('accounts.urls')),
]
| GunnerJnr/_CodeInstitute | Stream-3/Full-Stack-Development/21.Django REST Framework/4.User-Authentication/django_todo/django_todo/urls.py | Python | mit | 869 |
"""
Example
-------
class SystemSetting(KVModel):
pass
setting = SystemSetting.create(key='foo', value=100)
loaded_setting = SystemSetting.get_by_key('foo')
"""
from django.db import models
from .fields import SerializableField
class KVModel(models.Model):
"""
An Abstract model that has key and value fields
key -- Unique CharField of max_length 255
value -- SerializableField by default could be used to store bool, int,
float, str, list, dict and date
"""
key = models.CharField(max_length=255, unique=True)
value = SerializableField(blank=True, null=True)
def __unicode__(self):
return 'KVModel instance: ' + self.key + ' = ' + unicode(self.value)
@classmethod
def get_by_key(cls, key):
"""
A static method that returns a KVModel instance.
key -- unique key that is used for the search.
this method will throw a DoesNotExist exception if an object with the
key provided is not found.
"""
return cls.objects.get(key=key)
class Meta:
abstract = True
| amdorra/django-kvmodel | kvmodel/models.py | Python | mit | 1,100 |
import re
from datetime import datetime
from flask import current_app as app
from flask_jwt import current_identity
from flask_restplus import Namespace, Resource, fields, reqparse
from sqlalchemy.exc import IntegrityError
from packr.models import Message
api = Namespace('contact',
description='Operations related to the contact form')
message = api.model('Contact', {
'email': fields.String(required=True,
description='Contact email'),
'content': fields.String(required=True,
description='Message'),
})
message_id = api.model('ContactCompletion', {
'id': fields.Integer(required=True,
description='id')
})
@api.route('/')
class MessageItem(Resource):
@api.expect(message)
@api.response(204, 'Message successfully received.')
def post(self):
req_parse = reqparse.RequestParser(bundle_errors=True)
req_parse.add_argument('email', type=str, required=True,
help='No email provided',
location='json')
req_parse.add_argument('content', type=str, required=True,
help='No message provided',
location='json')
args = req_parse.parse_args()
email = args.get('email')
content = args.get('content')
if email == '':
return {'message': {'email': 'No email provided'}}, 400
elif not re.match(r"^[A-Za-z0-9.+_-]+@[A-Za-z0-9._-]+\.[a-zA-Z]*$",
email):
return {'message': {'email': 'Invalid email provided'}}, 400
if content == '':
return {'message': {'content': 'No content provided'}}, 400
new_message = Message(email=email,
content=content,
time=datetime.now())
try:
new_message.save()
except IntegrityError as e:
print(e)
return {
'description': 'Failed to send message.'
}, 409
except Exception as e:
print(e)
return {'description': 'Server encountered an error.'}, 500
return {'email': new_message.email}, 201
def get(self):
if not current_identity and not app.config.get('TESTING'):
return {'message': 'User not authenticated'}, 401
if app.config.get('TESTING') \
or current_identity.role.role_name == "ADMIN":
messages = dict()
for message_row in Message.query.filter_by(done=False).all():
messages[message_row.id] = {
"email": message_row.email,
"time": message_row.time.isoformat(),
"content": message_row.content
}
return messages, 201
else:
return {'message': 'Not authorised'}, 401
@api.route('/complete')
class CompleteItem(Resource):
@api.expect(message_id)
@api.response(204, 'Message successfully updated.')
def post(self):
req_parse = reqparse.RequestParser(bundle_errors=True)
req_parse.add_argument('id', type=int, required=True,
help='No id provided',
location='json')
args = req_parse.parse_args()
id = args.get('id')
if id == 0:
return {'message': {'id': 'No id provided'}}, 400
completed_message = Message.query.filter_by(id=id).first()
completed_message.done = True
try:
completed_message.save()
except IntegrityError as e:
print(e)
return {
'description': 'Failed to update message.'
}, 409
except Exception as e:
print(e)
return {'description': 'Server encountered an error.'}, 500
return {'message': "Message updated"}, 201
| KnightHawk3/packr | packr/api/contact.py | Python | mit | 4,011 |
# Copyright (c) 2013-2014 Will Thames <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from typing import TYPE_CHECKING, Any, Dict, Union
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from typing import Optional
from ansiblelint.file_utils import Lintable
class MercurialHasRevisionRule(AnsibleLintRule):
id = 'hg-latest'
shortdesc = 'Mercurial checkouts must contain explicit revision'
description = (
'All version control checkouts must point to '
'an explicit commit or tag, not just ``latest``'
)
severity = 'MEDIUM'
tags = ['idempotency']
version_added = 'historic'
def matchtask(
self, task: Dict[str, Any], file: 'Optional[Lintable]' = None
) -> Union[bool, str]:
return bool(
task['action']['__ansible_module__'] == 'hg'
and task['action'].get('revision', 'default') == 'default'
)
| ansible/ansible-lint | src/ansiblelint/rules/MercurialHasRevisionRule.py | Python | mit | 1,951 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from setuptools import setup
setup(
name='libipa',
version='0.0.6',
author='Andrew Udvare',
author_email='[email protected]',
packages=['ipa'],
scripts=['bin/ipa-unzip-bin', 'bin/ipa-dump-info'],
url='https://github.com/Tatsh/libipa',
license='LICENSE.txt',
description='Library to read IPA files (iOS application archives).',
test_suite='ipa.test',
long_description='No description.',
install_requires=[
'biplist>=0.7',
'six>=1.7.3',
],
)
| Tatsh/libipa | setup.py | Python | mit | 555 |
"""
Support to interface with Sonos players (via SoCo).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.sonos/
"""
import datetime
import logging
from os import path
import socket
import urllib
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, STATE_OFF,
ATTR_ENTITY_ID)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['SoCo==0.12']
_LOGGER = logging.getLogger(__name__)
# The soco library is excessively chatty when it comes to logging and
# causes a LOT of spam in the logs due to making a http connection to each
# speaker every 10 seconds. Quiet it down a bit to just actual problems.
_SOCO_LOGGER = logging.getLogger('soco')
_SOCO_LOGGER.setLevel(logging.ERROR)
_REQUESTS_LOGGER = logging.getLogger('requests')
_REQUESTS_LOGGER.setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA |\
SUPPORT_SEEK | SUPPORT_CLEAR_PLAYLIST | SUPPORT_SELECT_SOURCE
SERVICE_GROUP_PLAYERS = 'sonos_group_players'
SERVICE_UNJOIN = 'sonos_unjoin'
SERVICE_SNAPSHOT = 'sonos_snapshot'
SERVICE_RESTORE = 'sonos_restore'
SERVICE_SET_TIMER = 'sonos_set_sleep_timer'
SERVICE_CLEAR_TIMER = 'sonos_clear_sleep_timer'
SUPPORT_SOURCE_LINEIN = 'Line-in'
SUPPORT_SOURCE_TV = 'TV'
# Service call validation schemas
ATTR_SLEEP_TIME = 'sleep_time'
SONOS_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
SONOS_SET_TIMER_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_SLEEP_TIME): vol.All(vol.Coerce(int),
vol.Range(min=0, max=86399))
})
# List of devices that have been registered
DEVICES = []
# pylint: disable=unused-argument, too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Sonos platform."""
import soco
global DEVICES
if discovery_info:
player = soco.SoCo(discovery_info)
# if device allready exists by config
if player.uid in DEVICES:
return True
if player.is_visible:
device = SonosDevice(hass, player)
add_devices([device])
if not DEVICES:
register_services(hass)
DEVICES.append(device)
return True
return False
players = None
hosts = config.get('hosts', None)
if hosts:
# Support retro compatibility with comma separated list of hosts
# from config
hosts = hosts.split(',') if isinstance(hosts, str) else hosts
players = []
for host in hosts:
players.append(soco.SoCo(socket.gethostbyname(host)))
if not players:
players = soco.discover(interface_addr=config.get('interface_addr',
None))
if not players:
_LOGGER.warning('No Sonos speakers found.')
return False
DEVICES = [SonosDevice(hass, p) for p in players]
add_devices(DEVICES)
register_services(hass)
_LOGGER.info('Added %s Sonos speakers', len(players))
return True
def register_services(hass):
"""Register all services for sonos devices."""
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_GROUP_PLAYERS,
_group_players_service,
descriptions.get(SERVICE_GROUP_PLAYERS),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_UNJOIN,
_unjoin_service,
descriptions.get(SERVICE_UNJOIN),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SNAPSHOT,
_snapshot_service,
descriptions.get(SERVICE_SNAPSHOT),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_RESTORE,
_restore_service,
descriptions.get(SERVICE_RESTORE),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_TIMER,
_set_sleep_timer_service,
descriptions.get(SERVICE_SET_TIMER),
schema=SONOS_SET_TIMER_SCHEMA)
hass.services.register(DOMAIN, SERVICE_CLEAR_TIMER,
_clear_sleep_timer_service,
descriptions.get(SERVICE_CLEAR_TIMER),
schema=SONOS_SCHEMA)
def _apply_service(service, service_func, *service_func_args):
"""Internal func for applying a service."""
entity_ids = service.data.get('entity_id')
if entity_ids:
_devices = [device for device in DEVICES
if device.entity_id in entity_ids]
else:
_devices = DEVICES
for device in _devices:
service_func(device, *service_func_args)
device.update_ha_state(True)
def _group_players_service(service):
"""Group media players, use player as coordinator."""
_apply_service(service, SonosDevice.group_players)
def _unjoin_service(service):
"""Unjoin the player from a group."""
_apply_service(service, SonosDevice.unjoin)
def _snapshot_service(service):
"""Take a snapshot."""
_apply_service(service, SonosDevice.snapshot)
def _restore_service(service):
"""Restore a snapshot."""
_apply_service(service, SonosDevice.restore)
def _set_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.set_sleep_timer,
service.data[ATTR_SLEEP_TIME])
def _clear_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.clear_sleep_timer)
def only_if_coordinator(func):
"""Decorator for coordinator.
If used as decorator, avoid calling the decorated method if player is not
a coordinator. If not, a grouped speaker (not in coordinator role) will
throw soco.exceptions.SoCoSlaveException.
Also, partially catch exceptions like:
soco.exceptions.SoCoUPnPException: UPnP Error 701 received:
Transition not available from <player ip address>
"""
def wrapper(*args, **kwargs):
"""Decorator wrapper."""
if args[0].is_coordinator:
from soco.exceptions import SoCoUPnPException
try:
func(*args, **kwargs)
except SoCoUPnPException:
_LOGGER.error('command "%s" for Sonos device "%s" '
'not available in this mode',
func.__name__, args[0].name)
else:
_LOGGER.debug('Ignore command "%s" for Sonos device "%s" (%s)',
func.__name__, args[0].name, 'not coordinator')
return wrapper
# pylint: disable=too-many-instance-attributes, too-many-public-methods
# pylint: disable=abstract-method
class SonosDevice(MediaPlayerDevice):
"""Representation of a Sonos device."""
# pylint: disable=too-many-arguments
def __init__(self, hass, player):
"""Initialize the Sonos device."""
from soco.snapshot import Snapshot
self.hass = hass
self.volume_increment = 5
self._player = player
self._speaker_info = None
self._name = None
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self.update()
self.soco_snapshot = Snapshot(self._player)
@property
def should_poll(self):
"""Polling needed."""
return True
def update_sonos(self, now):
"""Update state, called by track_utc_time_change."""
self.update_ha_state(True)
@property
def unique_id(self):
"""Return an unique ID."""
return self._player.uid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status == 'PAUSED_PLAYBACK':
return STATE_PAUSED
if self._status == 'PLAYING':
return STATE_PLAYING
if self._status == 'STOPPED':
return STATE_IDLE
if self._status == 'OFF':
return STATE_OFF
return STATE_UNKNOWN
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._player.is_coordinator
def update(self):
"""Retrieve latest state."""
self._speaker_info = self._player.get_speaker_info()
self._name = self._speaker_info['zone_name'].replace(
' (R)', '').replace(' (L)', '')
if self.available:
self._status = self._player.get_current_transport_info().get(
'current_transport_state')
trackinfo = self._player.get_current_track_info()
if trackinfo['uri'].startswith('x-rincon:'):
# this speaker is a slave, find the coordinator
# the uri of the track is 'x-rincon:{coordinator-id}'
coordinator_id = trackinfo['uri'][9:]
coordinators = [device for device in DEVICES
if device.unique_id == coordinator_id]
self._coordinator = coordinators[0] if coordinators else None
else:
self._coordinator = None
if not self._coordinator:
mediainfo = self._player.avTransport.GetMediaInfo([
('InstanceID', 0)
])
duration = trackinfo.get('duration', '0:00')
                # if the speaker is playing from the "line-in" source, getting
                # track metadata can return NOT_IMPLEMENTED, which breaks the
                # duration logic below
if duration == 'NOT_IMPLEMENTED':
duration = None
else:
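                    # convert a "H:MM:SS" / "MM:SS" style string into seconds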
duration = sum(60 ** x[0] * int(x[1]) for x in enumerate(
reversed(duration.split(':'))))
media_image_url = trackinfo.get('album_art', None)
media_artist = trackinfo.get('artist', None)
media_album_name = trackinfo.get('album', None)
media_title = trackinfo.get('title', None)
if media_image_url in ('', 'NOT_IMPLEMENTED', None):
# fallback to asking the speaker directly
media_image_url = \
'http://{host}:{port}/getaa?s=1&u={uri}'.format(
host=self._player.ip_address,
port=1400,
uri=urllib.parse.quote(mediainfo['CurrentURI'])
)
if media_artist in ('', 'NOT_IMPLEMENTED', None):
# if listening to a radio stream the media_artist field
# will be empty and the title field will contain the
# filename that is being streamed
current_uri_metadata = mediainfo["CurrentURIMetaData"]
if current_uri_metadata not in \
('', 'NOT_IMPLEMENTED', None):
# currently soco does not have an API for this
import soco
current_uri_metadata = soco.xml.XML.fromstring(
soco.utils.really_utf8(current_uri_metadata))
md_title = current_uri_metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
if md_title not in ('', 'NOT_IMPLEMENTED', None):
media_artist = ''
media_title = md_title
self._media_content_id = trackinfo.get('title', None)
self._media_duration = duration
self._media_image_url = media_image_url
self._media_artist = media_artist
self._media_album_name = media_album_name
self._media_title = media_title
else:
self._status = 'OFF'
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player.volume / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player.mute
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._coordinator:
return self._coordinator.media_content_id
else:
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._coordinator:
return self._coordinator.media_duration
else:
return self._media_duration
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._coordinator:
return self._coordinator.media_image_url
else:
return self._media_image_url
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_artist
else:
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_album_name
else:
return self._media_album_name
@property
def media_title(self):
"""Title of current playing media."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
if self._coordinator:
return self._coordinator.media_title
else:
return self._media_title
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
if not self.source_list:
# some devices do not allow source selection
return SUPPORT_SONOS ^ SUPPORT_SELECT_SOURCE
return SUPPORT_SONOS
def volume_up(self):
"""Volume up media player."""
self._player.volume += self.volume_increment
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self.volume_increment
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._player.volume = str(int(volume * 100))
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._player.mute = mute
def select_source(self, source):
"""Select input source."""
if source == SUPPORT_SOURCE_LINEIN:
self._player.switch_to_line_in()
elif source == SUPPORT_SOURCE_TV:
self._player.switch_to_tv()
@property
def source_list(self):
"""List of available input sources."""
model_name = self._speaker_info['model_name']
if 'PLAY:5' in model_name:
return [SUPPORT_SOURCE_LINEIN]
elif 'PLAYBAR' in model_name:
return [SUPPORT_SOURCE_LINEIN, SUPPORT_SOURCE_TV]
@property
def source(self):
"""Name of the current input source."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
return None
@only_if_coordinator
def turn_off(self):
"""Turn off media player."""
self._player.pause()
def media_play(self):
"""Send play command."""
if self._coordinator:
self._coordinator.media_play()
else:
self._player.play()
def media_pause(self):
"""Send pause command."""
if self._coordinator:
self._coordinator.media_pause()
else:
self._player.pause()
def media_next_track(self):
"""Send next track command."""
if self._coordinator:
self._coordinator.media_next_track()
else:
self._player.next()
def media_previous_track(self):
"""Send next track command."""
if self._coordinator:
self._coordinator.media_previous_track()
else:
self._player.previous()
def media_seek(self, position):
"""Send seek command."""
if self._coordinator:
self._coordinator.media_seek(position)
else:
self._player.seek(str(datetime.timedelta(seconds=int(position))))
def clear_playlist(self):
"""Clear players playlist."""
if self._coordinator:
self._coordinator.clear_playlist()
else:
self._player.clear_queue()
@only_if_coordinator
def turn_on(self):
"""Turn the media player on."""
self._player.play()
def play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if self._coordinator:
self._coordinator.play_media(media_type, media_id, **kwargs)
else:
if kwargs.get(ATTR_MEDIA_ENQUEUE):
from soco.exceptions import SoCoUPnPException
try:
self._player.add_uri_to_queue(media_id)
except SoCoUPnPException:
_LOGGER.error('Error parsing media uri "%s", '
"please check it's a valid media resource "
'supported by Sonos', media_id)
else:
self._player.play_uri(media_id)
def group_players(self):
"""Group all players under this coordinator."""
if self._coordinator:
self._coordinator.group_players()
else:
self._player.partymode()
@only_if_coordinator
def unjoin(self):
"""Unjoin the player from a group."""
self._player.unjoin()
@only_if_coordinator
def snapshot(self):
"""Snapshot the player."""
self.soco_snapshot.snapshot()
@only_if_coordinator
def restore(self):
"""Restore snapshot for the player."""
self.soco_snapshot.restore(True)
@only_if_coordinator
def set_sleep_timer(self, sleep_time):
"""Set the timer on the player."""
self._player.set_sleep_timer(sleep_time)
@only_if_coordinator
def clear_sleep_timer(self):
"""Clear the timer on the player."""
self._player.set_sleep_timer(None)
@property
def available(self):
"""Return True if player is reachable, False otherwise."""
try:
sock = socket.create_connection(
address=(self._player.ip_address, 1443),
timeout=3)
sock.close()
return True
except socket.error:
return False
| betrisey/home-assistant | homeassistant/components/media_player/sonos.py | Python | mit | 20,355 |
"""
"""
import logging
import time
import hiro
import mock
from flask import Flask, request
from werkzeug.exceptions import BadRequest
from flask_limiter.extension import C, Limiter
from flask_limiter.util import get_remote_address
def test_reset(extension_factory):
app, limiter = extension_factory({C.DEFAULT_LIMITS: "1 per day"})
@app.route("/")
def null():
return "Hello Reset"
with app.test_client() as cli:
cli.get("/")
assert "1 per 1 day" in cli.get("/").data.decode()
limiter.reset()
assert "Hello Reset" == cli.get("/").data.decode()
assert "1 per 1 day" in cli.get("/").data.decode()
def test_reset_unsupported(extension_factory, memcached_connection):
app, limiter = extension_factory(
{C.DEFAULT_LIMITS: "1 per day", C.STORAGE_URI: "memcached://localhost:31211"}
)
@app.route("/")
def null():
return "Hello Reset"
with app.test_client() as cli:
cli.get("/")
assert "1 per 1 day" in cli.get("/").data.decode()
# no op with memcached but no error raised
limiter.reset()
assert "1 per 1 day" in cli.get("/").data.decode()
def test_combined_rate_limits(extension_factory):
app, limiter = extension_factory({C.DEFAULT_LIMITS: "1 per hour; 10 per day"})
@app.route("/t1")
@limiter.limit("100 per hour;10/minute")
def t1():
return "t1"
@app.route("/t2")
def t2():
return "t2"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/t1").status_code
assert 200 == cli.get("/t2").status_code
assert 429 == cli.get("/t2").status_code
def test_defaults_per_method(extension_factory):
app, limiter = extension_factory(
{C.DEFAULT_LIMITS: "1 per hour", C.DEFAULT_LIMITS_PER_METHOD: True}
)
@app.route("/t1", methods=["GET", "POST"])
def t1():
return "t1"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/t1").status_code
assert 429 == cli.get("/t1").status_code
assert 200 == cli.post("/t1").status_code
assert 429 == cli.post("/t1").status_code
def test_default_limit_with_exemption(extension_factory):
def is_backdoor():
return request.headers.get("backdoor") == "true"
app, limiter = extension_factory(
{C.DEFAULT_LIMITS: "1 per hour", C.DEFAULT_LIMITS_EXEMPT_WHEN: is_backdoor}
)
@app.route("/t1")
def t1():
return "test"
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert cli.get("/t1", headers={"backdoor": "true"}).status_code == 200
assert cli.get("/t1", headers={"backdoor": "true"}).status_code == 200
assert cli.get("/t1").status_code == 200
assert cli.get("/t1").status_code == 429
timeline.forward(3600)
assert cli.get("/t1").status_code == 200
def test_default_limit_with_conditional_deduction(extension_factory):
def failed_request(response):
return response.status_code != 200
app, limiter = extension_factory(
{C.DEFAULT_LIMITS: "1 per hour", C.DEFAULT_LIMITS_DEDUCT_WHEN: failed_request}
)
@app.route("/t1/<path:path>")
def t1(path):
if path != "1":
raise BadRequest()
return path
with hiro.Timeline() as timeline:
with app.test_client() as cli:
assert cli.get("/t1/1").status_code == 200
assert cli.get("/t1/1").status_code == 200
assert cli.get("/t1/2").status_code == 400
assert cli.get("/t1/1").status_code == 429
assert cli.get("/t1/2").status_code == 429
timeline.forward(3600)
assert cli.get("/t1/1").status_code == 200
assert cli.get("/t1/2").status_code == 400
def test_key_func(extension_factory):
app, limiter = extension_factory()
@app.route("/t1")
@limiter.limit("100 per minute", lambda: "test")
def t1():
return "test"
with hiro.Timeline().freeze():
with app.test_client() as cli:
for i in range(0, 100):
assert (
200
== cli.get(
"/t1", headers={"X_FORWARDED_FOR": "127.0.0.2"}
).status_code
)
assert 429 == cli.get("/t1").status_code
def test_logging(caplog):
app = Flask(__name__)
limiter = Limiter(app, key_func=get_remote_address)
@app.route("/t1")
@limiter.limit("1/minute")
def t1():
return "test"
with app.test_client() as cli:
assert 200 == cli.get("/t1").status_code
assert 429 == cli.get("/t1").status_code
assert len(caplog.records) == 1
assert caplog.records[0].levelname == "WARNING"
def test_reuse_logging():
app = Flask(__name__)
app_handler = mock.Mock()
app_handler.level = logging.INFO
app.logger.addHandler(app_handler)
limiter = Limiter(app, key_func=get_remote_address)
for handler in app.logger.handlers:
limiter.logger.addHandler(handler)
@app.route("/t1")
@limiter.limit("1/minute")
def t1():
return "42"
with app.test_client() as cli:
cli.get("/t1")
cli.get("/t1")
assert app_handler.handle.call_count == 1
def test_disabled_flag(extension_factory):
app, limiter = extension_factory(
config={C.ENABLED: False}, default_limits=["1/minute"]
)
@app.route("/t1")
def t1():
return "test"
@app.route("/t2")
@limiter.limit("10 per minute")
def t2():
return "test"
with app.test_client() as cli:
assert cli.get("/t1").status_code == 200
assert cli.get("/t1").status_code == 200
for i in range(0, 10):
assert cli.get("/t2").status_code == 200
assert cli.get("/t2").status_code == 200
def test_multiple_apps():
app1 = Flask(__name__)
app2 = Flask(__name__)
limiter = Limiter(default_limits=["1/second"], key_func=get_remote_address)
limiter.init_app(app1)
limiter.init_app(app2)
@app1.route("/ping")
def ping():
return "PONG"
@app1.route("/slowping")
@limiter.limit("1/minute")
def slow_ping():
return "PONG"
@app2.route("/ping")
@limiter.limit("2/second")
def ping_2():
return "PONG"
@app2.route("/slowping")
@limiter.limit("2/minute")
def slow_ping_2():
return "PONG"
with hiro.Timeline().freeze() as timeline:
with app1.test_client() as cli:
assert cli.get("/ping").status_code == 200
assert cli.get("/ping").status_code == 429
timeline.forward(1)
assert cli.get("/ping").status_code == 200
assert cli.get("/slowping").status_code == 200
timeline.forward(59)
assert cli.get("/slowping").status_code == 429
timeline.forward(1)
assert cli.get("/slowping").status_code == 200
with app2.test_client() as cli:
assert cli.get("/ping").status_code == 200
assert cli.get("/ping").status_code == 200
assert cli.get("/ping").status_code == 429
timeline.forward(1)
assert cli.get("/ping").status_code == 200
assert cli.get("/slowping").status_code == 200
timeline.forward(59)
assert cli.get("/slowping").status_code == 200
assert cli.get("/slowping").status_code == 429
timeline.forward(1)
assert cli.get("/slowping").status_code == 200
def test_headers_no_breach():
app = Flask(__name__)
limiter = Limiter(
app,
default_limits=["10/minute"],
headers_enabled=True,
key_func=get_remote_address,
)
@app.route("/t1")
def t1():
return "test"
@app.route("/t2")
@limiter.limit("2/second; 5 per minute; 10/hour")
def t2():
return "test"
with hiro.Timeline().freeze():
with app.test_client() as cli:
resp = cli.get("/t1")
assert resp.headers.get("X-RateLimit-Limit") == "10"
assert resp.headers.get("X-RateLimit-Remaining") == "9"
assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 61))
assert resp.headers.get("Retry-After") == str(60)
resp = cli.get("/t2")
assert resp.headers.get("X-RateLimit-Limit") == "2"
assert resp.headers.get("X-RateLimit-Remaining") == "1"
assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 2))
assert resp.headers.get("Retry-After") == str(1)
assert limiter.current_limit.remaining == 1
assert limiter.current_limit.reset_at == int(time.time() + 2)
assert not limiter.current_limit.breached
def test_headers_breach():
app = Flask(__name__)
limiter = Limiter(
app,
default_limits=["10/minute"],
headers_enabled=True,
key_func=get_remote_address,
)
@app.route("/t1")
@limiter.limit("2/second; 10 per minute; 20/hour")
def t():
return "test"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
for i in range(10):
resp = cli.get("/t1")
timeline.forward(1)
assert len(limiter.current_limits) == 3
assert all(not limit.breached for limit in limiter.current_limits)
resp = cli.get("/t1")
timeline.forward(1)
assert resp.headers.get("X-RateLimit-Limit") == "10"
assert resp.headers.get("X-RateLimit-Remaining") == "0"
assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 50))
assert resp.headers.get("Retry-After") == str(int(50))
assert limiter.current_limit.remaining == 0
assert limiter.current_limit.reset_at == int(time.time() + 50)
assert limiter.current_limit.breached
def test_retry_after():
app = Flask(__name__)
_ = Limiter(
app,
default_limits=["1/minute"],
headers_enabled=True,
key_func=get_remote_address,
)
@app.route("/t1")
def t():
return "test"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
resp = cli.get("/t1")
retry_after = int(resp.headers.get("Retry-After"))
assert retry_after > 0
timeline.forward(retry_after)
resp = cli.get("/t1")
assert resp.status_code == 200
def test_retry_after_exists_seconds():
app = Flask(__name__)
_ = Limiter(
app,
default_limits=["1/minute"],
headers_enabled=True,
key_func=get_remote_address,
)
@app.route("/t1")
def t():
return "", 200, {"Retry-After": "1000000"}
with app.test_client() as cli:
resp = cli.get("/t1")
retry_after = int(resp.headers.get("Retry-After"))
assert retry_after > 1000
def test_retry_after_exists_rfc1123():
app = Flask(__name__)
_ = Limiter(
app,
default_limits=["1/minute"],
headers_enabled=True,
key_func=get_remote_address,
)
@app.route("/t1")
def t():
return "", 200, {"Retry-After": "Sun, 06 Nov 2032 01:01:01 GMT"}
with app.test_client() as cli:
resp = cli.get("/t1")
retry_after = int(resp.headers.get("Retry-After"))
assert retry_after > 1000
def test_custom_headers_from_config():
app = Flask(__name__)
app.config.setdefault(C.HEADER_LIMIT, "X-Limit")
app.config.setdefault(C.HEADER_REMAINING, "X-Remaining")
app.config.setdefault(C.HEADER_RESET, "X-Reset")
limiter = Limiter(
app,
default_limits=["10/minute"],
headers_enabled=True,
key_func=get_remote_address,
)
@app.route("/t1")
@limiter.limit("2/second; 10 per minute; 20/hour")
def t():
return "test"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
for i in range(11):
resp = cli.get("/t1")
timeline.forward(1)
assert resp.headers.get("X-Limit") == "10"
assert resp.headers.get("X-Remaining") == "0"
assert resp.headers.get("X-Reset") == str(int(time.time() + 50))
def test_application_shared_limit(extension_factory):
app, limiter = extension_factory(application_limits=["2/minute"])
@app.route("/t1")
def t1():
return "route1"
@app.route("/t2")
def t2():
return "route2"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/t1").status_code
assert 200 == cli.get("/t2").status_code
assert 429 == cli.get("/t1").status_code
def test_callable_default_limit(extension_factory):
app, limiter = extension_factory(default_limits=[lambda: "1/minute"])
@app.route("/t1")
def t1():
return "t1"
@app.route("/t2")
def t2():
return "t2"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert cli.get("/t1").status_code == 200
assert cli.get("/t2").status_code == 200
assert cli.get("/t1").status_code == 429
assert cli.get("/t2").status_code == 429
def test_callable_application_limit(extension_factory):
app, limiter = extension_factory(application_limits=[lambda: "1/minute"])
@app.route("/t1")
def t1():
return "t1"
@app.route("/t2")
def t2():
return "t2"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert cli.get("/t1").status_code == 200
assert cli.get("/t2").status_code == 429
def test_no_auto_check(extension_factory):
app, limiter = extension_factory(auto_check=False)
@app.route("/", methods=["GET", "POST"])
@limiter.limit("1/second", per_method=True)
def root():
return "root"
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 200 == cli.get("/").status_code
# attach before_request to perform check
@app.before_request
def _():
limiter.check()
with hiro.Timeline().freeze():
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
def test_fail_on_first_breach(extension_factory):
app, limiter = extension_factory(fail_on_first_breach=True)
@app.route("/", methods=["GET", "POST"])
@limiter.limit("1/second", per_method=True)
@limiter.limit("2/minute", per_method=True)
def root():
return "root"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
assert [True] == [k.breached for k in limiter.current_limits]
timeline.forward(1)
assert 200 == cli.get("/").status_code
assert [False, False] == [k.breached for k in limiter.current_limits]
timeline.forward(1)
assert 429 == cli.get("/").status_code
assert [False, True] == [k.breached for k in limiter.current_limits]
def test_no_fail_on_first_breach(extension_factory):
app, limiter = extension_factory(fail_on_first_breach=False)
@app.route("/", methods=["GET", "POST"])
@limiter.limit("1/second", per_method=True)
@limiter.limit("2/minute", per_method=True)
def root():
return "root"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
assert 200 == cli.get("/").status_code
assert 429 == cli.get("/").status_code
assert [True, False] == [k.breached for k in limiter.current_limits]
timeline.forward(1)
assert 429 == cli.get("/").status_code
assert [False, True] == [k.breached for k in limiter.current_limits]
def test_custom_key_prefix(redis_connection, extension_factory):
app1, limiter1 = extension_factory(
key_prefix="moo", storage_uri="redis://localhost:46379"
)
app2, limiter2 = extension_factory(
{C.KEY_PREFIX: "cow"}, storage_uri="redis://localhost:46379"
)
app3, limiter3 = extension_factory(storage_uri="redis://localhost:46379")
@app1.route("/test")
@limiter1.limit("1/day")
def app1_test():
return "app1 test"
@app2.route("/test")
@limiter2.limit("1/day")
def app2_test():
return "app1 test"
@app3.route("/test")
@limiter3.limit("1/day")
def app3_test():
return "app1 test"
with app1.test_client() as cli:
resp = cli.get("/test")
assert 200 == resp.status_code
resp = cli.get("/test")
assert 429 == resp.status_code
with app2.test_client() as cli:
resp = cli.get("/test")
assert 200 == resp.status_code
resp = cli.get("/test")
assert 429 == resp.status_code
with app3.test_client() as cli:
resp = cli.get("/test")
assert 200 == resp.status_code
resp = cli.get("/test")
assert 429 == resp.status_code
def test_second_instance_bypassed_by_shared_g():
app = Flask(__name__)
limiter1 = Limiter(app, key_func=get_remote_address)
limiter2 = Limiter(app, key_func=get_remote_address)
@app.route("/test1")
@limiter2.limit("1/second")
def app_test1():
return "app test1"
@app.route("/test2")
@limiter1.limit("10/minute")
@limiter2.limit("1/second")
def app_test2():
return "app test2"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
assert cli.get("/test1").status_code == 200
assert cli.get("/test2").status_code == 200
assert cli.get("/test1").status_code == 429
assert cli.get("/test2").status_code == 200
for i in range(8):
assert cli.get("/test1").status_code == 429
assert cli.get("/test2").status_code == 200
assert cli.get("/test2").status_code == 429
timeline.forward(1)
assert cli.get("/test1").status_code == 200
assert cli.get("/test2").status_code == 429
timeline.forward(59)
assert cli.get("/test1").status_code == 200
assert cli.get("/test2").status_code == 200
def test_independent_instances_by_key_prefix():
app = Flask(__name__)
limiter1 = Limiter(app, key_prefix="lmt1", key_func=get_remote_address)
limiter2 = Limiter(app, key_prefix="lmt2", key_func=get_remote_address)
@app.route("/test1")
@limiter2.limit("1/second")
def app_test1():
return "app test1"
@app.route("/test2")
@limiter1.limit("10/minute")
@limiter2.limit("1/second")
def app_test2():
return "app test2"
with hiro.Timeline().freeze() as timeline:
with app.test_client() as cli:
assert cli.get("/test1").status_code == 200
assert cli.get("/test2").status_code == 200
resp = cli.get("/test1")
assert resp.status_code == 429
assert "1 per 1 second" in resp.data.decode()
resp = cli.get("/test2")
assert resp.status_code == 429
assert "1 per 1 second" in resp.data.decode()
for i in range(8):
assert cli.get("/test1").status_code == 429
assert cli.get("/test2").status_code == 429
assert cli.get("/test2").status_code == 429
timeline.forward(1)
assert cli.get("/test1").status_code == 200
assert cli.get("/test2").status_code == 429
timeline.forward(59)
assert cli.get("/test1").status_code == 200
assert cli.get("/test2").status_code == 200
| alisaifee/flask-limiter | tests/test_flask_ext.py | Python | mit | 20,218 |
#!/usr/bin/env python
##############################################
#
# This module contains some utilities
#
##############################################
class argpasser(object):
"""
    ComEst uses almost the same arguments repeatedly, so it is useful to create a customized argument passer like this.
"""
def __init__(self,
stamp_size_arcsec = 20.0,
mag_dict = {"lo":20.0, "hi":25.0 },
hlr_dict = {"lo":0.35, "hi":0.75 },
fbulge_dict = {"lo":0.5 , "hi":0.9 },
q_dict = {"lo":0.4 , "hi":1.0 },
pos_ang_dict = {"lo":0.0 , "hi":180.0},
ngals_arcmin2 = 15.0,
nsimimages = 50,
ncpu = 2,
):
"""
        :param stamp_size_arcsec: The size of the stamp of each simulated source by **GalSim**. The stamp has a size of ``stamp_size_arcsec`` x ``stamp_size_arcsec`` (``stamp_size_arcsec`` in arcsec), on which **GalSim** will simulate one single source. By default, it is ``stamp_size_arcsec = 20.0``.
:param mag_dict: The magnitude range which **GalSim** will simulate sources. It must be in the form of ``{"lo": _value_, "hi": _value_}``, where _value_ is expressed in magnitude. By default, it is ``mag_dict = {"lo":20.0, "hi":25.0 }``.
:param hlr_dict: The half light radius configuration of the sources simulated by **GalSim**. It is in the unit of arcsec. It has to be in the form of ``{"lo": _value_, "high": _value_}``. By default, it is ``hlr_dict = {"lo":0.35 , "hi":0.75 }``.
:param fbulge_dict: The configuration of the fraction of the bulge component. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and 1 means the galaxy has zero fraction of light from the disk component. By default, it is ``fbulge_dict = {"lo":0.5 , "hi":0.9 }``.
:param q_dict: The minor-to-major axis ratio configuration of the sources simulated by **GalSim**. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and ``q = 1`` means spherical. By default, it is ``q_dict = {"lo":0.4 , "hi":1.0 }``.
:param pos_ang_dict: The position angle configuration of the sources simulated by **GalSim**. It is in the unit of degree. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,180.0] and it is counter-clockwise with +x is 0 degree. By default, it is ``pos_ang_dict={"lo":0.0 , "hi":180.0 }``.
        :param ngals_arcmin2: The projected number of sources simulated by **GalSim** per square arcmin. You don't want to set this number too high because it will cause blending problems in the source detection. However, you don't want to lose statistical power if you set this number too low. By default, it is ``ngals_arcmin2 = 15.0``.
:param nsimimages: The number of the images you want to simulate. It will be saved in the multi-extension file with the code name ``sims_nameroot``. By default, it is ``nsimimages = 50``.
:param ncpu: The number of cpu for parallel running. By default, it is ``ncpu = 2``. Please do not set this number higher than the CPU cores you have.
"""
self.stamp_size_arcsec = float(stamp_size_arcsec)
self.mag_dict = mag_dict
self.hlr_dict = hlr_dict
self.fbulge_dict = fbulge_dict
self.q_dict = q_dict
self.pos_ang_dict = pos_ang_dict
self.ngals_arcmin2 = float(ngals_arcmin2)
self.nsimimages = int(nsimimages)
self.ncpu = int(ncpu)
return
# i_am function
def i_am(self):
"""
"""
print "#", "stamp_size_arcsec:", self.stamp_size_arcsec
print "#", "mag_dict:", self.mag_dict
print "#", "hlr_dict:", self.hlr_dict
print "#", "fbulge_dict:", self.fbulge_dict
print "#", "q_dict:", self.q_dict
print "#", "pos_ang_dict:", self.pos_ang_dict
print "#", "ngals_arcmin2:", self.ngals_arcmin2
print "#", "nsimimages:", self.nsimimages
print "#", "ncpu:", self.ncpu
return
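

# Minimal usage sketch (illustrative only; the override values below are
# arbitrary and not part of ComEst itself):
if __name__ == "__main__":
    demo_args = argpasser(ngals_arcmin2=10.0, nsimimages=20)
    demo_args.i_am()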
| inonchiu/ComEst | comest/utils.py | Python | mit | 4,398 |
import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-email-subscription',
url='https://github.com/MagicSolutions/django-email-subscription',
version='0.0.1',
    description='Django app for creating subscription accounts.',
long_description=README,
install_requires=[
'django-simple-captcha>=0.4.2',
],
packages=find_packages(),
package_data={'': ['LICENSE']},
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
],
)
| MagicSolutions/django-email-subscription | setup.py | Python | mit | 888 |
# ~*~ encoding: utf-8 ~*~
from pymongo import MongoClient
from pandas import read_csv
from datetime import date
mongodb = MongoClient('192.168.178.82', 9999)
db = mongodb['dev']
drug_collection = db['drug']
drugs = read_csv('~/Dokumente/bfarm_lieferenpass_meldung.csv', delimiter=';', encoding='iso8859_2').to_dict()
drugs.pop('Id', None)
drugs.pop('aktuelle Bescheidart', None)
drugs.pop('Meldungsart', None)
drugs.pop('aktuelle Bescheidart', None)
data = dict()
for x in range(len(drugs['Verkehrsfähig'])):
"""
if drugs['Ende Engpass'][x] == '-':
data['end'] = None
else:
day, month, year = drugs['Ende Engpass'][x].split('.')
data['end'] = date(int(year), int(month), int(day)).__str__()
if drugs['Beginn Engpass'][x] == '-':
data['initial_report'] = None
else:
day, month, year = drugs['Beginn Engpass'][x].split('.')
data['initial_report'] = date(int(year), int(month), int(day)).__str__()
if drugs['Datum der letzten Meldung'][x] == '-':
data['last_report'] = None
else:
day, month, year = drugs['Datum der letzten Meldung'][x].split('.')
data['last_report'] = date(int(year), int(month), int(day)).__str__()
"""
data['substance'] = drugs['Wirkstoffe'][x].replace(' ', '').split(';')
data['enr'] = int(drugs['Enr'][x])
data['marketability'] = True if drugs['Verkehrsfähig'][x] == 'ja' else False
data['atc_code'] = drugs['ATC-Code'][x]
data['pzn'] = int(drugs['PZN'][x].split(' ')[0].replace(';', '')) if drugs['PZN'][x] != '-' else None
data['drug_title'] = drugs['Arzneimittelbezeichnung'][x]
data['hospital'] = True if drugs['Krankenhausrelevant'][x] == 'ja' else False
drug_collection.update_one({'enr': data['enr']}, {'$set': data}, upsert=True)
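
# Shape of one resulting document in the 'drug' collection (field values below
# are purely illustrative):
# {
#     'enr': 123456,
#     'marketability': True,
#     'atc_code': 'J01CA04',
#     'pzn': 1234567,
#     'drug_title': 'Example 500 mg Tabletten',
#     'hospital': False,
#     'substance': ['Amoxicillin'],
# }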
| schenkd/webdev-project | data_import.py | Python | mit | 1,815 |
"""Molt Web API with Interface."""
import re
import redis
import docker
import subprocess
import os
import shlex
import requests
import sys
import argparse
from flask import Flask, Response, render_template, abort, request
from molt import Molt, MoltError
app = Flask(__name__)
# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='config file path')
args = parser.parse_args()
if args.config:
cfg_file = args.config
else:
cfg_file = 'config/molt_app.cfg'
if not os.path.exists(cfg_file):
app.logger.error("{} が存在しません".format(cfg_file))
sys.exit(1)
app.config.from_pyfile(cfg_file, silent=True)
@app.route('/<virtual_host>', strict_slashes=False)
def index(virtual_host):
"""Moltの実行をプレビューするページ."""
try:
rev, repo, user = virtual_host_parse(virtual_host)
except Exception:
abort(404)
vhost = {'rev': rev, 'repo': repo, 'user': user, 'full': virtual_host}
redirect_url = '//{}.{}/'.format(virtual_host, app.config['BASE_DOMAIN'])
return render_template('index.html', vhost=vhost,
redirect_url=redirect_url)
@app.route('/molt/<virtual_host>', methods=['GET'], strict_slashes=False)
def molt(virtual_host):
"""Moltの実行をストリーミングする(Server-Sent Eventを使ったAPI)."""
try:
rev, repo, user = virtual_host_parse(virtual_host)
except Exception:
abort(404)
m = Molt(rev, repo, user, app.config['BASE_DOMAIN'],
app.config['GITHUB_USER'], app.config['GITHUB_TOKEN'])
r = redis.StrictRedis(host=app.config['REDIS_HOST'],
port=app.config['REDIS_PORT'])
def generate(m, r):
"""Dockerイメージ立ち上げ(ストリーミングするための関数).
git clone から docker-compose upまでの一連の処理のSTDIOの送信と、Dockerイメージ
の情報取得・設定をする
"""
# コマンド群の実行
try:
for row in m.molt():
row = row.decode()
                data = row.split('\r')[-1]  # CR-only lines are held back, so strip them
yield event_stream_parser(data)
except MoltError as e:
yield event_stream_parser(e, event='failure')
except Exception:
            yield event_stream_parser('An internal error occurred inside Molt. Exiting...',
                                      event='failure')
else:
            # Write the virtual-host to container-IP mapping into Redis
r.hset('mirror-store', virtual_host, m.get_container_ip())
yield event_stream_parser('', event='success')
return Response(generate(m, r), mimetype='text/event-stream')
@app.route('/favicon.ico')
def favicon():
"""favicon.ico."""
abort(404)
@app.template_filter('base_domain')
def base_domain_filter(path):
"""Staticファイルを呼び出す際のドメインを指定する."""
return '//' + app.config['BASE_DOMAIN'] + ':' + str(app.config['PORT']) + \
'/' + path
@app.route("/hook", methods=['POST'])
def hook():
event = request.headers["X-GitHub-Event"]
req = request.json
if event != "pull_request":
return "ok", 200
elif req["action"] not in {"opened", "synchronize"}:
return "ok", 200
pr = req["pull_request"]
pr_url = pr["comments_url"]
pr_sha = pr["head"]["sha"][:7]
pr_reponame = pr["head"]["repo"]["name"]
pr_owner = pr["head"]["repo"]["owner"]["login"]
payload = {
"event": "COMMENT",
"body": "Launched the preview environment!\nhttp://{}.{}.{}.{}\
".format(pr_sha, pr_reponame, pr_owner, app.config["BASE_DOMAIN"]),
}
headers = {
"Accept": "application/vnd.github.v3+json",
"Content-Type": "application/json",
"Authorization": "token {}".format(app.config["GITHUB_TOKEN"]),
}
requests.post(
pr_url,
json=payload,
headers=headers,
)
return "ok", 200
def virtual_host_parse(virtual_host):
"""Virtual_hostの文字列を 'rev', 'repo', 'user' に分割する.
e.g.(1) "host.repo.sitory.user" => "host", "repo.sitory", "user"
e.g.(2) "host.repository.user" => "host", "repository", "user"
"""
p = re.compile(r'(?P<rev>^.+?)\.(?P<repo>.+)\.(?P<user>.+)$')
m = p.search(virtual_host)
return m.group('rev'), m.group('repo'), m.group('user')
def event_stream_parser(data, event=None, id=None, retry=None):
"""Server-Sent Event 形式へのパーサ."""
event_stream = ''
if event:
event_stream += 'event: {}\n'.format(event)
event_stream += 'data: {}\n'.format(data)
if id:
event_stream += 'id: {}\n'.format(id)
if retry:
event_stream += 'retry: {}\n'.format(id)
event_stream += '\n'
return event_stream
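
# Illustrative output of event_stream_parser("done", event="success"):
#
#     event: success
#     data: done
#
# followed by a terminating blank line, as required by the SSE format.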
if __name__ == '__main__':
    # Generate an RSA key pair
user = os.getenv('USER')
ssh_key_path = os.path.expanduser("~")+"/.ssh/molt_deploy_key"
if not os.path.exists(ssh_key_path):
command = 'ssh-keygen -t rsa -N "" -f {}'.format(ssh_key_path)
command = shlex.split(command)
subprocess.Popen(command)
    # Create the Docker network
    client = docker.from_env()
    networks = client.networks.list()
if 'molt-network' not in [network.name for network in networks]:
command = 'docker network create --subnet=172.31.255.0/24 \
--ip-range=172.31.255.0/24 --gateway=172.31.255.254 \
-o "com.docker.network.bridge.host_binding_ipv4"="0.0.0.0" \
molt-network'
command = shlex.split(command)
subprocess.Popen(command)
app.run(host=app.config['HOST'], port=app.config['PORT'])
| swkoubou/molt | molt_app.py | Python | mit | 5,850 |
from django.contrib import admin
try:
from django.contrib.auth import get_permission_codename
except ImportError: # pragma: no cover
# Django < 1.6
def get_permission_codename(action, opts):
return '%s_%s' % (action, opts.object_name.lower())
class ObjectPermissionsModelAdminMixin(object):
def has_change_permission(self, request, obj=None):
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm('%s.%s' % (opts.app_label, codename), obj)
def has_delete_permission(self, request, obj=None):
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm('%s.%s' % (opts.app_label, codename), obj)
class ObjectPermissionsInlineModelAdminMixin(ObjectPermissionsModelAdminMixin):
def has_change_permission(self, request, obj=None): # pragma: no cover
opts = self.opts
if opts.auto_created:
for field in opts.fields:
if field.rel and field.rel.to != self.parent_model:
opts = field.rel.to._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm('%s.%s' % (opts.app_label, codename), obj)
def has_delete_permission(self, request, obj=None): # pragma: no cover
if self.opts.auto_created:
return self.has_change_permission(request, obj)
return super(ObjectPermissionsInlineModelAdminMixin, self).has_delete_permission(request, obj)
class ObjectPermissionsModelAdmin(ObjectPermissionsModelAdminMixin, admin.ModelAdmin):
pass
class ObjectPermissionsStackedInline(ObjectPermissionsInlineModelAdminMixin, admin.StackedInline):
pass
class ObjectPermissionsTabularInline(ObjectPermissionsInlineModelAdminMixin, admin.TabularInline):
pass
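
# Minimal usage sketch (the Post model and myapp package are hypothetical and
# not part of this module):
#
#     from django.contrib import admin
#     from myapp.models import Post
#
#     class PostAdmin(ObjectPermissionsModelAdmin):
#         pass
#
#     admin.site.register(Post, PostAdmin)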
| smcoll/django-rules | rules/contrib/admin.py | Python | mit | 1,879 |
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Redis for cache
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ('gunicorn', )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
# See:http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
#MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader',
['django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
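# e.g. REDIS_URL='redis://user:pass@host:6379' yields
# REDIS_LOCATION='redis://user:pass@host:6379/0'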
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# EMAIL
# ------------------------------------------------------------------------------
# for now, send emails to console, even in production
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
| Patrick-and-Michael/trumptweets | config/settings/production.py | Python | mit | 4,984 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
getname
~~~~~~~
Get popular cat/dog/superhero/supervillain names.
:copyright: (c) 2015 by lord63.
:license: MIT, see LICENSE for more details.
"""
from getname.main import random_name
__title__ = "getname"
__version__ = '0.1.1'
__author__ = "lord63"
__license__ = "MIT"
__copyright__ = "Copyright 2015 lord63"
| lord63/getname | getname/__init__.py | Python | mit | 385 |
from cse.util import Util
from collections import OrderedDict
from cse.pipeline import Handler
class WpApiParser(Handler):
def __init__(self):
super()
def parse(self, comments, url, assetId, parentId):
data = self.__buildDataSkeleton(url, assetId)
data["comments"] = self.__iterateComments(comments, parentId)
return data
def __buildDataSkeleton(self, url, assetId):
return {
"article_url" : url,
"article_id" : assetId,
"comments" : None
}
def __iterateComments(self, comments, parentId=None):
commentList = OrderedDict()
for comment in comments:
votes = 0
for action_summary in comment["action_summaries"]:
if action_summary["__typename"] == "LikeActionSummary":
votes = action_summary["count"]
commentObject = {
"comment_author": comment["user"]["username"],
"comment_text" : comment["body"],
"timestamp" : comment["created_at"],
"parent_comment_id" : parentId,
"upvotes" : votes,
"downvotes": 0
}
commentList[comment["id"]] = commentObject
try:
commentReplies = self.__iterateComments(comment["replies"]["nodes"], comment["id"])
except KeyError: # There may be a limit of the nesting level of comments on wp
commentReplies = {}
commentList.update(commentReplies)
return commentList
# inherited from cse.pipeline.Handler
def registeredAt(self, ctx):
pass
def process(self, ctx, data):
result = self.parse(
comments=data["comments"],
url=data["url"],
assetId=data["assetId"],
parentId=data["parentId"]
)
ctx.write(result)
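
# Illustrative result shape of parse() (all values are made up):
#
#   parser = WpApiParser()
#   data = parser.parse(comments=nodes, url="https://example.org/article",
#                       assetId="1234", parentId=None)
#   # data == {
#   #     "article_url": "https://example.org/article",
#   #     "article_id": "1234",
#   #     "comments": OrderedDict([(comment_id, {"comment_author": ...,
#   #                                            "comment_text": ...,
#   #                                            "timestamp": ...,
#   #                                            "parent_comment_id": ...,
#   #                                            "upvotes": ...,
#   #                                            "downvotes": 0}), ...]),
#   # }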
| CodeLionX/CommentSearchEngine | cse/WpApiParser.py | Python | mit | 1,919 |
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for syscoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "syscoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, syscoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
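
# Illustrative shape of one entry in syscoin-util-test.json (values are made
# up; only the keys read by bctest() are shown):
#
#   {
#     "exec": "./syscoin-tx",
#     "args": ["-create", "nversion=1"],
#     "output_cmp": "blanktxv1.json",
#     "description": "Creates a blank v1 transaction in JSON"
#   }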
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
| syscoin/syscoin2 | test/util/syscoin-util-test.py | Python | mit | 6,594 |
# pre_NAMD.py
# Creates the files used for NAMD based on the .pdb file dowloaded from PDB bank
#
# Usage:
# python pre_NAMD.py $PDBID
#
# $PDBID=the 4 characters identification code of the .pdb file
#
# Input:
# $PDBID.pdb: .pdb file downloaded from PDB bank
#
# Output:
# $PDBID_p.pdb: .pdb file with water molecules removed
# $PDBID_p_h.pdb: .pdb file with water removed and hydrogen atoms added
# $PDBID_p_h.psf: .psf file of $PDBID_p_h.pdb
# $PDBID_p_h.log: Log file of adding hydrogen atoms
# $PDBID_wb.pdb: .pdb file of the water box model
# $PDBID_wb.psf: .psf file of $PDBID_wb.pdb
# $PDBID_wb.log: Log file of the water box model generation
# $PDBID_wb_i.pdb: .pdb file of the ionized water box model (For NAMD)
# $PDBID_wb_i.psf: .psf file of PDBID_wb_i.pdb (For NAMD)
# $PDBID.log: Log file of the whole process (output of VMD)
# $PDBID_center.txt: File contains the grid and center information of
# the ionized water box model
#
# Author: Xiaofei Zhang
# Date: June 20 2016
from __future__ import print_function
import sys, os
def print_error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# main
if len(sys.argv) != 2:
print_error("Usage: python pre_NAMD.py $PDBID")
sys.exit(-1)
mypath = os.path.realpath(__file__)
tclpath = os.path.split(mypath)[0] + os.path.sep + 'tcl' + os.path.sep
pdbid = sys.argv[1]
logfile = pdbid+'.log'
# Using the right path of VMD
vmd = "/Volumes/VMD-1.9.2/VMD 1.9.2.app/Contents/vmd/vmd_MACOSXX86"
print("Input: "+pdbid+".pdb")
# Remove water
print("Remove water..")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'remove_water.tcl' + ' ' + '-args' + ' '+ pdbid +'> '+ logfile
os.system(cmdline)
# Create .psf
print("Create PSF file...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'create_psf.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
# Build water box
print("Build water box...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'build_water_box.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
# Add ions
print("Add ions...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'add_ion.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
# Calculate grid and center
print("Calculate center coordinates...")
cmdline = '\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'get_center.tcl' + ' ' + '-args' + ' '+ pdbid +'>> '+ logfile
os.system(cmdline)
print("Finish!")
# end main
| Xiaofei-Zhang/NAMD_Docking_pipeline | pre_NAMD/pre_NAMD.py | Python | mit | 2,553 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-12-03 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('busshaming', '0013_auto_20170917_0502'),
]
operations = [
migrations.CreateModel(
name='StopSequence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence_hash', models.CharField(max_length=64)),
('stop_sequence', models.TextField()),
('length', models.SmallIntegerField()),
('trip_headsign', models.CharField(blank=True, max_length=200, null=True)),
('trip_short_name', models.CharField(blank=True, max_length=200, null=True)),
('direction', models.SmallIntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('route', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='busshaming.Route')),
],
),
migrations.AddField(
model_name='trip',
name='stop_sequence',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='busshaming.StopSequence'),
),
migrations.AlterUniqueTogether(
name='stopsequence',
unique_together=set([('sequence_hash', 'route')]),
),
]
| katharosada/bus-shaming | busshaming/migrations/0014_auto_20171203_1316.py | Python | mit | 1,559 |
"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
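# Minimal usage sketch (not from the original package): the synthetic data and the
# reliance on default settings are assumptions, and a full run depends on the sibling
# `design_nonlattice` and `weighted_linear_model` modules behaving as the call sites
# above imply.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    obs_t = np.cumsum(rng.exponential(0.5, size=300))   # irregular observation times
    cum_obs = np.cumsum(rng.poisson(1.0, size=300))     # cumulative event counts at those times
    model = NonLatticeOneShot()
    model.fit(obs_t, cum_obs, basis_lag=1.0)
    print("total kernel mass eta:", model.eta_)
    print("first few predicted increments:", model.predict()[:5])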
| danmackinlay/branching_process | branching_process/nonlattice/fit.py | Python | mit | 6,351 |
"""useful context managers"""
from contextlib import suppress
with suppress(ModuleNotFoundError):
from lag import *
import os
import contextlib
from functools import partial  # used by cd() below
def clog(*args, condition=True, log_func=print, **kwargs):
    """Conditional log: call ``log_func(*args, **kwargs)`` only when ``condition`` is true."""
    if condition:
        return log_func(*args, **kwargs)
@contextlib.contextmanager
def cd(newdir, verbose=True):
"""Change your working directory, do stuff, and change back to the original"""
_clog = partial(clog, condition=verbose, log_func=print)
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
_clog(f'cd {newdir}')
yield
finally:
_clog(f'cd {prevdir}')
os.chdir(prevdir)
# from pathlib import Path
# _clog("Called before cd", Path().absolute())
# with cd(Path.home()):
# if verbose: print("Called under cd", Path().absolute())
# _clog("Called after cd and same as before", Path().absolute())
| thorwhalen/ut | util/context_managers.py | Python | mit | 908 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import warnings
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import *
from pymatgen.util.testing import PymatgenTest
class LatticeFromAbivarsTest(PymatgenTest):
def test_rprim_acell(self):
l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))
self.assertAlmostEqual(l1.volume, bohr_to_ang ** 3 * 1000)
assert l1.angles == (90, 90, 90)
l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))
assert l1 == l2
l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))
abi_rprimd = (
np.reshape(
[
4.6188022,
0.0000000,
6.5319726,
-2.3094011,
4.0000000,
6.5319726,
-2.3094011,
-4.0000000,
6.5319726,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
abi_rprimd = (
np.reshape(
[
3.0000000,
0.0000000,
0.0000000,
3.8567257,
4.5962667,
0.0000000,
6.8944000,
4.3895544,
3.7681642,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))
def test_znucl_typat(self):
"""Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl."""
# Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0
# Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0
# N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0
# N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0
gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit", "gan.cif"))
# By default, znucl is filled using the first new type found in sites.
def_vars = structure_to_abivars(gan)
def_znucl = def_vars["znucl"]
self.assertArrayEqual(def_znucl, [31, 7])
def_typat = def_vars["typat"]
self.assertArrayEqual(def_typat, [1, 1, 2, 2])
# But it's possible to enforce a particular value of typat and znucl.
enforce_znucl = [7, 31]
enforce_typat = [2, 2, 1, 1]
enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)
self.assertArrayEqual(enf_vars["znucl"], enforce_znucl)
self.assertArrayEqual(enf_vars["typat"], enforce_typat)
self.assertArrayEqual(def_vars["xred"], enf_vars["xred"])
assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"]
for itype1, itype2 in zip(def_typat, enforce_typat):
assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]
with self.assertRaises(Exception):
structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertMSONable(polarized)
self.assertMSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:" + str(1.0 / Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
assert nosmear == Smearing.as_smearing("nosmearing")
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
self.assertMSONable(nosmear)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
abivars = algo.to_abivars()
# Test pickle
self.serialize_with_pickle(algo)
# Test dict methods
self.assertMSONable(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol == 2)
self.assertTrue(default_electrons.nspinor == 1)
self.assertTrue(default_electrons.nspden == 2)
abivars = default_electrons.to_abivars()
# new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
custom_electrons = Electrons(
spin_mode="unpolarized",
smearing="marzari4:0.2 eV",
algorithm=ElectronsAlgorithm(nstep=70),
nband=10,
charge=1.0,
comment="Test comment",
)
# Test dict methods
self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertMSONable(monkhorst)
self.assertMSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertMSONable(atoms_and_cell)
self.assertMSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
# print(godby)
# print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:" + str(12.0 / Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.get_noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertMSONable(godby)
| vorwerkc/pymatgen | pymatgen/io/abinit/tests/test_abiobjects.py | Python | mit | 7,463 |
import _plotly_utils.basevalidators
class ArrowcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="arrowcolor", parent_name="layout.annotation", **kwargs
):
super(ArrowcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/annotation/_arrowcolor.py | Python | mit | 479 |
# Copyright (c) 2015 SnapDisco Pty Ltd, Australia.
# All rights reserved.
#
# This source code is licensed under the terms of the MIT license
# found in the "LICENSE" file in the root directory of this source tree.
import sys
if sys.version_info.major >= 3:
from configparser import RawConfigParser
else:
from ConfigParser import RawConfigParser
from .OrderedMultiDict import OrderedMultiDict
class UsefulConfigParser(object):
"""A config parser that sucks less than those in module `ConfigParser`."""
def __init__(self, filenames_to_try=[]):
# FUN FACT: In Python 3.2, they spontaneously changed the behaviour of
# RawConfigParser so that it no longer considers ';' a comment delimiter
# for inline comments.
#
# Compare:
# "Configuration files may include comments, prefixed by specific
# characters (# and ;). Comments may appear on their own in an otherwise
# empty line, or may be entered in lines holding values or section names.
# In the latter case, they need to be preceded by a whitespace character
# to be recognized as a comment. (For backwards compatibility, only ;
# starts an inline comment, while # does not.)"
# -- https://docs.python.org/2/library/configparser.html
# vs:
# "Comment prefixes are strings that indicate the start of a valid comment
# within a config file. comment_prefixes are used only on otherwise empty
# lines (optionally indented) whereas inline_comment_prefixes can be used
# after every valid value (e.g. section names, options and empty lines as
# well). By default inline comments are disabled and '#' and ';' are used
# as prefixes for whole line comments.
# Changed in version 3.2: In previous versions of configparser behaviour
# matched comment_prefixes=('#',';') and inline_comment_prefixes=(';',)."
# -- https://docs.python.org/3/library/configparser.html#customizing-parser-behaviour
#
# Grrr...
if sys.version_info.major >= 3:
self._cp = RawConfigParser(dict_type=OrderedMultiDict, inline_comment_prefixes=(';',))
else:
self._cp = RawConfigParser(dict_type=OrderedMultiDict)
if isinstance(filenames_to_try, str):
filenames_to_try = [filenames_to_try]
self._filenames_to_try = filenames_to_try[:]
def read(self, filenames_to_try=[]):
if isinstance(filenames_to_try, str):
filenames_to_try = [filenames_to_try]
self._filenames_to_try.extend(filenames_to_try)
return self._cp.read(self._filenames_to_try)
def sections(self):
return self._cp.sections()
def options(self, section_name):
## The client code doesn't need to check in advance that the requested
## section name is present in the config; this function will check
## this automatically, so no exception is raised by RawConfigParser.
## Check that `section_name` is present in the config.
## Otherwise, RawConfigParser will raise ConfigParser.NoSectionError.
if not self._cp.has_section(section_name):
return []
return self._cp.options(section_name)
def get(self, section_name, option_name, do_optionxform=True):
if do_optionxform:
# https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.optionxform
option_name = self._cp.optionxform(option_name)
if section_name is None:
return self._get_optval_in_sections(self.sections(), option_name)
elif isinstance(section_name, str):
return self._get_optval_in_sections([section_name], option_name)
else:
return self._get_optval_in_sections(section_name, option_name)
def _get_optval_in_sections(self, section_names, option_name):
## The client code doesn't need to check in advance that the requested
## section name(s) are present in the config; this function will check
## this automatically, so no exception is raised by RawConfigParser.
optvals = []
for section_name in section_names:
## Check that `section_name` is present in the config.
## Otherwise, RawConfigParser will raise ConfigParser.NoSectionError.
if not self._cp.has_section(section_name):
continue
optvals.extend([optval
for optname, optval in self._cp.items(section_name)
if optname == option_name])
return optvals
def getboolean(self, section_name, option_name, do_optionxform=True):
# https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.getboolean
return [self._coerce_to_boolean(optval)
for optval in self.get(section_name, option_name, do_optionxform)]
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def _coerce_to_boolean(self, optval_str):
# 'The accepted values for the option are "1", "yes", "true", and "on",
# which cause this method to return True, and "0", "no", "false", and
# "off", which cause it to return False. These string values are checked
# in a case-insensitive manner. Any other value will cause it to raise
# ValueError.'
# https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.getboolean
ovs_lower = optval_str.lower()
if ovs_lower not in self._boolean_states:
raise ValueError("Not a boolean: %s" % optval_str)
return self._boolean_states[ovs_lower]
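# Minimal usage sketch (not part of the original module; the config text and option
# names below are made up). It shows the main point of this wrapper: get() returns a
# list of every matching value, here collected across two sections.
if __name__ == '__main__':
    import os
    import tempfile
    cfg_text = "[build]\ntool = make ; inline comment\n[test]\ntool = pytest\n"
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.cfg', delete=False)
    tmp.write(cfg_text)
    tmp.close()
    cp = UsefulConfigParser([tmp.name])
    cp.read()
    print(cp.get(None, 'tool'))   # expected: ['make', 'pytest']
    os.unlink(tmp.name)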
| jboy/nim-pymod | libpy/UsefulConfigParser.py | Python | mit | 5,807 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__Author__ = "Riyaz Ahmad Bhat"
__Email__ = "[email protected]"
import re
from collections import namedtuple
from sanity_checker import SanityChecker
class DefaultList(list):
"""Equivalent of Default dictionaries for Indexing Errors."""
def __init__(self, default=None):
self.default = default
list.__init__(self)
def __getitem__(self, index):
try: return list.__getitem__(self, index)
except IndexError: return self.default
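# For example, DefaultList(default='_')[5] returns '_' instead of raising
# IndexError; in-range indices behave like a normal list.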
class SSFReader (SanityChecker):
def __init__ (self, sentence):
super(SSFReader, self).__init__()
self.id_ = int()
self.nodeList = list()
self.chunk_word = dict()
self.sentence = sentence
self.modifierModified = dict()
self.node = namedtuple('node',
('id', 'head', 'children', 'pos', 'poslcat', 'af', 'vpos', 'name','drel','parent',
'chunkId', 'chunkType', 'mtype', 'troot', 'coref', 'stype','voicetype', 'posn'))
self.features = namedtuple('features',
('lemma','cat','gen','num','per','case','vib','tam'))
def getAnnotations (self):
children_ = list()
for line in self.sentence.split("\n"):
nodeInfo = line.decode("utf-8").split("\t")
if nodeInfo[0].isdigit():
assert len(nodeInfo) == 4 # no need to process trash! FIXME
attributeValue_pairs = self.FSPairs(nodeInfo[3][4:-1])
attributes = self.updateFSValues(attributeValue_pairs)
h = attributes.get #NOTE h -> head node attributes
elif nodeInfo[0].replace(".",'',1).isdigit():
assert (len(nodeInfo) == 4) and (nodeInfo[1] and nodeInfo[2] != '') # FIXME
self.id_ += 1
pos_ = nodeInfo[2].encode("utf-8").decode("ascii",'ignore').encode("ascii")
wordForm_ = nodeInfo[1]
attributeValue_pairs = self.FSPairs(nodeInfo[3][4:-1])
if attributeValue_pairs['name'] == h('head_'):# NOTE head word of the chunk
self.nodeList.append(self.node(str(self.id_),wordForm_,children_,pos_,h('poslcat_'),
self.features(h('lemma_') if h('lemma_') else wordForm_ ,h('cat_'),h('gen_'), h('num_'),
h('per_'),h('case_'),h('vib_'),h('tam_')),h('vpos_'),h('head_'),h('drel_'),
h('parent_'),h('chunkId_'),":".join(('head',h('chunkId_'))),h('mtype_'),h('troot_'),
h('coref_'),h('stype_'),h('voicetype_'),h('posn_')))
self.modifierModified[h('chunkId_')] = h('parent_')
self.chunk_word[h('chunkId_')] = h('head_')
else:
attributes = self.updateFSValues(attributeValue_pairs)
c = attributes.get #NOTE c -> child node attributes
children_.append(self.node(str(self.id_),wordForm_,[],pos_,c('poslcat_'),self.features(c('lemma_') \
if c('lemma_') else wordForm_ ,c('cat_'),c('gen_'),c('num_'),c('per_'),c('case_'),c('vib_'),
c('tam_')),c('vpos_'),c('name_'),"_","_",None,":".join(('child',h('chunkId_'))),c('mtype_'),
c('troot_'),c('coref_'),None, None, c('posn_')))
else: children_ = list()
return self
def FSPairs (self, FS) :
feats = dict()
for feat in FS.split():
if "=" not in feat:continue
feat = re.sub("af='+","af='",feat.replace("dmrel=",'drel='))
assert len(feat.split("=")) == 2
attribute,value = feat.split("=")
feats[attribute] = value
return feats
def morphFeatures (self, AF):
"LEMMA,CAT,GEN,NUM,PER,CASE,VIB,TAM"
assert len(AF[:-1].split(",")) == 8 # no need to process trash! FIXME
lemma_,cat_,gen_,num_,per_,case_,vib_,tam_ = AF.split(",")
if len(lemma_) > 1: lemma_ = lemma_.strip("'")
return lemma_.strip("'"),cat_,gen_,num_,per_,case_,vib_,tam_.strip("'")
def updateFSValues (self, attributeValue_pairs):
attributes = dict(zip(['head_','poslcat_','af_','vpos_','name_','drel_','parent_','mtype_','troot_','chunkId_',\
'coref_','stype_','voicetype_','posn_'], [None] * 14))
attributes.update(dict(zip(['lemma_','cat_','gen_','num_','per_','case_','vib_','tam_'], [''] * 8)))
for key,value in attributeValue_pairs.items():
if key == "af":
attributes['lemma_'],attributes['cat_'],attributes['gen_'],attributes['num_'],\
attributes['per_'],attributes['case_'],attributes['vib_'],attributes['tam_'] = \
self.morphFeatures (value)
elif key == "drel":
assert len(value.split(":")) == 2 # no need to process trash! FIXME
attributes['drel_'], attributes['parent_'] = re.sub("'|\"",'',value).split(":")
assert attributes['drel_'] and attributes['parent_'] != "" # no need to process trash! FIXME
else:
variable = str(key) + "_"
if variable == "name_": attributes['chunkId_'] = re.sub("'|\"",'',value)
attributes[variable] = re.sub("'|\"",'',value)
return attributes
| darshan95/Shift-Reduce-Chunk-Expander | src/ssf_reader.py | Python | mit | 4,608 |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.md')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pyramid_jinja2',
'pyramid_debugtoolbar',
'pyramid_tm',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
]
setup(name='guestbook',
version='0.1',
description='guestbook',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python :: 3",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='guestbook',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = guestbook:main
[console_scripts]
initialize_guestbook_db = guestbook.scripts.initializedb:main
""",
)
| necaris/embedded-js-in-python-example | setup.py | Python | mit | 1,216 |
"""
atomorder/parse_args.py
Parses command line arguments and overwrites setting defaults
"""
from . import settings
import argparse
import sys
description = ""
epilog = ""
parser = argparse.ArgumentParser(
description = description,
formatter_class = argparse.RawDescriptionHelpFormatter,
epilog = epilog)
parser = argparse.ArgumentParser(description='Fit probability density functions to data-files')
parser.add_argument('-r', '--reactants', help='Reactant structures in a coordinate file format.', action='store', type=str, nargs='+')
parser.add_argument('-p', '--products', help='Product structures in a coordinate file format.', action='store', type=str, nargs='+')
parser.add_argument('--print-level', help='Print-level - 0: quiet, 1: results and errors, 2: +warnings, 3: +progress, 4: excess, 5: EXTREME',
action='store', choices = range(0,6), default=1, type=int)
parser.add_argument('-f', '--format', help='File format', type=str, action='store', default='guess', choices=["guess","xyz","pdb"])
parser.add_argument('-m', '--method', help='Method to use.\n \
rotate: Ignore bond order, align a single reactant and product molecule and match all atoms\n \
no-bond: Atom matching by rotation and atomic similarity\n \
full: Atom matching by rotation and bond similarity\n \
info: Information about molecule sybyl atom types, bond types and conjugated sub systems',
choices = ['rotate', 'full', 'info', 'no-bond'], action='store', default='full')
parser.add_argument('-o', '--output', help='Given a filename, output the reordered product in xyz format instead of printing to stdout', action='store', type=str, default=sys.stdout)
parser.add_argument('--atomic-sybyl-weight', action='store', default=1, type=float)
parser.add_argument('--bond-weight', action='store', default=1, type=float)
# TODO output to folder
# TODO output atom mapping oneline, save reordered products
# TODO allow possibility to give pickle with reaction object
# TODO output sybyl
# TODO batch reactions
# TODO output aromatic/conjugated subgroups
args = parser.parse_args()
# override setting defaults
settings.update(args)
| larsbratholm/atomorder | atomorder/parse_args.py | Python | mit | 2,393 |
#!/usr/bin/python
import glob
import os
import shutil
import subprocess
import sys
import yaml
def create_role(role):
ret = subprocess.check_output(
'ansible-galaxy init {}'.format(role).split())
if not ret.strip().endswith('created successfully'):
raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
try:
main = open(os.path.join(role, 'meta/main.yml'))
return yaml.load(main)
except IOError:
return {}
def ensure_meta(role):
"""Ensure the role has a meta directory"""
try:
os.makedirs(os.path.join(role, 'meta'))
except OSError:
pass
def set_metadata(role, metadata):
ensure_meta(role)
new_main = os.path.join(role, 'meta/main.yml.new')
orig_main = os.path.join(role, 'meta/main.yml')
with open(new_main, 'w') as out:
yaml.dump(metadata, out, default_flow_style=False, explicit_start=True)
os.rename(new_main, orig_main)
def add_dependency(src_role, target_role):
"""Add metadata saying that 'target_role' depends on 'src_role'"""
md = get_metadata(target_role)
deps = md.setdefault('dependencies', [])
deps.append(os.path.join(target_role, 'roles', src_role))
set_metadata(target_role, md)
def sub_roles(role):
try:
return glob.glob(os.path.join(role, 'roles/*'))
except OSError:
return []
def fix_dependency(role, for_destination):
"""Fix the sub-role dependency.
Dependency on a sub-role has to be changed once we move the base
role.
"""
metadata = get_metadata(role)
deps = metadata.setdefault('dependencies', [])
def f(dep):
if dep.startswith(role):
return os.path.join(for_destination, 'roles', dep)
else:
return dep
metadata['dependencies'] = [f(dep) for dep in deps]
set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
for role in sub_roles(src_role):
fix_dependencies(role, for_destination)
fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
op = shutil.copytree if copy else shutil.move
try:
os.makedirs(os.path.join(target_role, 'roles'))
except OSError:
pass
fix_dependencies(src_role, for_destination=target_role)
op(src_role, os.path.join(target_role, 'roles', src_role))
add_dependency(src_role, target_role)
def concat(roles, into, copy=False):
create_role(into)
for role in roles:
move(role, target_role=into, copy=copy)
def test():
roles = ['foo', 'bar', 'spam']
try:
for role in roles:
create_role(role)
move('foo', 'bar')
assert get_metadata('bar')['dependencies'] == ['bar/roles/foo']
move('bar', 'spam')
assert get_metadata('spam')['dependencies'] == ['spam/roles/bar']
assert get_metadata('spam/roles/bar')['dependencies'] == ['spam/roles/bar/roles/foo']
finally:
for role in roles:
shutil.rmtree(role, ignore_errors=True)
def main():
roles_path = None
if roles_path is not None:
os.chdir(roles_path)
concat([sys.argv[1], sys.argv[2]], into=sys.argv[3])
if __name__ == '__main__':
main()
| waltermoreira/dockeransible | app_builder/app_builder_image/concat_roles.py | Python | mit | 3,259 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Make use of synaptic as backend."""
# Copyright (C) 2008-2010 Sebastian Heinlein <[email protected]>
# Copyright (C) 2005-2007 Canonical
#
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__author__ = "Sebastian Heinlein <[email protected]>, " \
"Michael Vogt <[email protected]"
import tempfile
from gettext import gettext as _
from gi.repository import GObject
from defer import Deferred
import sessioninstaller.errors
class SynapticBackend(object):
"""Make use of Synaptic to install and remove packages."""
def _run_synaptic(self, xid, opt, tempf, interaction):
deferred = Deferred()
if tempf:
opt.extend(["--set-selections-file", "%s" % tempf.name])
#FIXME: Take interaction into account
opt.extend(["-o", "Synaptic::closeZvt=true"])
if xid:
opt.extend(["--parent-window-id", "%s" % (xid)])
cmd = ["/usr/bin/gksu",
"--desktop", "/usr/share/applications/update-manager.desktop",
"--", "/usr/sbin/synaptic", "--hide-main-window",
"--non-interactive"]
cmd.extend(opt)
flags = GObject.SPAWN_DO_NOT_REAP_CHILD
(pid, stdin, stdout, stderr) = GObject.spawn_async(cmd, flags=flags)
GObject.child_watch_add(pid, self._on_synaptic_exit, (tempf, deferred))
return deferred
def _on_synaptic_exit(self, pid, condition, (tempf, deferred)):
if tempf:
tempf.close()
if condition == 0:
deferred.callback()
else:
deferred.errback(sessioninstaller.errors.ModifyFailed())
def remove_packages(self, xid, package_names, interaction):
opt = []
# custom progress strings
#opt.append("--progress-str")
#opt.append("%s" % _("Please wait, this can take some time."))
#opt.append("--finish-str")
#opt.append("%s" % _("Update is complete"))
tempf = tempfile.NamedTemporaryFile()
for pkg_name in package_names:
tempf.write("%s\tuninstall\n" % pkg_name)
tempf.flush()
return self._run_synaptic(xid, opt, tempf, interaction)
def install_packages(self, xid, package_names, interaction):
opt = []
# custom progress strings
#opt.append("--progress-str")
#opt.append("%s" % _("Please wait, this can take some time."))
#opt.append("--finish-str")
#opt.append("%s" % _("Update is complete"))
tempf = tempfile.NamedTemporaryFile()
for pkg_name in package_names:
tempf.write("%s\tinstall\n" % pkg_name)
tempf.flush()
return self._run_synaptic(xid, opt, tempf, interaction)
def install_package_files(self, xid, package_names, interaction):
        raise NotImplementedError
# vim:ts=4:sw=4:et
| yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/sessioninstaller/backends/synaptic.py | Python | mit | 3,567 |
from django.test import TestCase
from medicine.models import Medicine
from medicine.views import ListAllMedicines
from user.models import HealthProfessional
class TestListAllMedicines(TestCase):
def setUp(self):
        # View under test
        self.view = ListAllMedicines
        # Making a medicine
self.medicine = Medicine()
self.medicine.name = "Medicamento Teste"
self.medicine.active_ingredient = "Teste Lab"
self.medicine.save()
self.listing = Medicine.objects.all()
def test_medicine_is_show(self):
instance = self.view()
self.assertEqual(instance.get_queryset()[0], self.listing[0])
| fga-gpp-mds/2017.2-Receituario-Medico | medical_prescription/medicine/test/test_view_list_all_medications.py | Python | mit | 676 |
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bgcolor", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/sankey/node/hoverlabel/_bgcolor.py | Python | mit | 521 |
from pyperator.decorators import inport, outport, component, run_once
from pyperator.nodes import Component
from pyperator.DAG import Multigraph
from pyperator.utils import InputPort, OutputPort
import pyperator.components | baffelli/pyperator | pyperator/__init__.py | Python | mit | 222 |
import itertools
import os.path
import sys
import time
from . import core
from . import file_io
from . import geometry
from . import stringconv
from . import version
#
# Functions
#
def save_output(profileli, opt):
""" Save a summary of results of evaluated profiles
"""
def m(x, pixelwidth):
return geometry.to_metric_units(x, pixelwidth)
def m2(x, pixelwidth):
# For area units...
return geometry.to_metric_units(x, pixelwidth**2)
def na(x):
if x in (None, -1):
return "N/A"
else:
return x
def write_session_summary():
with file_io.FileWriter("session.summary", opt) as f:
f.writerow(["%s version:" % version.title,
"%s (Last modified %s %s, %s)"
% ((version.version,) + version.date)])
f.writerow(["Number of evaluated profiles:", len(eval_proli)])
if err_fli:
f.writerow(["Number of non-evaluated profiles:", len(err_fli)])
f.writerow(["Metric unit:", eval_proli[0].metric_unit])
f.writerow(["Spatial resolution:", opt.spatial_resolution, eval_proli[0].metric_unit])
f.writerow(["Shell width:", opt.shell_width, eval_proli[0].metric_unit])
f.writerow(["Interpoint distances calculated:",
stringconv.yes_or_no(opt.determine_interpoint_dists)])
if opt.determine_interpoint_dists:
f.writerow(["Interpoint distance mode:", opt.interpoint_dist_mode])
f.writerow(["Shortest interpoint distances:",
stringconv.yes_or_no(opt.interpoint_shortest_dist)])
f.writerow(["Lateral interpoint distances:",
stringconv.yes_or_no(opt.interpoint_lateral_dist)])
f.writerow(["Monte Carlo simulations performed:",
stringconv.yes_or_no(opt.run_monte_carlo)])
if opt.run_monte_carlo:
f.writerow(["Number of Monte Carlo runs:", opt.monte_carlo_runs])
f.writerow(["Monte Carlo simulation window:", opt.monte_carlo_simulation_window])
f.writerow(["Strict localization in simulation window:",
stringconv.yes_or_no(opt.monte_carlo_strict_location)])
f.writerow(["Clusters determined:", stringconv.yes_or_no(opt.determine_clusters)])
if opt.determine_clusters:
f.writerow(["Within-cluster distance:",
opt.within_cluster_dist, eval_proli[0].metric_unit])
if clean_fli:
f.writerow(["Input files processed cleanly:"])
f.writerows([[fn] for fn in clean_fli])
if nop_fli:
f.writerow(["Input files processed but which generated no point distances:"])
f.writerows([[fn] for fn in nop_fli])
if warn_fli:
f.writerow(["Input files processed but which generated "
"warnings (see log for details):"])
f.writerows([[fn] for fn in warn_fli])
if err_fli:
f.writerow(["Input files not processed or not included in "
"summary (see log for details):"])
f.writerows([[fn] for fn in err_fli])
def write_profile_summary():
with file_io.FileWriter("profile.summary", opt) as f:
f.writerow(["Postsynaptic element length",
"Presynaptic element length",
"Number of PSDs:",
"Total postsynaptic membrane length incl perforations:",
"Total postsynaptic membrane length excl perforations:",
"Total PSD area:",
"Particles (total)",
"Particles in PSD",
"Particles within %s %s of PSD"
% (opt.spatial_resolution, eval_proli[0].metric_unit),
"Shell particles strictly synaptic and postsynaptic",
"Shell particles strictly synaptic and postsynaptic "
"or associated with postsynaptic membrane",
"Synaptic particles associated w/ postsynaptic "
"membrane",
"Synaptic particles associated w/ presynaptic membrane",
"Perisynaptic particles associated w/ postsynaptic "
"membrane",
"Perisynaptic particles associated w/ presynaptic "
"membrane",
"Within-perforation particles associated w/ "
"postsynaptic membrane",
"Within-perforation particles associated w/ "
"presynaptic membrane",
"Presynaptic profile",
"Postsynaptic profile",
"ID",
"Input file",
"Comment"])
f.writerows([[m(pro.posel.length(), pro.pixelwidth),
m(pro.prsel.length(), pro.pixelwidth),
len(pro.psdli),
m(pro.total_posm.length(), pro.pixelwidth),
sum([m(psd.posm.length(), pro.pixelwidth)
for psd in pro.psdli]),
sum([m2(psd.psdposm.area(), pro.pixelwidth)
for psd in pro.psdli]),
len(pro.pli),
len([p for p in pro.pli if p.is_within_psd]),
len([p for p in pro.pli if p.is_associated_with_psd]),
len([p for p in pro.pli
if p.strict_lateral_location == "synaptic" and
p.axodendritic_location == "postsynaptic" and
p.is_within_postsynaptic_membrane_shell]),
len([p for p in pro.pli
if p.strict_lateral_location == "synaptic" and
(p.axodendritic_location == "postsynaptic" and
p.is_within_postsynaptic_membrane_shell) or
p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "synaptic" and
p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "synaptic" and
p.is_presynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "perisynaptic" and
p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "perisynaptic" and
p.is_presynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "within perforation"
and p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "within perforation"
and p.is_presynaptic_membrane_associated]),
pro.presyn_profile,
pro.postsyn_profile,
                      pro.id,
                      os.path.basename(pro.inputfn),
                      pro.comment] for pro in eval_proli])
def write_point_summary(ptype):
if ptype == "particle":
pli = "pli"
pstr = "particle"
elif ptype == "random":
if not opt.use_random:
return
else:
pli = "randomli"
pstr = "point"
else:
return
with file_io.FileWriter("%s.summary" % ptype, opt) as f:
f.writerow(["%s number (as appearing in input file)" % pstr.capitalize(),
"Coordinates (in pixels)",
"Axodendritic location",
"Distance to postsynaptic element membrane",
"Distance to presynaptic element membrane",
"Lateral location",
"Strict lateral location",
"Lateral distance to nearest PSD center",
"Normalized lateral distance to nearest PSD center",
"Within PSD",
"Within %s %s of PSD" % (opt.spatial_resolution, eval_proli[0].metric_unit),
"Total postsynaptic membrane length incl perforations",
"Total postsynaptic membrane length excl perforations",
"Length of laterally closest PSD",
"Presynaptic profile",
"Postsynaptic profile",
"Profile ID",
"Input file",
"Comment"])
f.writerows([[n+1,
p,
p.axodendritic_location,
m(p.dist_to_posel, pro.pixelwidth),
m(p.dist_to_prsel, pro.pixelwidth),
p.lateral_location,
p.strict_lateral_location,
m(p.lateral_dist_psd, pro.pixelwidth),
p.norm_lateral_dist_psd,
stringconv.yes_or_no(p.is_within_psd),
stringconv.yes_or_no(p.is_associated_with_psd),
m(pro.total_posm.length(), pro.pixelwidth),
m(sum([psd.posm.length() for psd in pro.psdli]),
pro.pixelwidth),
m(p.nearest_psd.posm.length(), pro.pixelwidth),
pro.presyn_profile,
pro.postsyn_profile,
pro.id,
os.path.basename(pro.inputfn),
pro.comment] for pro in eval_proli for n, p in
enumerate(pro.__dict__[pli])])
def write_cluster_summary():
if not opt.determine_clusters:
return
with file_io.FileWriter("cluster.summary", opt) as f:
f.writerow(["Cluster number",
"Number of particles in cluster",
"Distance to postsynaptic membrane of centroid",
"Distance to nearest cluster along postsynaptic element membrane",
"Profile ID",
"Input file",
"Comment"])
f.writerows([[n + 1,
len(c),
m(c.dist_to_posel, pro.pixelwidth),
m(na(c.dist_to_nearest_cluster), pro.pixelwidth),
pro.id,
os.path.basename(pro.inputfn),
pro.comment]for pro in eval_proli for n, c in
enumerate(pro.clusterli)])
def write_interpoint_summaries():
if not opt.determine_interpoint_dists:
return
ip_rels = dict([(key, val)
for key, val in opt.interpoint_relations.items()
if val and 'simulated' not in key])
if not opt.use_random:
for key, val in opt.interpoint_relations.items():
if 'random' in key and val:
del ip_rels[key]
if (len(ip_rels) == 0 or not
(opt.interpoint_shortest_dist or opt.interpoint_lateral_dist)):
return
table = []
if opt.interpoint_dist_mode == 'all':
s = "all distances"
else:
s = "nearest neighbour distances"
table.append(["Mode: " + s])
headerli = list(ip_rels.keys())
prefixli = []
for key, val in ip_rels.items():
prefix = key[0] + key[key.index("- ") + 2] + "_"
prefixli.append(prefix)
if opt.interpoint_shortest_dist and opt.interpoint_lateral_dist:
headerli.extend(headerli)
prefixli.extend([t + 'lat' for t in prefixli])
topheaderli = []
if opt.interpoint_shortest_dist:
topheaderli.append("Shortest distances")
if opt.interpoint_lateral_dist:
topheaderli.extend([""] * (len(ip_rels) - 1))
if opt.interpoint_lateral_dist:
topheaderli.append("Lateral distances along postsynaptic element "
"membrane")
table.extend([topheaderli, headerli])
cols = [[] for _ in prefixli]
for pro in eval_proli:
for n, li in enumerate([pro.__dict__[prefix + "distli"]
for prefix in prefixli]):
cols[n].extend([m(e, pro.pixelwidth) for e in li])
# transpose cols and append to table
table.extend(list(itertools.zip_longest(*cols, fillvalue="")))
with file_io.FileWriter("interpoint.summary", opt) as f:
f.writerows(table)
def write_mc_dist_to_psd(dtype):
if not opt.run_monte_carlo:
return
table = []
if dtype == 'metric':
table.append(["Lateral distances in %s to center of the nearest PSD"
% eval_proli[0].metric_unit])
elif dtype == 'normalized':
table.append(["Normalized lateral distances to the center of the nearest PSD"])
table.append(["Run %d" % (n + 1) for n in range(0, opt.monte_carlo_runs)])
for pro in eval_proli:
if dtype == 'metric':
table.extend(zip(*[[m(p.lateral_dist_psd, pro.pixelwidth) for p in li["pli"]]
for li in pro.mcli]))
elif dtype == 'normalized':
table.extend(zip(*[[p.norm_lateral_dist_psd for p in li["pli"]]
for li in pro.mcli]))
with file_io.FileWriter("simulated.PSD.%s.lateral.distances" % dtype, opt) as f:
f.writerows(table)
def write_mc_dist_to_posel():
if not opt.run_monte_carlo:
return
table = [["Run %d" % (n + 1) for n in range(0, opt.monte_carlo_runs)]]
for pro in eval_proli:
table.extend(itertools.zip_longest(*[[m(p.dist_to_posel, pro.pixelwidth)
for p in li['pli']] for li in pro.mcli]))
with file_io.FileWriter(
"simulated.postsynaptic.element.membrane.distances", opt) as f:
f.writerows(table)
def write_mc_ip_dists(dist_type):
def m_li(*_li):
return [m(x, pro.pixelwidth) for x in _li]
if not (opt.run_monte_carlo and opt.determine_interpoint_dists):
return
for ip_type in [key for key, val in opt.interpoint_relations.items()
if 'simulated' in key and val]:
if ((dist_type == 'shortest' and not opt.interpoint_shortest_dist) or
(dist_type == 'lateral' and not opt.interpoint_lateral_dist)):
return
if dist_type == 'lateral':
short_dist_type = 'lat'
else:
short_dist_type = ''
table = [["Run %d" % (n + 1) for n in range(0, opt.monte_carlo_runs)]]
for pro in eval_proli:
table.extend(itertools.zip_longest(*[m(p, pro.pixelwidth)
for li in pro.mcli
for p in li[ip_type]["%sdist"
% short_dist_type]]))
with file_io.FileWriter("%s.interpoint.%s.distances"
% (ip_type.replace(" ", ""), dist_type), opt) as f:
f.writerows(table)
def write_mc_cluster_summary():
if not (opt.determine_clusters and opt.run_monte_carlo):
return
table = [["N particles in cluster", "Run",
"Distance to postsynaptic element membrane from centroid",
"Distance to nearest cluster along postsynaptic element membrane",
"Profile ID",
"Input file",
"Comment"]]
for pro in eval_proli:
for n in range(0, opt.monte_carlo_runs):
for c in pro.mcli[n]["clusterli"]:
table.append([len(c), n + 1,
m(c.dist_to_posel, pro.pixelwidth),
m(na(c.dist_to_nearest_cluster),
pro.pixelwidth),
pro.id,
os.path.basename(pro.inputfn),
pro.comment])
with file_io.FileWriter("simulated.clusters", opt) as f:
f.writerows(table)
sys.stdout.write("\nSaving summaries to %s:\n" % opt.output_dir)
opt.save_result = {'any_saved': False, 'any_err': False}
eval_proli = [profile for profile in profileli if not profile.errflag]
clean_fli = [profile.inputfn for profile in profileli
if not (profile.errflag or profile.warnflag)]
warn_fli = [profile.inputfn for profile in profileli if profile.warnflag]
err_fli = [profile.inputfn for profile in profileli if profile.errflag]
nop_fli = [profile.inputfn for profile in eval_proli if not profile.pli]
write_session_summary()
write_profile_summary()
write_point_summary('particle')
write_point_summary('random')
write_interpoint_summaries()
write_cluster_summary()
write_mc_dist_to_posel()
write_mc_dist_to_psd('metric')
write_mc_dist_to_psd('normalized')
write_mc_ip_dists('shortest')
write_mc_ip_dists('lateral')
write_mc_cluster_summary()
if opt.save_result['any_err']:
sys.stdout.write("Note: One or more summaries could not be saved.\n")
if opt.save_result['any_saved']:
sys.stdout.write("Done.\n")
else:
sys.stdout.write("No summaries saved.\n")
def reset_options(opt):
""" Deletes certain options that should always be set anew for each run
(each time the "Start" button is pressed)
"""
for optstr in ('metric_unit', 'use_random'):
if hasattr(opt, optstr):
delattr(opt, optstr)
def show_options(opt):
sys.stdout.write("{} version: {} (Last modified {} {}, {})\n".format(
version.title, version.version, *version.date))
sys.stdout.write("Output file format: %s\n" % opt.output_file_format)
sys.stdout.write("Suffix of output files: %s\n" % opt.output_filename_suffix)
sys.stdout.write("Output directory: %s\n" % opt.output_dir)
sys.stdout.write("Spatial resolution: %d\n" % opt.spatial_resolution)
sys.stdout.write("Shell width: %d metric units\n" % opt.shell_width)
sys.stdout.write("Interpoint distances calculated: %s\n"
% stringconv.yes_or_no(opt.determine_interpoint_dists))
if opt.determine_interpoint_dists:
sys.stdout.write("Interpoint distance mode: %s\n" % opt.interpoint_dist_mode.capitalize())
sys.stdout.write("Shortest interpoint distances: %s\n"
% stringconv.yes_or_no(opt.interpoint_shortest_dist))
sys.stdout.write("Lateral interpoint distances: %s\n"
% stringconv.yes_or_no(opt.interpoint_lateral_dist))
sys.stdout.write("Monte Carlo simulations performed: %s\n"
% stringconv.yes_or_no(opt.run_monte_carlo))
if opt.run_monte_carlo:
sys.stdout.write("Number of Monte Carlo runs: %d\n"
% opt.monte_carlo_runs)
sys.stdout.write("Monte Carlo simulation window: %s\n"
% opt.monte_carlo_simulation_window)
sys.stdout.write("Strict localization in simulation window: %s\n"
% stringconv.yes_or_no(opt.monte_carlo_strict_location))
sys.stdout.write("Clusters determined: %s\n" % stringconv.yes_or_no(opt.determine_clusters))
if opt.determine_clusters:
sys.stdout.write("Within-cluster distance: %d\n" % opt.within_cluster_dist)
def get_output_format(opt):
if opt.output_file_format == 'excel':
try:
import openpyxl
except ImportError:
sys.stdout.write("Unable to write Excel files: resorting to csv format.\n")
opt.output_file_format = 'csv'
if opt.output_file_format == 'csv':
opt.output_filename_ext = '.csv'
opt.csv_format = {'dialect': 'excel', 'lineterminator': '\n'}
if opt.csv_delimiter == 'tab':
opt.csv_format['delimiter'] = '\t'
if opt.output_filename_date_suffix:
from datetime import date
opt.output_filename_suffix = "." + date.today().isoformat()
if opt.output_filename_other_suffix != '':
opt.output_filename_suffix += "." + opt.output_filename_other_suffix
def main_proc(parent):
""" Process profile data files
"""
opt = parent.opt
if not opt.input_file_list:
sys.stdout.write("No input files.\n")
return 0
i, n = 0, 0
profileli = []
sys.stdout.write("--- Session started %s local time ---\n" % time.ctime())
for f in opt.input_file_list:
if opt.input_file_list.count(f) > 1:
sys.stdout.write("Duplicate input filename %s:\n => removing first occurrence in "
"list\n" % f)
opt.input_file_list.remove(f)
get_output_format(opt)
reset_options(opt)
show_options(opt)
while True:
if i < len(opt.input_file_list):
inputfn = opt.input_file_list[i]
i += 1
else:
sys.stdout.write("\nNo more input files...\n")
break
parent.process_queue.put(("new_file", inputfn))
profileli.append(core.ProfileData(inputfn, opt))
profileli[-1].process()
if opt.stop_requested:
sys.stdout.write("\n--- Session aborted by user %s local time ---\n" % time.ctime())
return 3
if not profileli[-1].errflag:
n += 1
if profileli[-1].warnflag:
sys.stdout.write("Warning(s) found while processing input file.\n")
continue
else:
sys.stdout.write("Error(s) found while processing input file =>\n"
" => No distances could be determined.\n")
continue
# no more input files
errfli = [pro.inputfn for pro in profileli if pro.errflag]
warnfli = [pro.inputfn for pro in profileli if pro.warnflag]
if errfli:
sys.stdout.write("\n%s input %s generated one or more errors:\n"
% (stringconv.plurality("This", len(errfli)),
stringconv.plurality("file", len(errfli))))
sys.stdout.write("%s\n" % "\n".join([fn for fn in errfli]))
if warnfli:
sys.stdout.write("\n%s input %s generated one or more warnings:\n"
% (stringconv.plurality("This", len(warnfli)),
stringconv.plurality("file", len(warnfli))))
sys.stdout.write("%s\n" % "\n".join([fn for fn in warnfli]))
if n > 0:
parent.process_queue.put(("saving_summaries", ""))
save_output(profileli, opt)
else:
sys.stdout.write("\nNo files processed.\n")
sys.stdout.write("--- Session ended %s local time ---\n" % time.ctime())
parent.process_queue.put(("done", ""))
if errfli:
return 0
elif warnfli:
return 2
else:
return 1
# End of main.py
| maxdl/Synapse.py | synapse/main.py | Python | mit | 24,280 |
import os
import re
import subprocess
from six.moves.urllib.parse import urlparse, quote_plus
from subprocess import CalledProcessError, PIPE, STDOUT
from conans.client.tools.env import no_op, environment_append
from conans.client.tools.files import chdir
from conans.errors import ConanException
from conans.util.files import decode_text, to_file_bytes
class Git(object):
def __init__(self, folder=None, verify_ssl=True, username=None, password=None,
force_english=True, runner=None):
self.folder = folder or os.getcwd()
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self._verify_ssl = verify_ssl
self._force_eng = force_english
self._username = username
self._password = password
self._runner = runner
def run(self, command):
command = "git %s" % command
with chdir(self.folder) if self.folder else no_op():
with environment_append({"LC_ALL": "en_US.UTF-8"}) if self._force_eng else no_op():
if not self._runner:
return subprocess.check_output(command, shell=True).decode().strip()
else:
return self._runner(command)
def get_repo_root(self):
return self.run("rev-parse --show-toplevel")
def get_url_with_credentials(self, url):
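        # e.g. username "user" and password "p@ss" turn "https://host/repo.git"
        # into "https://user:p%40ss@host/repo.git" (credentials are URL-quoted).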
if not self._username or not self._password:
return url
if urlparse(url).password:
return url
user_enc = quote_plus(self._username)
pwd_enc = quote_plus(self._password)
url = url.replace("://", "://" + user_enc + ":" + pwd_enc + "@", 1)
return url
def _configure_ssl_verify(self):
return self.run("config http.sslVerify %s" % ("true" if self._verify_ssl else "false"))
def clone(self, url, branch=None):
url = self.get_url_with_credentials(url)
if os.path.exists(url):
url = url.replace("\\", "/") # Windows local directory
if os.path.exists(self.folder) and os.listdir(self.folder):
if not branch:
raise ConanException("The destination folder '%s' is not empty, "
"specify a branch to checkout (not a tag or commit) "
"or specify a 'subfolder' "
"attribute in the 'scm'" % self.folder)
output = self.run("init")
output += self._configure_ssl_verify()
output += self.run('remote add origin "%s"' % url)
output += self.run("fetch ")
output += self.run("checkout -t origin/%s" % branch)
else:
branch_cmd = "--branch %s" % branch if branch else ""
output = self.run('clone "%s" . %s' % (url, branch_cmd))
output += self._configure_ssl_verify()
return output
def checkout(self, element, submodule=None):
self._check_git_repo()
output = self.run('checkout "%s"' % element)
if submodule:
if submodule == "shallow":
output += self.run("submodule sync")
output += self.run("submodule update --init")
elif submodule == "recursive":
output += self.run("submodule sync --recursive")
output += self.run("submodule update --init --recursive")
else:
raise ConanException("Invalid 'submodule' attribute value in the 'scm'. "
"Unknown value '%s'. Allowed values: ['shallow', 'recursive']" % submodule)
# Element can be a tag, branch or commit
return output
def excluded_files(self):
try:
file_paths = [os.path.normpath(os.path.join(os.path.relpath(folder, self.folder), el)).replace("\\", "/")
for folder, dirpaths, fs in os.walk(self.folder)
for el in fs + dirpaths]
p = subprocess.Popen(['git', 'check-ignore', '--stdin'],
stdout=PIPE, stdin=PIPE, stderr=STDOUT, cwd=self.folder)
paths = to_file_bytes("\n".join(file_paths))
grep_stdout = decode_text(p.communicate(input=paths)[0])
tmp = grep_stdout.splitlines()
except CalledProcessError:
tmp = []
return tmp
def get_remote_url(self, remote_name=None):
self._check_git_repo()
remote_name = remote_name or "origin"
try:
remotes = self.run("remote -v")
for remote in remotes.splitlines():
try:
name, url = remote.split(None, 1)
url, _ = url.rsplit(None, 1)
if name == remote_name:
return url
except Exception:
pass
except subprocess.CalledProcessError:
pass
return None
def get_commit(self):
self._check_git_repo()
try:
commit = self.run("rev-parse HEAD")
commit = commit.strip()
return commit
except Exception as e:
raise ConanException("Unable to get git commit from %s\n%s" % (self.folder, str(e)))
get_revision = get_commit
def _check_git_repo(self):
try:
self.run("status")
except Exception:
raise ConanException("Not a valid git repository")
def get_branch(self):
self._check_git_repo()
try:
status = self.run("status -bs --porcelain")
# ## feature/scm_branch...myorigin/feature/scm_branch
branch = status.splitlines()[0].split("...")[0].strip("#").strip()
return branch
except Exception as e:
raise ConanException("Unable to get git branch from %s\n%s" % (self.folder, str(e)))
| luckielordie/conan | conans/client/tools/scm.py | Python | mit | 5,881 |
### import ####################################################################
import pycmds.project.classes as pc
import pycmds.hardware.hardware as hw
import pathlib
import appdirs
import toml
import yaqc
### driver ####################################################################
class Driver(hw.Driver):
def __init__(self, *args, **kwargs):
self._yaqd_port = kwargs.pop("yaqd_port")
super().__init__(*args, **kwargs)
self.grating_index = pc.Combo(
name="Grating",
allowed_values=[1, 2],
section=self.name,
option="grating_index",
display=True,
set_method="set_turret",
)
self.exposed.append(self.grating_index)
def get_position(self):
native_position = self.ctrl.get_position()
self.position.write(native_position, self.native_units)
return self.position.read()
def initialize(self, *args, **kwargs):
# open control
self.ctrl = yaqc.Client(self._yaqd_port)
# import some information from control
id_dict = self.ctrl.id()
self.serial_number = id_dict["serial"]
self.position.write(self.ctrl.get_position())
# recorded
self.recorded[self.name] = [self.position, self.native_units, 1.0, "m", False]
self.wait_until_still()
# finish
self.initialized.write(True)
self.initialized_signal.emit()
def is_busy(self):
return self.ctrl.busy()
def set_position(self, destination):
self.ctrl.set_position(float(destination))
self.wait_until_still()
def set_turret(self, destination_index):
if type(destination_index) == list:
destination_index = destination_index[0]
# turret index on ActiveX call starts from zero
destination_index_zero_based = int(destination_index) - 1
self.ctrl.set_turret(destination_index_zero_based)
self.grating_index.write(destination_index)
self.wait_until_still()
self.limits.write(*self.ctrl.get_limits(), self.native_units)
### gui #######################################################################
class GUI(hw.GUI):
pass
### hardware ##################################################################
class Hardware(hw.Hardware):
def __init__(self, *args, **kwargs):
self.kind = "spectrometer"
hw.Hardware.__init__(self, *args, **kwargs)
### import ####################################################################
conf = pathlib.Path(appdirs.user_config_dir("pycmds", "pycmds")) / "config.toml"
conf = toml.load(conf)
hardwares, gui, advanced_gui = hw.import_hardwares(
conf.get("hardware", {}).get("spectrometers", {}),
name="Spectrometers",
Driver=Driver,
GUI=GUI,
Hardware=Hardware,
)
| wright-group/PyCMDS | pycmds/hardware/spectrometers.py | Python | mit | 2,840 |
u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
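# Illustrative before/after (schematic, not byte-exact output of this fixer):
#
#     def f(a, *, b, c=2):            # Python 3 keyword-only parameters
#         ...
#
# is rewritten along the lines of:
#
#     def f(a, **_3to2kwargs):
#         if 'c' in _3to2kwargs: c = _3to2kwargs['c']; del _3to2kwargs['c']
#         else: c = 2
#         b = _3to2kwargs['b']; del _3to2kwargs['b']
#         ...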
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
    If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
            # Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
| thonkify/thonkify | src/lib/libpasteurize/fixes/fix_kwargs.py | Python | mit | 6,066 |
#!/usr/bin/env python
__author__ = 'Radoslaw Matusiak'
__copyright__ = 'Copyright (c) 2016 Radoslaw Matusiak'
__license__ = 'MIT'
__version__ = '0.5'
import cmd
import functools
import os
import sys
from polar import Device
from polar.pb import device_pb2 as pb_device
__INTRO = """
_| _| _|
_| _|_| _|_| _|_|_| _|_|_| _|_| _| _|_|
_| _| _| _| _| _| _| _| _| _| _| _| _|_|_|_|
_| _| _| _| _| _| _| _| _| _| _| _| _|
_| _|_| _|_| _|_|_| _| _| _|_| _| _|_|_|
_|
_|
ver. {}
"""
def check_if_device_is_connected(f):
"""
Decorator. Checks if device is connected before invoking function.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if args[0].device is not None:
return f(*args, **kwargs)
else:
print '[!] Device disconnected.'
print
return wrapper
class LoopholeCli(cmd.Cmd):
""" Loophole command line interface class. """
__PROMPT = 'loophole({})>'
def __init__(self):
"""Constructor.
"""
cmd.Cmd.__init__(self)
self.prompt = LoopholeCli.__PROMPT.format('no device')
self.device = None
# end-of-method __init__
def do_exit(self, _):
"""Quit.
Usage: exit
"""
if self.device is not None:
self.device.close()
sys.exit(0)
# end-of-method do_exit
def do_EOF(self, _):
"""Quit. handles EOF"""
self.do_exit(_)
# end-of-method do_EOF
def do_list(self, _):
"""List available Polar devices.
Usage: list
"""
devs = Device.list()
if len(devs) > 0:
for i, dev in enumerate(devs):
try:
info = Device.get_info(dev)
except ValueError as err:
print "Device no: %i" % i
print "Device info:"
print dev
print "-"*79
if 'langid' in err.message:
raise ValueError(
(
"Can't get device info. Origin Error: %s\n"
"Maybe this is a permission issue.\n"
"Please read section 'permission' in README ;)"
) % err
)
raise # raise origin error
print '{} - {} ({})'.format(i, info['product_name'], info['serial_number'])
else:
print '[!] No Polar devices found!'
print
# end-of-method do_list
def do_connect(self, dev_no):
"""Connect Polar device. Run 'list' to see available devices.
Usage: connect <device_no>
"""
try:
dev_no = int(dev_no)
except ValueError:
print '[!] You need to specify the device number. Run \'list\' to see available devices.'
print
return
try:
devs = Device.list()
dev = devs[dev_no]
serial = Device.get_info(dev)['serial_number']
self.prompt = LoopholeCli.__PROMPT.format(serial)
self.device = Device(dev)
self.device.open()
print '[+] Device connected.'
print
except IndexError:
print '[!] Device not found or failed to open it. Run \'list\' to see available devices.'
print
# end-of-method do_connect
@check_if_device_is_connected
def do_disconnect(self, _):
"""Disconnect Polar device.
"""
self.device.close()
self.device = None
self.prompt = LoopholeCli.__PROMPT.format('no device')
print '[+] Device disconnected.'
print
# end-of-method do_disconnect
@check_if_device_is_connected
def do_get(self, line):
"""Read file from device and store in under local_path.
Usage: get <device_path> <local_path>
"""
try:
src, dest = line.strip().split()
data = self.device.read_file(src)
with open(dest, 'wb') as outfile:
outfile.write(bytearray(data))
print '[+] File \'{}\' saved to \'{}\''.format(src, dest)
print
except ValueError:
print '[!] Invalid command usage.'
print '[!] Usage: get <source> <destination>'
print
# end-of-method do_get
@check_if_device_is_connected
def do_delete(self, line):
"""Delete file from device.
Usage: delete <device_path>
"""
path = line.strip()
_ = self.device.delete(path)
# end-of-method do_delete
@check_if_device_is_connected
def do_dump(self, path):
"""Dump device memory. Path is local folder to store dump.
Usage: dump <local_path>
"""
print '[+] Reading files tree...'
dev_map = self.device.walk(self.device.SEP)
for directory in dev_map.keys():
fixed_directory = directory.replace(self.device.SEP, os.sep)
full_path = os.path.abspath(os.path.join(path, fixed_directory[1:]))
if not os.path.exists(full_path):
os.makedirs(full_path)
d = dev_map[directory]
files = [e for e in d.entries if not e.name.endswith('/')]
for file in files:
with open(os.path.join(full_path, file.name), 'wb') as fh:
print '[+] Dumping {}{}'.format(directory, file.name)
data = self.device.read_file('{}{}'.format(directory, file.name))
fh.write(bytearray(data))
print '[+] Device memory dumped.'
print
# end-of-method do_dump
@check_if_device_is_connected
def do_info(self, _):
"""Print connected device info.
Usage: info
"""
info = Device.get_info(self.device.usb_device)
print '{:>20s} - {}'.format('Manufacturer', info['manufacturer'])
print '{:>20s} - {}'.format('Product name', info['product_name'])
print '{:>20s} - {}'.format('Vendor ID', info['vendor_id'])
print '{:>20s} - {}'.format('Product ID', info['product_id'])
print '{:>20s} - {}'.format('Serial number', info['serial_number'])
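        # Extended info: read the device's /DEVICE.BPB file and decode it as a PbDeviceInfo protobuf.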
try:
data = self.device.read_file('/DEVICE.BPB')
resp = ''.join(chr(c) for c in data)
d = pb_device.PbDeviceInfo()
d.ParseFromString(resp)
bootloader_version = '{}.{}.{}'.format(d.bootloader_version.major, d.bootloader_version.minor, d.bootloader_version.patch)
print '{:>20s} - {}'.format('Bootloader version', bootloader_version)
platform_version = '{}.{}.{}'.format(d.platform_version.major, d.platform_version.minor, d.platform_version.patch)
print '{:>20s} - {}'.format('Platform version', platform_version)
device_version = '{}.{}.{}'.format(d.device_version.major, d.device_version.minor, d.device_version.patch)
print '{:>20s} - {}'.format('Device version', device_version)
print '{:>20s} - {}'.format('SVN revision', d.svn_rev)
print '{:>20s} - {}'.format('Hardware code', d.hardware_code)
print '{:>20s} - {}'.format('Color', d.product_color)
print '{:>20s} - {}'.format('Product design', d.product_design)
except:
print '[!] Failed to get extended info.'
print ' '
# end-of-method do_info
@check_if_device_is_connected
def do_fuzz(self, _):
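        # Crude protocol fuzzer: send a raw [0x01, <id>] packet for the given id (or for every
        # id 0-255) and print the PFTP error code the device returns.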
import polar
num = _.strip()
if len(num) > 0:
num = int(num)
resp = self.device.send_raw([0x01, num] + [0x00] * 62)
print 'req: {} '.format(num),
if resp:
print 'err code: {}'.format(polar.PFTP_ERROR[resp[0]])
return
for i in xrange(256):
#raw_input('Sending [{}]...<press enter>'.format(i))
if (i & 0x03) == 2:
continue
if i in [3, 251, 252]:
continue
resp = self.device.send_raw([0x01, i] + [0x00] * 62)
print 'resp: {} '.format(i),
if resp:
print 'err code: {}'.format(polar.PFTP_ERROR[resp[0]])
else:
print
# end-of-method do_fuzz
@check_if_device_is_connected
def do_put_file(self, line):
path, filename = line.split()
self.device.put_file(path.strip(), filename.strip())
# end-of-method do_put_file
@check_if_device_is_connected
def do_walk(self, path):
"""Walk file system. Default device_path is device root folder.
Usage: walk [device_path]
"""
if not path.endswith('/'):
path += '/'
fs = self.device.walk(path)
keyz = fs.keys()
keyz.sort()
for k in keyz:
print k
d = fs[k]
files = [e for e in d.entries if not e.name.endswith('/')]
files.sort()
for f in files:
print '{}{} ({} bytes)'.format(k, f.name, f.size)
print
# end-of-method do_walk
pass
# end-of-class Loophole
def main():
cli = LoopholeCli()
cli.cmdloop(__INTRO.format(__version__))
# end-of-function main
##
# Entry point
if __name__ == '__main__':
main()
| rsc-dev/loophole | loophole/__main__.py | Python | mit | 9,572 |
import json
import sublime
import sublime_plugin
from .edit import Edit
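# Text command that asks for the directory containing pawncc.exe and writes a
# Pawn.sublime-build file pointing the build system at that compiler.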
class PawnBuildPathCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.window().show_input_panel(
"Working directory that contains pawncc.exe",
"C:\\Pawno\\",
self.onPawnPathDone,
None,
None
)
def onPawnPathDone(self, path):
view = self.view.window().new_file()
path = path.replace("\\", "/")
obj = {
"cmd": [
"pawncc.exe",
"$file",
"-o$file_path/$file_base_name",
"-;+",
"-(+",
"-d3"
],
"file_regex": r"(.*?)\(([0-9]*)[- 0-9]*\)",
"selector": "source.pwn",
"working_dir": path
}
with Edit(view) as edit:
edit.insert(0, json.dumps(obj, indent=4))
view.set_name("Pawn.sublime-build")
view.run_command("save")
| Southclaw/pawn-sublime-language | PawnBuildPath.py | Python | mit | 1,010 |
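# Minimal signal-propagation model used by the LadderiLogical tests: a node fans out
# set()/clear() to its outputs, a switch gates the signal on its open/closed state, and a
# light prints whenever it is set or cleared.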
class node:
def __init__(self):
self.outputs=[]
def set(self):
for out in self.outputs:
out.set()
def clear(self):
for out in self.outputs:
out.clear()
class switch:
def __init__(self):
self.outputs=[]
self.state=False
self.input=False
def set(self):
self.input=True
if(self.state):
for out in self.outputs:
out.set()
def clear(self):
self.input=False
for out in self.outputs:
out.clear()
def open(self):
self.state=False
for out in self.outputs:
out.clear()
def close(self):
        # closing the switch turns its state on; pass the signal along only if the input is set
        self.state=True
        if(self.input):
for out in self.outputs:
out.set()
class light:
def __init__(self):
self.outputs=[]
def set(self):
print('light set')
for out in self.outputs:
out.set()
def clear(self):
print('light cleared')
for out in self.outputs:
out.clear()
if __name__ == '__main__':
a=node()
s=switch()
b=node()
l=light()
a.outputs.append(s)
s.outputs.append(b)
b.outputs.append(l)
a.set()
s.close()
print('switch close')
s.open() | mikadam/LadderiLogical | tests/node.py | Python | mit | 1,030 |
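# Project Euler 35: count the circular primes below one million (numbers that stay prime
# under every rotation of their digits).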
from math import sqrt
def is_prime(x):
for i in xrange(2, int(sqrt(x) + 1)):
if x % i == 0:
return False
return True
def rotate(v):
res = []
u = str(v)
while True:
u = u[1:] + u[0]
w = int(u)
if w == v:
break
res.append(w)
return res
MILLION = 1000000
primes = filter(is_prime, range(2, MILLION))
s = set(primes)
ans = 0
for item in primes:
flag = True
print item
for y in rotate(item):
if y not in s:
flag = False
if flag:
ans += 1
print ans
| neutronest/eulerproject-douby | e35/35.py | Python | mit | 586 |
"""
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import time
import sys
import json
try:
import urllib.request as request
from urllib.error import HTTPError
except ImportError:
import urllib2 as request
from urllib2 import HTTPError
import pandas
PREV_TIME = time.time()
IS_PYTHON3 = sys.version[0] == "3"
def fix_missing_gene_symbols(de_novos, build='grch37'):
""" adds gene symbols to variants lacking them.
Args:
de_novos: dataframe of de novo variants
build: whether to use the 'grch37' or 'grch38' build (default=GRCh37)
Returns:
pandas Series of HGNC symbols, with additional annotations for many
variants previously lacking a HGNC symbol.
"""
symbols = de_novos["symbol"].copy()
# get the variants with no gene annotation, ensure chrom, start and stop
# positions columns exist
missing = de_novos[symbols == ""].copy()
missing['end'] = missing["pos"] + missing["ref"].str.len() - 1
# find the HGNC symbols (if any) for the variants
missing = [ get_gene_id(x["chrom"], x["pos"], x['end'], build=build, verbose=True) for i, x in missing.iterrows() ]
symbols[de_novos["symbol"] == ""] = missing
# 360 out of 17000 de novos still lack HGNC symbols. Their consequences are:
#
# consequence count
# ======================== =====
# downstream_gene_variant 17
# intergenic_variant 259
# regulatory_region_variant 63
# upstream_gene_variant 27
#
# In spot checks, these are sufficiently distant from genes that we can't
# add them to the analysis of their nearest gene. We shall analyse these
# per site by giving them mock gene symbols.
missing = de_novos[symbols == ""].copy()
fake = 'fake_symbol.' + missing['chrom'].map(str) + '_' + missing["pos"].map(str)
symbols[symbols == ""] = fake
return symbols
def open_url(url, headers):
""" open url with python libraries
Args:
url:
headers:
Returns:
tuple of http response, http response code, and http headers
"""
req = request.Request(url, headers=headers)
try:
handler = request.urlopen(req)
except HTTPError as e:
handler = e
status_code = handler.getcode()
response = handler.read()
if IS_PYTHON3:
response = response.decode("utf-8")
# parse the headers into a key, value dictionary
headers = dict(zip(map(str.lower, handler.headers.keys()), handler.headers.values()))
return response, status_code, headers
def rate_limit_requests(rate_limit=0.0667):
""" limit ensembl requests to one per 0.067 s
"""
global PREV_TIME
diff = time.time() - PREV_TIME
if diff < rate_limit:
time.sleep(rate_limit - diff)
PREV_TIME = time.time()
def get_gene_id(chrom, start_pos, end_pos, build="grch37", verbose=False, attempts=0):
"""find the hgnc symbol overlapping a variant position
Args:
variant: data frame or list for a variant, containing columns named
"chrom", "start_pos", and "end_pos" for a single variant
build: genome build to find consequences on
verbose: flag indicating whether to print variants as they are checked
Returns:
a character string containing the HGNC symbol.
"""
attempts += 1
if attempts > 5:
raise ValueError("too many attempts, figure out why its failing")
rate_limit_requests()
# define parts of the URL
ext = "overlap/region/human/{0}:{1}-{2}?feature=gene".format(chrom, start_pos, end_pos)
server_dict = {"grch37": "grch37.", "grch38": ""}
base_url = "http://{}rest.ensembl.org".format(server_dict[build])
url = "{0}/{1}".format(base_url, ext)
headers = {"Content-Type" : "application/json"}
if verbose:
print("chr{0}:{1} {2}".format(chrom, start_pos, ext))
response, status_code, requested_headers = open_url(url, headers)
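    # Honour Ensembl's rate limiting and transient failures: wait as instructed on HTTP 429,
    # back off on 503/504, then retry the lookup.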
if status_code == 429:
if "retry-after" in requested_headers:
time.sleep(float(requested_headers["retry-after"]))
elif "x-ratelimit-reset" in requested_headers:
time.sleep(int(requested_headers["x-ratelimit-reset"]))
return get_gene_id(chrom, start_pos, end_pos, build, verbose, attempts)
elif status_code in [503, 504]:
time.sleep(30)
return get_gene_id(chrom, start_pos, end_pos, build, verbose, attempts)
elif status_code != 200:
raise ValueError('Invalid Ensembl response: {0}.\nSubmitted '
'URL was: {1}{2}\nheaders: {3}\nresponse: {4}'.format(status_code,
base_url, ext, requested_headers, response))
json_text = json.loads(response)
if len(json_text) > 0:
return json_text[0]["external_name"]
return ""
| jeremymcrae/denovoFilter | denovoFilter/missing_symbols.py | Python | mit | 5,967 |
import mock
from tests.compat import unittest
from tests.utils import APITestCase
import evelink.eve as evelink_eve
class EVETestCase(APITestCase):
def setUp(self):
super(EVETestCase, self).setUp()
self.eve = evelink_eve.EVE(api=self.api)
def test_character_names_from_ids(self):
self.api.get.return_value = self.make_api_result("eve/character_name.xml")
result, current, expires = self.eve.character_names_from_ids(set([1,2]))
self.assertEqual(result, {1:"EVE System", 2:"EVE Central Bank"})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterName', params={'IDs': set([1,2])}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_character_name_from_id(self):
self.api.get.return_value = self.make_api_result("eve/character_name_single.xml")
result, current, expires = self.eve.character_name_from_id(1)
self.assertEqual(result, "EVE System")
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterName', params={'IDs': [1]}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_character_ids_from_names(self):
self.api.get.return_value = self.make_api_result("eve/character_id.xml")
result, current, expires = self.eve.character_ids_from_names(set(["EVE System", "EVE Central Bank"]))
self.assertEqual(result, {"EVE System":1, "EVE Central Bank":2})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterID', params={'names': set(["EVE System","EVE Central Bank"])}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_character_id_from_name(self):
self.api.get.return_value = self.make_api_result("eve/character_id_single.xml")
result, current, expires = self.eve.character_id_from_name("EVE System")
self.assertEqual(result, 1)
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterID', params={'names': ["EVE System"]}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_affiliations_for_characters(self):
self.api.get.return_value = self.make_api_result("eve/character_affiliation.xml")
result, current, expires = self.eve.affiliations_for_characters(set([92168909, 401111892, 1979087900]))
self.assertEqual(result, {
1979087900: {
'id': 1979087900,
'name': 'Marcel Devereux',
'faction': {
'id': 500004,
'name': 'Gallente Federation'
},
'corp': {
'id': 1894214152,
'name': 'Aideron Robotics'
}
},
401111892: {
'id': 401111892,
'name': 'ShadowMaster',
'alliance': {
'id': 99000652,
'name': 'RvB - BLUE Republic'
},
'corp': {
'id': 1741770561,
'name': 'Blue Republic'
}
},
92168909: {
'id': 92168909,
'name': 'CCP FoxFour',
'alliance': {
'id': 434243723,
'name': 'C C P Alliance'
},
'corp': {
'id': 109299958,
'name': 'C C P'
}
}
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterAffiliation', params={'ids': set([92168909, 401111892, 1979087900])})
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_affiliations_for_character(self):
self.api.get.return_value = self.make_api_result("eve/character_affiliation_single.xml")
result, current, expires = self.eve.affiliations_for_character(92168909)
self.assertEqual(result, {
'id': 92168909,
'name': 'CCP FoxFour',
'alliance': {
'id': 434243723,
'name': 'C C P Alliance'
},
'corp': {
'id': 109299958,
'name': 'C C P'
}
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterAffiliation', params={'ids': [92168909]})
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_character_info_from_id(self):
self.api.get.return_value = self.make_api_result("eve/character_info.xml")
result, current, expires = self.eve.character_info_from_id(1234)
self.assertEqual(result, {
'alliance': {'id': None, 'name': None, 'timestamp': None},
'bloodline': 'Civire',
'corp': {'id': 2345, 'name': 'Test Corporation', 'timestamp': 1338689400},
'history': [
{'corp_id': 1, 'corp_name': 'test_one', 'start_ts': 1338603000},
{'corp_id': 2, 'corp_name': 'test_two', 'start_ts': 1318422896}
],
'id': 1234,
'isk': None,
'location': None,
'name': 'Test Character',
'race': 'Caldari',
'sec_status': 2.5,
'ship': {'name': None, 'type_id': None, 'type_name': None},
'skillpoints': None,
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/CharacterInfo', params={'characterID': 1234}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_alliances(self):
self.api.get.return_value = self.make_api_result("eve/alliances.xml")
result, current, expires = self.eve.alliances()
self.assertEqual(result, {
1: {
'executor_id': 2,
'id': 1,
'member_corps': {
2: {'id': 2, 'timestamp': 1289250660},
3: {'id': 3, 'timestamp': 1327728960},
4: {'id': 4, 'timestamp': 1292440500},
},
'member_count': 123,
'name': 'Test Alliance',
'ticker': 'TEST',
'timestamp': 1272717240,
}
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/AllianceList', params={}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_errors(self):
self.api.get.return_value = self.make_api_result("eve/errors.xml")
result, current, expires = self.eve.errors()
self.assertEqual(result, {1:"Foo", 2:"Bar"})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/ErrorList', params={}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_faction_warfare_stats(self):
self.api.get.return_value = self.make_api_result("eve/faction_warfare_stats.xml")
result, current, expires = self.eve.faction_warfare_stats()
self.assertEqual(result, {
'kills': {'total': 232772, 'week': 3246, 'yesterday': 677},
'points': {'total': 44045189, 'week': 414049, 'yesterday': 55087},
'factions': {
500001: {
'id': 500001,
'kills': {'total': 59239, 'week': 627, 'yesterday': 115},
'name': 'Caldari State',
'pilots': 5324,
'points': {'total': 4506493, 'week': 64548, 'yesterday': 9934},
'systems': 61,
},
500002: {
'id': 500002,
'kills': {'total': 56736, 'week': 952, 'yesterday': 213},
'name': 'Minmatar Republic',
'pilots': 4068,
'points': {'total': 3627522, 'week': 51211, 'yesterday': 2925},
'systems': 0,
},
500003: {
'id': 500003,
'kills': {'total': 55717, 'week': 1000, 'yesterday': 225},
'name': 'Amarr Empire',
'pilots': 3960,
'points': {'total': 3670190, 'week': 50518, 'yesterday': 3330},
'systems': 11,
},
500004: {
'id': 500004,
'kills': {'total': 61080, 'week': 667, 'yesterday': 124},
'name': 'Gallente Federation',
'pilots': 3663,
'points': {'total': 4098366, 'week': 62118, 'yesterday': 10343},
'systems': 0,
},
},
'wars': [
{
'against': {'id': 500002, 'name': 'Minmatar Republic'},
'faction': {'id': 500001, 'name': 'Caldari State'},
},
{
'against': {'id': 500004, 'name': 'Gallente Federation'},
'faction': {'id': 500001, 'name': 'Caldari State'},
},
{
'against': {'id': 500001, 'name': 'Caldari State'},
'faction': {'id': 500002, 'name': 'Minmatar Republic'},
},
{
'against': {'id': 500003, 'name': 'Amarr Empire'},
'faction': {'id': 500002, 'name': 'Minmatar Republic'},
},
{
'against': {'id': 500002, 'name': 'Minmatar Republic'},
'faction': {'id': 500003, 'name': 'Amarr Empire'},
},
{
'against': {'id': 500004, 'name': 'Gallente Federation'},
'faction': {'id': 500003, 'name': 'Amarr Empire'},
},
{
'against': {'id': 500001, 'name': 'Caldari State'},
'faction': {'id': 500004, 'name': 'Gallente Federation'},
},
{
'against': {'id': 500003, 'name': 'Amarr Empire'},
'faction': {'id': 500004, 'name': 'Gallente Federation'},
}
],
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/FacWarStats', params={}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_faction_warfare_leaderboard(self):
self.api.get.return_value = self.make_api_result("eve/faction_warfare_leaderboard.xml")
result, current, expires = self.eve.faction_warfare_leaderboard()
self.assertEqual(result, {
'char': {
'kills': {
'total': [{'id': 673662188, 'kills': 451, 'name': 'Val Erian'}],
'week': [{'id': 187452523, 'kills': 52, 'name': 'Tigrana Blanque'}],
'yesterday': [
{'id': 1007512845, 'kills': 14, 'name': 'StonedBoy'},
{'id': 646053002, 'kills': 11, 'name': 'Erick Voliffe'},
],
},
'points': {
'total': [{'id': 395923478, 'name': 'sasawong', 'points': 197046}],
'week': [{'id': 161929388, 'name': 'Ankhesentapemkah', 'points': 20851}],
'yesterday': [{'id': 774720050, 'name': 'v3nd3tt4', 'points': 3151}],
},
},
'corp': {
'kills': {
'total': [{'id': 673662188, 'kills': 451, 'name': 'Val Erian'}],
'week': [{'id': 187452523, 'kills': 52, 'name': 'Tigrana Blanque'}],
'yesterday': [
{'id': 1007512845, 'kills': 14, 'name': 'StonedBoy'},
{'id': 646053002, 'kills': 11, 'name': 'Erick Voliffe'},
],
},
'points': {
'total': [{'id': 395923478, 'name': 'sasawong', 'points': 197046}],
'week': [{'id': 161929388, 'name': 'Ankhesentapemkah', 'points': 20851}],
'yesterday': [{'id': 774720050, 'name': 'v3nd3tt4', 'points': 3151}],
},
},
'faction': {
'kills': {
'total': [{'id': 500004, 'kills': 104, 'name': 'Gallente Federation'}],
'week': [{'id': 500004, 'kills': 105, 'name': 'Gallente Federation'}],
'yesterday': [{'id': 500004, 'kills': 106, 'name': 'Gallente Federation'}],
},
'points': {
'total': [{'id': 500004, 'points': 101, 'name': 'Gallente Federation'}],
'week': [{'id': 500004, 'points': 102, 'name': 'Gallente Federation'}],
'yesterday': [{'id': 500004, 'points': 103, 'name': 'Gallente Federation'}],
},
},
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/FacWarTopStats', params={}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_conquerable_stations(self):
self.api.get.return_value = self.make_api_result("eve/conquerable_stations.xml")
result, current, expires = self.eve.conquerable_stations()
self.assertEqual(result, {
1:{ 'id':1,
'name':"Station station station",
'type_id':123,
'system_id':512,
'corp':{
'id':444,
'name':"Valkyries of Night" }
},
2:{ 'id':2,
'name':"Station the station",
'type_id':42,
'system_id':503,
'corp':{
'id':400,
'name':"Deus Fides Empire"}
}
})
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/ConquerableStationlist', params={}),
])
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
def test_skill_tree(self):
self.api.get.return_value = self.make_api_result("eve/skill_tree.xml")
result, current, expires = self.eve.skill_tree()
self.assertEqual(result, {
255: {
'id': 255,
'name': 'Gunnery',
'skills': {
3300: {
'attributes': {
'primary': 'perception',
'secondary': 'willpower',
},
'bonuses': {
'turretSpeeBonus': {
'type': 'turretSpeeBonus',
'value': -2.0,
},
},
'description': "Basic turret operation skill. 2% Bonus to weapon turrets' rate of fire per skill level.",
'group_id': 255,
'id': 3300,
'name': 'Gunnery',
'published': True,
'rank': 1,
'required_skills': {},
},
3301: {
'attributes': {
'primary': 'perception',
'secondary': 'willpower',
},
'bonuses': {
'damageMultiplierBonus': {
'type': 'damageMultiplierBonus',
'value': 5.0,
},
},
'description': 'Operation of small hybrid turrets. 5% Bonus to small hybrid turret damage per level.',
'group_id': 255,
'id': 3301,
'name': 'Small Hybrid Turret',
'published': True,
'rank': 1,
'required_skills': {
3300: {
'id': 3300,
'level': 1,
'name': 'Gunnery',
},
},
},
},
},
266: {
'id': 266,
'name': 'Corporation Management',
'skills': {
11584 : {
'id': 11584,
'group_id': 266,
'name': 'Anchoring',
'description': 'Skill at Anchoring Deployables. Can not be trained on Trial Accounts.',
'published': True,
'rank': 3,
'attributes': {
'primary': 'memory',
'secondary': 'charisma',
},
'required_skills': {},
'bonuses': {
'canNotBeTrainedOnTrial': {
'type': 'canNotBeTrainedOnTrial',
'value': 1.0,
}
}
},
3369 : {
'id': 3369,
'group_id': 266,
'name': 'CFO Training',
'description': 'Skill at managing corp finances. 5% discount on all fees at non-hostile NPC station if acting as CFO of a corp. ',
'published': False,
'rank': 3,
'attributes': {
'primary': 'memory',
'secondary': 'charisma',
},
'required_skills': {
3363 : { 'id' : 3363, 'level' : 2, 'name' : None },
3444 : { 'id' : 3444, 'level' : 3, 'name' : None },
},
'bonuses': {}
}
}
}
})
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/SkillTree', params={})
])
def test_reference_types(self):
self.api.get.return_value = self.make_api_result("eve/reference_types.xml")
result, current, expires = self.eve.reference_types()
self.assertEqual(result, {
0: 'Undefined',
1: 'Player Trading',
2: 'Market Transaction',
3: 'GM Cash Transfer',
4: 'ATM Withdraw',
5: 'ATM Deposit'
})
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
self.assertEqual(self.api.mock_calls, [
mock.call.get('eve/RefTypes', params={})
])
if __name__ == "__main__":
unittest.main()
| Morloth1274/EVE-Online-POCO-manager | tests/test_eve.py | Python | mit | 20,398 |
try:
from calais import Calais
except ImportError: # pragma: no cover
Calais = None # NOQA
if Calais is not None:
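    # Run OpenCalais entity extraction over the content and return the names of all
    # entities typed as Person.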
def process_calais(content, key):
calais = Calais(key)
response = calais.analyze(content)
people = [entity["name"] for entity in getattr(response, "entities", []) if entity["_type"] == "Person"]
return {"people": people}
| prologic/spyda | spyda/processors.py | Python | mit | 385 |
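# Dictionary-based dispatch ("switch" statement) example: pick a handler for the current
# weekday and print its message.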
import datetime
day = datetime.datetime.now().weekday()
def get_sunday():
return "Today it's Sunday"
def get_monday():
return "Today it's Monday"
def get_tuesday():
return "Today it's Tuesday"
def get_wednesday():
return "Today it's Wednesday"
def get_thursday():
return "Today it's Thursday"
def get_friday():
return "Today it's Friday"
def get_saturday():
return "Today it's Saturday"
def get_default():
return "Looking forward to the Weekend"
# datetime.weekday() counts Monday as 0 and Sunday as 6
switcher = {
    0:get_monday,
    1:get_tuesday,
    2:get_wednesday,
    3:get_thursday,
    4:get_friday,
    5:get_saturday,
    6:get_sunday
}
dayName = switcher.get(day,get_default)()
print(dayName) | vickyi/scoala | pachong/pythonClass/switch.py | Python | mit | 685 |
from django.conf.urls import url
from timeline import views
urlpatterns = [
url(r'^$', views.timelines, name='timelines'),
] | fredwulei/fredsneverland | fredsneverland/timeline/urls.py | Python | mit | 130 |
from contextlib import contextmanager
from functools import wraps
from werkzeug.local import LocalProxy, LocalStack
_additional_ctx_stack = LocalStack()
__all__ = ("current_additions", "Additional", "AdditionalManager")
@LocalProxy
def current_additions():
"""
Proxy to the currently added requirements
"""
rv = _additional_ctx_stack.top
if rv is None:
return None
return rv[1]
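# Guard for the binary operators below: return NotImplemented when the other operand is not
# an Additional instance.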
def _isinstance(f):
@wraps(f)
def check(self, other):
if not isinstance(other, Additional):
return NotImplemented
return f(self, other)
return check
class Additional(object):
"""
Container object that allows to run extra requirements on checks. These
additional requirements will be run at most once per check and will
    occur in no guaranteed order.
    Requirements can be added by passing them into the constructor or
    by calling the ``add`` method. They can be removed from this object
    by calling the ``remove`` method. To check if a requirement has been added
    to the current context, you may call ``is_added`` or use ``in``::
        some_req in additional
        additional.is_added(some_req)
Additional objects can be iterated and length checked::
additional = Additional(some_req)
assert len(additional) == 1
assert list(additional) == [some_req]
Additional objects may be combined and compared to each other with the following
operators:
``+`` creates a new additional object by combining two others, the new
additional supplies all requirements that both parents did.
``+=`` similar to ``+`` except it is an inplace update.
``-`` creates a new additional instance by removing any requirements from
the first instance that are contained in the second instance.
``-=`` similar to ``-`` except it is an inplace update.
``==`` compares two additional instances and returns true if both have
the same added requirements.
    ``!=`` similar to ``==`` except returns true if both have different
requirements contained in them.
"""
def __init__(self, *requirements):
self._requirements = set(requirements)
def add(self, requirement, *requirements):
self._requirements.update((requirement,) + requirements)
def remove(self, requirement, *requirements):
self._requirements.difference_update((requirement,) + requirements)
@_isinstance
def __add__(self, other):
requirements = self._requirements | other._requirements
return Additional(*requirements)
@_isinstance
def __iadd__(self, other):
if len(other._requirements) > 0:
            # set.add() accepts only a single element; merge the other requirements with update()
            self._requirements.update(other._requirements)
return self
@_isinstance
def __sub__(self, other):
requirements = self._requirements - other._requirements
return Additional(*requirements)
@_isinstance
def __isub__(self, other):
if len(other._requirements) > 0:
self.remove(*other._requirements)
return self
@_isinstance
def __eq__(self, other):
return self._requirements == other._requirements
@_isinstance
def __ne__(self, other):
return not self == other
def __iter__(self):
return iter(self._requirements)
def is_added(self, requirement):
return requirement in self._requirements
def __contains__(self, requirement):
return self.is_added(requirement)
def __len__(self):
return len(self._requirements)
def __bool__(self):
return len(self) != 0
__nonzero__ = __bool__
def __repr__(self):
return "Additional({!r})".format(self._requirements)
class AdditionalManager(object):
"""
Used to manage the process of adding and removing additional requirements
to be run. This class shouldn't be used directly, instead use
``allows.additional`` to access these controls.
"""
def push(self, additional, use_parent=False):
"""
Binds an additional to the current context, optionally use the
current additionals in conjunction with this additional
If ``use_parent`` is true, a new additional is created from the
parent and child additionals rather than manipulating either
directly.
"""
current = self.current
if use_parent and current:
additional = current + additional
_additional_ctx_stack.push((self, additional))
def pop(self):
"""
Pops the latest additional context.
If the additional context was pushed by a different additional manager,
a ``RuntimeError`` is raised.
"""
rv = _additional_ctx_stack.pop()
if rv is None or rv[0] is not self:
raise RuntimeError(
"popped wrong additional context ({} instead of {})".format(rv, self)
)
@property
def current(self):
"""
Returns the current additional context if set otherwise None
"""
try:
return _additional_ctx_stack.top[1]
except TypeError:
return None
@contextmanager
def additional(self, additional, use_parent=False):
"""
Allows temporarily pushing an additional context, yields the new context
into the following block.
"""
self.push(additional, use_parent)
yield self.current
self.pop()
| justanr/flask-allows | src/flask_allows/additional.py | Python | mit | 5,469 |
import unittest
"""
Given a binary tree, we need to find the maximum value we can get by subtracting
the value of node B from the value of node A, where A and B are two nodes of the
binary tree and A is an ancestor of B. Expected time complexity is O(n).
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def add_left_child(self, data):
self.left = Node(data)
return self.left
def add_right_child(self, data):
self.right = Node(data)
return self.right
class BinaryTree:
def __init__(self, root):
self.root = root
self.max_difference = -float('inf')
def max_difference_node_and_ancestor(self):
self.max_min_in_subtree(self.root)
return self.max_difference
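    # Post-order traversal: each call returns the (min, max) values in a subtree and updates
    # max_difference using node.data minus the min/max of each child subtree, giving O(n) overall.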
def max_min_in_subtree(self, node):
if node is None:
return float('inf'), -float('inf')
left_min, left_max = self.max_min_in_subtree(node.left)
right_min, right_max = self.max_min_in_subtree(node.right)
if node.left:
self.max_difference = max(self.max_difference, node.data - left_min, node.data - left_max)
if node.right:
self.max_difference = max(self.max_difference, node.data - right_min, node.data - right_max)
return min(node.data, left_min, right_min), max(node.data, left_max, right_max)
class TestBinaryTree(unittest.TestCase):
def test_max_difference(self):
root = Node(8)
root.left = Node(3)
root.left.left = Node(1)
root.left.right = Node(6)
root.left.right.left = Node(4)
root.left.right.right = Node(7)
root.right = Node(10)
root.right.right = Node(14)
root.right.right.left = Node(13)
binary_tree = BinaryTree(root)
self.assertEqual(binary_tree.max_difference_node_and_ancestor(), 7)
| prathamtandon/g4gproblems | Graphs/max_difference.py | Python | mit | 1,915 |
# https://www.w3resource.com/python-exercises/
# 3. Write a Python program to display the current date and time.
# Sample Output :
# Current date and time :
# 2014-07-05 14:34:14
import datetime
now = datetime.datetime.now()
print now.strftime("%Y-%m-%d %H:%M:%S")
| dadavidson/Python_Lab | Python-w3resource/Python_Basic/ex03.py | Python | mit | 269 |
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2018> Gabriel Falcão <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import requests
from unittest import skip
from sure import expect
from httpretty import HTTPretty
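# Passthrough tests: while HTTPretty is enabled, requests to unregistered URLs should still
# reach the real network; registered URLs keep returning the mocked body. Skipped by default.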
@skip
def test_http_passthrough():
url = 'http://httpbin.org/status/200'
response1 = requests.get(url)
response1 = requests.get(url, stream=True)
HTTPretty.enable()
HTTPretty.register_uri(HTTPretty.GET, 'http://google.com/', body="Not Google")
response2 = requests.get('http://google.com/')
expect(response2.content).to.equal(b'Not Google')
response3 = requests.get(url, stream=True)
(response3.content).should.equal(response1.content)
HTTPretty.disable()
response4 = requests.get(url, stream=True)
(response4.content).should.equal(response1.content)
@skip
def test_https_passthrough():
url = 'https://raw.githubusercontent.com/gabrielfalcao/HTTPretty/master/COPYING'
response1 = requests.get(url, stream=True)
HTTPretty.enable()
HTTPretty.register_uri(HTTPretty.GET, 'https://google.com/', body="Not Google")
response2 = requests.get('https://google.com/')
expect(response2.content).to.equal(b'Not Google')
response3 = requests.get(url, stream=True)
(response3.content).should.equal(response1.content)
HTTPretty.disable()
response4 = requests.get(url, stream=True)
(response4.content).should.equal(response1.content)
| andresriancho/HTTPretty | tests/functional/test_passthrough.py | Python | mit | 2,551 |
#!/usr/bin/env python3
from SPARQLWrapper import SPARQLWrapper, JSON
import requests
import re
import os
import os.path
import time
import sys
FINTO_ENDPOINT='http://api.dev.finto.fi/sparql'
FINNA_API_SEARCH='https://api.finna.fi/v1/search'
lang = sys.argv[1]
# map ISO 639-1 language codes into the ISO 639-2 codes that Finna uses
LANGMAP = {
'fi': 'fin',
'sv': 'swe',
'en': 'eng'
}
def row_to_concept(row):
concept = {'uri': row['c']['value'],
'pref': row['pref']['value'],
'ysapref': row['ysapref']['value'],
'allarspref': row['allarspref']['value']}
if 'alts' in row:
concept['alts'] = row['alts']['value']
return concept
def get_concepts(lang):
sparql = SPARQLWrapper(FINTO_ENDPOINT)
sparql.setQuery("""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX ysometa: <http://www.yso.fi/onto/yso-meta/>
SELECT ?c ?pref (GROUP_CONCAT(?alt) AS ?alts) ?ysapref ?allarspref
WHERE {
GRAPH <http://www.yso.fi/onto/yso/> {
?c a skos:Concept .
?c skos:prefLabel ?pref .
FILTER(LANG(?pref)='%s')
OPTIONAL {
?c skos:altLabel ?alt .
FILTER(LANG(?alt)='%s')
}
FILTER NOT EXISTS { ?c owl:deprecated true }
FILTER NOT EXISTS { ?c a ysometa:Hierarchy }
}
GRAPH <http://www.yso.fi/onto/ysa/> {
?ysac skos:closeMatch|skos:exactMatch ?c .
?ysac skos:prefLabel ?ysapref .
}
GRAPH <http://www.yso.fi/onto/allars/> {
?allarsc skos:closeMatch|skos:exactMatch ?c .
?allarsc skos:prefLabel ?allarspref .
}
}
GROUP BY ?c ?pref ?ysapref ?allarspref
#LIMIT 500
""" % (lang, lang))
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
return [row_to_concept(row) for row in results['results']['bindings']]
concepts = get_concepts(lang)
def search_finna(params):
r = requests.get(FINNA_API_SEARCH, params=params, headers={'User-agent': 'annif 0.1'})
return r.json()
def records_to_texts(records):
texts = []
for rec in records:
if 'title' in rec:
texts.append(rec['title'])
if 'summary' in rec:
for summary in rec['summary']:
texts.append(summary)
return texts
def generate_text(concept, lang):
# start with pref- and altlabels
labels = [concept['pref']]
if lang == 'fi':
# we can use the YSA label too
labels.append(concept['ysapref'])
if lang == 'sv':
# we can use the Allars label too
labels.append(concept['allarspref'])
if 'alts' in concept:
labels.append(concept['alts'])
labels = ' '.join(labels)
# look for more text in Finna API
texts = []
fields = ['title','summary']
finnaterms = (concept['ysapref'], concept['allarspref'])
finnalang = LANGMAP[lang]
# Search type 1: exact matches using topic facet
params = {'lookfor': 'topic_facet:"%s" OR topic_facet:"%s"' % finnaterms, 'filter[]': 'language:%s' % finnalang, 'lng':lang, 'limit':100, 'field[]':fields}
response = search_finna(params)
if 'records' in response:
texts += records_to_texts(response['records'])
# Search type 2: exact matches using Subject search
params['lookfor'] = '"%s" OR "%s"' % finnaterms
params['type'] = 'Subject'
response = search_finna(params)
if 'records' in response:
texts += records_to_texts(response['records'])
# Search type 3: fuzzy matches using Subject search
params['lookfor'] = '(%s) OR (%s)' % finnaterms
response = search_finna(params)
if 'records' in response:
texts += records_to_texts(response['records'])
return "\n".join([labels] + list(set(texts)))
for concept in concepts:
localname = concept['uri'].split('/')[-1]
outfile = 'corpus/%s-%s.raw' % (localname, lang)
if os.path.exists(outfile):
continue
text = None
tries = 0
while tries < 10:
try:
text = generate_text(concept, lang)
break
except:
# failure, try again until tries exhausted
tries += 1
print("Error generating text for concept %s, trying again (attempt %d)" % (concept['uri'], tries))
time.sleep(tries) # wait progressively longer between attempts
if text is None:
print("Failed looking up concept %s, exiting" % concept['uri'])
sys.exit(1)
print(localname, lang, concept['pref'], concept['ysapref'], concept['allarspref'], len(text.split()))
f = open(outfile, 'w')
print (concept['uri'], concept['pref'], file=f)
print (text, file=f)
f.close()
| osma/annif | create_corpus_yso_finna.py | Python | cc0-1.0 | 4,644 |
from django.db import models
# Create your models here.
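# Profil stores an engineer's (Perekayasa) civil-service data: NIP (employee id number),
# education, unit and office names, rank/grade and functional level, linked to a Django user.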
class Profil(models.Model):
awal = ''
PILIHAN_JENJANG = (
(awal, '----'),
('Pertama', 'Perekayasa Pertama'),
('Muda', 'Perekayasa Muda'),
('Madya', 'Perekayasa Madya'),
('Utama', 'Perekayasa Utama'),
)
nip = models.CharField(max_length=50, verbose_name='NIP')
pendidikan = models.CharField(max_length=150, verbose_name='Pendidikan')
instansi = models.TextField(verbose_name='Nama Lengkap Unit')
instansi_kode = models.CharField(max_length=20, verbose_name='Singkatan Unit')
satuan = models.TextField(verbose_name='Nama Lengkap Satuan kerja', blank=True)
kantor = models.TextField(verbose_name='Nama Lengkap Kantor', blank=True)
pangkat = models.TextField(verbose_name='Pangkat/Golongan Ruang/TMT')
jabatan = models.CharField(max_length=150, verbose_name='Jabatan')
jenjang = models.CharField(max_length=10, verbose_name='Jenjang Perekayasa', choices=PILIHAN_JENJANG, default=awal)
user = models.ForeignKey('auth.User', verbose_name='Personil', on_delete=models.CASCADE)
class Meta:
verbose_name_plural = 'Profil'
def __str__(self):
return self.nip
| akbarpn136/perek-dj | utiliti/models.py | Python | cc0-1.0 | 1,225 |
#!/usr/bin/python3
#
# Checks that the upstream DNS has been set correctly and that
# TLS certificates have been signed, etc., and if not tells the user
# what to do next.
import sys, os, os.path, re, subprocess, datetime, multiprocessing.pool
import dns.reversename, dns.resolver
import dateutil.parser, dateutil.tz
import idna
import psutil
from dns_update import get_dns_zones, build_tlsa_record, get_custom_dns_config, get_secondary_dns, get_custom_dns_record
from web_update import get_web_domains, get_domains_with_a_records
from ssl_certificates import get_ssl_certificates, get_domain_ssl_files, check_certificate
from mailconfig import get_mail_domains, get_mail_aliases
from utils import shell, sort_domains, load_env_vars_from_file, load_settings
def run_checks(rounded_values, env, output, pool):
# run systems checks
output.add_heading("System")
# check that services are running
if not run_services_checks(env, output, pool):
# If critical services are not running, stop. If bind9 isn't running,
# all later DNS checks will timeout and that will take forever to
# go through, and if running over the web will cause a fastcgi timeout.
return
# clear bind9's DNS cache so our DNS checks are up to date
# (ignore errors; if bind9/rndc isn't running we'd already report
# that in run_services checks.)
shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
run_system_checks(rounded_values, env, output)
# perform other checks asynchronously
run_network_checks(env, output)
run_domain_checks(rounded_values, env, output, pool)
def get_ssh_port():
# Returns ssh port
try:
output = shell('check_output', ['sshd', '-T'])
except FileNotFoundError:
# sshd is not installed. That's ok.
return None
returnNext = False
for e in output.split():
if returnNext:
return int(e)
if e == "port":
returnNext = True
# Did not find port!
return None
def run_services_checks(env, output, pool):
# Check that system services are running.
services = [
{ "name": "Local DNS (bind9)", "port": 53, "public": False, },
#{ "name": "NSD Control", "port": 8952, "public": False, },
{ "name": "Local DNS Control (bind9/rndc)", "port": 953, "public": False, },
{ "name": "Dovecot LMTP LDA", "port": 10026, "public": False, },
{ "name": "Postgrey", "port": 10023, "public": False, },
{ "name": "Spamassassin", "port": 10025, "public": False, },
{ "name": "OpenDKIM", "port": 8891, "public": False, },
{ "name": "OpenDMARC", "port": 8893, "public": False, },
{ "name": "Memcached", "port": 11211, "public": False, },
{ "name": "Mail-in-a-Box Management Daemon", "port": 10222, "public": False, },
{ "name": "SSH Login (ssh)", "port": get_ssh_port(), "public": True, },
{ "name": "Public DNS (nsd4)", "port": 53, "public": True, },
{ "name": "Incoming Mail (SMTP/postfix)", "port": 25, "public": True, },
{ "name": "Outgoing Mail (SMTP 587/postfix)", "port": 587, "public": True, },
#{ "name": "Postfix/master", "port": 10587, "public": True, },
{ "name": "IMAPS (dovecot)", "port": 993, "public": True, },
{ "name": "Mail Filters (Sieve/dovecot)", "port": 4190, "public": True, },
{ "name": "HTTP Web (nginx)", "port": 80, "public": True, },
{ "name": "HTTPS Web (nginx)", "port": 443, "public": True, },
]
all_running = True
fatal = False
ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(services)), chunksize=1)
for i, running, fatal2, output2 in sorted(ret):
if output2 is None: continue # skip check (e.g. no port was set, e.g. no sshd)
all_running = all_running and running
fatal = fatal or fatal2
output2.playback(output)
if all_running:
output.print_ok("All system services are running.")
return not fatal
def check_service(i, service, env):
if not service["port"]:
# Skip check (no port, e.g. no sshd).
return (i, None, None, None)
output = BufferedOutput()
running = False
fatal = False
# Helper function to make a connection to the service, since we try
# up to three ways (localhost, IPv4 address, IPv6 address).
def try_connect(ip):
# Connect to the given IP address on the service's port with a one-second timeout.
import socket
s = socket.socket(socket.AF_INET if ":" not in ip else socket.AF_INET6, socket.SOCK_STREAM)
s.settimeout(1)
try:
s.connect((ip, service["port"]))
return True
except OSError as e:
# timed out or some other odd error
return False
finally:
s.close()
if service["public"]:
# Service should be publicly accessible.
if try_connect(env["PUBLIC_IP"]):
# IPv4 ok.
if not env.get("PUBLIC_IPV6") or service.get("ipv6") is False or try_connect(env["PUBLIC_IPV6"]):
# No IPv6, or service isn't meant to run on IPv6, or IPv6 is good.
running = True
# IPv4 ok but IPv6 failed. Try the PRIVATE_IPV6 address to see if the service is bound to the interface.
elif service["port"] != 53 and try_connect(env["PRIVATE_IPV6"]):
output.print_error("%s is running (and available over IPv4 and the local IPv6 address), but it is not publicly accessible at %s:%d." % (service['name'], env['PUBLIC_IP'], service['port']))
else:
output.print_error("%s is running and available over IPv4 but is not accessible over IPv6 at %s port %d." % (service['name'], env['PUBLIC_IPV6'], service['port']))
# IPv4 failed. Try the private IP to see if the service is running but not accessible (except DNS because a different service runs on the private IP).
elif service["port"] != 53 and try_connect("127.0.0.1"):
output.print_error("%s is running but is not publicly accessible at %s:%d." % (service['name'], env['PUBLIC_IP'], service['port']))
else:
output.print_error("%s is not running (port %d)." % (service['name'], service['port']))
# Why is nginx not running?
if not running and service["port"] in (80, 443):
output.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip())
else:
# Service should be running locally.
if try_connect("127.0.0.1"):
running = True
else:
output.print_error("%s is not running (port %d)." % (service['name'], service['port']))
# Flag if local DNS is not running.
if not running and service["port"] == 53 and service["public"] == False:
fatal = True
return (i, running, fatal, output)
def run_system_checks(rounded_values, env, output):
check_ssh_password(env, output)
check_software_updates(env, output)
check_miab_version(env, output)
check_system_aliases(env, output)
check_free_disk_space(rounded_values, env, output)
check_free_memory(rounded_values, env, output)
def check_ssh_password(env, output):
# Check that SSH login with password is disabled. The openssh-server
# package may not be installed so check that before trying to access
# the configuration file.
if not os.path.exists("/etc/ssh/sshd_config"):
return
sshd = open("/etc/ssh/sshd_config").read()
if re.search("\nPasswordAuthentication\s+yes", sshd) \
or not re.search("\nPasswordAuthentication\s+no", sshd):
output.print_error("""The SSH server on this machine permits password-based login. A more secure
way to log in is using a public key. Add your SSH public key to $HOME/.ssh/authorized_keys, check
that you can log in without a password, set the option 'PasswordAuthentication no' in
/etc/ssh/sshd_config, and then restart the openssh via 'sudo service ssh restart'.""")
else:
output.print_ok("SSH disallows password-based login.")
def is_reboot_needed_due_to_package_installation():
return os.path.exists("/var/run/reboot-required")
def check_software_updates(env, output):
# Check for any software package updates.
pkgs = list_apt_updates(apt_update=False)
if is_reboot_needed_due_to_package_installation():
output.print_error("System updates have been installed and a reboot of the machine is required.")
elif len(pkgs) == 0:
output.print_ok("System software is up to date.")
else:
output.print_error("There are %d software packages that can be updated." % len(pkgs))
for p in pkgs:
output.print_line("%s (%s)" % (p["package"], p["version"]))
def check_system_aliases(env, output):
# Check that the administrator alias exists since that's where all
# admin email is automatically directed.
check_alias_exists("System administrator address", "administrator@" + env['PRIMARY_HOSTNAME'], env, output)
def check_free_disk_space(rounded_values, env, output):
# Check free disk space.
st = os.statvfs(env['STORAGE_ROOT'])
bytes_total = st.f_blocks * st.f_frsize
bytes_free = st.f_bavail * st.f_frsize
if not rounded_values:
disk_msg = "The disk has %s GB space remaining." % str(round(bytes_free/1024.0/1024.0/1024.0*10.0)/10)
else:
disk_msg = "The disk has less than %s%% space left." % str(round(bytes_free/bytes_total/10 + .5)*10)
if bytes_free > .3 * bytes_total:
output.print_ok(disk_msg)
elif bytes_free > .15 * bytes_total:
output.print_warning(disk_msg)
else:
output.print_error(disk_msg)
def check_free_memory(rounded_values, env, output):
# Check free memory.
percent_free = 100 - psutil.virtual_memory().percent
memory_msg = "System memory is %s%% free." % str(round(percent_free))
if percent_free >= 20:
if rounded_values: memory_msg = "System free memory is at least 20%."
output.print_ok(memory_msg)
elif percent_free >= 10:
if rounded_values: memory_msg = "System free memory is below 20%."
output.print_warning(memory_msg)
else:
if rounded_values: memory_msg = "System free memory is below 10%."
output.print_error(memory_msg)
def run_network_checks(env, output):
# Also see setup/network-checks.sh.
output.add_heading("Network")
# Stop if we cannot make an outbound connection on port 25. Many residential
# networks block outbound port 25 to prevent their network from sending spam.
# See if we can reach one of Google's MTAs with a 5-second timeout.
code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
if ret == 0:
output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
else:
output.print_error("""Outbound mail (SMTP port 25) seems to be blocked by your network. You
will not be able to send any mail. Many residential networks block port 25 to prevent hijacked
machines from being able to send spam. A quick connection test to Google's mail server on port 25
failed.""")
# Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.
# The user might have ended up on an IP address that was previously in use
# by a spammer, or the user may be deploying on a residential network. We
# will not be able to reliably send mail in these cases.
rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.')))
zen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None)
if zen is None:
output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
else:
output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
def run_domain_checks(rounded_time, env, output, pool):
# Get the list of domains we handle mail for.
mail_domains = get_mail_domains(env)
# Get the list of domains we serve DNS zones for (i.e. does not include subdomains).
dns_zonefiles = dict(get_dns_zones(env))
dns_domains = set(dns_zonefiles)
# Get the list of domains we serve HTTPS for.
web_domains = set(get_web_domains(env))
domains_to_check = mail_domains | dns_domains | web_domains
# Get the list of domains that we don't serve web for because of a custom CNAME/A record.
domains_with_a_records = get_domains_with_a_records(env)
# Serial version:
#for domain in sort_domains(domains_to_check, env):
# run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
# Parallelize the checks across a worker pool.
args = ((domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records)
for domain in domains_to_check)
ret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1)
ret = dict(ret) # (domain, output) => { domain: output }
for domain in sort_domains(ret, env):
ret[domain].playback(output)
def run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records):
output = BufferedOutput()
# we'd move this up, but this returns non-pickleable values
ssl_certificates = get_ssl_certificates(env)
# The domain is IDNA-encoded in the database, but for display use Unicode.
try:
domain_display = idna.decode(domain.encode('ascii'))
output.add_heading(domain_display)
except (ValueError, UnicodeError, idna.IDNAError) as e:
# Looks like we have some invalid data in our database.
output.add_heading(domain)
output.print_error("Domain name is invalid: " + str(e))
if domain == env["PRIMARY_HOSTNAME"]:
check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles)
if domain in dns_domains:
check_dns_zone(domain, env, output, dns_zonefiles)
if domain in mail_domains:
check_mail_domain(domain, env, output)
if domain in web_domains:
check_web_domain(domain, rounded_time, ssl_certificates, env, output)
if domain in dns_domains:
check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records)
return (domain, output)
def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
# If a DS record is set on the zone containing this domain, check DNSSEC now.
has_dnssec = False
for zone in dns_domains:
if zone == domain or domain.endswith("." + zone):
if query_dns(zone, "DS", nxdomain=None) is not None:
has_dnssec = True
check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
ip = query_dns(domain, "A")
ns_ips = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A")
my_ips = env['PUBLIC_IP'] + ((" / "+env['PUBLIC_IPV6']) if env.get("PUBLIC_IPV6") else "")
# Check that the ns1/ns2 hostnames resolve to A records. This information probably
# comes from the TLD since the information is set at the registrar as glue records.
# We're probably not actually checking that here but instead checking that we, as
# the nameserver, are reporting the right info --- but if the glue is incorrect this
# will probably fail.
if ns_ips == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:
output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.%s ↦ %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
elif ip == env['PUBLIC_IP']:
# The NS records are not what we expect, but the domain resolves correctly, so
# the user may have set up external DNS. List this discrepancy as a warning.
output.print_warning("""Nameserver glue records (ns1.%s and ns2.%s) should be configured at your domain name
registrar as having the IP address of this box (%s). They currently report addresses of %s. If you have set up External DNS, this may be OK."""
% (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
else:
output.print_error("""Nameserver glue records are incorrect. The ns1.%s and ns2.%s nameservers must be configured at your domain name
registrar as having the IP address %s. They currently report addresses of %s. It may take several hours for
public DNS to update after a change."""
% (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
# Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP[V6] in public DNS.
ipv6 = query_dns(domain, "AAAA") if env.get("PUBLIC_IPV6") else None
if ip == env['PUBLIC_IP'] and ipv6 in (None, env['PUBLIC_IPV6']):
output.print_ok("Domain resolves to box's IP address. [%s ↦ %s]" % (env['PRIMARY_HOSTNAME'], my_ips))
else:
output.print_error("""This domain must resolve to your box's IP address (%s) in public DNS but it currently resolves
to %s. It may take several hours for public DNS to update after a change. This problem may result from other
issues listed above."""
% (my_ips, ip + ((" / " + ipv6) if ipv6 is not None else "")))
# Check reverse DNS matches the PRIMARY_HOSTNAME. Note that it might not be
# a DNS zone if it is a subdomain of another domain we have a zone for.
existing_rdns_v4 = query_dns(dns.reversename.from_address(env['PUBLIC_IP']), "PTR")
existing_rdns_v6 = query_dns(dns.reversename.from_address(env['PUBLIC_IPV6']), "PTR") if env.get("PUBLIC_IPV6") else None
if existing_rdns_v4 == domain and existing_rdns_v6 in (None, domain):
output.print_ok("Reverse DNS is set correctly at ISP. [%s ↦ %s]" % (my_ips, env['PRIMARY_HOSTNAME']))
elif existing_rdns_v4 == existing_rdns_v6 or existing_rdns_v6 is None:
output.print_error("""Your box's reverse DNS is currently %s, but it should be %s. Your ISP or cloud provider will have instructions
on setting up reverse DNS for your box.""" % (existing_rdns_v4, domain) )
else:
output.print_error("""Your box's reverse DNS is currently %s (IPv4) and %s (IPv6), but it should be %s. Your ISP or cloud provider will have instructions
on setting up reverse DNS for your box.""" % (existing_rdns_v4, existing_rdns_v6, domain) )
# Check the TLSA record.
tlsa_qname = "_25._tcp." + domain
tlsa25 = query_dns(tlsa_qname, "TLSA", nxdomain=None)
tlsa25_expected = build_tlsa_record(env)
if tlsa25 == tlsa25_expected:
output.print_ok("""The DANE TLSA record for incoming mail is correct (%s).""" % tlsa_qname,)
elif tlsa25 is None:
if has_dnssec:
# Omit a warning about it not being set if DNSSEC isn't enabled,
# since TLSA shouldn't be used without DNSSEC.
output.print_warning("""The DANE TLSA record for incoming mail is not set. This is optional.""")
else:
output.print_error("""The DANE TLSA record for incoming mail (%s) is not correct. It is '%s' but it should be '%s'.
It may take several hours for public DNS to update after a change."""
% (tlsa_qname, tlsa25, tlsa25_expected))
# Check that the hostmaster@ email address exists.
check_alias_exists("Hostmaster contact address", "hostmaster@" + domain, env, output)
def check_alias_exists(alias_name, alias, env, output):
mail_aliases = dict([(address, receivers) for address, receivers, *_ in get_mail_aliases(env)])
if alias in mail_aliases:
if mail_aliases[alias]:
output.print_ok("%s exists as a mail alias. [%s ↦ %s]" % (alias_name, alias, mail_aliases[alias]))
else:
output.print_error("""You must set the destination of the mail alias for %s to direct email to you or another administrator.""" % alias)
else:
output.print_error("""You must add a mail alias for %s which directs email to you or another administrator.""" % alias)
def check_dns_zone(domain, env, output, dns_zonefiles):
# If a DS record is set at the registrar, check DNSSEC first because it will affect the NS query.
# If it is not set, we suggest it last.
if query_dns(domain, "DS", nxdomain=None) is not None:
check_dnssec(domain, env, output, dns_zonefiles)
# We provide a DNS zone for the domain. It should have NS records set up
# at the domain name's registrar pointing to this box. The secondary DNS
# server may be customized.
# (I'm not sure whether this necessarily tests the TLD's configuration,
# as it should, or if one successful NS line at the TLD will result in
# this query being answered by the box, which would mean the test is only
# half working.)
custom_dns_records = list(get_custom_dns_config(env)) # generator => list so we can reuse it
correct_ip = get_custom_dns_record(custom_dns_records, domain, "A") or env['PUBLIC_IP']
custom_secondary_ns = get_secondary_dns(custom_dns_records, mode="NS")
secondary_ns = custom_secondary_ns or ["ns2." + env['PRIMARY_HOSTNAME']]
existing_ns = query_dns(domain, "NS")
correct_ns = "; ".join(sorted(["ns1." + env['PRIMARY_HOSTNAME']] + secondary_ns))
ip = query_dns(domain, "A")
probably_external_dns = False
if existing_ns.lower() == correct_ns.lower():
output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns)
elif ip == correct_ip:
# The domain resolves correctly, so maybe the user is using External DNS.
output.print_warning("""The nameservers set on this domain at your domain name registrar should be %s. They are currently %s.
If you are using External DNS, this may be OK."""
% (correct_ns, existing_ns) )
probably_external_dns = True
else:
output.print_error("""The nameservers set on this domain are incorrect. They are currently %s. Use your domain name registrar's
control panel to set the nameservers to %s."""
% (existing_ns, correct_ns) )
# Check that each custom secondary nameserver resolves the IP address.
if custom_secondary_ns and not probably_external_dns:
for ns in custom_secondary_ns:
# We must first resolve the nameserver to an IP address so we can query it.
ns_ip = query_dns(ns, "A")
if not ns_ip:
output.print_error("Secondary nameserver %s is not valid (it doesn't resolve to an IP address)." % ns)
continue
# Now query it to see what it says about this domain.
ip = query_dns(domain, "A", at=ns_ip, nxdomain=None)
if ip == correct_ip:
output.print_ok("Secondary nameserver %s resolved the domain correctly." % ns)
elif ip is None:
output.print_error("Secondary nameserver %s is not configured to resolve this domain." % ns)
else:
output.print_error("Secondary nameserver %s is not configured correctly. (It resolved this domain as %s. It should be %s.)" % (ns, ip, correct_ip))
def check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records):
# Warn if a custom DNS record is preventing this or the automatic www redirect from
# being served.
if domain in domains_with_a_records:
output.print_warning("""Web has been disabled for this domain because you have set a custom DNS record.""")
if "www." + domain in domains_with_a_records:
output.print_warning("""A redirect from 'www.%s' has been disabled for this domain because you have set a custom DNS record on the www subdomain.""" % domain)
# Since DNSSEC is optional, if a DS record is NOT set at the registrar suggest it.
# (If it was set, we did the check earlier.)
if query_dns(domain, "DS", nxdomain=None) is None:
check_dnssec(domain, env, output, dns_zonefiles)
def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
# See if the domain has a DS record set at the registrar. The DS record may have
# several forms. We have to be prepared to check for any valid record. We've
# pre-generated all of the valid digests --- read them in.
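	# Each line of that file is a standard zone-file DS record; an illustrative
	# (hypothetical) example, to make the field parsing below easier to follow:
	#   example.com.<TAB>3600<TAB>IN<TAB>DS<TAB>31589 8 2 49fd46e6c4...
	# i.e. the fifth tab-separated field holds "keytag algorithm digest-type digest".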
ds_file = '/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds'
if not os.path.exists(ds_file): return # Domain is in our database but DNS has not yet been updated.
ds_correct = open(ds_file).read().strip().split("\n")
digests = { }
for rr_ds in ds_correct:
ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ")
digests[ds_digalg] = ds_digest
# Some registrars may want the public key so they can compute the digest. The DS
# record that we suggest using is for the KSK (and that's how the DS records were generated).
alg_name_map = { '7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256' }
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
	dnssec_pubkey = open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key')).read().split("\t")[3].split(" ")[3]
# Query public DNS for the DS record at the registrar.
ds = query_dns(domain, "DS", nxdomain=None)
ds_looks_valid = ds and len(ds.split(" ")) == 4
if ds_looks_valid: ds = ds.split(" ")
if ds_looks_valid and ds[0] == ds_keytag and ds[1] == ds_alg and ds[3] == digests.get(ds[2]):
if is_checking_primary: return
output.print_ok("DNSSEC 'DS' record is set correctly at registrar.")
else:
		if ds is None:
if is_checking_primary: return
output.print_warning("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC.
To set a DS record, you must follow the instructions provided by your domain name registrar and provide to them this information:""")
else:
if is_checking_primary:
output.print_error("""The DNSSEC 'DS' record for %s is incorrect. See further details below.""" % domain)
return
output.print_error("""This domain's DNSSEC DS record is incorrect. The chain of trust is broken between the public DNS system
and this machine's DNS server. It may take several hours for public DNS to update after a change. If you did not recently
make a change, you must resolve this immediately by following the instructions provided by your domain name registrar and
provide to them this information:""")
output.print_line("")
output.print_line("Key Tag: " + ds_keytag + ("" if not ds_looks_valid or ds[0] == ds_keytag else " (Got '%s')" % ds[0]))
output.print_line("Key Flags: KSK")
output.print_line(
("Algorithm: %s / %s" % (ds_alg, alg_name_map[ds_alg]))
+ ("" if not ds_looks_valid or ds[1] == ds_alg else " (Got '%s')" % ds[1]))
# see http://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
output.print_line("Digest Type: 2 / SHA-256")
# http://www.ietf.org/assignments/ds-rr-types/ds-rr-types.xml
output.print_line("Digest: " + digests['2'])
if ds_looks_valid and ds[3] != digests.get(ds[2]):
output.print_line("(Got digest type %s and digest %s which do not match.)" % (ds[2], ds[3]))
output.print_line("Public Key: ")
	output.print_line(dnssec_pubkey, monospace=True)
output.print_line("")
output.print_line("Bulk/Record Format:")
output.print_line("" + ds_correct[0])
output.print_line("")
def check_mail_domain(domain, env, output):
# Check the MX record.
recommended_mx = "10 " + env['PRIMARY_HOSTNAME']
mx = query_dns(domain, "MX", nxdomain=None)
if mx is None:
mxhost = None
else:
# query_dns returns a semicolon-delimited list
# of priority-host pairs.
mxhost = mx.split('; ')[0].split(' ')[1]
	if mxhost is None:
# A missing MX record is okay on the primary hostname because
# the primary hostname's A record (the MX fallback) is... itself,
# which is what we want the MX to be.
if domain == env['PRIMARY_HOSTNAME']:
output.print_ok("Domain's email is directed to this domain. [%s has no MX record, which is ok]" % (domain,))
# And a missing MX record is okay on other domains if the A record
# matches the A record of the PRIMARY_HOSTNAME. Actually this will
# probably confuse DANE TLSA, but we'll let that slide for now.
else:
domain_a = query_dns(domain, "A", nxdomain=None)
primary_a = query_dns(env['PRIMARY_HOSTNAME'], "A", nxdomain=None)
			if domain_a is not None and domain_a == primary_a:
output.print_ok("Domain's email is directed to this domain. [%s has no MX record but its A record is OK]" % (domain,))
else:
output.print_error("""This domain's DNS MX record is not set. It should be '%s'. Mail will not
be delivered to this box. It may take several hours for public DNS to update after a
change. This problem may result from other issues listed here.""" % (recommended_mx,))
elif mxhost == env['PRIMARY_HOSTNAME']:
good_news = "Domain's email is directed to this domain. [%s ↦ %s]" % (domain, mx)
if mx != recommended_mx:
good_news += " This configuration is non-standard. The recommended configuration is '%s'." % (recommended_mx,)
output.print_ok(good_news)
else:
output.print_error("""This domain's DNS MX record is incorrect. It is currently set to '%s' but should be '%s'. Mail will not
be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from
other issues listed here.""" % (mx, recommended_mx))
# Check that the postmaster@ email address exists. Not required if the domain has a
# catch-all address or domain alias.
if "@" + domain not in [address for address, *_ in get_mail_aliases(env)]:
check_alias_exists("Postmaster contact address", "postmaster@" + domain, env, output)
# Stop if the domain is listed in the Spamhaus Domain Block List.
# The user might have chosen a domain that was previously in use by a spammer
# and will not be able to reliably send mail.
dbl = query_dns(domain+'.dbl.spamhaus.org', "A", nxdomain=None)
if dbl is None:
output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
else:
output.print_error("""This domain is listed in the Spamhaus Domain Block List (code %s),
which may prevent recipients from receiving your mail.
See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/%s.""" % (dbl, domain))
def check_web_domain(domain, rounded_time, ssl_certificates, env, output):
# See if the domain's A record resolves to our PUBLIC_IP. This is already checked
# for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and
# other domains, it is required to access its website.
if domain != env['PRIMARY_HOSTNAME']:
ok_values = []
for (rtype, expected) in (("A", env['PUBLIC_IP']), ("AAAA", env.get('PUBLIC_IPV6'))):
if not expected: continue # IPv6 is not configured
value = query_dns(domain, rtype)
if value == expected:
ok_values.append(value)
else:
output.print_error("""This domain should resolve to your box's IP address (%s %s) if you would like the box to serve
webmail or a website on this domain. The domain currently resolves to %s in public DNS. It may take several hours for
public DNS to update after a change. This problem may result from other issues listed here.""" % (rtype, expected, value))
return
# If both A and AAAA are correct...
output.print_ok("Domain resolves to this box's IP address. [%s ↦ %s]" % (domain, '; '.join(ok_values)))
# We need a TLS certificate for PRIMARY_HOSTNAME because that's where the
# user will log in with IMAP or webmail. Any other domain we serve a
# website for also needs a signed certificate.
check_ssl_cert(domain, rounded_time, ssl_certificates, env, output)
def query_dns(qname, rtype, nxdomain='[Not Set]', at=None):
# Make the qname absolute by appending a period. Without this, dns.resolver.query
# will fall back a failed lookup to a second query with this machine's hostname
# appended. This has been causing some false-positive Spamhaus reports. The
# reverse DNS lookup will pass a dns.name.Name instance which is already
# absolute so we should not modify that.
if isinstance(qname, str):
qname += "."
# Use the default nameservers (as defined by the system, which is our locally
# running bind server), or if the 'at' argument is specified, use that host
# as the nameserver.
resolver = dns.resolver.get_default_resolver()
if at:
resolver = dns.resolver.Resolver()
resolver.nameservers = [at]
# Set a timeout so that a non-responsive server doesn't hold us back.
resolver.timeout = 5
# Do the query.
try:
response = resolver.query(qname, rtype)
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
# Host did not have an answer for this query; not sure what the
# difference is between the two exceptions.
return nxdomain
except dns.exception.Timeout:
return "[timeout]"
# There may be multiple answers; concatenate the response. Remove trailing
# periods from responses since that's how qnames are encoded in DNS but is
# confusing for us. The order of the answers doesn't matter, so sort so we
# can compare to a well known order.
return "; ".join(sorted(str(r).rstrip('.') for r in response))
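# Illustrative example (values made up, not from the original source): for a
# domain with two MX records, query_dns(domain, "MX") would return the single
# normalized string "10 mx1.example.com; 20 mx2.example.com" -- sorted,
# semicolon-joined, trailing dots stripped -- which is why callers such as
# check_mail_domain() split on '; ' and then on ' '.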
def check_ssl_cert(domain, rounded_time, ssl_certificates, env, output):
# Check that TLS certificate is signed.
# Skip the check if the A record is not pointed here.
if query_dns(domain, "A", None) not in (env['PUBLIC_IP'], None): return
# Where is the certificate file stored?
tls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True)
if tls_cert is None:
output.print_warning("""No TLS (SSL) certificate is installed for this domain. Visitors to a website on
this domain will get a security warning. If you are not serving a website on this domain, you do
not need to take any action. Use the TLS Certificates page in the control panel to install a
TLS certificate.""")
return
# Check that the certificate is good.
cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"], rounded_time=rounded_time)
if cert_status == "OK":
# The certificate is ok. The details has expiry info.
output.print_ok("TLS (SSL) certificate is signed & valid. " + cert_status_details)
elif cert_status == "SELF-SIGNED":
# Offer instructions for purchasing a signed certificate.
if domain == env['PRIMARY_HOSTNAME']:
output.print_error("""The TLS (SSL) certificate for this domain is currently self-signed. You will get a security
warning when you check or send email and when visiting this domain in a web browser (for webmail or
static site hosting).""")
else:
output.print_error("""The TLS (SSL) certificate for this domain is self-signed.""")
else:
output.print_error("The TLS (SSL) certificate has a problem: " + cert_status)
if cert_status_details:
output.print_line("")
output.print_line(cert_status_details)
output.print_line("")
_apt_updates = None
def list_apt_updates(apt_update=True):
# See if we have this information cached recently.
# Keep the information for 8 hours.
global _apt_updates
if _apt_updates is not None and _apt_updates[0] > datetime.datetime.now() - datetime.timedelta(hours=8):
return _apt_updates[1]
# Run apt-get update to refresh package list. This should be running daily
# anyway, so on the status checks page don't do this because it is slow.
if apt_update:
shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
# Run apt-get upgrade in simulate mode to get a list of what
# it would do.
simulated_install = shell("check_output", ["/usr/bin/apt-get", "-qq", "-s", "upgrade"])
pkgs = []
for line in simulated_install.split('\n'):
if line.strip() == "":
continue
if re.match(r'^Conf .*', line):
# remove these lines, not informative
continue
m = re.match(r'^Inst (.*) \[(.*)\] \((\S*)', line)
if m:
pkgs.append({ "package": m.group(1), "version": m.group(3), "current_version": m.group(2) })
else:
pkgs.append({ "package": "[" + line + "]", "version": "", "current_version": "" })
# Cache for future requests.
_apt_updates = (datetime.datetime.now(), pkgs)
return pkgs
def what_version_is_this(env):
# This function runs `git describe --abbrev=0` on the Mail-in-a-Box installation directory.
# Git may not be installed and Mail-in-a-Box may not have been cloned from github,
# so this function may raise all sorts of exceptions.
miab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
tag = shell("check_output", ["/usr/bin/git", "describe", "--abbrev=0"], env={"GIT_DIR": os.path.join(miab_dir, '.git')}).strip()
return tag
def get_latest_miab_version():
# This pings https://mailinabox.email/setup.sh and extracts the tag named in
# the script to determine the current product version.
import urllib.request
return re.search(b'TAG=(.*)', urllib.request.urlopen("https://mailinabox.email/setup.sh?ping=1").read()).group(1).decode("utf8")
def check_miab_version(env, output):
config = load_settings(env)
if config.get("privacy", True):
output.print_warning("Mail-in-a-Box version check disabled by privacy setting.")
else:
try:
this_ver = what_version_is_this(env)
except:
this_ver = "Unknown"
latest_ver = get_latest_miab_version()
if this_ver == latest_ver:
output.print_ok("Mail-in-a-Box is up to date. You are running version %s." % this_ver)
else:
output.print_error("A new version of Mail-in-a-Box is available. You are running version %s. The latest version is %s. For upgrade instructions, see https://mailinabox.email. "
% (this_ver, latest_ver))
def run_and_output_changes(env, pool):
import json
from difflib import SequenceMatcher
out = ConsoleOutput()
# Run status checks.
cur = BufferedOutput()
run_checks(True, env, cur, pool)
# Load previously saved status checks.
cache_fn = "/var/cache/mailinabox/status_checks.json"
if os.path.exists(cache_fn):
prev = json.load(open(cache_fn))
# Group the serial output into categories by the headings.
def group_by_heading(lines):
from collections import OrderedDict
ret = OrderedDict()
k = []
ret["No Category"] = k
for line_type, line_args, line_kwargs in lines:
if line_type == "add_heading":
k = []
ret[line_args[0]] = k
else:
k.append((line_type, line_args, line_kwargs))
return ret
prev_status = group_by_heading(prev)
cur_status = group_by_heading(cur.buf)
# Compare the previous to the current status checks
# category by category.
for category, cur_lines in cur_status.items():
if category not in prev_status:
out.add_heading(category + " -- Added")
BufferedOutput(with_lines=cur_lines).playback(out)
else:
# Actual comparison starts here...
prev_lines = prev_status[category]
def stringify(lines):
return [json.dumps(line) for line in lines]
diff = SequenceMatcher(None, stringify(prev_lines), stringify(cur_lines)).get_opcodes()
for op, i1, i2, j1, j2 in diff:
if op == "replace":
out.add_heading(category + " -- Previously:")
elif op == "delete":
out.add_heading(category + " -- Removed")
if op in ("replace", "delete"):
BufferedOutput(with_lines=prev_lines[i1:i2]).playback(out)
if op == "replace":
out.add_heading(category + " -- Currently:")
elif op == "insert":
out.add_heading(category + " -- Added")
if op in ("replace", "insert"):
BufferedOutput(with_lines=cur_lines[j1:j2]).playback(out)
for category, prev_lines in prev_status.items():
if category not in cur_status:
out.add_heading(category)
out.print_warning("This section was removed.")
# Store the current status checks output for next time.
os.makedirs(os.path.dirname(cache_fn), exist_ok=True)
with open(cache_fn, "w") as f:
json.dump(cur.buf, f, indent=True)
class FileOutput:
def __init__(self, buf, width):
self.buf = buf
self.width = width
def add_heading(self, heading):
print(file=self.buf)
print(heading, file=self.buf)
print("=" * len(heading), file=self.buf)
def print_ok(self, message):
self.print_block(message, first_line="✓ ")
def print_error(self, message):
self.print_block(message, first_line="✖ ")
def print_warning(self, message):
self.print_block(message, first_line="? ")
def print_block(self, message, first_line=" "):
print(first_line, end='', file=self.buf)
		message = re.sub(r"\n\s*", " ", message)
		words = re.split(r"(\s+)", message)
linelen = 0
for w in words:
if self.width and (linelen + len(w) > self.width-1-len(first_line)):
print(file=self.buf)
print(" ", end="", file=self.buf)
linelen = 0
if linelen == 0 and w.strip() == "": continue
print(w, end="", file=self.buf)
linelen += len(w)
print(file=self.buf)
def print_line(self, message, monospace=False):
for line in message.split("\n"):
self.print_block(line)
class ConsoleOutput(FileOutput):
def __init__(self):
self.buf = sys.stdout
# Do nice line-wrapping according to the size of the terminal.
# The 'stty' program queries standard input for terminal information.
if sys.stdin.isatty():
try:
self.width = int(shell('check_output', ['stty', 'size']).split()[1])
except:
self.width = 76
else:
# However if standard input is not a terminal, we would get
# "stty: standard input: Inappropriate ioctl for device". So
# we test with sys.stdin.isatty first, and if it is not a
# terminal don't do any line wrapping. When this script is
# run from cron, or if stdin has been redirected, this happens.
self.width = None
class BufferedOutput:
# Record all of the instance method calls so we can play them back later.
def __init__(self, with_lines=None):
self.buf = [] if not with_lines else with_lines
def __getattr__(self, attr):
if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
raise AttributeError
# Return a function that just records the call & arguments to our buffer.
def w(*args, **kwargs):
self.buf.append((attr, args, kwargs))
return w
def playback(self, output):
for attr, args, kwargs in self.buf:
getattr(output, attr)(*args, **kwargs)
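# Usage sketch (illustrative, not from the original source): a BufferedOutput
# accepts the same calls as ConsoleOutput/FileOutput but only records them, e.g.
#   buf = BufferedOutput()
#   buf.print_ok("All good")        # appends ("print_ok", ("All good",), {}) to buf.buf
#   buf.playback(ConsoleOutput())   # replays the recorded calls on a real output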
if __name__ == "__main__":
from utils import load_environment
env = load_environment()
pool = multiprocessing.pool.Pool(processes=10)
if len(sys.argv) == 1:
run_checks(False, env, ConsoleOutput(), pool)
elif sys.argv[1] == "--show-changes":
run_and_output_changes(env, pool)
elif sys.argv[1] == "--check-primary-hostname":
# See if the primary hostname appears resolvable and has a signed certificate.
domain = env['PRIMARY_HOSTNAME']
if query_dns(domain, "A") != env['PUBLIC_IP']:
sys.exit(1)
ssl_certificates = get_ssl_certificates(env)
tls_cert = get_domain_ssl_files(domain, ssl_certificates, env)
if not os.path.exists(tls_cert["certificate"]):
sys.exit(1)
cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"], warn_if_expiring_soon=False)
if cert_status != "OK":
sys.exit(1)
sys.exit(0)
elif sys.argv[1] == "--version":
print(what_version_is_this(env))
| nstanke/mailinabox | management/status_checks.py | Python | cc0-1.0 | 41,760 |
from colab.plugins.utils.proxy_data_api import ProxyDataAPI
class JenkinsDataAPI(ProxyDataAPI):
def fetch_data(self):
pass
| rafamanzo/colab | colab/plugins/jenkins/data_api.py | Python | gpl-2.0 | 138 |
"""Library for performing speech recognition with the Google Speech Recognition API."""
__author__ = 'Anthony Zhang (Uberi)'
__version__ = '1.0.4'
__license__ = 'BSD'
import io, subprocess, wave, shutil
import math, audioop, collections
import json, urllib.request
#wip: filter out clicks and other too short parts
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
try:
import pyaudio
class Microphone(AudioSource):
def __init__(self, device_index = None):
self.device_index = device_index
self.format = pyaudio.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
self.RATE = 16000 # sampling rate in Hertz
self.CHANNELS = 1 # mono audio
self.CHUNK = 1024 # number of frames stored in each buffer
self.audio = None
self.stream = None
def __enter__(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(
input_device_index = self.device_index,
format = self.format, rate = self.RATE, channels = self.CHANNELS, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
except ImportError:
pass
class WavFile(AudioSource):
def __init__(self, filename_or_fileobject):
if isinstance(filename_or_fileobject, str):
self.filename = filename_or_fileobject
else:
self.filename = None
self.wav_file = filename_or_fileobject
self.stream = None
def __enter__(self):
if self.filename: self.wav_file = open(self.filename, "rb")
self.wav_reader = wave.open(self.wav_file, "rb")
self.SAMPLE_WIDTH = self.wav_reader.getsampwidth()
self.RATE = self.wav_reader.getframerate()
self.CHANNELS = self.wav_reader.getnchannels()
assert self.CHANNELS == 1 # audio must be mono
self.CHUNK = 4096
self.stream = WavFile.WavStream(self.wav_reader)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.filename: self.wav_file.close()
self.stream = None
class WavStream(object):
def __init__(self, wav_reader):
self.wav_reader = wav_reader
def read(self, size = -1):
if size == -1:
return self.wav_reader.readframes(self.wav_reader.getnframes())
return self.wav_reader.readframes(size)
class AudioData(object):
def __init__(self, rate, data):
self.rate = rate
self.data = data
class Recognizer(AudioSource):
def __init__(self, language = "fr-FR", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
self.key = key
self.language = language
self.energy_threshold = 1500 # minimum audio energy to consider for recording
self.pause_threshold = 0.8 # seconds of quiet time before a phrase is considered complete
self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording
def samples_to_flac(self, source, frame_data):
import platform, os
with io.BytesIO() as wav_file:
with wave.open(wav_file, "wb") as wav_writer:
wav_writer.setsampwidth(source.SAMPLE_WIDTH)
wav_writer.setnchannels(source.CHANNELS)
wav_writer.setframerate(source.RATE)
wav_writer.writeframes(frame_data)
wav_data = wav_file.getvalue()
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
if shutil.which("flac") is not None: # check for installed version first
flac_converter = shutil.which("flac")
elif system == "Windows" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: # Windows NT, use the bundled FLAC conversion utility
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(path, "flac-linux-i386")
else:
raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC utility")
process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
flac_data, stderr = process.communicate(wav_data)
return flac_data
def record(self, source, duration = None):
assert isinstance(source, AudioSource) and source.stream
frames = io.BytesIO()
seconds_per_buffer = source.CHUNK / source.RATE
elapsed_time = 0
while True: # loop for the total number of chunks needed
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def listen(self, source, timeout = None):
assert isinstance(source, AudioSource) and source.stream
# record audio data as raw samples
frames = collections.deque()
assert self.pause_threshold >= self.quiet_duration >= 0
seconds_per_buffer = source.CHUNK / source.RATE
pause_buffer_count = math.ceil(self.pause_threshold / seconds_per_buffer) # number of buffers of quiet audio before the phrase is complete
quiet_buffer_count = math.ceil(self.quiet_duration / seconds_per_buffer) # maximum number of buffers of quiet audio to retain before and after
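        # Illustrative numbers (assuming the Microphone defaults above): with
        # CHUNK = 1024 frames at RATE = 16000 Hz, each buffer covers 0.064 s, so
        # pause_threshold = 0.8 s gives 13 buffers and quiet_duration = 0.5 s gives 8.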
elapsed_time = 0
# store audio input until the phrase starts
while True:
# handle timeout if specified
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has stopped being quiet
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
break
if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers
frames.popleft()
# read audio input until the phrase ends
pause_count = 0
while True:
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has gone quiet for longer than the pause threshold
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# obtain frame data
for i in range(quiet_buffer_count, pause_buffer_count): frames.pop() # remove extra quiet frames at the end
frame_data = b"".join(list(frames))
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def recognize(self, audio_data, show_all = False):
assert isinstance(audio_data, AudioData)
url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key)
self.request = urllib.request.Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate})
# check for invalid key response from the server
try:
response = urllib.request.urlopen(self.request)
except:
raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)")
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
# make sure we have a list of transcriptions
if "alternative" not in actual_result:
raise LookupError("Speech is unintelligible")
# return the best guess unless told to do otherwise
if not show_all:
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
return prediction["transcript"]
raise LookupError("Speech is unintelligible")
spoken_text = []
# check to see if Google thinks it's 100% correct
default_confidence = 0
if len(actual_result["alternative"])==1: default_confidence = 1
# return all the possibilities
for prediction in actual_result["alternative"]:
if "confidence" in prediction:
spoken_text.append({"text":prediction["transcript"],"confidence":prediction["confidence"]})
else:
spoken_text.append({"text":prediction["transcript"],"confidence":default_confidence})
return spoken_text
if __name__ == "__main__":
r = Recognizer()
m = Microphone()
while True:
print("Say something!")
with m as source:
audio = r.listen(source)
print("Got it! Now to recognize it...")
try:
print("You said " + r.recognize(audio))
except LookupError:
print("Oops! Didn't catch that")
| bizalu/sImulAcre | core/lib/speech_recognition/__init__.py | Python | gpl-2.0 | 10,485 |
from ftw.upgrade import UpgradeStep
from plone import api
class AddLanguageIndex(UpgradeStep):
"""Add Language index."""
def __call__(self):
self.install_upgrade_profile()
ct = api.portal.get_tool("portal_catalog")
survey_results = ct(portal_type="euphorie.survey")
for brain in survey_results:
survey = brain.getObject()
survey.reindexObject(idxs=["Language"])
| euphorie/Euphorie | src/euphorie/upgrade/deployment/v1/20210720143221_add_language_index/upgrade.py | Python | gpl-2.0 | 429 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test API for Zenodo and GitHub integration."""
from __future__ import absolute_import, print_function
from contextlib import contextmanager
from copy import deepcopy
import pytest
from flask import current_app
from invenio_accounts.models import User
from invenio_github.models import Release, ReleaseStatus, Repository
from invenio_pidrelations.contrib.versioning import PIDVersioning
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_sipstore.models import SIP
from mock import MagicMock, Mock, patch
from six import BytesIO
from zenodo.modules.deposit.tasks import datacite_register
from zenodo.modules.github.api import ZenodoGitHubRelease
from zenodo.modules.github.utils import is_github_owner, is_github_versioned
from zenodo.modules.records.api import ZenodoRecord
from zenodo.modules.records.minters import zenodo_record_minter
from zenodo.modules.records.permissions import has_newversion_permission, \
has_update_permission
creators_params = (
(dict(),
[dict(name='Contributor', affiliation='X'), ],
[dict(name='Owner', affiliation='Y'), ],
[dict(name='Contributor', affiliation='X'), ]),
(dict(creators=[]), # List of creators provided as empty
[dict(name='Contributor', affiliation='X'), ],
[dict(name='Owner', affiliation='Y'), ],
[dict(name='Owner', affiliation='Y'), ]),
(dict(creators=None),
[dict(name='Contributor', affiliation='X'), ],
None, # Failed to get GH owner
[dict(name='Unknown', affiliation=''), ]),
)
@pytest.mark.parametrize('defaults,contribs,owner,output', creators_params)
@patch('zenodo.modules.github.api.get_owner')
@patch('zenodo.modules.github.api.get_contributors')
@patch('zenodo.modules.github.api.legacyjson_v1_translator')
def test_github_creators_metadata(m_ljv1t, m_get_contributors, m_get_owner,
defaults, contribs, owner, output):
"""Test 'creators' metadata fetching from GitHub."""
m_get_contributors.return_value = contribs
m_get_owner.return_value = owner
release = MagicMock()
release.event.user_id = 1
release.event.payload['repository']['id'] = 1
zgh = ZenodoGitHubRelease(release)
zgh.defaults = defaults
zgh.gh.api = None
zgh.extra_metadata = {}
zgh.metadata
m_ljv1t.assert_called_with({'metadata': {'creators': output}})
@patch('zenodo.modules.github.api.ZenodoGitHubRelease.metadata')
@patch('invenio_pidstore.providers.datacite.DataCiteMDSClient')
def test_github_publish(datacite_mock, zgh_meta, db, users, location,
deposit_metadata):
"""Test basic GitHub payload."""
data = b'foobar'
resp = Mock()
resp.headers = {'Content-Length': len(data)}
resp.raw = BytesIO(b'foobar')
resp.status_code = 200
gh3mock = MagicMock()
gh3mock.api.session.get = Mock(return_value=resp)
gh3mock.account.user.email = '[email protected]'
release = MagicMock()
release.event.user_id = 1
release.event.payload['release']['author']['id'] = 1
release.event.payload['foo']['bar']['baz'] = 1
release.event.payload['repository']['id'] = 1
zgh = ZenodoGitHubRelease(release)
zgh.gh = gh3mock
zgh.release = dict(author=dict(id=1))
zgh.metadata = deposit_metadata
zgh.files = (('foobar.txt', None), )
zgh.model.repository.releases.filter_by().count.return_value = 0
datacite_task_mock = MagicMock()
# We have to make the call to the task synchronous
datacite_task_mock.delay = datacite_register.apply
with patch('zenodo.modules.deposit.tasks.datacite_register',
new=datacite_task_mock):
zgh.publish()
# datacite should be called twice - for regular DOI and Concept DOI
assert datacite_mock().metadata_post.call_count == 2
datacite_mock().doi_post.assert_any_call(
'10.5072/zenodo.1', 'https://zenodo.org/record/1')
datacite_mock().doi_post.assert_any_call(
'10.5072/zenodo.2', 'https://zenodo.org/record/2')
expected_sip_agent = {
'email': '[email protected]',
'$schema': 'http://zenodo.org/schemas/sipstore/'
'agent-githubclient-v1.0.0.json',
'user_id': 1,
'github_id': 1,
}
gh_sip = SIP.query.one()
assert gh_sip.agent == expected_sip_agent
@patch('invenio_github.api.GitHubAPI.check_sync', new=lambda *_, **__: False)
def test_github_newversion_permissions(app, db, minimal_record, users, g_users,
g_remoteaccounts):
"""Test new version creation permissions for GitHub records."""
old_owner, new_owner = [User.query.get(u['id']) for u in g_users]
# Create repository, and set owner to `old_owner`
repo = Repository.create(
name='foo/bar', github_id=8000, user_id=old_owner.id, hook=1234)
# Create concpetrecid for the GitHub records
conceptrecid = PersistentIdentifier.create(
'recid', '100', status=PIDStatus.RESERVED)
def create_deposit_and_record(pid_value, owner):
"""Utility function for creating records and deposits."""
recid = PersistentIdentifier.create(
'recid', pid_value, status=PIDStatus.RESERVED)
pv = PIDVersioning(parent=conceptrecid)
pv.insert_draft_child(recid)
depid = PersistentIdentifier.create(
'depid', pid_value, status=PIDStatus.REGISTERED)
deposit = ZenodoRecord.create({'_deposit': {'id': depid.pid_value},
'conceptrecid': conceptrecid.pid_value,
'recid': recid.pid_value})
deposit.commit()
depid.assign('rec', deposit.id)
record_metadata = deepcopy(minimal_record)
record_metadata['_deposit'] = {'id': depid.pid_value}
record_metadata['conceptrecid'] = conceptrecid.pid_value
record_metadata['recid'] = int(recid.pid_value)
record_metadata['owners'] = [owner.id]
record = ZenodoRecord.create(record_metadata)
zenodo_record_minter(record.id, record)
record.commit()
return (depid, deposit, recid, record)
# Create first GitHub record (by `old_owner`)
depid1, d1, recid1, r1 = create_deposit_and_record('101', old_owner)
rel1 = Release(release_id=111, repository_id=repo.id, record_id=d1.id,
status=ReleaseStatus.PUBLISHED)
db.session.add(rel1)
db.session.commit()
assert is_github_versioned(recid1)
@contextmanager
def set_identity(user):
from flask_principal import AnonymousIdentity, Identity
principal = current_app.extensions['security'].principal
principal.set_identity(Identity(user))
yield
principal.set_identity(AnonymousIdentity())
with app.test_request_context():
with set_identity(old_owner):
assert is_github_owner(old_owner, recid1)
assert has_update_permission(old_owner, r1)
assert has_newversion_permission(old_owner, r1)
with set_identity(new_owner):
assert not is_github_owner(new_owner, recid1)
assert not has_update_permission(new_owner, r1)
assert not has_newversion_permission(new_owner, r1)
# Change the repository owner
repo.user_id = new_owner.id
db.session.add(repo)
db.session.commit()
with app.test_request_context():
with set_identity(old_owner):
assert not is_github_owner(old_owner, recid1)
# `old_owner` can edit his record of course
assert has_update_permission(old_owner, r1)
assert has_newversion_permission(old_owner, r1)
with set_identity(new_owner):
assert is_github_owner(new_owner, recid1)
# `new_owner` can't edit the `old_owner`'s record
assert not has_update_permission(new_owner, r1)
assert not has_newversion_permission(new_owner, r1)
# Create second GitHub record (by `new_owner`)
depid2, d2, recid2, r2 = create_deposit_and_record('102', new_owner)
rel2 = Release(release_id=222, repository_id=repo.id, record_id=d2.id,
status=ReleaseStatus.PUBLISHED)
db.session.add(rel2)
db.session.commit()
with app.test_request_context():
with set_identity(old_owner):
assert not is_github_owner(old_owner, recid1)
assert not is_github_owner(old_owner, recid2)
assert has_update_permission(old_owner, r1)
# `old_owner` can't edit the `new_owner`'s record
assert not has_update_permission(old_owner, r2)
assert not has_newversion_permission(old_owner, r1)
assert not has_newversion_permission(old_owner, r2)
with set_identity(new_owner):
assert is_github_owner(new_owner, recid1)
assert is_github_owner(new_owner, recid2)
assert not has_update_permission(new_owner, r1)
# `new_owner` can edit his newly released record
assert has_update_permission(new_owner, r2)
assert has_newversion_permission(new_owner, r1)
assert has_newversion_permission(new_owner, r2)
# Create a manual record (by `new_owner`)
depid3, d3, recid3, r3 = create_deposit_and_record('103', new_owner)
db.session.commit()
with app.test_request_context():
with set_identity(old_owner):
assert not is_github_owner(old_owner, recid3)
assert not has_update_permission(old_owner, r3)
assert not has_newversion_permission(old_owner, r3)
with set_identity(new_owner):
assert is_github_owner(new_owner, recid3)
assert has_update_permission(new_owner, r3)
assert has_newversion_permission(new_owner, r3)
| lnielsen/zenodo | tests/unit/github/test_api.py | Python | gpl-2.0 | 10,739 |
# -*- coding: utf-8 -*-
"""sdist tests"""
import os
import shutil
import sys
import tempfile
import unittest
import urllib
import unicodedata
import posixpath
from StringIO import StringIO
from setuptools.command.sdist import sdist
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
SETUP_ATTRS = {
'name': 'sdist_test',
'version': '0.0',
'packages': ['sdist_test'],
'package_data': {'sdist_test': ['*.txt']}
}
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
if sys.version_info >= (3,):
LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1')
else:
LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py'
# Cannot use context manager because of Python 2.4
def quiet():
global old_stdout, old_stderr
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
def unquiet():
sys.stdout, sys.stderr = old_stdout, old_stderr
# Fake byte literals to shut up Python <= 2.5
def b(s, encoding='utf-8'):
if sys.version_info >= (3,):
return s.encode(encoding)
return s
# HFS Plus returns decomposed UTF-8
def decompose(path):
if isinstance(path, unicode):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
# HFS Plus quotes unknown bytes like so: %F6
def hfs_quote(path):
if isinstance(path, unicode):
raise TypeError('bytes are required')
try:
u = path.decode('utf-8')
except UnicodeDecodeError:
path = urllib.quote(path) # Not UTF-8
else:
if sys.version_info >= (3,):
path = u
return path
class TestSdistTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
f.write(SETUP_PY)
f.close()
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'sdist_test')
os.mkdir(test_pkg)
# *.rst was not included in package_data, so c.rst should not be
# automatically added to the manifest when not under version control
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
# Just touch the files; their contents are irrelevant
open(os.path.join(test_pkg, fname), 'w').close()
self.old_cwd = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.temp_dir)
def test_package_data_in_sdist(self):
"""Regression test for pull request #4: ensures that files listed in
package_data are included in the manifest even if they're not added to
version control.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# squelch output
quiet()
try:
cmd.run()
finally:
unquiet()
manifest = cmd.filelist.files
self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest)
def test_manifest_is_written_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = posixpath.join('sdist_test', 'smörbröd.py')
# Add UTF-8 filename and write manifest
quiet()
try:
mm.run()
mm.filelist.files.append(filename)
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
u = contents.decode('UTF-8')
except UnicodeDecodeError, e:
self.fail(e)
# The manifest should contain the UTF-8 filename
if sys.version_info >= (3,):
self.assertTrue(filename in u)
else:
self.assertTrue(filename in contents)
def test_manifest_is_written_with_surrogateescape_error_handler(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# Latin-1 filename
filename = posixpath.join(b('sdist_test'), LATIN1_FILENAME)
# Add filename with surrogates and write manifest
quiet()
try:
mm.run()
if sys.version_info >= (3,):
u = filename.decode('utf-8', 'surrogateescape')
mm.filelist.files.append(u)
else:
mm.filelist.files.append(filename)
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should contain the Latin-1 filename
self.assertTrue(filename in contents)
def test_manifest_is_read_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# UTF-8 filename
filename = os.path.join('sdist_test', 'smörbröd.py')
open(filename, 'w').close()
quiet()
try:
cmd.run()
finally:
unquiet()
# The filelist should contain the UTF-8 filename
if sys.platform == 'darwin':
filename = decompose(filename)
self.assertTrue(filename in cmd.filelist.files)
def test_manifest_is_read_with_surrogateescape_error_handler(self):
# Test for #303.
# This is hard to test on HFS Plus because it quotes unknown
# bytes (see previous test). Furthermore, egg_info.FileList
# only appends filenames that os.path.exist.
# We therefore write the manifest file by hand and check whether
# read_manifest produces a UnicodeDecodeError.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
quiet()
try:
cmd.run()
# Add Latin-1 filename to manifest
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(filename+b('\n'))
manifest.close()
# Re-read manifest
try:
cmd.read_manifest()
except UnicodeDecodeError, e:
self.fail(e)
finally:
unquiet()
def test_sdist_with_utf8_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
open(filename, 'w').close()
quiet()
try:
cmd.run()
finally:
unquiet()
# The filelist should contain the UTF-8 filename
# (in one representation or other)
if sys.version_info >= (3,):
filename = filename.decode(sys.getfilesystemencoding(), 'surrogateescape')
if sys.platform == 'darwin':
filename = decompose(filename)
self.assertTrue(filename in cmd.filelist.files)
def test_sdist_with_latin1_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
open(filename, 'w').close()
quiet()
try:
cmd.run()
finally:
unquiet()
# The filelist should contain the Latin-1 filename
# (in one representation or other)
if sys.platform == 'darwin':
filename = hfs_quote(filename)
elif sys.version_info >= (3,):
filename = filename.decode(sys.getfilesystemencoding(), 'surrogateescape')
self.assertTrue(filename in cmd.filelist.files)
def test_decompose(self):
self.assertNotEqual('smörbröd.py', decompose('smörbröd.py'))
if sys.version_info >= (3,):
self.assertEqual(len('smörbröd.py'), 11)
self.assertEqual(len(decompose('smörbröd.py')), 13)
else:
self.assertEqual(len('smörbröd.py'), 13)
self.assertEqual(len(decompose('smörbröd.py')), 15)
def test_hfs_quote(self):
self.assertEqual(hfs_quote(LATIN1_FILENAME), 'sm%F6rbr%F6d.py')
# Bytes are required
if sys.version_info >= (3,):
self.assertRaises(TypeError, hfs_quote, 'smörbröd.py')
else:
self.assertRaises(TypeError, hfs_quote, 'smörbröd.py'.decode('utf-8'))
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| xbianonpi/xbian-package-development | content/usr/local/lib/python2.7/dist-packages/distribute-0.6.30-py2.7.egg/setuptools/tests/test_sdist.py | Python | gpl-2.0 | 9,701 |
#!/usr/bin/env python
import os
from setuptools import setup
from subprocess import call
from sys import platform, argv
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
SCRIPTS = ["src/bg_daemon/background_daemon.py"]
# only compile quack when none of these options are chosen
if (all([e not in argv for e in ['egg_info', 'sdist', 'register']]) and
platform == 'darwin'):
try:
call(['make', '-C', 'src/bg_daemon/'])
SCRIPTS.append("src/bg_daemon/quack")
except OSError as e:
print "Can't compile quack, reason {}".format(str(e))
setup(
name="bg_daemon",
version="0.0.1",
author="Santiago Torres",
author_email="[email protected]",
description=("An extensible set of classes that can programmatically "
"update the desktop wallpaper"),
license="GPLv2",
keywords="imgur desktop wallpaper background",
url="https://github.com/santiagotorres/bg_daemon",
packages=["bg_daemon", "bg_daemon.fetchers"],
package_dir={"bg_daemon": "src/bg_daemon",
"bg_daemon.fetchers": "src/bg_daemon/fetchers"},
scripts=SCRIPTS,
include_package_data=True,
data_files=[('bg_daemon', ['src/bg_daemon/settings.json',
'src/bg_daemon/mac-update.sh'])],
long_description=read("README.md"),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Topic :: Utilities",
"License :: ",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Operating System :: Unix",
"Topic :: Multimedia",
],
install_requires=[
"imgurpython",
"requests",
"python-crontab",
"mock",
],
)
| echenran/bg_daemon | setup.py | Python | gpl-2.0 | 1,862 |
"""
Stateful module base class and interface description.
All stateful Python modules
- Get Skype4Py Skype instance on init - have full control over Skype and
thus are not limited to !command handlers
- Reside in the same modules/ folder as UNIX script modules
- Have .py extension and be valid Python 2.7 modules
- Have #!/sevabot magic string at the head of the file
- Export a Python attribute *sevabot_handler* which is an instance of the class
as described below
Please note that in the future we might have different chat backends (GTalk)
and thus have same-same-but-different stateful handlers.
"""
class StatefulSkypeHandler:
"""
Base class for stateful handlers.
    All exceptions that slip through are caught and logged.
"""
def init(self, sevabot):
"""
        Set up our state. This is called every time the module is (re)loaded.
You can get Skype4Py instance via ``sevabot.getSkype()``.
:param sevabot: Handle to Sevabot instance
"""
def handle_message(self, msg, status):
"""Override this method to have a customized handler for each Skype message.
:param msg: ChatMessage instance https://github.com/awahlig/skype4py/blob/master/Skype4Py/chat.py#L409
:param status: -
:return: True if the message was handled and should not be further processed
"""
    def shutdown(self):
""" Called when the module is reloaded.
In ``shutdown()`` you must
* Stop all created threads
* Unregister all event handlers
        .. note ::
            This is *not* guaranteed to be called when the Sevabot process shuts
            down, as the process may terminate with SIGKILL.
"""
def register_callback(self, skype, event, callback):
"""
Register any callable as a callback for a skype event.
Thin wrapper for RegisterEventHandler https://github.com/awahlig/skype4py/blob/master/Skype4Py/utils.py
:param skype: Skype4Py instance
:param event: Same as Event
:param callback: Same as Target
:return: Same as RegisterEventHandler
"""
return skype.RegisterEventHandler(event, callback)
def unregister_callback(self, skype, event, callback):
"""
Unregister a callback previously registered with register_callback.
Thin wrapper for UnregisterEventHandler https://github.com/awahlig/skype4py/blob/master/Skype4Py/utils.py
:param skype: Skype4Py instance
:param event: Same as Event
:param callback: Same as Target
:return: Same as UnregisterEventHandler
"""
return skype.UnregisterEventHandler(event, callback)
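# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original interface): a minimal stateful
# module following the contract described in the module docstring above.
# The handler class name and the "!ping" command are hypothetical examples.
#
#     #!/sevabot
#     class PingHandler(StatefulSkypeHandler):
#         def init(self, sevabot):
#             # Keep a reference to the Skype4Py instance for later use
#             self.skype = sevabot.getSkype()
#         def handle_message(self, msg, status):
#             # Answer "!ping" and stop further processing of this message
#             if msg.Body.strip() == "!ping":
#                 msg.Chat.SendMessage("pong")
#                 return True
#             return False
#         def shutdown(self):
#             # Nothing to clean up in this example
#             pass
#     # Module-level instance picked up by the Sevabot module loader
#     sevabot_handler = PingHandler()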
| mikemike/SkypeBot | unused-modules/stateful.py | Python | gpl-2.0 | 2,755 |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
import tornado.web
from handlers.index import IndexHandler
from handlers.sensors import SensorsHandler
import logging
logging.getLogger().setLevel(logging.DEBUG)
app = tornado.web.Application([
(r'/', IndexHandler),
(r'/sensors', SensorsHandler)
])
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
| nextgis/entels_front_demo | entels_demo_tornado/__init__.py | Python | gpl-2.0 | 548 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_univar.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def postProcessResults(alg):
htmlFile = alg.getOutputFromName('html').value
found = False
f = open(htmlFile, "w")
f.write("<h2>v.univar</h2>\n")
for line in alg.consoleOutput:
if found and not line.strip().endswith('exit'):
f.write(line + "<br>\n")
if 'v.univar' in line:
found = True
f.close()
| bstroebl/QGIS | python/plugins/sextante/grass/ext/v_univar.py | Python | gpl-2.0 | 1,499 |
#
#LiloConf.py
#
import sys, re, os
import logging
import GrubConf
class LiloImage(object):
def __init__(self, lines, path):
self.reset(lines, path)
def __repr__(self):
return ("title: %s\n"
" root: %s\n"
" kernel: %s\n"
" args: %s\n"
" initrd: %s\n" %(self.title, self.root, self.kernel,
self.args, self.initrd))
def reset(self, lines, path):
self._initrd = self._kernel = self._readonly = None
self._args = ""
self.title = ""
self.lines = []
self.path = path
self.root = ""
map(self.set_from_line, lines)
def set_from_line(self, line, replace = None):
(com, arg) = GrubConf.grub_exact_split(line, 2)
if self.commands.has_key(com):
if self.commands[com] is not None:
setattr(self, self.commands[com], re.sub('^"(.+)"$', r"\1", arg.strip()))
else:
logging.info("Ignored image directive %s" %(com,))
else:
logging.warning("Unknown image directive %s" %(com,))
# now put the line in the list of lines
if replace is None:
self.lines.append(line)
else:
self.lines.pop(replace)
self.lines.insert(replace, line)
def set_kernel(self, val):
self._kernel = (None, self.path + "/" + val)
def get_kernel(self):
return self._kernel
kernel = property(get_kernel, set_kernel)
def set_initrd(self, val):
self._initrd = (None, self.path + "/" + val)
def get_initrd(self):
return self._initrd
initrd = property(get_initrd, set_initrd)
def set_args(self, val):
self._args = val
def get_args(self):
args = self._args
if self.root:
args += " root=" + self.root
if self.readonly:
args += " ro"
return args
args = property(get_args, set_args)
def set_readonly(self, val):
self._readonly = 1
def get_readonly(self):
return self._readonly
readonly = property(get_readonly, set_readonly)
# set up command handlers
commands = { "label": "title",
"root": "root",
"rootnoverify": "root",
"image": "kernel",
"initrd": "initrd",
"append": "args",
"read-only": "readonly",
"chainloader": None,
"module": None}
class LiloConfigFile(object):
def __init__(self, fn = None):
self.filename = fn
self.images = []
self.timeout = -1
self._default = 0
if fn is not None:
self.parse()
def parse(self, buf = None):
if buf is None:
if self.filename is None:
raise ValueError, "No config file defined to parse!"
f = open(self.filename, 'r')
lines = f.readlines()
f.close()
else:
lines = buf.split("\n")
path = os.path.dirname(self.filename)
img = []
for l in lines:
l = l.strip()
# skip blank lines
if len(l) == 0:
continue
# skip comments
if l.startswith('#'):
continue
# new image
if l.startswith("image"):
if len(img) > 0:
self.add_image(LiloImage(img, path))
img = [l]
continue
if len(img) > 0:
img.append(l)
continue
(com, arg) = GrubConf.grub_exact_split(l, 2)
if self.commands.has_key(com):
if self.commands[com] is not None:
setattr(self, self.commands[com], arg.strip())
else:
logging.info("Ignored directive %s" %(com,))
else:
logging.warning("Unknown directive %s" %(com,))
if len(img) > 0:
self.add_image(LiloImage(img, path))
def add_image(self, image):
self.images.append(image)
def _get_default(self):
        for i in range(0, len(self.images)):
if self.images[i].title == self._default:
return i
return 0
def _set_default(self, val):
self._default = val
default = property(_get_default, _set_default)
commands = { "default": "self.default",
"timeout": "self.timeout",
"prompt": None,
"relocatable": None,
}
if __name__ == "__main__":
    if len(sys.argv) < 2:
        raise RuntimeError, "Need a lilo.conf to read"
g = LiloConfigFile(sys.argv[1])
for i in g.images:
print i #, i.title, i.root, i.kernel, i.args, i.initrd
print g.default
| mikesun/xen-cow-checkpointing | tools/pygrub/src/LiloConf.py | Python | gpl-2.0 | 4,887 |
#!/usr/bin/env python2.7
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
trunk_dir = os.path.split(this_dir)[0]
sys.path.insert(0,trunk_dir)
from ikol.dbregister import DataBase
from ikol import var
if os.path.exists(var.DB_PATH):
os.remove(var.DB_PATH)
DB = DataBase(var.DB_PATH)
DB.insertPlaylist("loLWOCl7nlk","test")
DB.insertPlaylist("loLWO357nlk","testb")
DB.insertVideo("KDk2341oEQQ","loLWOCl7nlk","test")
DB.insertVideo("KDktIWeoE23","loLWOCl7nlk","testb")
print DB.getAllVideosByPlaylist("loLWOCl7nlk")
print DB.getVideoById("KDk2341oEQQ") | lokiteitor/ikol | test/DBtest.py | Python | gpl-2.0 | 589 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TrinityX documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 25 14:04:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'rinoh.frontend.sphinx',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TrinityX'
copyright = '2020, ClusterVision Solutions BV'
author = 'ClusterVision Solutions BV'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '12'
# The full version, including alpha/beta/rc tags.
release = '12.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'none'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
#html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = 'TrinityX r12'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'trinityxlogo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TrinityXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TrinityX.tex', 'TrinityX Documentation',
'ClusterVision Solutions BV', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'trinityx', 'TrinityX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TrinityX', 'TrinityX Documentation',
author, 'TrinityX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| clustervision/trinityX | doc/conf.py | Python | gpl-2.0 | 9,991 |
TESTS = {
"Level_1": [
{
"input": [1, 2, 3],
"answer": 2,
"explanation": "3-1=2"
},
{
"input": [5, -5],
"answer": 10,
"explanation": "5-(-5)=10"
},
{
"input": [10.2, -2.2, 0, 1.1, 0.5],
"answer": 12.4,
"explanation": "10.2-(-2.2)=12.4"
},
{
"input": [],
"answer": 0,
"explanation": "Empty"
},
{"input": [-99.9, 99.9],
"answer": 199.8,
"explanation": "99.9-(-99.9)"},
{"input": [1, 1],
"answer": 0,
"explanation": "1-1"},
{"input": [0, 0, 0, 0],
"answer": 0,
"explanation": "0-0"},
{"input": [36.0, -26.0, -7.5, 0.9, 0.53, -6.6, -71.0, 0.53, -48.0, 57.0, 69.0, 0.063, -4.7, 0.01, 9.2],
"answer": 140.0,
"explanation": "69.0-(-71.0)"},
{"input": [-0.035, 0.0, -0.1, 83.0, 0.28, 60.0],
"answer": 83.1,
"explanation": "83.0-(-0.1)"},
{"input": [0.02, 0.93, 0.066, -94.0, -0.91, -21.0, -7.2, -0.018, 26.0],
"answer": 120.0,
"explanation": "26.0-(-94.0)"},
{"input": [89.0, 0.014, 2.9, -1.2, 5.8],
"answer": 90.2,
"explanation": "89.0-(-1.2)"},
{"input": [-69.0, 0.0, 0.0, -0.051, -0.021, -0.81],
"answer": 69.0,
"explanation": "0.0-(-69.0)"},
{"input": [-0.07],
"answer": 0.0,
"explanation": "-0.07-(-0.07)"},
{"input": [0.074, 0.12, -0.4, 4.0, -1.7, 3.0, -5.1, 0.57, -54.0, -41.0, -5.2, -5.6, 3.8, 0.054, -35.0, -5.0,
-0.005, 0.034],
"answer": 58.0,
"explanation": "4.0-(-54.0)"},
{"input": [29.0, 0.47, -4.5, -6.7, -0.051, -0.82, -0.074, -4.0, -0.015, -0.015, -8.0, -0.43],
"answer": 37.0,
"explanation": "29.0-(-8.0)"},
{"input": [-0.036, -0.11, -0.55, -64.0],
"answer": 63.964,
"explanation": "-0.036-(-64.0)"},
{"input": [-0.092, -0.079, -0.31, -0.87, -28.0, -6.2, -0.097, -5.8, -0.025, -28.0, -4.7, -2.9, -8.0, -0.093,
-13.0, -73.0],
"answer": 72.975,
"explanation": "-0.025-(-73.0)"},
{"input": [-0.015, 7.6],
"answer": 7.615,
"explanation": "7.6-(-0.015)"},
{"input": [-46.0, 0.19, -0.08, -4.0, 4.4, 0.071, -0.029, -0.034, 28.0, 0.043, -97.0],
"answer": 125.0,
"explanation": "28.0-(-97.0)"},
{"input": [32.0, -0.07, -0.056, -6.4, 0.084],
"answer": 38.4,
"explanation": "32.0-(-6.4)"},
{"input": [0.017, 0.015, 0.69, 0.78],
"answer": 0.765,
"explanation": "0.78-0.015"},
]
}
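# Illustrative reference solution (not part of the original file), assuming the
# task exercised by the cases above is "difference between the largest and the
# smallest number, 0 for an empty sequence", as the "explanation" strings
# suggest. The function name is hypothetical.
def _reference_checkio(data):
    # An empty sequence has no spread to measure
    if not data:
        return 0
    # Spread between the largest and the smallest number
    return max(data) - min(data)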
| Empire-of-Code-Puzzles/checkio-empire-most-numbers | verification/src/tests.py | Python | gpl-2.0 | 2,790 |
# _*_ coding:utf-8 _*_
# Filename:ClientUI.py
# Python online chat client
from socket import *
from ftplib import FTP
import ftplib
import socket
import thread
import time
import sys
import codecs
import os
reload(sys)
sys.setdefaultencoding( "utf-8" )
class ClientMessage():
    # Set the username and password
def setUsrANDPwd(self,usr,pwd):
self.usr=usr
self.pwd=pwd
    # Set the target user to chat with
def setToUsr(self,toUsr):
self.toUsr=toUsr
self.ChatFormTitle=toUsr
    # Set the server IP address and port number
def setLocalANDPort(self,local,port):
self.local = local
self.port = port
def check_info(self):
self.buffer = 1024
self.ADDR=(self.local,self.port)
self.udpCliSock = socket.socket(AF_INET, SOCK_DGRAM)
self.udpCliSock.sendto('0##'+self.usr+'##'+self.pwd,self.ADDR)
self.serverMsg ,self.ADDR = self.udpCliSock.recvfrom(self.buffer)
s=self.serverMsg.split('##')
if s[0]=='Y':
return True
elif s[0]== 'N':
return False
    # Receive messages
def receiveMessage(self):
self.buffer = 1024
self.ADDR=(self.local,self.port)
self.udpCliSock = socket.socket(AF_INET, SOCK_DGRAM)
self.udpCliSock.sendto('0##'+self.usr+'##'+self.pwd,self.ADDR)
while True:
            # Connection established; receive messages from the server
self.serverMsg ,self.ADDR = self.udpCliSock.recvfrom(self.buffer)
s=self.serverMsg.split('##')
if s[0]=='Y':
                #self.chatText.insert(Tkinter.END,'The client has established a connection with the server......')
return True
elif s[0]== 'N':
                #self.chatText.insert(Tkinter.END,'The client failed to establish a connection with the server......')
return False
elif s[0]=='CLOSE':
i=5
while i>0:
                    self.chatText.insert(Tkinter.END,'Your account has logged in from another client; this client will exit in '+str(i)+' seconds......')
time.sleep(1)
i=i-1
self.chatText.delete(Tkinter.END)
os._exit(0)
            # Friend list
elif s[0]=='F':
for eachFriend in s[1:len(s)]:
print eachFriend
            # A friend came online
elif s[0]=='0':
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                self.chatText.insert(Tkinter.END, theTime+' ' +'Your friend ' + s[1]+' is now online')
            # A friend went offline
elif s[0]=='1':
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                self.chatText.insert(Tkinter.END, theTime+' ' +'Your friend ' + s[1]+' has gone offline')
            # A friend sent a message
elif s[0]=='2':
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                self.chatText.insert(Tkinter.END, theTime +' '+s[1] +' says:\n')
self.chatText.insert(Tkinter.END, ' ' + s[3])
            # A friend sent a file
elif s[0]=='3':
filename=s[2]
f=FTP('192.168.1.105')
f.login('Coder', 'xianjian')
f.cwd(self.usr)
filenameD=filename[:-1].encode("cp936")
try:
f.retrbinary('RETR '+filenameD,open('..\\'+self.usr+'\\'+filenameD,'wb').write)
except ftplib.error_perm:
print 'ERROR:cannot read file "%s"' %file
                self.chatText.insert(Tkinter.END,filename[:-1]+' transfer complete')
elif s[0]=='4':
                agreement=raw_input(s[1]+' wants to add you as a friend. Verification message: '+s[3]+'. Do you accept '+s[1]+' as a friend (Y/N)? ')
if agreement=='Y':
self.udpCliSock.sendto('5##'+s[1]+'##'+s[2]+'##Y',self.ADDR)
elif agreement=='N':
self.udpCliSock.sendto('5##'+s[1]+'##'+s[2]+'##N',self.ADDR)
elif s[0]=='5':
if s[3]=='Y':
                    print s[2]+' accepted your friend request'
elif s[3]=='N':
                    print s[2]+' rejected your friend request'
    # Send a message
def sendMessage(self):
        # Get the message the user typed into the Text widget
message = self.inputText.get('1.0',Tkinter.END)
        # Format the current time
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.chatText.insert(Tkinter.END, theTime +' I say:\n')
self.chatText.insert(Tkinter.END,' ' + message + '\n')
self.udpCliSock.sendto('2##'+self.usr+'##'+self.toUsr+'##'+message,self.ADDR);
        # Clear the message the user typed into the Text widget
self.inputText.delete(0.0,message.__len__()-1.0)
    # Send a file
def sendFile(self):
filename = self.inputText.get('1.0',Tkinter.END)
theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.chatText.insert(Tkinter.END, theTime +' I' + ' sent a file:\n')
self.chatText.insert(Tkinter.END,' ' + filename[:-1] + '\n')
f=FTP('192.168.1.105')
f.login('Coder', 'xianjian')
f.cwd(self.toUsr)
filenameU=filename[:-1].encode("cp936")
try:
#f.retrbinary('RETR '+filename,open(filename,'wb').write)
            # Upload the file to the recipient's folder on the server
f.storbinary('STOR ' + filenameU, open('..\\'+self.usr+'\\'+filenameU, 'rb'))
except ftplib.error_perm:
print 'ERROR:cannot read file "%s"' %file
self.udpCliSock.sendto('3##'+self.usr+'##'+self.toUsr+'##'+filename,self.ADDR);
    # Add a friend
def addFriends(self):
message= self.inputText.get('1.0',Tkinter.END)
s=message.split('##')
self.udpCliSock.sendto('4##'+self.usr+'##'+s[0]+'##'+s[1],self.ADDR);
    # Close the message window and exit
def close(self):
self.udpCliSock.sendto('1##'+self.usr,self.ADDR);
sys.exit()
    # Start a thread to receive messages from the server
def startNewThread(self):
thread.start_new_thread(self.receiveMessage,())
def main():
client = ClientMessage()
client.setLocalANDPort('192.168.1.105', 8808)
client.setUsrANDPwd('12073127', '12073127')
client.setToUsr('12073128')
client.startNewThread()
if __name__=='__main__':
main()
| gzxultra/IM_programming | class_ClientMessage.py | Python | gpl-2.0 | 6,373 |
# pygopherd -- Gopher-based protocol server in Python
# module: serve up gopherspace via http
# $Id: http.py,v 1.21 2002/04/26 15:18:10 jgoerzen Exp $
# Copyright (C) 2002 John Goerzen
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import SocketServer
import re, binascii
import os, stat, os.path, mimetypes, urllib, time
from pygopherd import handlers, protocols, GopherExceptions
from pygopherd.protocols.base import BaseGopherProtocol
import pygopherd.version
import cgi
class HTTPProtocol(BaseGopherProtocol):
def canhandlerequest(self):
self.requestparts = map(lambda arg: arg.strip(), self.request.split(" "))
return len(self.requestparts) == 3 and \
(self.requestparts[0] == 'GET' or self.requestparts[0] == 'HEAD') and \
self.requestparts[2][0:5] == 'HTTP/'
def headerslurp(self):
if hasattr(self.requesthandler, 'pygopherd_http_slurped'):
# Already slurped.
self.httpheaders = self.requesthandler.pygopherd_http_slurped
return
# Slurp up remaining lines.
self.httpheaders = {}
while 1:
line = self.rfile.readline()
if not len(line):
break
line = line.strip()
if not len(line):
break
splitline = line.split(':', 1)
if len(splitline) == 2:
self.httpheaders[splitline[0].lower()] = splitline[1]
self.requesthandler.pygopherd_http_slurped = self.httpheaders
def handle(self):
self.canhandlerequest() # To get self.requestparts
self.iconmapping = eval(self.config.get("protocols.http.HTTPProtocol",
"iconmapping"))
self.headerslurp()
splitted = self.requestparts[1].split('?')
self.selector = splitted[0]
self.selector = urllib.unquote(self.selector)
self.selector = self.slashnormalize(self.selector)
self.formvals = {}
if len(splitted) >= 2:
self.formvals = cgi.parse_qs(splitted[1])
if self.formvals.has_key('searchrequest'):
self.searchrequest = self.formvals['searchrequest'][0]
icon = re.match('/PYGOPHERD-HTTPPROTO-ICONS/(.+)$', self.selector)
if icon:
iconname = icon.group(1)
if icons.has_key(iconname):
self.wfile.write("HTTP/1.0 200 OK\r\n")
self.wfile.write("Last-Modified: Fri, 14 Dec 2001 21:19:47 GMT\r\n")
self.wfile.write("Content-Type: image/gif\r\n\r\n")
if self.requestparts[0] == 'HEAD':
return
self.wfile.write(binascii.unhexlify(icons[iconname]))
return
try:
handler = self.gethandler()
self.log(handler)
self.entry = handler.getentry()
handler.prepare()
self.wfile.write("HTTP/1.0 200 OK\r\n")
if self.entry.getmtime() != None:
gmtime = time.gmtime(self.entry.getmtime())
mtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime)
self.wfile.write("Last-Modified: " + mtime + "\r\n")
mimetype = self.entry.getmimetype()
mimetype = self.adjustmimetype(mimetype)
self.wfile.write("Content-Type: " + mimetype + "\r\n\r\n")
if self.requestparts[0] == 'GET':
if handler.isdir():
self.writedir(self.entry, handler.getdirlist())
else:
self.handlerwrite(self.wfile)
except GopherExceptions.FileNotFound, e:
self.filenotfound(str(e))
except IOError, e:
GopherExceptions.log(e, self, None)
self.filenotfound(e[1])
def handlerwrite(self, wfile):
self.handler.write(wfile)
def adjustmimetype(self, mimetype):
if mimetype == None:
return 'text/plain'
if mimetype == 'application/gopher-menu':
return 'text/html'
return mimetype
def renderobjinfo(self, entry):
url = None
# Decision time....
if re.match('(/|)URL:', entry.getselector()):
# It's a plain URL. Make it that.
url = re.match('(/|)URL:(.+)$', entry.getselector()).group(2)
elif (not entry.gethost()) and (not entry.getport()):
# It's a link to our own server. Make it as such. (relative)
url = urllib.quote(entry.getselector())
else:
# Link to a different server. Make it a gopher URL.
url = entry.geturl(self.server.server_name, 70)
# OK. Render.
return self.getrenderstr(entry, url)
def getrenderstr(self, entry, url):
retstr = '<TR><TD>'
retstr += self.getimgtag(entry)
retstr += "</TD>\n<TD> "
if entry.gettype() != 'i' and entry.gettype() != '7':
retstr += '<A HREF="%s">' % url
retstr += "<TT>"
if entry.getname() != None:
retstr += cgi.escape(entry.getname()).replace(" ", " ")
else:
retstr += cgi.escape(entry.getselector()).replace(" ", " ")
retstr += "</TT>"
if entry.gettype() != 'i' and entry.gettype() != '7':
retstr += '</A>'
if (entry.gettype() == '7'):
retstr += '<BR><FORM METHOD="GET" ACTION="%s">' % url
retstr += '<INPUT TYPE="text" NAME="searchrequest" SIZE="30">'
retstr += '<INPUT TYPE="submit" NAME="Submit" VALUE="Submit">'
retstr += '</FORM>'
retstr += '</TD><TD><FONT SIZE="-2">'
if entry.getmimetype():
subtype = re.search('/.+$', entry.getmimetype())
if subtype:
retstr += cgi.escape(subtype.group()[1:])
retstr += '</FONT></TD></TR>\n'
return retstr
def renderdirstart(self, entry):
retstr ='<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">'
retstr += "\n<HTML><HEAD><TITLE>Gopher"
if self.entry.getname():
retstr += ": " + cgi.escape(self.entry.getname())
retstr += "</TITLE></HEAD><BODY>"
if self.config.has_option("protocols.http.HTTPProtocol", "pagetopper"):
retstr += re.sub('GOPHERURL',
self.entry.geturl(self.server.server_name,
self.server.server_port),
self.config.get("protocols.http.HTTPProtocol",
"pagetopper"))
retstr += "<H1>Gopher"
if self.entry.getname():
retstr += ": " + cgi.escape(self.entry.getname())
retstr += '</H1><TABLE WIDTH="100%" CELLSPACING="1" CELLPADDING="0">'
return retstr
def renderdirend(self, entry):
retstr = "</TABLE><HR>\n[<A HREF=\"/\">server top</A>]"
retstr += " [<A HREF=\"%s\">view with gopher</A>]" % \
entry.geturl(self.server.server_name,
self.server.server_port)
retstr += '<BR>Generated by <A HREF="%s">%s</A>' % (
pygopherd.version.homepage, pygopherd.version.productname)
return retstr + "\n</BODY></HTML>\n"
def filenotfound(self, msg):
self.wfile.write("HTTP/1.0 404 Not Found\r\n")
self.wfile.write("Content-Type: text/html\r\n\r\n")
self.wfile.write('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">')
self.wfile.write("""\n<HTML><HEAD><TITLE>Selector Not Found</TITLE>
<H1>Selector Not Found</H1>
<TT>""")
self.wfile.write(cgi.escape(msg))
self.wfile.write("</TT><HR>Pygopherd</BODY></HTML>\n")
def getimgtag(self, entry):
name = 'generic.gif'
if self.iconmapping.has_key(entry.gettype()):
name = self.iconmapping[entry.gettype()]
return '<IMG ALT=" * " SRC="%s" WIDTH="20" HEIGHT="22" BORDER="0">' % \
('/PYGOPHERD-HTTPPROTO-ICONS/' + name)
icons = {
'binary.gif':
'47494638396114001600c20000ffffffccffffcccccc99999933333300000000000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000001002c000000001400160000036948babcf1301040ab9d24be590a105d210013a9715e07a8a509a16beab5ae14df6a41e8fc76839d5168e8b3182983e4a0e0038a6e1525d396931d97be2ad482a55a55c6eec429f484a7b4e339eb215fd138ebda1b7fb3eb73983bafee8b094a8182493b114387885309003b',
'binhex.gif':
'47494638396114001600c20000ffffffccffff99999966666633333300000000000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000001002c000000001400160000036948babcf1301040ab9d24be59baefc0146adce78555068914985e2b609e0551df9b3c17ba995b408a602828e48a2681856894f44cc1628e07a42e9b985d14ab1b7c9440a9131c0c733b229bb5222ecdb6bfd6da3cd5d29d688a1aee2c97db044482834336113b884d09003b',
'folder.gif':
'47494638396114001600c20000ffffffffcc99ccffff99663333333300000000000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000002002c000000001400160000035428badcfe30ca4959b9f8ce12baef45c47d64a629c5407a6a8906432cc72b1c8ef51a13579e0f3c9c8f05ec0d4945e171673cb2824e2234da495261569856c5ddc27882d46c3c2680c3e6b47acd232c4cf08c3b01003b',
'image3.gif':
'47494638396114001600e30000ffffffff3333ccffff9999996600003333330099cc00993300336600000000000000000000000000000000000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000002002c0000000014001600000479b0c849a7b85814c0bbdf45766d5e49861959762a3a76442c132ae0aa44a0ef49d1ff2f4e6ea74b188f892020c70c3007d04152b3aa46a7adcaa42355160ee0f041d5a572bee23017cb1abbbf6476d52a0720ee78fc5a8930f8ff06087b66768080832a7d8a81818873744a8f8805519596503e19489b9c5311003b',
'sound1.gif':
'47494638396114001600c20000ffffffff3333ccffffcccccc99999966000033333300000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000002002c000000001400160000036b28badcfe3036c34290ea1c61558f07b171170985c0687e0d9a729e77693401dc5bd7154148fcb6db6b77e1b984c20d4fb03406913866717a842aa7d22af22acd120cdf6fd2d49cd10e034354871518de06b43a17334de42a36243e187d4a7b1a762c7b140b8418898a0b09003b',
'text.gif':
'47494638396114001600c20000ffffffccffff99999933333300000000000000000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000001002c000000001400160000035838babcf1300c40ab9d23be693bcf11d75522b88dd7057144eb52c410cf270abb6e8db796e00b849aadf20b4a6ebb1705281c128daca412c03c3a7b50a4f4d9bc5645dae9f78aed6e975932baebfc0e7ef0b84f1691da8d09003b',
'generic.gif':
'47494638396114001600c20000ffffffccffff99999933333300000000000000000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000001002c000000001400160000035038babcf1300c40ab9d23be693bcf11d75522b88dd705892831b8f08952446d13f24c09bc804b3a4befc70a027c39e391a8ac2081cd65d2f82c06ab5129b4898d76b94c2f71d02b9b79afc86dcdfe2500003b',
'blank.gif':
'47494638396114001600a10000ffffffccffff00000000000021fe4e546869732061727420697320696e20746865207075626c696320646f6d61696e2e204b6576696e204875676865732c206b6576696e68406569742e636f6d2c2053657074656d62657220313939350021f90401000001002c00000000140016000002138c8fa9cbed0fa39cb4da8bb3debcfb0f864901003b'}
| mas90/pygopherd | pygopherd/protocols/http.py | Python | gpl-2.0 | 12,731 |
#
# Copyright 2019-2022 Ghent University
#
# This file is part of vsc-mympirun,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# the Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/hpcugent/vsc-mympirun
#
# vsc-mympirun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-mympirun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-mympirun. If not, see <http://www.gnu.org/licenses/>.
#
"""
End-to-end tests for mytaskprolog
"""
import os
import logging
logging.basicConfig(level=logging.DEBUG)
from pmi_utils import PMITest
from vsc.utils.affinity import sched_getaffinity, sched_setaffinity
class TaskPrologEnd2End(PMITest):
def setUp(self):
"""Prepare to run test."""
super(TaskPrologEnd2End, self).setUp()
self.script = os.path.join(os.path.dirname(self.script), 'mytaskprolog.py')
def test_simple(self):
origaff = sched_getaffinity()
aff = sched_getaffinity()
        aff.set_bits([1])  # only use the first core (we can always assume there is at least one core)
sched_setaffinity(aff)
self.pmirun([], pattern='export CUDA_VISIBLE_DEVICES=0')
# restore
sched_setaffinity(origaff)
| hpcugent/vsc-mympirun | test/mytaskprolog.py | Python | gpl-2.0 | 1,831 |
from fabric.api import local
def html():
local('hovercraft -t ./sixfeetup_hovercraft formation_flask.rst ./build/')
| domofwk/domofwk-docs | source/formations/flask/fabfile.py | Python | gpl-2.0 | 122 |