# -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"S3CAPAreaNameModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_alert_list_layout",
"add_area_from_template",
"cap_AssignArea",
"cap_AreaRepresent",
#"cap_gis_location_xml_post_parse",
#"cap_gis_location_xml_post_render",
)
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except ImportError:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_alert_approve",
"cap_warning_priority",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_id",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
"cap_template_represent",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geo - Geophysical (inc. landslide)")),
("Met", T("Met - Meteorological (inc. flood)")),
("Safety", T("Safety - General emergency and public safety")),
("Security", T("Security - Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue - Rescue and recovery")),
("Fire", T("Fire - Fire suppression and rescue")),
("Health", T("Health - Medical and public health")),
("Env", T("Env - Pollution and other environmental")),
("Transport", T("Transport - Public and private transportation")),
("Infra", T("Infra - Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("CBRNE - Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other - Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.cap_template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.cap_template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Template Title"),
T("Title for the template, to indicate to which event this template is related to"))),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
requires = IS_MATCH('^[^,<&\s]+$',
error_message=current.T("Cannot be empty and Must not include spaces, commas, or restricted characters (< and &).")),
# Don't allow changing the identifier
readable = True,
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A unique identifier of the alert message"),
T("A number or string uniquely identifying this message, assigned by the sender. Must notnclude spaces, commas or restricted characters (< and &)."))),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
requires = IS_MATCH('^[^,<&\s]+$',
error_message=current.T("Cannot be empty and Must not include spaces, commas, or restricted characters (< and &).")),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The identifier of the sender of the alert message"),
T("This is guaranteed by assigner to be unique globally; e.g., may be based on an Internet domain name. Must not include spaces, commas or restricted characters (< and &)."))),
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
represent = lambda opt: \
cap_alert_status_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(cap_alert_status_code_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the appropriate handling of the alert message"),
T("See options."))),
),
Field("msg_type",
label = T("Message Type"),
default = "Alert",
represent = lambda opt: \
cap_alert_msgType_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The nature of the alert message"),
T("See options."))),
),
Field("source",
label = T("Source"),
default = self.generate_source,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text identifying the source of the alert message"),
T("The particular source of this alert; e.g., an operator or a specific device."))),
),
Field("scope",
label = T("Scope"),
represent = lambda opt: \
cap_alert_scope_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the intended distribution of the alert message"),
T("Who is this alert for?"))),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text describing the rule for limiting distribution of the restricted alert message"),
T("Used when scope is 'Restricted'."))),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The group listing of intended recipients of the alert message"),
T("Required when scope is 'Private', optional when scope is 'Public' or 'Restricted'. Each recipient shall be identified by an identifier or an address."))),
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "list:string",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = self.list_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Codes for special handling of the message"),
T("Any user-defined flags or special codes used to flag the alert message for special handling."))),
),
Field("note", "text",
label = T("Note"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text describing the purpose or significance of the alert message"),
T("The message note is primarily intended for use with status 'Exercise' and message type 'Error'"))),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The group listing identifying earlier message(s) referenced by the alert message"),
T("The extended message identifier(s) (in the form sender,identifier,sent) of an earlier CAP message or messages referenced by this one."))),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(selectedList = 10),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A list of incident(s) referenced by the alert message"),
T("Used to collate multiple messages referring to different aspects of the same incident. If multiple incident identifiers are referenced, they SHALL be separated by whitespace. Incident names including whitespace SHALL be surrounded by double-quotes."))),
),
# approved_on field for recording when the alert was approved
s3_datetime("approved_on",
readable = False,
writable = False,
),
*s3_meta_fields())
list_fields = [(T("Sent"), "sent"),
"scope",
"info.priority",
"info.event_type_id",
"info.sender_name",
"area.name",
]
notify_fields = [(T("Identifier"), "identifier"),
(T("Date"), "sent"),
(T("Status"), "status"),
(T("Message Type"), "msg_type"),
(T("Source"), "source"),
(T("Scope"), "scope"),
(T("Restriction"), "restriction"),
(T("Category"), "info.category"),
(T("Event"), "info.event_type_id"),
(T("Response type"), "info.response_type"),
(T("Priority"), "info.priority"),
(T("Urgency"), "info.urgency"),
(T("Severity"), "info.severity"),
(T("Certainty"), "info.certainty"),
(T("Effective"), "info.effective"),
(T("Expires at"), "info.expires"),
(T("Sender's name"), "info.sender_name"),
(T("Headline"), "info.headline"),
(T("Description"), "info.description"),
(T("Instruction"), "info.instruction"),
(T("Contact information"), "info.contact"),
(T("URL"), "info.web"),
(T("Area Description"), "area.name"),
]
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3OptionsFilter("info.event_type_id",
),
S3OptionsFilter("info.priority",
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
create_onaccept = self.cap_alert_create_onaccept,
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = cap_alert_list_layout,
list_orderby = "cap_info.expires desc",
notify_fields = notify_fields,
onapprove = self.cap_alert_approve,
onvalidation = self.cap_alert_onvalidation,
orderby = "cap_info.expires desc",
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_area_tag = {"name": "tag",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
self.set_method("cap", "alert",
method = "assign",
action = self.cap_AssignArea())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
crud_strings[tablename] = Storage(
label_create = T("Create Alert"),
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# ---------------------------------------------------------------------
# Warning Priorities for CAP
tablename = "cap_warning_priority"
define_table(tablename,
Field("priority_rank", "integer",
label = T("Priority Rank"),
length = 2,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority Rank"),
T("The Priority Rank is basically to give it a ranking 1, 2, ..., n. That way we know 1 is the most important of the chain and n is lowest element. For eg. (1, Signal 1), (2, Signal 2)..., (5, Signal 5) to enumerate the priority for cyclone."))),
),
Field("event_code",
label = T("Event Code"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Code"),
T("Code (key) for the event like for eg. (2001, Typhoon), (2002, Flood)"))),
),
Field("name", notnull=True, length=64,
label = T("Name"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Name"),
T("The actual name for the warning priority, for eg. Typhoons in Philippines have five priority name (PSWS# 1, PSWS# 2, PSWS# 3, PSWS# 4 and PSWS# 5)"))),
),
Field("event_type",
label = T("Event Type"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Type"),
T("The Event to which this priority is targeted for. The 'Event Type' is the name of the standard Eden Event Type . These are available at /eden/event/event_type (The 'Event Type' should be exactly same as in /eden/event/event_type - case sensitive). For those events which are not in /eden/event/event_type but having the warning priority, you can create the event type using /eden/event/event_type/create and they will appear in this list."))),
),
Field("urgency",
label = T("Urgency"),
requires = IS_IN_SET(cap_info_urgency_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the urgency of the subject event of the alert message"),
T("The urgency, severity, and certainty of the information collectively distinguish less emphatic from more emphatic messages." +
"'Immediate' - Responsive action should be taken immediately" +
"'Expected' - Responsive action should be taken soon (within next hour)" +
"'Future' - Responsive action should be taken in the near future" +
"'Past' - Responsive action is no longer required" +
"'Unknown' - Urgency not known"))),
),
Field("severity",
label = T("Severity"),
requires = IS_IN_SET(cap_info_severity_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the severity of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Extreme' - Extraordinary threat to life or property" +
"'Severe' - Significant threat to life or property" +
"'Moderate' - Possible threat to life or property" +
"'Minor' - Minimal to no known threat to life or property" +
"'Unknown' - Severity unknown"))),
),
Field("certainty",
label = T("Certainty"),
requires = IS_IN_SET(cap_info_certainty_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the certainty of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Observed' - Determined to have occurred or to be ongoing" +
"'Likely' - Likely (p > ~50%)" +
"'Possible' - Possible but not likely (p <= ~50%)" +
"'Unlikely' - Not expected to occur (p ~ 0)" +
"'Unknown' - Certainty unknown"))),
),
Field("color_code",
label = T("Color Code"),
widget = S3ColorPickerWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The color code for this priority"),
T("Pick from the color widget the color that is associated to this priority of the event. The color code is in hex format"))),
),
*s3_meta_fields())
priority_represent = S3Represent(lookup=tablename)
crud_strings[tablename] = Storage(
label_create = T("Create Warning Priority"),
title_display = T("Warning Priority Details"),
title_list = T("Warning Priorities"),
title_update = T("Edit Warning Priority"),
title_upload = T("Import Warning Priorities"),
label_list_button = T("List Warning Priorities"),
label_delete_button = T("Delete Warning Priority"),
msg_record_created = T("Warning Priority added"),
msg_record_modified = T("Warning Priority updated"),
msg_record_deleted = T("Warning Priority removed"),
msg_list_empty = T("No Warning Priorities currently registered")
)
configure(tablename,
deduplicate = S3Duplicate(primary=("event_type", "name")),
)
# ---------------------------------------------------------------------
# CAP info priority
# @ToDo: i18n: Need label=T("")
languages = settings.get_cap_languages()
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.cap_template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en-US",
represent = lambda opt: languages.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(languages)
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the language of the information"),
T("Code Values: Natural language identifier per [RFC 3066]. If not present, an implicit default value of 'en-US' will be assumed. Edit settings.cap.languages in 000_config.py to add more languages. See <a href=\"%s\">here</a> for a full list.") % "http://www.i18nguy.com/unicode/language-identifiers.html")),
),
Field("category", "list:string", # 1 or more allowed
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(selectedList = 10),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the category of the subject event of the alert message"),
T("You may select multiple categories by holding down control and then selecting the items."))),
),
Field("event",
label = T("Event"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text denoting the type of the subject event of the alert message"),
T("If not specified, will the same as the Event Type."))),
),
self.event_type_id(empty = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Type of the alert message"),
T("Event field above is more general. And this Event Type is classification of event. For eg. Event can be 'Terrorist Attack' and Event Type can be either 'Terrorist Bomb Explosion' or 'Terrorist Chemical Waefare Attack'. If not specified, will the same as the Event Type."))),
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
Field("response_type", "list:string", # 0 or more allowed
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(selectedList = 10),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the type of action recommended for the target audience"),
T("Multiple response types can be selected by holding down control and then selecting the items"))),
),
Field("priority", "reference cap_warning_priority",
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_warning_priority.id",
priority_represent
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority of the alert message"),
T("Defines the priority of the alert message. Selection of the priority automatically sets the value for 'Urgency', 'Severity' and 'Certainty'"))),
),
Field("urgency",
represent = lambda opt: \
cap_info_urgency_opts.get(opt, UNKNOWN_OPT),
# Empty For Template, checked onvalidation hook
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_urgency_opts)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the urgency of the subject event of the alert message"),
T("The urgency, severity, and certainty of the information collectively distinguish less emphatic from more emphatic messages." +
"'Immediate' - Responsive action should be taken immediately" +
"'Expected' - Responsive action should be taken soon (within next hour)" +
"'Future' - Responsive action should be taken in the near future" +
"'Past' - Responsive action is no longer required" +
"'Unknown' - Urgency not known"))),
),
Field("severity",
represent = lambda opt: \
cap_info_severity_opts.get(opt, UNKNOWN_OPT),
# Empty For Template, checked onvalidation hook
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_severity_opts)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the severity of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Extreme' - Extraordinary threat to life or property" +
"'Severe' - Significant threat to life or property" +
"'Moderate' - Possible threat to life or property" +
"'Minor' - Minimal to no known threat to life or property" +
"'Unknown' - Severity unknown"))),
),
Field("certainty",
represent = lambda opt: \
cap_info_certainty_opts.get(opt, UNKNOWN_OPT),
# Empty For Template, checked onvalidation hook
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_certainty_opts)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the certainty of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Observed' - Determined to have occurred or to be ongoing" +
"'Likely' - Likely (p > ~50%)" +
"'Possible' - Possible but not likely (p <= ~50%)" +
"'Unlikely' - Not expected to occur (p ~ 0)" +
"'Unknown' - Certainty unknown"))),
),
Field("audience", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Audience"),
T("The intended audience of the alert message"))),
),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A system-specific code identifying the event type of the alert message"),
T("Any system-specific code for events, in the form of key-value pairs. (e.g., SAME, FIPS, ZIP)."))),
),
s3_datetime("effective",
default = "now",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The effective time of the information of the alert message"),
T("If not specified, the effective time shall be assumed to be the same the time the alert was sent."))),
),
s3_datetime("onset",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Onset"),
T("The expected time of the beginning of the subject event of the alert message"))),
),
s3_datetime("expires",
past = 0,
default = self.get_expirydate,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The expiry time of the information of the alert message"),
T("If this item is not provided, each recipient is free to enforce its own policy as to when the message is no longer in effect."))),
),
Field("sender_name",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text naming the originator of the alert message"),
T("The human-readable name of the agency or authority issuing this alert."))),
),
Field("headline",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text headline of the alert message"),
T("A brief human-readable headline. Note that some displays (for example, short messaging service devices) may only present this headline; it should be made as direct and actionable as possible while remaining short. 160 characters may be a useful target limit for headline length."))),
),
Field("description", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The subject event of the alert message"),
T("An extended human readable description of the hazard or event that occasioned this message."))),
),
Field("instruction", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The recommended action to be taken by recipients of the alert message"),
T("An extended human readable instruction to targeted recipients. If different instructions are intended for different recipients, they should be represented by use of multiple information blocks. You can use a different information block also to specify this information in a different language."))),
),
Field("contact", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Contact"),
T("The contact for follow-up and confirmation of the alert message"))),
),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A URL associating additional information with the alert message"),
T("A full, absolute URI for an HTML page or other text resource with additional or reference information regarding this alert."))),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A system-specific additional parameter associated with the alert message"),
T("Any system-specific datum, in the form of key-value pairs."))),
),
*s3_meta_fields())
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
# Required Fields
mark_required = ("urgency", "severity", "certainty",),
onaccept = self.cap_info_onaccept,
onvalidation = self.cap_info_onvalidation,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The type and content of the resource file"),
T("The human-readable text describing the type and content, such as 'map' or 'photo', of the resource file."))),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The identifier of the MIME content type and sub-type describing the resource file"),
T("MIME content type and sub-type as described in [RFC 2046]. (As of this document, the current IANA registered MIME types are listed at http://www.iana.org/assignments/media-types/)"))),
),
Field("size", "integer",
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The integer indicating the size of the resource file"),
T("Approximate size of the resource file in bytes."))),
),
Field("uri",
# needs a special validation
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The identifier of the hyperlink for the resource file"),
T("A full absolute URI, typically a Uniform Resource Locator that can be used to retrieve the resource over the Internet."))),
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Deref URI"),
T("The base-64 encoded data content of the resource file"))),
),
Field("digest",
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The code representing the digital digest ('hash') computed from the resource file"),
T("Calculated using the Secure Hash Algorithm (SHA-1)."))),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm("alert_id",
"info_id",
"is_template",
"resource_desc",
S3SQLInlineComponent("image",
label = T("Image"),
fields = ["file",
],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Image"),
T("Attach an image that provides extra information about the event."))),
),
S3SQLInlineComponent("document",
label = T("Document"),
fields = ["file",
],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Document"),
T("Attach document that provides extra information about the event."))),
),
)
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
super_entity = "doc_entity",
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
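#
# As an illustrative sketch only (not executed; values are made up):
# a CAP <circle>38.91,-77.02 10</circle> would be stored as a
# gis_location with lat=38.91 and lon=-77.02 plus a "radius" tag with
# value 10 (km), linked to the cap_area via cap_area_location, while
# a <geocode> with valueName "SAME" and value "006109" would be kept
# as a key-value pair on the area (see cap_area_tag below).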
tablename = "cap_area"
define_table(tablename,
alert_id(),
info_id(comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Information segment for this Area segment"),
T("To which Information segment is this Area segment related. Note an Information segment can have multiple Area segments."))),
),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("name",
label = T("Area Description"),
required = True,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The affected area of the alert message"),
T("A text description of the affected area."))),
),
Field("altitude", "integer", # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
label = T("Altitude"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The specific or minimum altitude of the affected area"),
T("If used with the ceiling element this value is the lower limit of a range. Otherwise, this value specifies a specific altitude. The altitude measure is in feet above mean sea level."))),
),
Field("ceiling", "integer", # Feet above Sea-level in WGS84 (Maximum)
label = T("Ceiling"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The maximum altitude of the affected area"),
T("must not be used except in combination with the 'altitude' element. The ceiling measure is in feet above mean sea level."))),
),
# Only used for Templates
self.event_type_id(comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Type of this predefined alert area"),
T("Event Type relating to this predefined area."))),
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
# Only used for Templates
Field("priority", "reference cap_warning_priority",
label = T("Priority"),
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(
db, "cap_warning_priority.id",
priority_represent
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority of the Event Type"),
T("Defines the priority of the Event Type for this predefined area."))),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("alert_id",
"info_id",
"is_template",
"name",
"info_id",
S3SQLInlineComponent("location",
name = "location",
label = "",
multiple = False,
fields = [("", "location_id")],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Geolocation"),
T("The paired values of points defining a polygon that delineates the affected area of the alert message"))),
),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The geographic code delineating the affected area"),
T("Any geographically-based code to describe a message target area, in the form. The key is a user-assigned string designating the domain of the code, and the content of value is a string (which may represent a number) denoting the value itself (e.g., name='ZIP' and value='54321'). This should be used in concert with an equivalent description in the more universally understood polygon and circle forms whenever possible."))),
),
"altitude",
"ceiling",
"event_type_id",
"priority",
)
area_represent = cap_AreaRepresent(show_link=True)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Old: Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
onvalidation = self.cap_area_onvalidation,
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
# Names
cap_area_name = {"name": "name",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
catalog_layers = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
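# For example (illustrative values only): a record with tag="SAME"
# and value="006109" would designate a US NWS SAME geocode for this
# area, complementing any polygon/circle geometry.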
tablename = "cap_area_tag"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
create_onaccept = update_alert_id(tablename),
# deduplicate = self.cap_area_tag_deduplicate,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_alert_approve = self.cap_alert_approve,
cap_area_id = area_id,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts,
cap_template_represent = self.cap_template_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.utcnow().strftime("%Y%m%d")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-oid-date-sequence-suffix
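# e.g. "example.org-2.49.0.0.1.0-20150315-004-suffix" (illustrative
# prefix, OID and suffix values; the sequence is the next alert id,
# zero-padded to three digits)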
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
oid = settings.get_cap_identifier_oid()
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%s-%03d-%s" % \
(prefix, oid, _time, next_id, suffix)
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def generate_source():
"""
Generate a source for CAP alert
"""
return "%s@%s" % (current.xml.domain,
current.deployment_settings.get_base_public_url())
# -------------------------------------------------------------------------
@staticmethod
def get_expirydate():
"""
Default Expiry date based on the expire offset
"""
return current.request.utcnow + \
datetime.timedelta(days = current.deployment_settings.\
get_cap_expire_offset())
# -------------------------------------------------------------------------
@staticmethod
def cap_template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
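"""
Represent a list of strings (or a raw web2py list:string value,
e.g. "|one|two|") as a comma-separated string
"""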
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_create_onaccept(form):
"""
Auto-approve Templates
"""
form_vars = form.vars
if form_vars.get("is_template"):
user = current.auth.user
if user:
current.db(current.s3db.cap_alert.id == form_vars.id).update(
approved_by = user.id)
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_onvalidation(form):
"""
Custom Form Validation:
multi-field level
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
# -------------------------------------------------------------------------
@staticmethod
def cap_info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
itable.event,
itable.event_type_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
set_ = db(itable.id == info_id)
if alert_id and cap_alert_is_template(alert_id):
set_.update(is_template = True)
if not info.event:
set_.update(event = current.db.cap_info.event_type_id.\
represent(info.event_type_id))
# -------------------------------------------------------------------------
@staticmethod
def cap_info_onvalidation(form):
"""
Custom Form Validation:
used for import from CSV
"""
form_record = form.record
if form_record and form_record.is_template == False:
form_vars = form.vars
if not form_vars.get("urgency"):
form.errors["urgency"] = \
current.T("'Urgency' field is mandatory")
if not form_vars.get("severity"):
form.errors["severity"] = \
current.T("'Severity' field is mandatory")
if not form_vars.get("certainty"):
form.errors["certainty"] = \
current.T("'Certainty' field is mandatory")
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_approve(record=None):
"""
Update the approved_on field when alert gets approved
"""
if not record:
return
alert_id = record["id"]
# Update approved_on at the time the alert is approved
if alert_id:
db = current.db
db(db.cap_alert.id == alert_id).update(approved_on = current.request.utcnow)
# -------------------------------------------------------------------------
@staticmethod
def cap_area_onvalidation(form):
"""
Custom Form Validation
"""
form_vars = form.vars
if form_vars.get("ceiling") and not form_vars.get("altitude"):
form.errors["altitude"] = \
current.T("'Altitude' field is mandatory if using 'Ceiling' field.")
# =============================================================================
class S3CAPAreaNameModel(S3Model):
"""
CAP Name Model:
- local names for CAP Area
"""
names = ("cap_area_name",
)
def model(self):
T = current.T
l10n_languages = current.deployment_settings.get_L10n_languages()
# ---------------------------------------------------------------------
# Local Names
#
tablename = "cap_area_name"
self.define_table(tablename,
self.cap_area_id(empty = False,
ondelete = "CASCADE",
),
Field("language",
label = T("Language"),
represent = lambda opt: l10n_languages.get(opt,
current.messages.UNKNOWN_OPT),
requires = IS_ISO639_2_LANGUAGE_CODE(),
),
Field("name_l10n",
label = T("Local Name"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary=("area_id", "language")),
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
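# Illustrative use (an assumption about typical wiring, not from this file):
#   labels = cap_info_labels()
#   table.headline.label = labels["headline"]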
# =============================================================================
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
r = current.db(query).select(table.is_template,
limitby=(0, 1)).first()
return r and r.is_template
# =============================================================================
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
alert_id = record.id
itable = s3db.cap_info
row = current.db(itable.alert_id == alert_id).\
select(itable.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
#(T("Area"), "area"),
(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(s3db.cap_template_represent(alert_id, record),
_href=URL(c="cap", f="template",
args=[alert_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
submit_btn = None
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % alert_id]),
_target="_blank",
)
# Display 'Submit for Approval' based on permission
# and deployment settings
if not current.request.get_vars.get("_next") and \
not r.record.approved_by and \
current.deployment_settings.get_cap_authorisation() and \
current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id):
# Get the user ids for the role alert_approver
db = current.db
agtable = db.auth_group
group_rows = db(agtable.role == "Alert Approver").\
select(agtable.id)
if group_rows:
group_members = current.auth.s3_group_members
user_pe_id = current.auth.s3_user_pe_id
for group_row in group_rows:
group_id = group_row.id
user_ids = group_members(group_id) # List of user_ids
pe_ids = [] # List of pe_ids
pe_append = pe_ids.append
for user_id in user_ids:
pe_append(user_pe_id(int(user_id)))
submit_btn = A(T("Submit for Approval"),
_href = URL(f = "compose",
vars = {"cap_alert.id": record.id,
"pe_ids": pe_ids,
},
),
_class = "action-btn confirm-btn"
)
current.response.s3.jquery_ready.append(
'''S3.confirmClick('.confirm-btn','%s')''' % T("Do you want to submit the alert for approval?"))
else:
submit_btn = None
else:
submit_btn = None
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
(T("Resource Files"), "resource"),
]
if r.representation == "html" and \
current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id):
# Check to see if 'Predefined Areas' tab need to be added
artable = s3db.cap_area
query = (artable.is_template == True) & \
(artable.deleted == False)
                    # NB: use select (not _select, which returns the SQL
                    # string) so the truth check below sees actual rows
                    template_area_rows = current.db(query).select(artable.id,
                                                                  limitby=(0, 1))
if template_area_rows:
tabs.insert(2, (T("Predefined Areas"), "assign"))
# Display "Copy" Button to copy record from the opened info
if r.component_name == "info" and \
r.component_id:
copy_btn = A(T("Copy"),
_href = URL(f = "alert",
args = [r.id, "info", "create",],
vars = {"from_record": r.component_id,
},
),
_class = "action-btn"
)
else:
copy_btn = None
else:
copy_btn = None
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(alert_id, record),
_href=URL(c="cap", f="alert",
args=[alert_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
if copy_btn:
rheader.insert(1, TR(TD(copy_btn)))
if submit_btn:
rheader.insert(1, TR(TD(submit_btn)))
elif tablename == "cap_area":
# Used only for Area Templates
tabs = [(T("Area"), None),
]
if current.deployment_settings.get_L10n_translate_cap_area():
tabs.insert(1, (T("Local Names"), "name"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(s3db.cap_template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
# =============================================================================
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
        if tablename in ("cap_area_location", "cap_area_tag"):
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
# cap_area or cap_resource
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
if alert_id:
db(table.id == _id).update(alert_id = alert_id)
return func
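# Illustrative wiring (assumption - mirrors how onaccept callbacks are
# configured elsewhere in Eden; not part of this file):
#   s3db.configure("cap_area_location",
#                  onaccept = update_alert_id("cap_area_location"))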
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
        to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
        if start >= 0 and end >= 0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
    # Explicit None checks: 0.0 is a valid coordinate
    if lon_min is not None and lon_max is not None and \
       lat_min is not None and lat_max is not None and \
       (lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN, lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
    if lat is None or lon is None:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
            if start >= 0 and end >= 0:
                # NB: POINT has a single "(" (unlike POLYGON's "(("), so skip
                # only one character
                point_text = wkt[start + 1 : end]
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
    if lat is not None and lon is not None:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
# =============================================================================
def cap_alert_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for CAP Alerts on the Home page.
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cap_alert.id"]
item_class = "thumbnail"
T = current.T
#raw = record._row
# @ToDo: handle the case where we have multiple info segments &/or areas
headline = record["cap_info.headline"]
location = record["cap_area.name"]
priority = record["cap_info.priority"]
status = record["cap_alert.status"]
scope = record["cap_alert.scope"]
event = record["cap_info.event_type_id"]
if current.auth.s3_logged_in():
_href = URL(c="cap", f="alert", args=[record_id, "profile"])
else:
_href = URL(c="cap", f="public", args=[record_id, "profile"])
priority_row = None
if priority and priority != "-":
# Give the priority color to headline
db = current.db
wptable = db.cap_warning_priority
priority_row = db(wptable.name == priority).select(wptable.color_code,
limitby=(0, 1)).first()
more = A(T("Full Alert"),
_href = _href,
_target = "_blank",
)
if list_id == "map_popup":
itable = current.s3db.cap_info
# Map popup
event = itable.event_type_id.represent(event)
if priority is None:
priority = T("Unknown")
else:
priority = itable.priority.represent(priority)
description = record["cap_info.description"]
response_type = record["cap_info.response_type"]
sender = record["cap_info.sender_name"]
last = TAG[""](BR(),
description,
BR(),
", ".join(response_type),
BR(),
sender,
BR(),
)
details = "%s %s %s" % (priority, status, scope)
headline_ = A(headline,
_href = _href,
_target = "_blank",
)
if priority_row:
headline_["_style"] = "color: #%s" % (priority_row.color_code)
item = DIV(headline_,
BR(),
location,
BR(),
details,
BR(),
event,
last,
more,
_class=item_class,
_id=item_id,
)
else:
if priority == current.messages["NONE"]:
priority = T("Unknown")
certainty = record["cap_info.certainty"]
severity = record["cap_info.severity"]
urgency = record["cap_info.urgency"]
msg_type = record["cap_alert.msg_type"]
sender_name = record["cap_info.sender_name"]
sent = record["cap_alert.sent"]
headline = "%s; %s, %s" % (msg_type, headline, location)
sub_heading = "%s %s" % (priority, event)
sub_headline = A(sub_heading,
_href = _href,
_target = "_blank",
)
if priority_row:
sub_headline["_style"] = "color: #%s" % (priority_row.color_code)
para = T("It is %(certainty)s and %(urgency)s with %(severity)s threat to life and property.") \
% dict(certainty=certainty, urgency=urgency, severity=severity)
issuer = "%s: %s" % (T("Issued by"), sender_name)
issue_date = "%s: %s" % (T("Issued on"), sent)
item = DIV(headline,
BR(),
sub_headline,
BR(),
para,
BR(),
issuer,
BR(),
issue_date,
BR(),
more,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def add_area_from_template(area_id, alert_id):
"""
        Add an area from a Template, along with its Location and Tag components
"""
afieldnames = ("name",
"altitude",
"ceiling",
)
lfieldnames = ("location_id",
)
tfieldnames = ("tag",
"value",
"comments",
)
db = current.db
s3db = current.s3db
atable = s3db.cap_area
itable = s3db.cap_info
ltable = s3db.cap_area_location
ttable = s3db.cap_area_tag
# Create Area Record from Template
atemplate = db(atable.id == area_id).select(*afieldnames,
limitby=(0, 1)).first()
rows = db(itable.alert_id == alert_id).select(itable.id)
area_ids = []
for row in rows:
# @ToDo set_record_owner, update_super and/or onaccept
# Currently not required by SAMBRO template
adata = {"is_template": False,
"alert_id": alert_id,
"info_id": row.id,
}
for field in afieldnames:
adata[field] = atemplate[field]
aid = atable.insert(**adata)
# Add Area Location Components of Template
ltemplate = db(ltable.area_id == area_id).select(*lfieldnames)
        # (use lrow, not rows: don't shadow the info rows iterated above)
        for lrow in ltemplate:
            ldata = {"area_id": aid,
                     "alert_id": alert_id,
                     }
            for field in lfieldnames:
                ldata[field] = lrow[field]
            ltable.insert(**ldata)
# Add Area Tag Components of Template
ttemplate = db(ttable.area_id == area_id).select(*tfieldnames)
for row in ttemplate:
tdata = {"area_id": aid,
"alert_id": alert_id,
}
for field in tfieldnames:
tdata[field] = row[field]
            ttable.insert(**tdata)
area_ids.append(aid)
return area_ids
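# Example call, as used by cap_AssignArea below:
#   area_ids = add_area_from_template(template_area_id, alert_id)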
# =============================================================================
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
                # @ToDo: pass these credentials to fetch() - currently unused
                username = form_vars.get("user", None)
                password = form_vars.get("password", None)
                try:
                    content = fetch(url)
                except urllib2.URLError:
                    # HTTPError is a subclass of URLError, so this single
                    # handler covers both connection and HTTP errors
                    response.error = str(sys.exc_info()[1])
                    return output
                File = StringIO(content)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# -----------------------------------------------------------------------------
class cap_AssignArea(S3Method):
"""
Assign CAP area to an alert, allows (multi-)selection of Predefined areas
"""
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if not r.record:
# Must be called for a particular alert
r.error(404, current.ERROR.BAD_RECORD)
# The record ID of the alert the method is called for
alert_id = r.id
# Requires permission to update this alert
authorised = current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id)
if not authorised:
r.unauthorised()
T = current.T
s3db = current.s3db
response = current.response
# Filter to limit the selection of areas
area_filter = (FS("is_template") == True)
if r.http == "POST":
# Template areas have been selected
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
# Handle exclusion filter
if post_vars.mode == "Exclusive":
# URL filters
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.ajaxURL)
else:
filters = None
query = area_filter & (~(FS("id").belongs(selected)))
aresource = s3db.resource("cap_area",
filter = query,
vars = filters)
rows = aresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
for area_id in selected:
area_id = int(area_id.strip())
add_area_from_template(area_id, alert_id)
added += 1
current.session.confirmation = T("%(number)s assigned") % \
{"number": added}
if added > 0:
# Redirect to the list of areas of this alert
redirect(URL(args=[r.id, "area"], vars={}))
else:
# Return to the "assign" page
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
# Filter widgets (@todo: lookup from cap_area resource config?)
filter_widgets = []
# List fields
list_fields = ["id",
"name",
"event_type_id",
"priority",
]
# Data table
aresource = s3db.resource("cap_area", filter=area_filter)
totalrows = aresource.count()
get_vars = r.get_vars
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
# Datatable filter and sorting
query, orderby, left = aresource.datatable_filter(list_fields,
get_vars,
)
aresource.add_filter(query)
# Extract the data
data = aresource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True,
)
filteredrows = data.numrows
# Instantiate the datatable
dt = S3DataTable(data.rfields, data.rows)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if r.representation == "html":
# Page load
# Disallow deletion from this table, and link all open-buttons
# to the respective area read page
aresource.configure(deletable = False)
profile_url = URL(c = "cap",
f = "area",
args = ["[id]", "read"],
)
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url,
)
# Hide export icons
response.s3.no_formats = True
# Render the datatable (will be "items" in the output dict)
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url = URL(args = r.args,
extension="aadata",
vars={},
),
dt_bulk_actions = dt_bulk_actions,
dt_pageLength = display_length,
dt_pagination = "true",
dt_searching = "false",
)
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
get_vars = aresource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=get_vars)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f="cap_area",
args=["filter.options"],
vars={},
)
get_config = aresource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = s3db.resource("cap_area")
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
)
else:
ff = ""
output = {"items": items, # the datatable
"title": T("Add Areas"),
"list_filter_form": ff,
}
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions=dt_bulk_actions,
)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(501, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -----------------------------------------------------------------------------
class cap_AreaRepresent(S3Represent):
""" Representation of CAP Area """
def __init__(self,
show_link=False,
multiple=False):
settings = current.deployment_settings
# Translation using cap_area_name & not T()
translate = settings.get_L10n_translate_cap_area()
if translate:
language = current.session.s3.language
if language == settings.get_L10n_default_language():
translate = False
super(cap_AreaRepresent,
self).__init__(lookup="cap_area",
show_link=show_link,
translate=translate,
multiple=multiple
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
            Custom lookup method for CAP Area rows. The parameters
            key and fields are not used, but are kept for API
            compatibility reasons.
@param values: the cap_area IDs
"""
db = current.db
s3db = current.s3db
artable = s3db.cap_area
count = len(values)
if count == 1:
query = (artable.id == values[0])
else:
query = (artable.id.belongs(values))
fields = [artable.id,
artable.name,
]
if self.translate:
ltable = s3db.cap_area_name
fields += [ltable.name_l10n,
]
left = [ltable.on((ltable.area_id == artable.id) & \
(ltable.language == current.session.s3.language)),
]
else:
left = None
        rows = db(query).select(left = left,
                                limitby = (0, count),
                                *fields)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the cap_area Row
"""
if self.translate:
name = row["cap_area_name.name_l10n"] or row["cap_area.name"]
else:
name = row["cap_area.name"]
if not name:
return self.default
return s3_unicode(name)
# END =========================================================================
| mit | 3,098,138,409,540,321,300 | 48.837234 | 517 | 0.431575 | false |
maxamillion/product-definition-center | pdc/apps/component/serializers.py | 1 | 23374 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.text import capfirst
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from pdc.apps.contact.models import Contact, ContactRole
from pdc.apps.contact.serializers import RoleContactSerializer
from pdc.apps.common.serializers import DynamicFieldsSerializerMixin, LabelSerializer, StrictSerializerMixin
from pdc.apps.common.fields import ChoiceSlugField
from pdc.apps.release.models import Release
from pdc.apps.common.hacks import convert_str_to_int
from .models import (GlobalComponent,
RoleContact,
ReleaseComponent,
Upstream,
BugzillaComponent,
ReleaseComponentGroup,
GroupType,
ReleaseComponentType,
ReleaseComponentRelationshipType,
ReleaseComponentRelationship)
from . import signals
__all__ = (
'GlobalComponentSerializer',
'ReleaseComponentSerializer',
'HackedContactSerializer',
'UpstreamSerializer',
'BugzillaComponentSerializer',
'GroupSerializer',
'GroupTypeSerializer'
)
def reverse_url(request, view_name, **kwargs):
return request.build_absolute_uri(reverse(viewname=view_name,
kwargs=kwargs))
class HackedContactSerializer(RoleContactSerializer):
"""
    Can be used as a view-level serializer to encode/decode the contact data,
    or as a field in the global/release component.
    Automatically replaces the url with /[global|release]-components/<instance_pk>/contacts/<pk>.
    Automatically sets inherited = True when serializing a release component.
"""
def __init__(self, *args, **kwargs):
self.inherited = kwargs.pop('inherited', False)
self.view_name = kwargs.pop('view_name', 'globalcomponentcontact-detail')
context = kwargs.get('context', None)
self.instance_pk = None
self.view = None
# Set view/instance_pk when uses the class as a serializer.
if context:
self.view = context.get('view', None)
extra_kwargs = context.get('extra_kwargs', None)
if extra_kwargs:
self.instance_pk = extra_kwargs.get('instance_pk', None)
super(HackedContactSerializer, self).__init__(*args, **kwargs)
def to_representation(self, obj):
ret = super(HackedContactSerializer, self).to_representation(obj)
request = self.context.get('request', None)
url_kwargs = self.context.get('extra_kwargs', {})
# NOTE(xchu): The `instance_pk` is needed for building a valid url,
# so if not provided, we should raise `KeyError`.
instance_pk = url_kwargs['instance_pk']
ret['url'] = reverse_url(request, self.view_name, **{
'instance_pk': instance_pk,
'pk': obj.pk
})
if self.inherited and self.view_name == 'globalcomponentcontact-detail':
ret['inherited'] = True
return ret
def to_internal_value(self, data):
# Run StrictSerializerMixin's to_internal_value() to check if extra field exists.
super(HackedContactSerializer, self).to_internal_value(data)
request = self.context.get('request', None)
serializer = RoleContactSerializer(data=data,
many=not isinstance(data, dict),
context={'request': request})
kwargs = {}
kwargs['contact_role'] = data.get('contact_role')
kwargs.update(data.get('contact'))
try:
contact = RoleContact.specific_objects.get(**kwargs)
except (RoleContact.DoesNotExist, Contact.DoesNotExist, ContactRole.DoesNotExist):
# If we can't get RoleContact in database, validate the input data and create the RoleContact.
if serializer.is_valid(raise_exception=True):
contact = RoleContact.specific_objects.create(**kwargs)
if request and request.changeset:
model_name = ContentType.objects.get_for_model(contact).model
request.changeset.add(model_name,
contact.id,
'null',
json.dumps(contact.export()))
component_class = self.view.model
if component_class.objects.get(pk=self.instance_pk).contacts.filter(pk=contact.pk).exists():
model_name = six.text_type(capfirst(component_class._meta.verbose_name))
raise serializers.ValidationError({"detail": "%s contact with this %s and Contact already exists."
% (model_name, model_name)})
else:
return contact
def save(self, **kwargs):
"""
Save the deserialized object and return it.
"""
instance_pk = self.context['extra_kwargs']['instance_pk']
component_class = self.context['view'].model
component = component_class.objects.get(pk=instance_pk)
existed_contacts = component.contacts.all()
if isinstance(self.validated_data, list):
contacts = [self.get_object_from_db(item) for item in self.validated_data if item not in existed_contacts]
component.contacts.add(*contacts)
if self.validated_data['_deleted']:
[self.delete_object(item) for item in self.validated_data['_deleted']]
else:
contacts = self.get_object_from_db(self.validated_data)
component.contacts.add(contacts)
return contacts
def get_object_from_db(self, item):
contact = RoleContact.objects.get(**{
'contact_role_id': item.contact_role_id,
'contact_id': item.contact_id
})
return contact
class Meta:
model = RoleContact
fields = ('url', 'contact_role', 'contact')
# In order not to run parent's validators, set validators to []
validators = []
class HackedContactField(serializers.Field):
"""
    HackedContactField is used in GlobalComponentSerializer/ReleaseComponentSerializer
    instead of HackedContactSerializer.
    It has the ability to get_attribute() from GlobalComponentSerializer/ReleaseComponentSerializer.
"""
def __init__(self, view_name, *args, **kwargs):
self.view_name = view_name
super(HackedContactField, self).__init__(*args, **kwargs)
def to_representation(self, value):
serializer = HackedContactSerializer(value, many=True, context=self.context, view_name=self.view_name)
return serializer.data
def get_attribute(self, obj):
"""
Get attribute from the serializer which uses this field.
@param obj: The model object related to the serializer.
"""
# NOTE(xchu): The `instance_pk` is needed for building a valid url,
# it's not provided when used as a field, so we should inject one.
if 'extra_kwargs' not in self.context or 'instance_pk' not in self.context['extra_kwargs']:
self.context['extra_kwargs'] = {'instance_pk': obj.pk}
return obj.contacts.all()
class UpstreamSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = Upstream
fields = ('homepage', 'scm_type', 'scm_url')
class UpstreamRelatedField(serializers.RelatedField):
def to_representation(self, value):
serializer = UpstreamSerializer(value)
return serializer.data
def to_internal_value(self, value):
request = self.context.get('request', None)
if isinstance(value, dict):
try:
upstream = Upstream.objects.get(**value)
except Upstream.DoesNotExist:
serializer = UpstreamSerializer(data=value, many=False, context={'request': request})
if serializer.is_valid(raise_exception=True):
upstream = serializer.save()
model_name = ContentType.objects.get_for_model(upstream).model
if request and request.changeset:
request.changeset.add(model_name,
upstream.id,
'null',
json.dumps(upstream.export()))
return upstream
else:
self._errors = serializer._errors
except Exception as err:
raise serializers.ValidationError("Can not get or create Upstream with the input(%s): %s." % (value, err))
else:
return upstream
else:
raise serializers.ValidationError("Unsupported upstream input.")
class GlobalComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
contacts = HackedContactField(required=False, read_only=False, view_name='globalcomponentcontact-detail')
name = serializers.CharField(required=True,
max_length=100)
dist_git_path = serializers.CharField(required=False,
max_length=200,
allow_blank=True)
dist_git_web_url = serializers.URLField(required=False,
max_length=200)
labels = LabelSerializer(many=True, required=False, read_only=True)
upstream = UpstreamRelatedField(read_only=False, required=False, queryset=Upstream.objects.all())
class Meta:
model = GlobalComponent
fields = ('id', 'name', 'dist_git_path', 'dist_git_web_url', 'contacts', 'labels', 'upstream')
class TreeForeignKeyField(serializers.Field):
def to_representation(self, value):
request = self.context.get("request", None)
serializer = BugzillaComponentSerializer(value, context={'request': request, 'top_level': False})
return serializer.data
def to_internal_value(self, data):
if data.strip() == "":
raise serializers.ValidationError({'bugzilla_component': 'This field is required.'})
else:
components = data.strip("/").split("/")
len_components = len(components)
bc = None
            # Only the Bugzilla component name is given; the parent component
            # is taken to be None.
if len_components == 1:
try:
bc = BugzillaComponent.objects.get(name=components[0], parent_component=None)
except:
raise serializers.ValidationError({'bugzilla_component': ("Bugzilla component with name %s does not exist."
% data)})
            # The path names not only the Bugzilla component but also its ancestors.
if len_components > 1:
z = zip(components, components[1:])
root_bc_name, bc_name = z[0]
qs = BugzillaComponent.objects.filter(name=bc_name, parent_component__name=root_bc_name)
for _, bc_name in z[1:]:
qs = BugzillaComponent.objects.filter(name=bc_name, parent_component__in=qs)
if not qs:
raise serializers.ValidationError({'bugzilla_component': ("Bugzilla component with name %s does not exist."
% data)})
if len(qs) > 1:
raise serializers.ValidationError({'bugzilla_component': ("Duplicate Bugzilla component with name %s exists."
% data)})
if qs:
bc = qs[0]
return bc
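# Illustrative resolution (hypothetical component names): "Firefox" resolves
# the top-level BugzillaComponent with parent_component=None, while
# "Firefox/General" walks the path and resolves the "General" component whose
# parent is "Firefox"; ambiguous or unknown paths raise a ValidationError.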
class BugzillaComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
Bugzilla Component serializer.
"""
parent_component = serializers.CharField(required=False, max_length=200)
subcomponents = serializers.SerializerMethodField()
extra_fields = ['parent_pk']
def get_subcomponents(self, obj):
"""[string]"""
return obj.get_subcomponents()
class Meta:
model = BugzillaComponent
fields = ('id', 'name', 'parent_component', 'subcomponents')
class ReleaseField(serializers.SlugRelatedField):
def __init__(self, **kwargs):
super(ReleaseField, self).__init__(slug_field='release_id',
queryset=Release.objects.all(),
**kwargs)
def to_representation(self, value):
return {
'release_id': value.release_id,
'active': value.active
}
class ReleaseComponentTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = ReleaseComponentType
fields = ('name',)
class ReleaseComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
ReleaseComponent Serializer
"""
release = ReleaseField(read_only=False)
global_component = serializers.SlugRelatedField(slug_field='name', read_only=False, queryset=GlobalComponent.objects.all())
contacts = HackedContactField(required=False, read_only=False, view_name='releasecomponentcontact-detail')
dist_git_branch = serializers.CharField(source='inherited_dist_git_branch', required=False)
dist_git_web_url = serializers.URLField(required=False, max_length=200, read_only=True)
bugzilla_component = TreeForeignKeyField(read_only=False, required=False, allow_null=True)
brew_package = serializers.CharField(required=False)
active = serializers.BooleanField(required=False, default=True)
type = ChoiceSlugField(slug_field='name', queryset=ReleaseComponentType.objects.all(), required=False,
allow_null=True)
def update(self, instance, validated_data):
signals.releasecomponent_serializer_extract_data.send(sender=self, validated_data=validated_data)
instance = super(ReleaseComponentSerializer, self).update(instance, validated_data)
signals.releasecomponent_serializer_post_update.send(sender=self, release_component=instance)
if hasattr(instance, 'pk'):
# reload to make sure changes in mapping are reflected
instance = ReleaseComponent.objects.get(pk=instance.pk)
        # From the view's doc: for ReleaseComponent, PUT and PATCH updates
        # work the same way, except that `name` is required for PUT updates,
        # so no setattr is done here.
return instance
def create(self, validated_data):
signals.releasecomponent_serializer_extract_data.send(sender=self, validated_data=validated_data)
instance = super(ReleaseComponentSerializer, self).create(validated_data)
signals.releasecomponent_serializer_post_create.send(sender=self, release_component=instance)
return instance
def to_representation(self, instance):
ret = super(ReleaseComponentSerializer, self).to_representation(instance)
request = self.context.get("request", None)
# Include global component contacts - PDC-184
gcs = GlobalComponentSerializer(
instance=instance.global_component,
context={'request': request})
# Exclude global component contacts whose contact_role are already in release component contacts
gcc = gcs.data.get('contacts', [])
contacts = ret.get('contacts', [])
contact_role_lists = [contact['contact_role'] for contact in contacts]
for contact in gcc:
if contact['contact_role'] in contact_role_lists:
continue
contact['inherited'] = True
contacts.append(contact)
return ret
def to_internal_value(self, data):
        # Raise an error explicitly when release and global_component are given.
if self.instance:
allowed_keys = self.get_allowed_keys() - set(['release', 'global_component'])
extra_fields = set(data.keys()) - allowed_keys
self.maybe_raise_error(extra_fields)
data['release'] = self.instance.release
data['global_component'] = self.instance.global_component
return super(ReleaseComponentSerializer, self).to_internal_value(data)
def validate_release(self, value):
if not isinstance(value, Release):
if isinstance(value, dict):
release_id = value['release_id']
else:
release_id = value
if release_id is None or release_id.strip() == "":
self._errors = {'release': 'This field is required.'}
return
release = get_object_or_404(Release, release_id=release_id)
if not release.is_active():
self._errors = {'release': 'Can not create a release component with an inactive release.'}
return
value = release
return value
def validate_global_component(self, value):
if not isinstance(value, GlobalComponent):
global_component_name = value
if global_component_name is None or global_component_name.strip() == "":
self._errors = {'global_component': 'This field is required.'}
return
gc = get_object_or_404(GlobalComponent, name=global_component_name)
value = gc
return value
def validate_name(self, value):
if value.strip() == "":
self._errors = {'name': 'This field is required.'}
return value
def validate_type(self, value):
if not isinstance(value, ReleaseComponentType):
if value is not None and value.strip() != "":
value = get_object_or_404(ReleaseComponentType, name=value.strip())
else:
raise serializers.ValidationError("This field can't be set to null.")
return value
class Meta:
model = ReleaseComponent
fields = ('id', 'release', 'bugzilla_component', 'brew_package', 'global_component',
'name', 'dist_git_branch', 'dist_git_web_url', 'active',
'contacts', 'type')
validators = [UniqueTogetherValidator(
queryset=ReleaseComponent.objects.all(),
fields=('name', 'release', 'global_component')
)]
class GroupTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
description = serializers.CharField(required=False)
class Meta:
model = GroupType
fields = ('id', 'name', 'description')
class ReleaseComponentRelatedField(serializers.RelatedField):
doc_format = '{"id": "int", "name": "string"}'
def to_representation(self, value):
result = dict()
if value:
result['id'] = value.id
result['name'] = value.name
return result
def to_internal_value(self, data):
if not isinstance(data, dict):
raise serializers.ValidationError({'detail': "Input [%s] for ReleaseComponent must be a dict." % data})
if set(data.keys()) not in [set(['id']), set(['release', 'global_component', 'name'])]:
raise serializers.ValidationError(
{'detail': "Only accept ['id'] or ['release', 'global_component', 'name']"})
kwargs = dict()
if 'id' in data:
kwargs['id'] = convert_str_to_int(data.get('id'))
else:
kwargs['release__release_id'] = data.get('release')
kwargs['global_component__name'] = data.get('global_component')
kwargs['name'] = data.get('name')
try:
rc = ReleaseComponent.objects.get(**kwargs)
except ReleaseComponent.DoesNotExist:
raise serializers.ValidationError({'detail': "ReleaseComponent [%s] doesn't exist" % data})
return rc
class GroupSerializer(StrictSerializerMixin, serializers.ModelSerializer):
group_type = serializers.SlugRelatedField(
queryset=GroupType.objects.all(),
slug_field='name',
required=True
)
release = serializers.SlugRelatedField(
queryset=Release.objects.all(),
slug_field='release_id',
required=True
)
description = serializers.CharField(required=True)
components = ReleaseComponentRelatedField(
required=False,
many=True,
queryset=ReleaseComponent.objects.all()
)
def validate(self, value):
        # POST
if not self.instance:
components = value.get('components', [])
release = value.get('release')
# PUT or PATCH
else:
components = value.get('components', self.instance.components.all())
release = value.get('release', self.instance.release)
for component in components:
if component.release != release:
raise serializers.ValidationError({
'detail': 'Not allow to group release_component[%s] <release[%s]> with other release[%s].'
% (component.name, component.release.release_id, release.release_id)})
return value
class Meta:
model = ReleaseComponentGroup
fields = ('id', 'group_type', 'description', 'release', 'components')
class RCRelationshipTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = ReleaseComponentRelationshipType
fields = ('name',)
class RCForRelationshipRelatedField(ReleaseComponentRelatedField):
doc_format = '{"id": "int", "name": "string", "release": "Release.release_id"}'
def to_representation(self, value):
result = dict()
if value:
result['id'] = value.id
result['name'] = value.name
result['release'] = value.release.release_id
return result
class ReleaseComponentRelationshipSerializer(StrictSerializerMixin, serializers.ModelSerializer):
type = ChoiceSlugField(
queryset=ReleaseComponentRelationshipType.objects.all(),
slug_field='name',
required=True,
source='relation_type'
)
from_component = RCForRelationshipRelatedField(
required=True,
queryset=ReleaseComponent.objects.all()
)
to_component = RCForRelationshipRelatedField(
required=True,
queryset=ReleaseComponent.objects.all()
)
class Meta:
model = ReleaseComponentRelationship
fields = ('id', 'type', 'from_component', 'to_component')
| mit | 9,017,138,999,367,044,000 | 41.115315 | 129 | 0.609438 | false |
iddl/git-events | messages.py | 1 | 1641 | import sys
from termcolor import colored
class Messages():
LOGFILE = "git-events.log"
#Status and operations
RUNNING = 'Successfully started gitevents'
WAS_RUNNING = 'Gitevents is already running'
NOT_RUNNING = 'Git-events is not running'
STOPPED = 'Successfully stopped gitevents'
#Errors
INCOMPATIBLE_OS = 'Your OS is not compatible with Git events'
GITHUB_API_ERROR = 'I\'m unable to access your GitHub account, please check your internet connection and GitHub access token'
GITHUB_LOGIN_ERROR = 'Unable to login. Wrong username/password ?'
CONFIGURATION_ERROR = 'Please configure cfg.ini before starting'
#Success
ACCESS_TOKEN_SET = 'Successfully set access token'
INTERVAL_SET = 'Successfully set polling interval'
#Setup
INPUT_USERNAME = 'Please type your Github account name: '
INPUT_PASSWORD = 'Please type your Github account password: '
SETUP_FAIL = 'Failed to create Github access token'
SETUP_SUCCESS = 'Successfully saved access token. You are all set.'
def abort(self, message=""):
print(colored(message, 'red'))
sys.exit(1)
def print_success(self, message=""):
print(colored(message, 'green'))
def log(self, message=""):
print(message)
def use_logfile(self):
sys.stdout = open(self.LOGFILE, 'w')
sys.stderr = open(self.LOGFILE, 'w')
class MessagesProvider():
def __init__(self):
self.instance = None
def get(self):
if self.instance is None:
self.instance = Messages()
return self.instance
messages_provider = MessagesProvider()
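# Illustrative usage (not part of the original module):
#   messages = messages_provider.get()
#   messages.print_success(messages.RUNNING)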
| apache-2.0 | -2,440,621,822,528,702,500 | 29.388889 | 129 | 0.669714 | false |
carmenfdezb/osmscout-server | scripts/import/valhalla_country_pack.py | 1 | 1633 | import glob
from poly import parse_poly
from shapely.geometry import Polygon
# directories used for searching for packages
valhalla_meta_dir = 'valhalla/packages_meta'
valhalla_packages_dir = 'valhalla/packages'
valhalla_tiles_timestamp = "valhalla/tiles/timestamp"
version = "1"
def getsize(sname):
    with open(sname, 'r') as f:
        return int(f.read().split()[0])
def gettimestamp(sname):
    # NB: the timestamp is global to the tile set; the sname argument is
    # accepted for call-symmetry with getsize() but is not used
    with open(valhalla_tiles_timestamp, 'r') as f:
        return f.read().split()[0]
# call with the name of POLY filename
def country_pack(country_poly_fname):
country = parse_poly(country_poly_fname)
packs = []
size_compressed = 0
size = 0
ts = None
for bbox in glob.glob(valhalla_meta_dir + "/*.bbox"):
coors = []
for i in open(bbox, 'r'):
for k in i.split():
coors.append(float(k))
poly = Polygon( ( (coors[0], coors[1]), (coors[0], coors[3]),
(coors[2], coors[3]), (coors[2], coors[1]) ) )
if country.intersects(poly):
pname = bbox[len(valhalla_meta_dir)+1:-len(".bbox")]
packs.append(pname)
pdata = valhalla_packages_dir + "/" + bbox[len(valhalla_meta_dir)+1:-len(".bbox")] + ".tar"
size_compressed += getsize(pdata + '.size-compressed')
size += getsize(pdata + '.size')
ts = gettimestamp(pdata)
return { "packages": packs,
"timestamp": ts,
"version": version,
"size": str(size),
"size-compressed": str(size_compressed) }
if __name__ == '__main__':
print country_pack('hierarchy/europe/estonia/poly')
| gpl-3.0 | -4,252,452,534,935,834,600 | 31.66 | 103 | 0.581751 | false |
aglitke/vdsm | client/vdsClient.py | 1 | 102506 | # Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import sys
import ast
import getopt
import traceback
import xmlrpclib
import re
import socket
import pprint as pp
from vdsm import vdscli
try:
import vdsClientGluster as ge
_glusterEnabled = True
except ImportError:
_glusterEnabled = False
BLANK_UUID = '00000000-0000-0000-0000-000000000000'
STATUS_ERROR = {'status': {'code': 100, 'message': "ERROR"}}
# Storage Domain Types
UNKNOWN_DOMAIN = 0
NFS_DOMAIN = 1
FCP_DOMAIN = 2
ISCSI_DOMAIN = 3
LOCALFS_DOMAIN = 4
CIFS_DOMAIN = 5
# Volume Types
UNKNOWN_VOL = 0
PREALLOCATED_VOL = 1
SPARSE_VOL = 2
# Volume Format
UNKNOWN_FORMAT = 3
COW_FORMAT = 4
RAW_FORMAT = 5
# Volume Role
SHARED_VOL = 6
INTERNAL_VOL = 7
LEAF_VOL = 8
def validateArgTypes(args, conv, requiredArgsNumber=0):
if len(args) > len(conv) or len(args) < requiredArgsNumber:
raise ValueError("Wrong number of arguments provided, "
"expecting %d (%d required) got %d"
% (len(conv), requiredArgsNumber, len(args)))
for i in range(len(args)):
args[i] = conv[i](args[i])
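# Illustrative call (hypothetical values): converts args in place and checks
# arity, e.g.
#   args = ["5", "myVm"]
#   validateArgTypes(args, [int, str], requiredArgsNumber=1)  # args -> [5, "myVm"]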
def fmt3(num):
for x in ['', 'KB', 'MB', 'GB', 'TB']:
if num < 1024:
return "%3.1f%s" % (num, x)
        # Use float division so fractional sizes are not truncated
        num /= 1024.0
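# e.g. fmt3(1536) -> '1.5KB'; fmt3(3 * 1024 ** 3) -> '3.0GB'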
def usage(cmd, full=True):
print "Usage: vdsClient [OPTIONS] <server> <command> [Command parameters]"
print "\nOptions"
print "-h\tDisplay this help"
print "-m\tList supported methods and their params (Short help)"
print "-s [--truststore path]\tConnect to server with SSL."
print "-o, --oneliner\tShow the key-val information in one line."
print "\tIf truststore path is not specified, use defaults."
print "\nCommands"
verbs = cmd.keys()
verbs.sort()
for entry in verbs:
if full:
print entry
for line in cmd[entry][1]:
print '\t' + line
else:
print entry + '\t' + cmd[entry][1][0]
def printConf(conf):
try:
print "\n" + conf['vmId']
print "\tStatus = " + conf['status']
except:
pass
for element in conf.keys():
if element not in ('vmId', 'status'):
print "\t%s = %s" % (element, conf[element])
def printDict(d, pretty=True):
    # (renamed parameter: don't shadow the dict builtin)
    keys = d.keys()
    keys.sort()
    for element in keys:
        if pretty:
            representation = pp.pformat(d[element]).replace(
                '\n', '\n\t' + ' ' * len(element + ' = '))
        else:
            representation = d[element]
        print "\t%s = %s" % (element, representation)
def printStats(stats):
    # (renamed parameter: don't shadow the list builtin)
    for conf in stats:
        printConf(conf)
class service:
def __init__(self):
self.useSSL = False
self.truststore = None
self.pretty = True
def do_connect(self, hostPort):
self.s = vdscli.connect(hostPort, self.useSSL, self.truststore)
def ExecAndExit(self, response, parameterName='none'):
if response['status']['code'] != 0:
print response['status']['message']
else:
if 'vmList' in response:
printConf(response['vmList'])
elif 'statsList' in response:
if parameterName != 'none':
print response['statsList'][0][parameterName]
else:
printStats(response['statsList'])
elif 'info' in response:
printDict(response['info'], self.pretty)
else:
printDict(response['status'], self.pretty)
sys.exit(response['status']['code'])
def do_create(self, args):
params = {}
drives = []
devices = []
cpuPinning = {}
confLines = []
confFile = open(args[0])
for line in confFile.readlines():
line = re.sub("\s+", '', line)
line = re.sub("\#.*", '', line)
if line:
confLines.append(line)
if len(args) > 1:
confLines.extend(args[1:])
for line in confLines:
if '=' in line:
param, value = line.split("=", 1)
if param == 'devices':
devices.append(self._parseDriveSpec(value))
elif param == 'drive':
drives.append(self._parseDriveSpec(value))
elif param == 'cpuPinning':
cpuPinning, rStr = self._parseNestedSpec(value)
elif param.startswith('custom_'):
                    if 'custom' not in params:
params['custom'] = {}
params['custom'][param[7:]] = value
else:
if param in ('cdrom', 'floppy'):
value = self._parseDriveSpec(value)
params[param] = value
else:
params[line.strip()] = ''
if cpuPinning:
params['cpuPinning'] = cpuPinning
if drives:
params['drives'] = drives
if devices:
params['devices'] = devices
        # Backward compatibility for vdsClient users
if 'vt' in params:
params['kvmEnable'] = params['vt']
        if 'imageFile' in params:
            params['hda'] = params['imageFile']
        legacyDrives = ['hdd', 'hdc', 'hdb']
        if 'moreImages' in params:
            for image in params['moreImages'].split(','):
                params[legacyDrives.pop()] = image
if 'sysprepInf' in params:
infFile = open(params['sysprepInf'], 'rb')
try:
params['sysprepInf'] = xmlrpclib.Binary(infFile.read())
finally:
infFile.close()
return self.ExecAndExit(self.s.create(params))
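    # Illustrative config file for do_create (one 'key=value' directive per
    # line; '#' starts a comment and all whitespace is stripped). Values in
    # angle brackets are placeholders, not defaults:
    #   vmId=<uuid>
    #   memSize=256
    #   macAddr=aa:bb:cc:dd:ee:ff
    #   display=vnc
    #   drive=pool:<spUUID>,domain:<sdUUID>,image:<imgUUID>,volume:<volUUID>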
def vmUpdateDevice(self, args):
params = self._eqSplit(args[1:])
if 'portMirroring' in params:
params['portMirroring'] = [net for net in params['portMirroring']
.split(',') if net != '']
return self.ExecAndExit(self.s.vmUpdateDevice(args[0], params))
def hotplugNic(self, args):
nic = self._parseDriveSpec(args[1])
nic['type'] = 'interface'
params = {'vmId': args[0], 'nic': nic}
return self.ExecAndExit(self.s.hotplugNic(params))
def hotunplugNic(self, args):
nic = self._parseDriveSpec(args[1])
nic['type'] = 'interface'
params = {'vmId': args[0], 'nic': nic}
return self.ExecAndExit(self.s.hotunplugNic(params))
def hotplugDisk(self, args):
drive = self._parseDriveSpec(args[1])
drive['type'] = 'disk'
drive['device'] = 'disk'
params = {'vmId': args[0], 'drive': drive}
return self.ExecAndExit(self.s.hotplugDisk(params))
def hotunplugDisk(self, args):
drive = self._parseDriveSpec(args[1])
drive['type'] = 'disk'
drive['device'] = 'disk'
params = {'vmId': args[0], 'drive': drive}
return self.ExecAndExit(self.s.hotunplugDisk(params))
    def do_changeCD(self, args):
        vmId = args[0]
        driveSpec = self._parseDriveSpec(args[1])
        return self.ExecAndExit(self.s.changeCD(vmId, driveSpec))
    def do_changeFloppy(self, args):
        vmId = args[0]
        driveSpec = self._parseDriveSpec(args[1])
        return self.ExecAndExit(self.s.changeFloppy(vmId, driveSpec))
def do_list(self, args):
"""
Usage: vdsClient 0 list [table/long/ids] [vms:vmId1,vmId2]
"""
def _vmsParser(vmsParam):
vmsList = vmsParam.split(':')[1].strip()
if vmsList:
vmsList = [vm.strip() for vm in vmsList.split(',')]
else:
raise ValueError('Empty VMs list.')
return vmsList
vmListViews = ('table', 'long', 'ids')
view = 'long' # Default view
vms = []
if args:
if args[0].startswith('vms:'):
vms = _vmsParser(args[0])
else:
view = args[0]
if len(args) > 1 and args[1].startswith('vms:'):
vms = _vmsParser(args[1])
if view not in vmListViews:
raise ValueError('Invalid argument "%s".' % view)
if view == 'table':
allStats = {}
response = self.s.getAllVmStats()
if response['status']['code']:
return (response['status']['code'],
response['status']['message'])
for res in response['statsList']:
if not vms or res['vmId'] in vms:
allStats[res['vmId']] = res
response = self.s.list(True, vms)
if response['status']['code']:
return response['status']['code'], response['status']['message']
for conf in response['vmList']:
if view == 'long':
if 'sysprepInf' in conf:
conf['sysprepInf'] = '<<exists>>'
printConf(conf)
elif view == 'table':
vmId = conf['vmId']
if vmId not in allStats: # Avoid race.
continue
status = conf['status']
if allStats[vmId].get('monitorResponse') == '-1':
status += '*'
print ("%-36s %6s %-20s %-20s %-20s" %
(vmId, conf.get('pid', 'none'),
conf.get('vmName', '<< NO NAME >>'),
status, allStats[vmId].get('guestIPs', '')))
elif view == 'ids':
print conf['vmId']
sys.exit(response['status']['code'])
def do_destroy(self, args):
vmId = args[0]
response = self.s.destroy(vmId)
print response['status']['message']
sys.exit(response['status']['code'])
def do_pause(self, args):
vmId = args[0]
return self.ExecAndExit(self.s.pause(vmId))
def do_continue(self, args):
vmId = args[0]
response = self.s.cont(vmId)
return self.ExecAndExit(response)
def do_shutdown(self, args):
vmId, timeout, message = args
response = self.s.shutdown(vmId, timeout, message)
print response['status']['message']
sys.exit(response['status']['code'])
def do_setVmTicket(self, args):
if len(args) == 3:
vmId, otp64, secs = args[:3]
connAct = 'disconnect'
params = {}
else:
vmId, otp64, secs, connAct = args[:4]
params = {}
        if len(args) > 4:
params = self._parseDriveSpec(args[4])
return self.ExecAndExit(self.s.setVmTicket(vmId, otp64, secs, connAct,
params))
def do_reset(self, args):
vmId = args[0]
return self.ExecAndExit(self.s.reset(vmId))
def monitorCommand(self, args):
vmId = args[0]
cmd = args[1]
response = self.s.monitorCommand(vmId, cmd)
if response['status']['code']:
print response['status']['message']
else:
for line in response['output']:
print line
sys.exit(response['status']['code'])
def do_newDisk(self, args):
file, size = args
response = self.s.newDisk(file, size)
print response['status']['message']
sys.exit(response['status']['code'])
def do_sendkeys(self, args):
vmId = args[0]
return self.ExecAndExit(self.s.sendkeys(vmId, args[1:]))
def hibernate(self, args):
vmId, hiberVolHandle = args[0], args[1]
response = self.s.hibernate(vmId, hiberVolHandle)
print response['status']['message']
sys.exit(response['status']['code'])
def do_migrate(self, args):
params = {}
if len(args) > 0:
for line in args:
param, value = line.split("=")
params[param] = value
else:
raise Exception("Not enough parameters")
response = self.s.migrate(params)
print response['status']['message']
sys.exit(response['status']['code'])
def do_mStat(self, args):
vmId = args[0]
response = self.s.migrateStatus(vmId)
if not response['status']['code']:
print (response['status']['message'] +
' ' + str(response['progress']) + '%')
else:
print response['status']['message']
sys.exit(response['status']['code'])
def do_mCancel(self, args):
vmId = args[0]
response = self.s.migrateCancel(vmId)
print response['status']['message']
sys.exit(response['status']['code'])
def do_getCap(self, args):
return self.ExecAndExit(self.s.getVdsCapabilities())
def do_getHardware(self, args):
return self.ExecAndExit(self.s.getVdsHardwareInfo())
def do_getVdsStats(self, args):
return self.ExecAndExit(self.s.getVdsStats())
def do_getVmStats(self, args):
vmId = args[0]
if len(args) > 1:
return self.ExecAndExit(self.s.getVmStats(vmId), args[1])
else:
return self.ExecAndExit(self.s.getVmStats(vmId))
def do_getAllVmStats(self, args):
return self.ExecAndExit(self.s.getAllVmStats())
def desktopLogin(self, args):
vmId, domain, user, password = tuple(args)
response = self.s.desktopLogin(vmId, domain, user, password)
print response['status']['message']
sys.exit(response['status']['code'])
def desktopLock(self, args):
vmId = args[0]
response = self.s.desktopLock(vmId)
print response['status']['message']
sys.exit(response['status']['code'])
def desktopLogoff(self, args):
vmId, force = tuple(args)
response = self.s.desktopLogoff(vmId, force)
print response['status']['message']
sys.exit(response['status']['code'])
def sendHcCmd(self, args):
vmId, message = tuple(args)
response = self.s.sendHcCmdToDesktop(vmId, message)
print response['status']['message']
sys.exit(response['status']['code'])
def getDiskAlignment(self, args):
driveSpecs = {}
driveSpecs['device'] = 'disk'
vmId = BLANK_UUID if args[0] == '0' else args[0]
if len(args) > 2:
driveSpecs['poolID'] = args[1]
driveSpecs['domainID'] = args[2]
driveSpecs['imageID'] = args[3]
driveSpecs['volumeID'] = args[4]
else:
driveSpecs['GUID'] = args[1]
res = self.s.getDiskAlignment(vmId, driveSpecs)
        if res['status']['code'] == 0:
            for pName, aligned in res['alignment'].items():
                print "\t%s = %s" % (pName, aligned)
        else:
            print "Error in scan disk alignment:", res['status']['message']
        sys.exit(res['status']['code'])
######## IRS methods ####################
def createStorageDomain(self, args):
validateArgTypes(args, [int, str, str, str, int, int])
dom = self.s.createStorageDomain(*args)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def setStorageDomainDescription(self, args):
sdUUID = args[0]
descr = args[1]
dom = self.s.setStorageDomainDescription(sdUUID, descr)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def validateStorageDomain(self, args):
sdUUID = args[0]
dom = self.s.validateStorageDomain(sdUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def activateStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
dom = self.s.activateStorageDomain(sdUUID, spUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def deactivateStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
msdUUID = args[2]
mVer = int(args[3])
dom = self.s.deactivateStorageDomain(sdUUID, spUUID, msdUUID, mVer)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def attachStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
dom = self.s.attachStorageDomain(sdUUID, spUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def detachStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
msdUUID = args[2]
mVer = int(args[3])
dom = self.s.detachStorageDomain(sdUUID, spUUID, msdUUID, mVer)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def forcedDetachStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
dom = self.s.forcedDetachStorageDomain(sdUUID, spUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def formatStorageDomain(self, args):
sdUUID = args[0]
if len(args) > 1:
autoDetach = args[1]
else:
autoDetach = 'False'
dom = self.s.formatStorageDomain(sdUUID, autoDetach)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def getStorageDomainInfo(self, args):
sdUUID = args[0]
info = self.s.getStorageDomainInfo(sdUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
for element in info['info'].keys():
print "\t%s = %s" % (element, info['info'][element])
return 0, ''
def getStorageDomainStats(self, args):
sdUUID = args[0]
stats = self.s.getStorageDomainStats(sdUUID)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
dt = stats['stats']['disktotal']
df = stats['stats']['diskfree']
print "\tdisktotal = %s (%s)" % (dt, fmt3(int(dt)))
print "\tdiskfree = %s (%s)" % (df, fmt3(int(df)))
return 0, ''
def getStorageDomainsList(self, args):
if len(args) > 0:
spUUID = args[0]
else:
spUUID = BLANK_UUID
domains = self.s.getStorageDomainsList(spUUID)
if domains['status']['code']:
return domains['status']['code'], domains['status']['message']
for entry in domains['domlist']:
print entry
return 0, ''
def getDeviceList(self, args):
devices = self.s.getDeviceList(*args)
if devices['status']['code']:
return devices['status']['code'], devices['status']['message']
pp.pprint(devices['devList'])
return 0, ''
def getDevicesVisibility(self, args):
devList = args[0].split(',')
res = self.s.getDevicesVisibility(devList, {})
if res['status']['code']:
return res['status']['code'], res['status']['message']
for guid, visible in res['visible'].iteritems():
print '\t%s = %s' % (guid, visible)
return 0, ''
def getVGList(self, args):
if len(args) > 0:
storageType = int(args[0])
vgs = self.s.getVGList(storageType)
else:
vgs = self.s.getVGList()
if vgs['status']['code']:
return vgs['status']['code'], vgs['status']['message']
for entry in vgs['vglist']:
print '============================'
for element in entry.keys():
print "%s = %s " % (element, entry[element])
return 0, ''
def getVGInfo(self, args):
vgUUID = args[0]
info = self.s.getVGInfo(vgUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
#print info['info']
for entry in info['info'].keys():
print '============================'
if entry != 'pvlist':
print "%s = %s " % (entry, info['info'][entry])
else:
print 'pvlist:'
for item in info['info'][entry]:
for i in item.keys():
print "%s = %s " % (i, item[i]),
print
return 0, ''
def createVG(self, args):
sdUUID = args[0]
devList = args[1].split(',')
force = args[2].capitalize() == "True" if len(args) > 2 else False
dom = self.s.createVG(sdUUID, devList, force)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, dom['uuid']
def removeVG(self, args):
vgUUID = args[0]
dom = self.s.removeVG(vgUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def extendStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
devList = args[2].split(',')
dom = self.s.extendStorageDomain(sdUUID, spUUID, devList)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def discoverST(self, args):
portal = args[0].split(":")
ip = portal[0]
port = "3260"
if len(portal) > 1:
port = portal[1]
if len(args) == 1:
username = password = ""
else:
username = args[1]
password = args[2]
con = dict(id="", connection=ip, port=port, iqn="", portal="",
user=username, password=password)
targets = self.s.discoverSendTargets(con)
if targets['status']['code']:
return targets['status']['code'], targets['status']['message']
print "---- fullTargets"
for target in targets['fullTargets']:
print target
print "---- targets"
for target in targets['targets']:
print target
return 0, ''
def cleanupUnusedConnections(self, args):
res = self.s.cleanupUnusedConnections()
return res['status']['code'], res['status']['message']
def connectStorageServer(self, args):
serverType = int(args[0])
spUUID = args[1]
params = args[2].split(',')
conList = []
con = {}
for item in params:
key, value = item.split('=')
con[key] = value
conList.append(con)
res = self.s.connectStorageServer(serverType, spUUID, conList)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def validateStorageServerConnection(self, args):
serverType = int(args[0])
spUUID = args[1]
params = args[2].split(',')
conList = []
con = {}
for item in params:
key, value = item.split('=')
con[key] = value
conList.append(con)
res = self.s.validateStorageServerConnection(serverType,
spUUID, conList)
if res['status']['code']:
return res['status']['code'], res['status']['message']
else:
for i in res['statuslist']:
print "Connection id %s - status %s" % (i['id'], i['status'])
return 0, ''
def disconnectStorageServer(self, args):
serverType = int(args[0])
spUUID = args[1]
params = args[2].split(',')
conList = []
con = {}
for item in params:
key, value = item.split('=')
con[key] = value
conList.append(con)
res = self.s.disconnectStorageServer(serverType, spUUID, conList)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def spmStart(self, args):
validateArgTypes(args, [str, int, int, int, str, int, int],
requiredArgsNumber=5)
status = self.s.spmStart(*args)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, status['uuid']
def spmStop(self, args):
spUUID = args[0]
status = self.s.spmStop(spUUID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, ''
def getSpmStatus(self, args):
spUUID = args[0]
status = self.s.getSpmStatus(spUUID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for element in status['spm_st'].keys():
print "\t%s = %s" % (element, status['spm_st'][element])
return 0, ''
def fenceSpmStorage(self, args):
spUUID = args[0]
prevID = int(args[1])
prevLVER = int(args[2])
status = self.s.fenceSpmStorage(spUUID, prevID, prevLVER)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for element in status['spm_st'].keys():
print "\t%s = %s" % (element, status['spm_st'][element])
return 0, ''
def updateVM(self, args):
spUUID = args[0]
params = args[1].split(',')
if len(args) >= 3:
sdUUID = args[2]
else:
sdUUID = BLANK_UUID
vmList = []
vm = {}
for item in params:
key, value = item.split('=')
if key == 'imglist':
value = value.replace('+', ',')
vm[key] = value
vmList.append(vm)
res = self.s.updateVM(spUUID, vmList, sdUUID)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def upgradeStoragePool(self, args):
        validateArgTypes(args, [str, int], requiredArgsNumber=2)
status = self.s.upgradeStoragePool(*args)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, status['upgradeStatus']
def removeVM(self, args):
spUUID = args[0]
vmUUID = args[1]
if len(args) >= 3:
sdUUID = args[2]
else:
sdUUID = BLANK_UUID
res = self.s.removeVM(spUUID, vmUUID, sdUUID)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def reconstructMaster(self, args):
spUUID = args[0]
poolName = args[1]
masterDom = args[2]
domList = args[3].split(",")
domDict = {}
for item in domList:
key, value = item.split('=')
domDict[key] = value
mVer = int(args[4])
if len(args) > 5:
st = self.s.reconstructMaster(spUUID, poolName, masterDom, domDict,
mVer, *map(int, args[5:]))
else:
st = self.s.reconstructMaster(spUUID, poolName, masterDom, domDict,
mVer)
if st['status']['code']:
return st['status']['code'], st['status']['message']
return 0, ''
def createStoragePool(self, args):
poolType = int(args[0])
spUUID = args[1]
poolName = args[2]
masterDom = args[3]
domList = args[4].split(",")
mVer = int(args[5])
pool = None
if len(args) > 6:
pool = self.s.createStoragePool(poolType, spUUID,
poolName, masterDom,
domList, mVer, *args[6:])
else:
pool = self.s.createStoragePool(poolType, spUUID,
poolName, masterDom,
domList, mVer)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def destroyStoragePool(self, args):
spUUID = args[0]
ID = int(args[1])
scsi_key = args[2]
pool = self.s.destroyStoragePool(spUUID, ID, scsi_key)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def connectStoragePool(self, args):
spUUID = args[0]
ID = int(args[1])
scsi_key = args[2]
if len(args) > 3:
master = args[3]
else:
master = BLANK_UUID
if len(args) > 4:
master_ver = int(args[4])
else:
master_ver = -1
pool = self.s.connectStoragePool(spUUID, ID, scsi_key,
master, master_ver)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def disconnectStoragePool(self, args):
spUUID = args[0]
ID = int(args[1])
scsi_key = args[2]
pool = self.s.disconnectStoragePool(spUUID, ID, scsi_key)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def refreshStoragePool(self, args):
spUUID = args[0]
msdUUID = args[1]
masterVersion = int(args[2])
pool = self.s.refreshStoragePool(spUUID, msdUUID, masterVersion)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def setStoragePoolDescription(self, args):
spUUID = args[0]
descr = args[1]
dom = self.s.setStoragePoolDescription(spUUID, descr)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def getStoragePoolInfo(self, args):
spUUID = args[0]
info = self.s.getStoragePoolInfo(spUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
for element in info['info'].keys():
print "\t%s = %s" % (element, info['info'][element])
for element in info['dominfo'].keys():
print "\t%s = %s" % (element, info['dominfo'][element])
return 0, ''
def createVolume(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
diskSize = int(args[3])
convertFactor = 2097152
size = diskSize * convertFactor
volFormat = int(args[4])
preallocate = int(args[5])
diskType = int(args[6])
newVol = args[7]
descr = args[8]
if len(args) > 9:
srcImgUUID = args[9]
else:
srcImgUUID = BLANK_UUID
if len(args) > 10:
srcVolUUID = args[10]
else:
srcVolUUID = BLANK_UUID
image = self.s.createVolume(sdUUID, spUUID, imgUUID, size,
volFormat, preallocate,
diskType, newVol, descr,
srcImgUUID, srcVolUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def getVolumeInfo(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3]
info = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, volUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
for element in info['info'].keys():
print "\t%s = %s" % (element, info['info'][element])
return 0, ''
def getVolumePath(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
uuid = args[3]
info = self.s.getVolumePath(sdUUID, spUUID, imgUUID, uuid)
if info['status']['code']:
return info['status']['code'], info['status']['message']
return 0, info['path']
def getVolumeSize(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
uuid = args[3]
size = self.s.getVolumeSize(sdUUID, spUUID, imgUUID, uuid)
if size['status']['code']:
return size['status']['code'], size['status']['message']
del size['status']
printDict(size, self.pretty)
return 0, ''
def extendVolumeSize(self, args):
spUUID, sdUUID, imgUUID, volUUID, newSize = args
status = self.s.extendVolumeSize(
spUUID, sdUUID, imgUUID, volUUID, newSize)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, ''
def setVolumeDescription(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3]
descr = args[4]
status = self.s.setVolumeDescription(sdUUID, spUUID, imgUUID,
volUUID, descr)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, ''
def setVolumeLegality(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3]
legality = args[4]
image = self.s.setVolumeLegality(sdUUID, spUUID, imgUUID,
volUUID, legality)
return image['status']['code'], image['status']['message']
def deleteVolume(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3].split(',')
if len(args) > 4:
postZero = args[4]
else:
postZero = 'False'
if len(args) > 5:
force = args[5]
else:
force = 'False'
status = self.s.deleteVolume(sdUUID, spUUID, imgUUID,
volUUID, postZero, force)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, status['uuid']
def deleteVolumeByDescr(self, args):
sdUUID = args[1]
spUUID = args[2]
imgUUID = args[3]
volumes = self.s.getVolumesList(sdUUID, spUUID, imgUUID)
todelete = []
if volumes['status']['code']:
return volumes['status']['code'], volumes['status']['message']
print "Images to delete:"
for entry in volumes['uuidlist']:
info = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, entry)['info']
if info['description']:
if args[0] in info['description']:
print "\t" + entry + " : " + info['description']
todelete.append(entry)
        if not todelete:
return 0, 'Nothing to delete'
var = raw_input("Are you sure yes/no?[no] :")
if var == "yes":
print self.s.deleteVolume(sdUUID, spUUID, imgUUID,
todelete, 'false')
return 0, ''
def getVolumesList(self, args):
sdUUID = args[0]
spUUID = args[1]
if len(args) > 2:
images = [args[2]]
else:
            imgs = self.s.getImagesList(sdUUID)
            if imgs['status']['code']:
                return imgs['status']['code'], imgs['status']['message']
            images = imgs['imageslist']
for imgUUID in images:
volumes = self.s.getVolumesList(sdUUID, spUUID, imgUUID)
if volumes['status']['code']:
return volumes['status']['code'], volumes['status']['message']
for entry in volumes['uuidlist']:
message = entry + ' : '
res = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, entry)
                if 'info' not in res:
print 'ERROR:', entry, ':', res
continue
info = res['info']
if info['description']:
message += info['description'] + '. '
if BLANK_UUID not in info['parent']:
message += 'Parent is ' + info['parent']
print message
return 0, ''
def getFileStats(self, args):
assert args
validateArgTypes(args, [str, str])
response = self.s.getFileStats(*args)
if response['status']['code']:
return response['status']['code'], response['status']['message']
for key, value in response['fileStats'].iteritems():
print 'file: ', key, 'stats: ', value
return 0, ''
def getIsoList(self, args):
spUUID = args[0]
isos = self.s.getIsoList(spUUID)
if isos['status']['code']:
return isos['status']['code'], isos['status']['message']
print '------ ISO list with proper permissions only -------'
for entry in isos['isolist']:
print entry
return 0, ''
def getFloppyList(self, args):
spUUID = args[0]
floppies = self.s.getFloppyList(spUUID)
if floppies['status']['code']:
return floppies['status']['code'], floppies['status']['message']
for entry in floppies['isolist']:
print entry
return 0, ''
def getImagesList(self, args):
sdUUID = args[0]
images = self.s.getImagesList(sdUUID)
if images['status']['code']:
return images['status']['code'], images['status']['message']
for entry in images['imageslist']:
print entry
return 0, ''
def getImageDomainsList(self, args):
spUUID = args[0]
imgUUID = args[1]
domains = self.s.getImageDomainsList(spUUID, imgUUID)
if domains['status']['code']:
return domains['status']['code'], domains['status']['message']
for entry in domains['domainslist']:
print entry
return 0, ''
def getConnectedStoragePoolsList(self, args):
pools = self.s.getConnectedStoragePoolsList()
if pools['status']['code']:
return pools['status']['code'], pools['status']['message']
for entry in pools['poollist']:
print entry
return 0, ''
def getTaskInfo(self, args):
taskID = args[0]
status = self.s.getTaskInfo(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for k, v in status['TaskInfo'].iteritems():
print '\t', k, '=', v
return 0, ''
def getAllTasksInfo(self, args):
status = self.s.getAllTasksInfo()
if status['status']['code']:
return status['status']['code'], status['status']['message']
for t, inf in status['allTasksInfo'].iteritems():
print t, ':'
for k, v in inf.iteritems():
print '\t', k, '=', v
return 0, ''
def getTaskStatus(self, args):
taskID = args[0]
status = self.s.getTaskStatus(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print "TASK: %s STATUS: %s RESULT: %s MESSAGE: '%s'" % (
taskID,
status["taskStatus"]["taskState"],
status["taskStatus"]["taskResult"],
status["taskStatus"]["message"])
print "%s" % status # TODO
return 0, ''
def getAllTasksStatuses(self, args):
status = self.s.getAllTasksStatuses()
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def getAllTasks(self, args):
keys = []
if len(args) > 0:
keys = [x.strip() for x in args[0].split(',')]
status = self.s.getAllTasks(keys)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for t, inf in status['tasks'].iteritems():
print t, ':'
for k, v in inf.iteritems():
print '\t', k, '=', v
return 0, ''
def stopTask(self, args):
taskID = args[0]
status = self.s.stopTask(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def clearTask(self, args):
taskID = args[0]
status = self.s.clearTask(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def revertTask(self, args):
taskID = args[0]
status = self.s.revertTask(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def getParent(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
uuid = args[3]
image = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, uuid)
if image['status']['code']:
return image['status']['code'], image['status']['message']
        if BLANK_UUID in image['info']['parent']:
return 1, 'No parent available'
return 0, image['info']['parent']
def deleteImage(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
if len(args) > 3:
postZero = args[3]
else:
postZero = 'False'
if len(args) > 4:
force = args[4]
else:
force = 'False'
image = self.s.deleteImage(sdUUID, spUUID, imgUUID, postZero, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def moveImage(self, args):
spUUID = args[0]
srcDomUUID = args[1]
dstDomUUID = args[2]
imgUUID = args[3]
vmUUID = args[4]
op = int(args[5])
if len(args) > 6:
postZero = args[6]
else:
postZero = 'False'
if len(args) > 7:
force = args[7]
else:
force = 'False'
image = self.s.moveImage(spUUID, srcDomUUID, dstDomUUID,
imgUUID, vmUUID, op, postZero, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def cloneImageStructure(self, args):
spUUID, sdUUID, imgUUID, dstSdUUID = args
image = self.s.cloneImageStructure(spUUID, sdUUID, imgUUID, dstSdUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def syncImageData(self, args):
spUUID, sdUUID, imgUUID, dstSdUUID, syncType = args
image = self.s.syncImageData(spUUID, sdUUID, imgUUID, dstSdUUID,
syncType)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def downloadImage(self, args):
methodArgs, spUUID, sdUUID, imgUUID, volUUID = args
methodArgsValue = ast.literal_eval(methodArgs)
image = self.s.downloadImage(
methodArgsValue, spUUID, sdUUID, imgUUID, volUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def uploadImage(self, args):
methodArgs, spUUID, sdUUID, imgUUID, volUUID = args
methodArgsValue = ast.literal_eval(methodArgs)
image = self.s.uploadImage(
methodArgsValue, spUUID, sdUUID, imgUUID, volUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def moveMultiImage(self, args):
spUUID = args[0]
srcDomUUID = args[1]
dstDomUUID = args[2]
imgList = args[3].split(",")
imgDict = {}
for item in imgList:
key, value = item.split('=')
imgDict[key] = value
vmUUID = args[4]
if len(args) > 5:
force = args[5]
else:
force = 'False'
image = self.s.moveMultipleImages(spUUID, srcDomUUID, dstDomUUID,
imgDict, vmUUID, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def copyImage(self, args):
sdUUID = args[0]
spUUID = args[1]
vmUUID = args[2]
srcImgUUID = args[3]
srcVolUUID = args[4]
dstImgUUID = args[5]
dstVolUUID = args[6]
descr = args[7]
if len(args) > 8:
dstSdUUID = args[8]
else:
dstSdUUID = BLANK_UUID
if len(args) > 9:
volType = int(args[9])
else:
volType = SHARED_VOL
if len(args) > 10:
volFormat = int(args[10])
else:
volFormat = UNKNOWN_VOL
if len(args) > 11:
preallocate = int(args[11])
else:
preallocate = UNKNOWN_VOL
if len(args) > 12:
postZero = args[12]
else:
postZero = 'False'
if len(args) > 13:
force = args[13]
else:
force = 'False'
image = self.s.copyImage(sdUUID, spUUID, vmUUID, srcImgUUID,
srcVolUUID, dstImgUUID, dstVolUUID,
descr, dstSdUUID, volType, volFormat,
preallocate, postZero, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def mergeSnapshots(self, args):
sdUUID = args[0]
spUUID = args[1]
vmUUID = args[2]
imgUUID = args[3]
ancestor = args[4]
successor = args[5]
if len(args) > 6:
postZero = args[6]
else:
postZero = 'False'
image = self.s.mergeSnapshots(sdUUID, spUUID, vmUUID, imgUUID,
ancestor, successor, postZero)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def acquireDomainLock(self, args):
spUUID = args[0]
sdUUID = args[1]
image = self.s.acquireDomainLock(spUUID, sdUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, ''
def releaseDomainLock(self, args):
spUUID = args[0]
sdUUID = args[1]
image = self.s.releaseDomainLock(spUUID, sdUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, ''
def prepareForShutdown(self, args):
stats = self.s.prepareForShutdown()
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_setLogLevel(self, args):
level = int(args[0])
assert len(args) == 1
stats = self.s.setLogLevel(level)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_setMOMPolicy(self, policyFile):
stats = self.s.setMOMPolicy(policyFile)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_setMOMPolicyParameters(self, args):
# convert arguments in the form of key=value to a dictionary
expand = lambda pair: (pair[0], eval(pair[1]))
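        # NB: each value is eval()'d as a Python expression (the usage text
        # says 'key=python_code'), so only trusted input belongs here.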
key_value_store = dict([expand(arg.split("=", 1))
for arg in args
if "=" in arg])
stats = self.s.setMOMPolicyParameters(key_value_store)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_getVmsInfo(self, args):
spUUID = args[0]
if len(args) >= 2:
sdUUID = args[1]
else:
sdUUID = BLANK_UUID
if len(args) >= 3:
vmList = args[2].split(",")
else:
vmList = []
infos = self.s.getVmsInfo(spUUID, sdUUID, vmList)
if infos['status']['code'] != 0:
return infos['status']['code'], infos['status']['message']
else:
message = ''
for entry in infos['vmlist']:
message += '\n' + '================================' + '\n'
message += entry + '=' + infos['vmlist'][entry]
if not message:
message = 'No VMs found.'
if isinstance(message, unicode):
print message.encode('utf-8')
else:
print message
return 0, ''
def do_getVmsList(self, args):
spUUID = args[0]
if len(args) >= 2:
sdUUID = args[1]
else:
sdUUID = BLANK_UUID
vms = self.s.getVmsList(spUUID, sdUUID)
if vms['status']['code'] != 0:
return vms['status']['code'], vms['status']['message']
else:
message = ''
for entry in vms['vmlist']:
message += '\n' + '================================' + '\n'
message += entry
if not message:
message = 'No VMs found.'
print message
return 0, ''
def _eqSplit(self, args):
d = {}
for arg in args:
kv = arg.split('=', 1)
if len(kv) != 2:
raise ValueError("Invalid argument: %s" % arg)
k, v = kv
d[k] = v
return d
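    # Illustrative: _eqSplit(['network=ovirtmgmt', 'nics=eth0,eth1'])
    # returns {'network': 'ovirtmgmt', 'nics': 'eth0,eth1'}; only the first
    # '=' splits, so values may themselves contain '='.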
def _splitDriveSpecItems(self, item):
"""
BC is BC.
"""
key, value = item.split(":", 1)
if key in ("domain", "pool", "image", "volume"):
key = "%sID" % key
return key, value
def _parseNestedSpec(self, spec):
d = dict()
if spec[0] != '{':
raise Exception("_parseNestedSpec called with "
"non nested spec: '%s'" % spec)
spec = spec[1:]
while True:
            if not spec or '}' not in spec:
raise Exception("nested spec not terminated "
"with '}' in '%s'" % spec)
if spec[0] == '}':
return d, spec[1:]
# Split into first name + the rest
            if ':' not in spec:
raise Exception("missing name value separator "
"':' in '%s'" % spec)
name, spec = spec.split(":", 1)
# Determine the value
if spec[0] == '{':
val, spec = self._parseNestedSpec(spec)
d[name] = val
else:
# The value ends either with a ',' meaning it is followed by
# another name:value pair, or with a '}' ending the spec
i = 0
while spec[i] != ',' and spec[i] != '}':
i = i + 1
val = spec[:i]
spec = spec[i:]
d[name] = val
# If there is a comma behind the value remove it before continuing
if spec and spec[0] == ',':
spec = spec[1:]
def _parseDriveSpec(self, spec):
"""
'{' or ',' means dict. (!)
"""
if spec[0] == '{':
val, spec = self._parseNestedSpec(spec)
if spec:
raise Exception("Trailing garbage after spec: '%s'" % spec)
return val
if ',' in spec:
return dict(self._splitDriveSpecItems(item)
for item in spec.split(',') if item)
return spec
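    # Illustrative drivespec parsing (all values stay strings):
    #   _parseDriveSpec('pool:p1,domain:d1,image:i1,volume:v1')
    #     -> {'poolID': 'p1', 'domainID': 'd1', 'imageID': 'i1',
    #         'volumeID': 'v1'}
    #   _parseDriveSpec('{mem:1024,cpu:{cores:2}}')
    #     -> {'mem': '1024', 'cpu': {'cores': '2'}}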
def do_setupNetworks(self, args):
params = self._eqSplit(args)
networks = self._parseDriveSpec(params.get('networks', '{}'))
bondings = self._parseDriveSpec(params.get('bondings', '{}'))
for k in ('networks', 'bondings'):
if k in params:
del params[k]
params['connectivityCheck'] = params.get('connectivityCheck', 'False')
for bond in bondings:
if 'nics' in bondings[bond]:
bondings[bond]['nics'] = bondings[bond]['nics'].split("+")
status = self.s.setupNetworks(networks, bondings, params)
return status['status']['code'], status['status']['message']
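    # Illustrative invocation (host and device names are placeholders):
    #   vdsClient -s <server> setupNetworks \
    #       networks='{ovirtmgmt:{nic:eth0,bootproto:dhcp}}' \
    #       bondings='{bond0:{nics:eth1+eth2}}' connectivityCheck=False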
def do_addNetwork(self, args):
params = self._eqSplit(args)
try:
nics = filter(None, params['nics'].split(','))
        except KeyError:
            raise ValueError("Missing required argument 'nics'")
bridge = params.get('bridge', '')
vlan = params.get('vlan', '')
bond = params.get('bond', '')
for k in ['bridge', 'vlan', 'bond', 'nics']:
if k in params:
del params[k]
status = self.s.addNetwork(bridge, vlan, bond, nics, params)
return status['status']['code'], status['status']['message']
def do_editNetwork(self, args):
params = self._eqSplit(args)
try:
nics = params['nics'].split(',')
        except KeyError:
            raise ValueError("Missing required argument 'nics'")
oldBridge = params.get('oldBridge', '')
newBridge = params.get('newBridge', '')
vlan = params.get('vlan', '')
bond = params.get('bond', '')
for k in ['oldBridge', 'newBridge', 'vlan', 'bond', 'nics']:
if k in params:
del params[k]
status = self.s.editNetwork(oldBridge, newBridge, vlan, bond,
nics, params)
return status['status']['code'], status['status']['message']
def do_delNetwork(self, args):
params = self._eqSplit(args)
try:
nics = params['nics'].split(',')
        except KeyError:
            raise ValueError("Missing required argument 'nics'")
bridge = params.get('bridge', '')
vlan = params.get('vlan', '')
bond = params.get('bond', '')
for k in ['bridge', 'vlan', 'bond', 'nics']:
if k in params:
del params[k]
status = self.s.delNetwork(bridge, vlan, bond, nics, params)
return status['status']['code'], status['status']['message']
def do_setSafeNetworkConfig(self, args):
status = self.s.setSafeNetworkConfig()
return status['status']['code'], status['status']['message']
def do_fenceNode(self, args):
addr, port, agent, user, passwd, action = args[:6]
status = self.s.fenceNode(addr, port, agent, user, passwd, action,
*args[6:])
if action == 'status' and 'power' in status:
return status['status']['code'], status['power']
return status['status']['code'], status['status']['message']
def __image_status(self, imgUUID, res):
if "imagestatus" in res and "message" in res:
status = "OK"
if res["imagestatus"]:
status = "ERROR"
print ("Image %s status %s: %s (%s)" %
(imgUUID, status, res["message"], res["imagestatus"]))
if "badvols" in res:
for v, err in res["badvols"].iteritems():
print "\tVolume %s is bad: %s" % (v, err)
def __domain_status(self, sdUUID, res):
if "domainstatus" in res and "message" in res:
status = "OK"
if res["domainstatus"]:
status = "ERROR"
print ("Domain %s status %s: %s (%s)" %
(sdUUID, status, res["message"], res["domainstatus"]))
if "badimages" in res:
for i in res["badimages"]:
print "\tImage %s is bad" % (i)
self.__image_status(i, res["badimages"][i])
def __pool_status(self, spUUID, res):
if "poolstatus" in res and "message" in res:
status = "OK"
if res["poolstatus"]:
status = "ERROR"
print ("Pool %s status %s: %s (%s)" %
(spUUID, status, res["message"], res["poolstatus"]))
if "masterdomain":
print "\tMaster domain is %s" % res["masterdomain"]
if "spmhost":
print "\tThe SPM host id is %s" % res["spmhost"]
if "baddomains" in res:
for d in res["baddomains"]:
print "\tDomain %s is bad:" % (d)
self.__domain_status(d, res["baddomains"][d])
def repoStats(self, args):
stats = self.s.repoStats()
if stats['status']['code']:
print "count not get repo stats"
return int(stats['status']['code'])
for d in stats:
if d == "status":
continue
print 'Domain %s %s' % (d, str(stats[d]))
return 0, ''
def startMonitoringDomain(self, args):
sdUUID, hostID = args
status = self.s.startMonitoringDomain(sdUUID, hostID)
return status['status']['code'], status['status']['message']
def stopMonitoringDomain(self, args):
sdUUID, = args
status = self.s.stopMonitoringDomain(sdUUID)
return status['status']['code'], status['status']['message']
def snapshot(self, args):
vmUUID, sdUUID, imgUUID, baseVolUUID, volUUID = args
status = self.s.snapshot(vmUUID, [
{'domainID': sdUUID,
'imageID': imgUUID,
'baseVolumeID': baseVolUUID,
'volumeID': volUUID},
])
return status['status']['code'], status['status']['message']
def setBalloonTarget(self, args):
vmId = args[0]
target = int(args[1])
response = self.s.setBalloonTarget(vmId, target)
return response['status']['code'], response['status']['message']
def diskReplicateStart(self, args):
vmUUID, spUUID, sdUUID, imgUUID, volUUID, dstSdUUID = args
status = self.s.diskReplicateStart(
vmUUID,
{'poolID': spUUID, 'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID},
{'poolID': spUUID, 'domainID': dstSdUUID, 'imageID': imgUUID,
'volumeID': volUUID})
return status['status']['code'], status['status']['message']
def diskReplicateFinish(self, args):
vmUUID, spUUID, sdUUID, imgUUID, volUUID, dstSdUUID = args
status = self.s.diskReplicateFinish(
vmUUID,
{'poolID': spUUID, 'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID},
{'poolID': spUUID, 'domainID': dstSdUUID, 'imageID': imgUUID,
'volumeID': volUUID})
return status['status']['code'], status['status']['message']
def diskSizeExtend(self, args):
vmUUID, spUUID, sdUUID, imgUUID, volUUID, newSize = args
status = self.s.diskSizeExtend(
vmUUID, {
'poolID': spUUID, 'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID, 'device': 'disk'
}, newSize)
if status['status']['code'] == 0:
print "New disk size:", status.get('size', None)
return status['status']['code'], status['status']['message']
if __name__ == '__main__':
if _glusterEnabled:
serv = ge.GlusterService()
else:
serv = service()
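    # Each CLI verb maps to a (handler, help-lines) tuple; usage() prints
    # the first help line as the parameter synopsis and the remaining lines
    # as the description.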
commands = {
'create': (serv.do_create,
('<configFile> [parameter=value, parameter=value, ......]',
                    'Creates new machine with the parameters given in the'
' command line overriding the ones in the config file',
'Example with config file: vdsClient someServer create'
' myVmConfigFile',
'Example with no file : vdsClient someServer create'
' /dev/null vmId=<uuid> memSize=256 '
'imageFile=someImage display=<vnc|qxl|qxlnc>',
'Parameters list: r=required, o=optional',
'r vmId=<uuid> : Unique identification for the '
'created VM. Any additional operation on the VM must '
'refer to this ID',
'o vmType=<qemu/kvm> : Virtual machine technology - '
'if not given kvm is default',
'o kvmEnable=<true/false> : run in KVM enabled mode '
'or full emulation - default is according to the VDS '
'capabilities',
'r memSize=<int> : Memory to allocate for this '
'machine',
'r macAddr=<aa:bb:cc:dd:ee:ff> : MAC address of the '
'machine',
'r display=<vnc|qxl|qxlnc> : send the machine '
'display to vnc, spice, or spice with no '
'image compression',
'o drive=pool:poolID,domain:domainID,image:imageID,'
'volume:volumeID[,boot:true,format:cow] : disk image '
'by UUIDs',
'o (deprecated) hda/b/c/d=<path> : Disk drive '
'images',
'o floppy=<image> : Mount the specified Image as '
'floppy',
'o cdrom=<path> : ISO image file to be mounted as '
'the powerup cdrom',
'o boot=<c/d/n> : boot device - drive C or cdrom or '
'network',
'o sysprepInf=/path/to/file: Launch with the '
'specified file as sysprep.inf in floppy',
#'o any parmeter=<any value> : parameter that is '
#'not familiar is passed as is to the VM',
#' and displayed with '
#'all other parameter. They can be used for '
#'additional',
#' information the user '
#'want to reserve with the machine'
'o acpiEnable : If present will remove the default '
'-no-acpi switch',
'o qgaEnable : use qemu-ga as guest agent',
'o tdf : If present will add the -rtc-td-hack '
'switch',
'o irqChip : If false, add the -no-kvm-irqchip '
'switch',
'o spiceSecureChannels : comma-separated list of '
'spice channel that will be encrypted',
'o spiceMonitors : number of emulated screen heads',
'o soundDevice : emulated sound device',
'o launchPaused : If "true", start qemu paused',
'o vmName : human-readable name of new VM',
'o tabletEnable : If "true", enable tablet input',
'o timeOffset : guest\'s start date, relative to '
'host\'s time, in seconds',
'o smp : number of vcpus',
'o smpCoresPerSocket, smpThreadsPerCore : vcpu '
'topology',
'o keyboardLayout : language code of client '
'keyboard',
'o cpuType : emulated cpu (with optional flags)',
'o emulatedMachine : passed as qemu\'s -M',
'o devices={name:val[, name:val, name:{name:val, '
'name:val}]} : add a fully specified device',
'o cpuPinning={vcpuid:pinning} cpu pinning in '
'libvirt-like format. see '
'http://libvirt.org/formatdomain.html#elementsCPUTuning'
)),
'vmUpdateDevice': (serv.vmUpdateDevice,
('<vmId> <devicespec>',
'Update a VM\'s device',
'Example: vmUpdateDevice xxxx deviceType=interface'
' alias=net0 linkActive=false',
'devicespec list: r=required, '
'o=optional',
'r devicetype: interface',
                            'o network: network name - No change if not '
'specified. Dummy bridge and link inactive if '
'empty string',
'o linkActive: bool - No change if not '
'specified',
'r alias: libvirt\'s vnic alias',
                            'o portMirroring: net[,net] - Only networks to '
                            'mirror. No change if not specified, no '
                            'mirroring if empty list.'
)),
'hotplugNic': (serv.hotplugNic,
('<vmId> <nicspec>',
'Hotplug NIC to existing VM',
'nicspec parameters list: r=required, o=optional',
'r device: bridge|sriov|vnlink|bridgeless.',
'r network: network name',
'r macAddr: mac address',
'r nicModel: pv|rtl8139|e1000',
'o bootOrder: <int> - global boot order across '
'all bootable devices'
)),
'hotunplugNic': (serv.hotunplugNic,
('<vmId> <nicspec>',
'Hotunplug NIC from existing VM',
'nicspec parameters list: r=required, o=optional',
'r device: bridge|sriov|vnlink|bridgeless.',
'r network: network name',
'r macAddr: mac address',
'r nicModel: pv|rtl8139|e1000',
'o bootOrder: <int> - global boot order across '
'all bootable devices'
)),
'hotplugDisk': (serv.hotplugDisk,
('<vmId> <drivespec>',
'Hotplug disk to existing VM',
'drivespec parameters list: r=required, o=optional',
'r iface:<ide|virtio> - Unique identification of '
'the existing VM.',
'r index:<int> - disk index unique per interface '
'virtio|ide',
'r [pool:UUID,domain:UUID,image:UUID,volume:UUID]|'
'[GUID:guid]|[UUID:uuid]',
'r format: cow|raw',
'r readonly: True|False - default is False',
'r propagateErrors: off|on - default is off',
'o bootOrder: <int> - global boot order across '
'all bootable devices',
'o shared: exclusive|shared|none',
'o optional: True|False'
)),
'hotunplugDisk': (serv.hotunplugDisk,
                          ('<vmId> <drivespec>',
'Hotunplug disk from existing VM',
'drivespec parameters list: r=required, o=optional',
'r iface:<ide|virtio> - Unique identification of '
'the existing VM.',
'r index:<int> - disk index unique per interface '
'virtio|ide',
'r [pool:UUID,domain:UUID,image:UUID,volume:UUID]|'
'[GUID:guid]|[UUID:uuid]',
'r format: cow|raw',
'r readonly: True|False - default is False',
'r propagateErrors: off|on - default is off',
'o bootOrder: <int> - global boot order across '
'all bootable devices',
'o shared: exclusive|shared|none',
'o optional: True|False'
)),
'changeCD': (serv.do_changeCD,
('<vmId> <fileName|drivespec>',
'Changes the iso image of the cdrom'
)),
'changeFloppy': (serv.do_changeFloppy,
('<vmId> <fileName|drivespec>',
'Changes the image of the floppy drive'
)),
'destroy': (serv.do_destroy,
('<vmId>',
'Stops the emulation and destroys the virtual machine.'
' This is not a shutdown.'
)),
'shutdown': (serv.do_shutdown,
('<vmId> <timeout> <message>',
'Stops the emulation and graceful shutdown the virtual'
' machine.'
)),
'list': (serv.do_list,
('[view] [vms:vmId1,vmId2]',
'Lists all available machines on the specified server.',
"Optional vms list, should start with 'vms:' and follow with"
" 'vmId1,vmId2,...'",
'Optional views:',
' "long" all available configuration info (Default).',
' "table" table output with the fields: vmId, vmName, '
'Status and IP.',
' "ids" all vmIds.'
)),
'pause': (serv.do_pause,
('<vmId>',
'Pauses the execution of the virtual machine without '
'termination'
)),
'continue': (serv.do_continue,
('<vmId>',
'Continues execution after of a paused machine'
)),
'reset': (serv.do_reset,
('<vmId>',
'Sends reset signal to the vm'
)),
'setVmTicket': (serv.do_setVmTicket,
('<vmId> <password> <sec> [disconnect|keep|fail], '
'[params={}]',
'Set the password to the vm display for the next '
'<sec> seconds.',
'Optional argument instructs spice regarding '
'currently-connected client.',
'Optional additional parameters in dictionary format,'
' name:value,name:value'
)),
'migrate': (serv.do_migrate,
('vmId=<id> method=<offline|online> src=<host[:port]> '
'dst=<host[:port]> dstqemu=<host>',
'Migrate a desktop from src machine to dst host using '
'the specified ports'
)),
'migrateStatus': (serv.do_mStat,
('<vmId>',
'Check the progress of current outgoing migration'
)),
'migrateCancel': (serv.do_mCancel,
('<vmId>',
'(not implemented) cancel machine migration'
)),
'sendkeys': (serv.do_sendkeys,
('<vmId> <key1> ...... <keyN>',
'Send the key sequence to the vm'
)),
'getVdsCapabilities': (serv.do_getCap,
('',
'Get Capabilities info of the VDS'
)),
'getVdsCaps': (serv.do_getCap,
('',
'Get Capabilities info of the VDS'
)),
'getVdsHardwareInfo': (serv.do_getHardware,
('',
'Get hardware info of the VDS'
)),
'getVdsStats': (serv.do_getVdsStats,
('',
'Get Statistics info on the VDS'
)),
'getVmStats': (serv.do_getVmStats,
('<vmId>',
'Get Statistics info on the VM'
)),
'getAllVmStats': (serv.do_getAllVmStats,
('',
'Get Statistics info for all existing VMs'
)),
'getVGList': (serv.getVGList,
('storageType',
'List of all VGs.'
)),
'getDeviceList': (serv.getDeviceList,
('[storageType]',
'List of all block devices (optionally - matching '
'storageType).'
)),
'getDevicesVisibility': (serv.getDevicesVisibility,
('<devlist>',
'Get visibility of each device listed'
)),
'getDiskAlignment': (serv.getDiskAlignment,
('[<vmId> <poolId> <domId> <imgId> <volId>]',
'[<vmId> <GUID>]',
'Get alignment of each partition on the device'
)),
'getVGInfo': (serv.getVGInfo,
('<vgUUID>',
'Get info of VG'
)),
'createVG': (serv.createVG,
('<sdUUID> <devlist> [force]',
'Create a new VG from devices devlist (list of dev '
'GUIDs)'
)),
'removeVG': (serv.removeVG,
('<vgUUID>',
'remove the VG identified by its UUID'
)),
'extendStorageDomain': (serv.extendStorageDomain,
('<sdUUID> <spUUID> <devlist>',
'Extend the Storage Domain by adding devices'
' devlist (list of dev GUIDs)'
)),
'discoverST': (serv.discoverST,
('ip[:port] [username password]',
'Discover the available iSCSI targetnames on a '
'specified iSCSI portal'
)),
'cleanupUnusedConnections': (serv.cleanupUnusedConnections,
('',
'Clean up unused iSCSI storage '
'connections'
)),
'connectStorageServer': (serv.connectStorageServer,
('<server type> <spUUID> <conList (id=...,'
'connection=server:/export_path,portal=...,'
'port=...,iqn=...,user=...,password=...'
'[,initiatorName=...])>',
'Connect to a storage low level entity '
'(server)'
)),
'validateStorageServerConnection':
(serv.validateStorageServerConnection,
('<server type> <spUUID> <conList (id=...,'
'connection=server:/export_path,portal=...,port=...,iqn=...,'
'user=...,password=...[,initiatorName=...])>',
'Validate that we can connect to a storage server'
)),
'disconnectStorageServer': (serv.disconnectStorageServer,
('<server type> <spUUID> <conList (id=...,'
'connection=server:/export_path,'
'portal=...,port=...,iqn=...,user=...,'
'password=...[,initiatorName=...])>',
'Disconnect from a storage low level '
'entity (server)'
)),
'spmStart': (serv.spmStart,
('<spUUID> <prevID> <prevLVER> <recoveryMode> '
'<scsiFencing> <maxHostID> <version>',
'Start SPM functionality'
)),
'spmStop': (serv.spmStop,
('<spUUID>',
'Stop SPM functionality'
)),
'getSpmStatus': (serv.getSpmStatus,
('<spUUID>',
'Get SPM status'
)),
'acquireDomainLock': (serv.acquireDomainLock,
('<spUUID> <sdUUID>',
'acquire storage domain lock'
)),
'releaseDomainLock': (serv.releaseDomainLock,
('<spUUID> <sdUUID>',
'release storage domain lock'
)),
'fenceSpmStorage': (serv.fenceSpmStorage,
('<spUUID> <prevID> <prevLVER> ',
'fence SPM storage state'
)),
'updateVM': (serv.updateVM,
("<spUUID> <vmList> ('vm'=vmUUID,'ovf'='...','"
"imglist'='imgUUID1+imgUUID2+...') [sdUUID]",
'Update VM on pool or Backup domain'
)),
'upgradeStoragePool': (serv.upgradeStoragePool,
("<spUUID> <targetVersion>",
'Upgrade a pool to a new version (Requires a '
'running SPM)'
)),
'removeVM': (serv.removeVM,
('<spUUID> <vmUUID> [sdUUID]',
'Remove VM from pool or Backup domain'
)),
'reconstructMaster': (serv.reconstructMaster,
('<spUUID> <poolName> <masterDom> '
'<domDict>({sdUUID1=status,sdUUID2=status,...})'
' <masterVersion>, [<lockPolicy> '
'<lockRenewalIntervalSec> <leaseTimeSec> '
'<ioOpTimeoutSec> <leaseRetries>]',
'Reconstruct master domain'
)),
'createStoragePool': (serv.createStoragePool,
('<storage type> <spUUID> <poolName> <masterDom>'
' <domList>(sdUUID1,sdUUID2,...) '
'<masterVersion>, [<lockPolicy> '
'<lockRenewalIntervalSec> <leaseTimeSec> '
'<ioOpTimeoutSec> <leaseRetries>]',
'Create new storage pool with single/multiple '
'image data domain'
)),
'destroyStoragePool': (serv.destroyStoragePool,
('<spUUID> <id> <scsi-key>',
'Destroy storage pool'
)),
'connectStoragePool': (serv.connectStoragePool,
('<spUUID> <id> <scsi-key> [masterUUID] '
'[masterVer]',
'Connect a Host to specific storage pool'
)),
'disconnectStoragePool': (serv.disconnectStoragePool,
('<spUUID> <id> <scsi-key>',
'Disconnect a Host from the specific '
'storage pool'
)),
'refreshStoragePool': (serv.refreshStoragePool,
('<spUUID> <masterDom> <masterVersion>',
'Refresh storage pool'
)),
'setStoragePoolDescription': (serv.setStoragePoolDescription,
('<spUUID> <descr>',
'Set storage pool description'
)),
'getStoragePoolInfo': (serv.getStoragePoolInfo,
('<spUUID>',
'Get storage pool info'
)),
'createStorageDomain': (serv.createStorageDomain,
('<storage type> <domain UUID> <domain name> '
'<param> <domType> <version>',
'Creates new storage domain'
)),
'setStorageDomainDescription': (serv.setStorageDomainDescription,
('<domain UUID> <descr>',
'Set storage domain description'
)),
'validateStorageDomain': (serv.validateStorageDomain,
('<domain UUID>',
'Validate storage domain'
)),
'activateStorageDomain': (serv.activateStorageDomain,
('<domain UUID> <pool UUID>',
'Activate a storage domain that is already '
'a member in a storage pool.'
)),
'deactivateStorageDomain': (serv.deactivateStorageDomain,
('<domain UUID> <pool UUID> <new master '
'domain UUID> <masterVer>',
'Deactivate a storage domain. '
)),
'attachStorageDomain': (serv.attachStorageDomain,
('<domain UUID> <pool UUID>',
'Attach a storage domain to a storage pool.'
)),
'detachStorageDomain': (serv.detachStorageDomain,
('<domain UUID> <pool UUID> <new master domain'
' UUID> <masterVer>',
'Detach a storage domain from a storage pool.'
)),
'forcedDetachStorageDomain': (serv.forcedDetachStorageDomain,
('<domain UUID> <pool UUID>',
'Forced detach a storage domain from a '
'storage pool.'
)),
'formatStorageDomain': (serv.formatStorageDomain,
('<domain UUID> [<autoDetach>]',
'Format detached storage domain.'
)),
'getStorageDomainInfo': (serv.getStorageDomainInfo,
('<domain UUID>',
'Get storage domain info.'
)),
'getStorageDomainStats': (serv.getStorageDomainStats,
('<domain UUID>',
'Get storage domain statistics.'
)),
'getStorageDomainsList': (serv.getStorageDomainsList,
('<pool UUID>',
'Get storage domains list of pool or all '
'domains if pool omitted.'
)),
'createVolume': (serv.createVolume,
('<sdUUID> <spUUID> <imgUUID> <size> <volFormat> '
'<preallocate> <diskType> <newVolUUID> <descr> '
'<srcImgUUID> <srcVolUUID>',
'Creates new volume or snapshot'
)),
'extendVolumeSize': (serv.extendVolumeSize, (
'<spUUID> <sdUUID> <imgUUID> <volUUID> <newSize>',
'Extend the volume size (virtual disk size seen by the guest).',
)),
'getVolumePath': (serv.getVolumePath,
('<sdUUID> <spUUID> <imgUUID> <volume uuid>',
'Returns the path to the requested uuid'
)),
'setVolumeDescription': (serv.setVolumeDescription,
('<sdUUID> <spUUID> <imgUUID> <volUUID> '
'<Description>',
'Sets a new description to the volume'
)),
'setVolumeLegality': (serv.setVolumeLegality,
('<sdUUID> <spUUID> <imgUUID> <volUUID> '
'<Legality>',
'Set volume legality (ILLEGAL/LEGAL).'
)),
'deleteVolume': (serv.deleteVolume,
('<sdUUID> <spUUID> <imgUUID> <volUUID>,...,<volUUID>'
' <postZero> [<force>]',
                         'Deletes a volume if it is a leaf. Else returns error'
)),
'deleteVolumeByDescr': (serv.deleteVolumeByDescr,
('<part of description> <sdUUID> <spUUID> '
'<imgUUID>',
'Deletes list of volumes(only leafs) '
'according to their description'
)),
'getVolumeInfo': (serv.getVolumeInfo,
('<sdUUID> <spUUID> <imgUUID> <volUUID>',
'Returns all the volume details'
)),
'getParent': (serv.getParent,
('<sdUUID> <spUUID> <imgUUID> <Disk Image uuid>',
'Returns the parent of the volume. Error if no parent'
' exists'
)),
'getVolumesList': (serv.getVolumesList,
('<sdUUID> <spUUID> [imgUUID]',
'Returns list of volumes of imgUUID or sdUUID if '
'imgUUID absent'
)),
'getVolumeSize': (serv.getVolumeSize,
('<sdUUID> <spUUID> <imgUUID> <volUUID>',
'Returns the apparent size and the true size of the'
' volume (in bytes)'
)),
'getFileStats': (serv.getFileStats,
('<sdUUID> [pattern][caseSensitive]',
'Returns files statistics from ISO domain'
)),
'getIsoList': (serv.getIsoList,
('<spUUID>',
'Returns list of all .iso images in ISO domain'
)),
'getFloppyList': (serv.getFloppyList,
('<spUUID>',
'Returns list of all .vfd images in ISO domain'
)),
'getImagesList': (serv.getImagesList,
('<sdUUID>',
'Get list of all images of specific domain'
)),
'getImageDomainsList': (serv.getImageDomainsList,
('<spUUID> <imgUUID> [datadomain=True]',
'Get list of all data domains in the pool '
'that contains imgUUID'
)),
'getConnectedStoragePoolsList': (serv.getConnectedStoragePoolsList,
('',
'Get storage pools list'
)),
'getTaskInfo': (serv.getTaskInfo,
('<TaskID>',
'get async task info'
)),
'getAllTasksInfo': (serv.getAllTasksInfo,
('',
'get info of all async tasks'
)),
'getTaskStatus': (serv.getTaskStatus,
('<TaskID>',
'get task status'
)),
'getAllTasksStatuses': (serv.getAllTasksStatuses,
('',
'list statuses of all async tasks'
)),
'getAllTasks': (serv.getAllTasks,
('[tags=\'\']',
'get status and information for all async tasks'
)),
'stopTask': (serv.stopTask,
('<TaskID>',
'stop async task'
)),
'clearTask': (serv.clearTask,
('<TaskID>',
'clear async task'
)),
'revertTask': (serv.revertTask,
('<TaskID>',
'revert async task'
)),
'prepareForShutdown': (serv.prepareForShutdown,
('', '')),
'setLogLevel': (serv.do_setLogLevel,
('<level> [logName][,logName]...', 'set log verbosity'
                     ' level (10=DEBUG, 50=CRITICAL)'
)),
'setMOMPolicy': (serv.do_setMOMPolicy,
('<policyfile>', 'set MOM policy')),
'setMOMPolicyParameters': (serv.do_setMOMPolicyParameters,
('key=python_code [key=python_code] ...',
'set variables for MOM policy fine '
'tuning')),
'deleteImage': (serv.deleteImage,
('<sdUUID> <spUUID> <imgUUID> [<postZero>] [<force>]',
'Delete Image folder with all volumes.',
)),
'moveImage': (serv.moveImage,
('<spUUID> <srcDomUUID> <dstDomUUID> <imgUUID> <vmUUID>'
                   ' <op = COPY_OP/MOVE_OP> [<postZero>] [<force>]',
'Move/Copy image between storage domains within same '
'storage pool'
)),
'cloneImageStructure': (serv.cloneImageStructure,
('<spUUID> <sdUUID> <imgUUID> <dstSdUUID>',
'Clone an image structure from a source '
'domain to a destination domain within the '
'same pool.'
)),
'syncImageData': (serv.syncImageData,
('<spUUID> <sdUUID> <imgUUID> <dstSdUUID> '
'<syncType>',
'Synchronize image data between storage domains '
'within same pool.'
)),
'uploadImage': (serv.uploadImage, (
'<methodArgs> <spUUID> <sdUUID> <imgUUID> [<volUUID>]',
        'Upload an image to a remote endpoint using the specified '
'methodArgs.'
)),
'downloadImage': (serv.downloadImage, (
'<methodArgs> <spUUID> <sdUUID> <imgUUID> [<volUUID>]',
        'Download an image from a remote endpoint using the specified '
'methodArgs.'
)),
'moveMultiImage': (serv.moveMultiImage,
('<spUUID> <srcDomUUID> <dstDomUUID> '
'<imgList>({imgUUID=postzero,'
'imgUUID=postzero,...}) <vmUUID> [<force>]',
'Move multiple images between storage domains '
'within same storage pool'
)),
'copyImage': (serv.copyImage,
('<sdUUID> <spUUID> <vmUUID> <srcImgUUID> <srcVolUUID> '
'<dstImgUUID> <dstVolUUID> <dstDescr> <dstSdUUID> '
'<volType> <volFormat> <preallocate> [<postZero>] '
'[<force>]',
'Create new template/volume from VM.',
                   'Do it by collapsing and copying the whole chain '
'(baseVolUUID->srcVolUUID)'
)),
'mergeSnapshots': (serv.mergeSnapshots,
('<sdUUID> <spUUID> <vmUUID> <imgUUID> <Ancestor '
'Image uuid> <Successor Image uuid> [<postZero>]',
'Merge images from successor to ancestor.',
                        'The result is an image named as the successor '
                        'image and contains the data of the whole successor->'
'ancestor chain'
)),
'desktopLogin': (serv.desktopLogin,
('<vmId> <domain> <user> <password>',
'Login to vmId desktop using the supplied '
'credentials'
)),
'desktopLogoff': (serv.desktopLogoff,
('<vmId> <force>',
                       'Log off the user session. force should be set to '
                       'true/false'
)),
'desktopLock': (serv.desktopLock,
('<vmId>',
                     'Lock the current user session'
)),
'sendHcCmd': (serv.sendHcCmd,
('<vmId> <message>',
'Sends a message to a specific VM through Hypercall '
'channel'
)),
'hibernate': (serv.hibernate,
('<vmId> <hiberVolHandle>',
'Hibernates the desktop'
)),
'monitorCommand': (serv.monitorCommand,
('<vmId> <string>',
'Send a string containing monitor command to the '
'desktop'
)),
'getVmsInfo': (serv.do_getVmsInfo,
('<spUUID> [<sdUUID> [vmList](vmId1,vmId2,...)]',
'Return info of VMs from the pool or a backup domain '
'if its sdUUID is given. If vmList is also given, '
'return info for these VMs only.'
)),
'getVmsList': (serv.do_getVmsList,
('<spUUID> [sdUUID]',
'Get list of VMs from the pool or domain if sdUUID '
'given. Run only from the SPM.'
)),
'setupNetworks': (serv.do_setupNetworks,
('[connectivityCheck=False(default)|True] '
'[connectivityTimeout=<seconds>] '
'[<option>=<value>] '
'[networks=\'{<bridge>:{nic:<nic>,vlan:<number>,'
'bonding:<bond>,...}}\'] '
'[bondings=\'{<bond>:{nics:<nic>[+<nic>],..}}\']',
'Setup new configuration of multiple networks and '
'bonds.'
)),
'addNetwork': (serv.do_addNetwork,
('bridge=<bridge> [vlan=<number>] [bond=<bond>] '
'nics=nic[,nic]',
'Add a new network to this vds.'
)),
'delNetwork': (serv.do_delNetwork,
('bridge=<bridge> [vlan=<number>] [bond=<bond>] '
'nics=nic[,nic]',
'Remove a network (and parts thereof) from this vds.'
)),
'editNetwork': (serv.do_editNetwork,
('oldBridge=<bridge> newBridge=<bridge> [vlan=<number>]'
' [bond=<bond>] nics=nic[,nic]',
'Replace a network with a new one.'
)),
'setSafeNetworkConfig': (serv.do_setSafeNetworkConfig,
('',
'declare current network configuration as '
'"safe"'
)),
'fenceNode': (serv.do_fenceNode,
('<addr> <port> <agent> <user> <passwd> <action> '
'[<secure> [<options>]] \n\t<action> is one of '
'(status, on, off, reboot),\n\t<agent> is one of '
'(rsa, ilo, ipmilan, drac5, etc)\n\t<secure> '
'(true|false) may be passed to some agents',
'send a fencing command to a remote node'
)),
'repoStats': (serv.repoStats,
('',
'Get the health status of the monitored domains'
)),
'startMonitoringDomain': (serv.startMonitoringDomain,
('<sdUUID> <hostID>',
'Start SD: sdUUID monitoring with hostID'
)),
'stopMonitoringDomain': (serv.stopMonitoringDomain,
('<sdUUID>',
'Stop monitoring SD: sdUUID'
)),
'snapshot': (serv.snapshot,
('<vmId> <sdUUID> <imgUUID> <baseVolUUID> <volUUID>',
'Take a live snapshot'
)),
'setBalloonTarget': (serv.setBalloonTarget,
('<vmId> <target>',
"Set VM's balloon target"
)),
'diskReplicateStart': (serv.diskReplicateStart,
('<vmId> <spUUID> <sdUUID> <imgUUID> <volUUID> '
'<dstSdUUID>',
'Start live replication to the destination '
'domain'
)),
'diskReplicateFinish': (serv.diskReplicateFinish,
('<vmId> <spUUID> <sdUUID> <imgUUID> <volUUID>'
' <dstSdUUID>',
'Finish live replication to the destination '
'domain'
)),
'diskSizeExtend': (
serv.diskSizeExtend, (
'<vmId> <spUUID> <sdUUID> <imgUUID> <volUUID> <newSize>',
'Extends the virtual size of a disk'
)),
}
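    # Example invocations (sketch; the host name and UUID placeholders below
    # are illustrative, not taken from this file):
    #
    #   vdsClient -s myhost.example.com getConnectedStoragePoolsList
    #   vdsClient -s myhost.example.com getVolumeInfo <sdUUID> <spUUID> \
    #             <imgUUID> <volUUID>
    #
    # "-s" enables SSL, as parsed from the getopt options below.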
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
try:
opts, args = getopt.getopt(sys.argv[1:], "hmso", ["help", "methods",
"SSL", "truststore=",
"oneliner"])
for o, v in opts:
if o == "-h" or o == "--help":
usage(commands)
sys.exit(0)
if o == "-m" or o == "--methods":
usage(commands, False)
sys.exit(0)
if o == "-s" or o == "--SSL":
serv.useSSL = True
if o == "--truststore":
serv.truststore = v
if o == '-o' or o == '--oneliner':
serv.pretty = False
if len(args) < 2:
raise Exception("Need at least two arguments")
server, command = args[0:2]
if command not in commands:
raise Exception("Unknown command")
hostPort = vdscli.cannonizeHostPort(server)
except SystemExit as status:
sys.exit(status)
except Exception as e:
print "ERROR - %s" % (e)
usage(commands)
sys.exit(-1)
try:
serv.do_connect(hostPort)
try:
commandArgs = args[2:]
except:
commandArgs = []
code, message = commands[command][0](commandArgs)
if code != 0:
code = 1
print message
sys.exit(code)
except (TypeError, IndexError, ValueError, AssertionError) as e:
print "Error using command:", e, "\n"
print command
for line in commands[command][1]:
print '\t' + line
sys.exit(-1)
except SystemExit as status:
sys.exit(status)
except socket.error as e:
if e[0] == 111:
print "Connection to %s refused" % hostPort
else:
traceback.print_exc()
sys.exit(-1)
except:
traceback.print_exc()
sys.exit(-1)
| gpl-2.0 | -6,174,053,681,563,520,000 | 39.057054 | 79 | 0.468451 | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsvariable.py | 1 | 16571 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsvariable(base_resource) :
""" Configuration for variable resource. """
def __init__(self) :
self._name = ""
self._type = ""
self._scope = ""
self._iffull = ""
self._ifvaluetoobig = ""
self._ifnovalue = ""
self._init = ""
self._expires = 0
self._comment = ""
self._referencecount = 0
self.___count = 0
@property
def name(self) :
"""Variable name. This follows the same syntax rules as other default syntax expression entity names:
It must begin with an alpha character (A-Z or a-z) or an underscore (_).
The rest of the characters must be alpha, numeric (0-9) or underscores.
It cannot be re or xp (reserved for regular and XPath expressions).
It cannot be a default syntax expression reserved word (e.g. SYS or HTTP).
It cannot be used for an existing default syntax expression object (HTTP callout, patset, dataset, stringmap, or named expression).<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Variable name. This follows the same syntax rules as other default syntax expression entity names:
It must begin with an alpha character (A-Z or a-z) or an underscore (_).
The rest of the characters must be alpha, numeric (0-9) or underscores.
It cannot be re or xp (reserved for regular and XPath expressions).
It cannot be a default syntax expression reserved word (e.g. SYS or HTTP).
It cannot be used for an existing default syntax expression object (HTTP callout, patset, dataset, stringmap, or named expression).<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def type(self) :
"""Specification of the variable type; one of the following:
ulong - singleton variable with an unsigned 64-bit value.
text(value-max-size) - singleton variable with a text string value.
map(text(key-max-size),ulong,max-entries) - map of text string keys to unsigned 64-bit values.
map(text(key-max-size),text(value-max-size),max-entries) - map of text string keys to text string values.
where
value-max-size is a positive integer that is the maximum number of bytes in a text string value.
key-max-size is a positive integer that is the maximum number of bytes in a text string key.
max-entries is a positive integer that is the maximum number of entries in a map variable.
For a global singleton text variable, value-max-size <= 64000.
For a global map with ulong values, key-max-size <= 64000.
For a global map with text values, key-max-size + value-max-size <= 64000.
max-entries is a positive integer that is the maximum number of entries in a map variable. This has a theoretical maximum of 2^64-1, but in actual use will be much smaller, considering the memory available for use by the map.
Example:
map(text(10),text(20),100) specifies a map of text string keys (max size 10 bytes) to text string values (max size 20 bytes), with 100 max entries.<br/>Minimum length = 1.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Specification of the variable type; one of the following:
ulong - singleton variable with an unsigned 64-bit value.
text(value-max-size) - singleton variable with a text string value.
map(text(key-max-size),ulong,max-entries) - map of text string keys to unsigned 64-bit values.
map(text(key-max-size),text(value-max-size),max-entries) - map of text string keys to text string values.
where
value-max-size is a positive integer that is the maximum number of bytes in a text string value.
key-max-size is a positive integer that is the maximum number of bytes in a text string key.
max-entries is a positive integer that is the maximum number of entries in a map variable.
For a global singleton text variable, value-max-size <= 64000.
For a global map with ulong values, key-max-size <= 64000.
For a global map with text values, key-max-size + value-max-size <= 64000.
max-entries is a positive integer that is the maximum number of entries in a map variable. This has a theoretical maximum of 2^64-1, but in actual use will be much smaller, considering the memory available for use by the map.
Example:
map(text(10),text(20),100) specifies a map of text string keys (max size 10 bytes) to text string values (max size 20 bytes), with 100 max entries.<br/>Minimum length = 1
"""
try :
self._type = type
except Exception as e:
raise e
@property
def scope(self) :
"""Scope of the variable:
global - (default) one set of values visible across all Packet Engines and, in a cluster, all nodes.<br/>Default value: global<br/>Possible values = global.
"""
try :
return self._scope
except Exception as e:
raise e
@scope.setter
def scope(self, scope) :
"""Scope of the variable:
global - (default) one set of values visible across all Packet Engines and, in a cluster, all nodes.<br/>Default value: global<br/>Possible values = global
"""
try :
self._scope = scope
except Exception as e:
raise e
@property
def iffull(self) :
"""Action to perform if an assignment to a map exceeds its configured max-entries:
lru - (default) reuse the least recently used entry in the map.
undef - force the assignment to return an undefined (Undef) result to the policy executing the assignment.<br/>Default value: lru<br/>Possible values = undef, lru.
"""
try :
return self._iffull
except Exception as e:
raise e
@iffull.setter
def iffull(self, iffull) :
"""Action to perform if an assignment to a map exceeds its configured max-entries:
lru - (default) reuse the least recently used entry in the map.
undef - force the assignment to return an undefined (Undef) result to the policy executing the assignment.<br/>Default value: lru<br/>Possible values = undef, lru
"""
try :
self._iffull = iffull
except Exception as e:
raise e
@property
def ifvaluetoobig(self) :
"""Action to perform if an value is assigned to a text variable that exceeds its configured max-size,
or if a key is used that exceeds its configured max-size:
truncate - (default) truncate the text string to the first max-size bytes and proceed.
undef - force the assignment or expression evaluation to return an undefined (Undef) result to the policy executing the assignment or expression.<br/>Default value: truncate<br/>Possible values = undef, truncate.
"""
try :
return self._ifvaluetoobig
except Exception as e:
raise e
@ifvaluetoobig.setter
def ifvaluetoobig(self, ifvaluetoobig) :
"""Action to perform if an value is assigned to a text variable that exceeds its configured max-size,
or if a key is used that exceeds its configured max-size:
truncate - (default) truncate the text string to the first max-size bytes and proceed.
undef - force the assignment or expression evaluation to return an undefined (Undef) result to the policy executing the assignment or expression.<br/>Default value: truncate<br/>Possible values = undef, truncate
"""
try :
self._ifvaluetoobig = ifvaluetoobig
except Exception as e:
raise e
@property
def ifnovalue(self) :
"""Action to perform if on a variable reference in an expression if the variable is single-valued and uninitialized
or if the variable is a map and there is no value for the specified key:
init - (default) initialize the single-value variable, or create a map entry for the key and the initial value,
using the -init value or its default.
undef - force the expression evaluation to return an undefined (Undef) result to the policy executing the expression.<br/>Default value: init<br/>Possible values = undef, init.
"""
try :
return self._ifnovalue
except Exception as e:
raise e
@ifnovalue.setter
def ifnovalue(self, ifnovalue) :
"""Action to perform if on a variable reference in an expression if the variable is single-valued and uninitialized
or if the variable is a map and there is no value for the specified key:
init - (default) initialize the single-value variable, or create a map entry for the key and the initial value,
using the -init value or its default.
undef - force the expression evaluation to return an undefined (Undef) result to the policy executing the expression.<br/>Default value: init<br/>Possible values = undef, init
"""
try :
self._ifnovalue = ifnovalue
except Exception as e:
raise e
@property
def init(self) :
"""Initialization value for values in this variable. Default: 0 for ulong, NULL for text.
"""
try :
return self._init
except Exception as e:
raise e
@init.setter
def init(self, init) :
"""Initialization value for values in this variable. Default: 0 for ulong, NULL for text.
"""
try :
self._init = init
except Exception as e:
raise e
@property
def expires(self) :
"""Value expiration in seconds. If the value is not referenced within the expiration period it will be deleted. 0 (the default) means no expiration.<br/>Maximum length = 31622400.
"""
try :
return self._expires
except Exception as e:
raise e
@expires.setter
def expires(self, expires) :
"""Value expiration in seconds. If the value is not referenced within the expiration period it will be deleted. 0 (the default) means no expiration.<br/>Maximum length = 31622400
"""
try :
self._expires = expires
except Exception as e:
raise e
@property
def comment(self) :
"""Comments associated with this variable.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
"""Comments associated with this variable.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def referencecount(self) :
"""The number of references to the variable in expressions and assignments.
"""
try :
return self._referencecount
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsvariable_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsvariable
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add nsvariable.
"""
try :
if type(resource) is not list :
addresource = nsvariable()
addresource.name = resource.name
addresource.type = resource.type
addresource.scope = resource.scope
addresource.iffull = resource.iffull
addresource.ifvaluetoobig = resource.ifvaluetoobig
addresource.ifnovalue = resource.ifnovalue
addresource.init = resource.init
addresource.expires = resource.expires
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsvariable() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].type = resource[i].type
addresources[i].scope = resource[i].scope
addresources[i].iffull = resource[i].iffull
addresources[i].ifvaluetoobig = resource[i].ifvaluetoobig
addresources[i].ifnovalue = resource[i].ifnovalue
addresources[i].init = resource[i].init
addresources[i].expires = resource[i].expires
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete nsvariable.
"""
try :
if type(resource) is not list :
deleteresource = nsvariable()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsvariable() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsvariable() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the nsvariable resources that are configured on netscaler.
"""
try :
if not name :
obj = nsvariable()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = nsvariable()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nsvariable() for _ in range(len(name))]
obj = [nsvariable() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nsvariable()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of nsvariable resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsvariable()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the nsvariable resources configured on NetScaler.
"""
try :
obj = nsvariable()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of nsvariable resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsvariable()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Iffull:
undef = "undef"
lru = "lru"
class Scope:
GLOBAL = "global"
class Ifvaluetoobig:
undef = "undef"
truncate = "truncate"
class Ifnovalue:
undef = "undef"
init = "init"
class nsvariable_response(base_response) :
def __init__(self, length=1) :
self.nsvariable = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsvariable = [nsvariable() for _ in range(length)]
| apache-2.0 | 4,863,755,548,847,731,000 | 35.181223 | 227 | 0.704001 | false |
0Chencc/CTFCrackTools | Lib/test/test_SimpleXMLRPCServer.py | 1 | 2895 | #
# Matt Shelton <[email protected]>
#
from SimpleXMLRPCServer import SimpleXMLRPCServer
import threading, xmlrpclib, unittest
from test import test_support
HOST = "127.0.0.1"
PORT = 7218
def multiply(x, y):
return x * y
class MyService:
"""This test class is going to be used to test an entire class being
exposed via XML-RPC."""
def _dispatch(self, method, params):
"""This method is called whenever a call is made to the
service."""
func = getattr(self, 'expose_' + method)
return func(*params)
def expose_squared(self, x):
"""Square"""
return x * x
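    # With the _dispatch hook above, a client call such as
    # client.squared(3) is routed to expose_squared(3) (see
    # test_exposeClass below).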
class ServerThread(threading.Thread):
"""A test harness for launching a SimpleXMLRPCServer instance in the
background."""
def __init__(self, server):
threading.Thread.__init__(self)
self.server = server
def run(self):
self.server.socket.settimeout(5)
self.server.allow_reuse_address = 1
self.server.handle_request()
self.server.server_close()
class SimpleXMLRPCServerTestCase(unittest.TestCase):
"""Test case for the Python SimpleXMLRPCServer module."""
def test_exposeLambda(self):
"""Expose a lambda function via XML-RPC."""
# Create a server instance.
server = SimpleXMLRPCServer((HOST, PORT))
server.register_function(lambda x,y: x+y, 'add')
ServerThread(server).start()
# Access the exposed service.
client = xmlrpclib.ServerProxy("http://%s:%d" % (HOST, PORT))
self.assertEqual(client.add(10, 20), 30)
def test_exposeFunction1(self):
"""Expose a function via XML-RPC."""
server = SimpleXMLRPCServer((HOST, PORT + 1))
server.register_function(multiply)
ServerThread(server).start()
# Access the exposed service.
client = xmlrpclib.ServerProxy("http://%s:%d" % (HOST, PORT + 1))
self.assertEqual(client.multiply(5, 10), 50)
def test_exposeFunction2(self):
"""Expose a function using a different name via XML-RPC."""
server = SimpleXMLRPCServer((HOST, PORT + 2))
server.register_function(multiply, "mult")
ServerThread(server).start()
# Access the exposed service.
client = xmlrpclib.ServerProxy("http://%s:%d" % (HOST, PORT + 2))
self.assertEqual(client.mult(7, 11), 77)
def test_exposeClass(self):
"""Expose an entire class and test the _dispatch method."""
server = SimpleXMLRPCServer((HOST, PORT + 3))
server.register_instance(MyService())
ServerThread(server).start()
# Access the exposed service.
client = xmlrpclib.ServerProxy("http://%s:%d" % (HOST, PORT + 3))
self.assertEqual(client.squared(10), 100)
def test_main():
test_support.run_unittest(SimpleXMLRPCServerTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 | 9,152,783,330,394,041,000 | 30.813187 | 73 | 0.63247 | false |
rickiepark/openbidder | protobuf/protobuf-2.6.1/python/ez_setup.py | 1 | 10431 | #!python
# This file was obtained from:
# http://peak.telecommunity.com/dist/ez_setup.py
# on 2011/1/21.
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print((
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
), file=sys.stderr)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict as e:
if was_imported:
print((
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0]), file=sys.stderr)
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib.request, urllib.error, urllib.parse, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib.request.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print((
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
), file=sys.stderr)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print("Setuptools version",version,"or greater has been installed.")
print('(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)')
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in list(md5_data.items())]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'r'); src = f.read(); f.close()  # text mode: src is matched against a str regex below
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print("Internal error!", file=sys.stderr)
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
| mit | 4,900,200,301,619,704,000 | 35.728873 | 86 | 0.654683 | false |
ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/pyhdf/VS.py | 1 | 95700 | # $Id: VS.py,v 1.4 2005-07-14 01:36:41 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.3 2004/08/02 17:06:20 gosselin
# pyhdf-0.7.2
#
# Revision 1.2 2004/08/02 15:36:04 gosselin
# pyhdf-0.7-1
#
# Author: Andre Gosselin
# Maurice-Lamontagne Institute
# [email protected]
"""
VS (Vdata table) API (:mod:`pyhdf.VS`)
======================================
A module of the pyhdf package implementing the VS (Vdata table)
API of the NCSA HDF4 library.
(see: hdf.ncsa.uiuc.edu)
Introduction
------------
VS is one of the modules composing pyhdf, a python package implementing
the NCSA HDF library and letting one manage HDF files from within a python
program. Two versions of the HDF library currently exist, version 4 and
version 5. pyhdf only implements version 4 of the library. Many
different APIs are to be found inside the HDF4 specification.
Currently, pyhdf implements just a few of those: the SD, VS and V APIs.
Other APIs should be added in the future (GR, AN, etc).
VS allows the definition of structured data tables inside an HDF file.
Those tables are designated as "vdatas" (the name has to do with data
associated with the "vertices" of geometrical models, the storage of which
the API was originally designed for). A vdata is composed of a fixed
number of columns (also called fields), where a column can store a fixed
number of data values, all of the same type. The number of values allowed
inside a field is called the "order" of the field. A table is composed of a
varying number of rows (also called records), a record representing the
sequence of values stored in each field of the vdata.
A vdata is associated with a descriptive name, and likewise each field of
the vdata. A vdata can also be tagged with a "class" to further describe the
vdata purpose. Records and fields are identified by a zero-based index.
An arbitrary number of attributes of different types can be attached to
a vdata as a whole, or to its individual fields. An attribute is a
(name, value) pair, where "value" can be of many types, and be either
single or multi-valued. The number of values stored in an attribute is
called the "order" of the attribute.
The following example illustrates a simple vdata that could be stored
inside an HDF file. See section "Programming models" for an example
program implementing this vdata.
INVENTORY (experimental status)
====== =========== === ======== ========
partid description qty wght(lb) price($)
====== =========== === ======== ========
Q1234 bolt 12 0.01 0.05
B5432 brush 10 0.4 4.25
S7613 scissor 2 0.2 3.75
====== =========== === ======== ========
The vdata is composed of 5 fields. 3 records are shown (of course, a vdata
can store much more than that). "INVENTORY" would be the vdata name, and
"partid", "description", etc, would be the field names. The data type varies
between fields. "partid" and "description" would be of "multicharacter" type
(aka "string"), "qty" would be a integer, and "wght" and "price" would be
floats. The text in parentheses could be stored as attributes. A "status"
attribute could be defined for the table as a whole, and given the
value "experimental". Likewise, a "unit" attribute could be associated
with fields "wght" and "price", and given the values "lb" and "$", resp.
The VS API allows one to create, locate and open a vdata inside an
HDF file, update and append records inside it, read records randomly
or sequentially, and access and update the vdata and field attributes.
Attributes can be read and written using the familiar python "dot
notation", and records can be read and written by indexing and slicing the
vdata as if it were a python sequence.
VS module key features
----------------------
VS key features are as follows.
- pyhdf implements almost every routine of the original VS API.
Only a few have been ignored, most of them being of a rare use:
- VSgetblocksize() / VSsetblocksize()
- VSsetnumblocks()
- VSlone
- It is quite straightforward to go from a C version to a python version
of a program accessing the VS API, and to learn VS usage by refering to
the C API documentation.
- A few high-level python methods have been developped to ease
programmers task. Of greatest interest are the following:
- Access to attributes through the familiar "dot notation".
- Indexing and slicing a vdata to read and write its records,
similarly to a python sequence.
- Easy retrieval of info on a vdata and its fields.
- Easy creation of vdatas.
Accessing the VS module
-----------------------
To access the VS module a python program can say one of:
>>> import pyhdf.VS # must prefix names with "pyhdf.VS."
>>> from pyhdf import VS # must prefix names with "VS."
>>> from pyhdf.VS import * # names need no prefix
This document assumes the last import style is used.
VS is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
>>> from .HDF import *
Package components
------------------
pyhdf is a proper Python package, eg a collection of modules stored under
a directory whose name is that of the package and which stores an
__init__.py file. Following the normal installation procedure, this
directory will be <python-lib>/site-packages/pyhdf', where <python-lib>
stands for the python installation directory.
For each HDF API exists a corresponding set of modules.
The following modules are related to the VS API.
_hdfext
C extension module responsible for wrapping the HDF
C library for all python modules
hdfext
python module implementing some utility functions
complementing the _hdfext extension module
error
defines the HDF4Error exception
HDF
python module providing support to the VS module
VS
python module wrapping the VS API routines inside
an OOP framework
_hdfext and hdfext were generated using the SWIG preprocessor.
SWIG is however *not* needed to run the package. Those two modules
are meant to do their work in the background, and should never be called
directly. Only HDF and VS should be imported by the user program.
Prerequisites
-------------
The following software must be installed in order for VS to
work.
HDF (v4) library
pyhdf does *not* include the HDF4 library, which must
be installed separately.
HDF is available at:
"http://hdf.ncsa.uiuc.edu/obtain.html".
Numeric is also needed by the SD module. See the SD module documentation.
Documentation
-------------
pyhdf has been written so as to stick as closely as possible to
the naming conventions and calling sequences documented inside the
"HDF User s Guide" manual. Even if pyhdf gives an OOP twist
to the C API, the manual can be easily used as a documentary source
for pyhdf, once the class to which a function belongs has been
identified, and of course once requirements imposed by the Python
langage have been taken into account. Consequently, this documentation
will not attempt to provide an exhaustive coverage of the HDF VS
API. For this, the user is referred to the above manual.
The documentation of each pyhdf method will indicate the name
of the equivalent routine as it is found inside the C API.
This document (in both its text and html versions) has been completely
produced using "pydoc", the Python documentation generator (which
made its debut in the 2.1 Python release). pydoc can also be used
as an on-line help tool. For example, to know everything about
the VS.VD class, say:
>>> from pydoc import help
>>> from pyhdf.VS import *
>>> help(VD)
To be more specific and get help only for the read() method of the
VD class:
>>> help(VD.read)
pydoc can also be called from the command line, as in::
% pydoc pyhdf.VS.VD # doc for the whole VD class
% pydoc pyhdf.VS.VD.read # doc for the VD.read method
Summary of differences between the pyhdf and C VS API
-----------------------------------------------------
Most of the differences between the pyhdf and C VS API can
be summarized as follows.
- In the C API, every function returns an integer status code, and values
computed by the function are returned through one or more pointers
passed as arguments.
- In pyhdf, error statuses are returned through the Python exception
mechanism, and values are returned as the method result. When the
C API specifies that multiple values are returned, pyhdf returns a
sequence of values, which are ordered similarly to the pointers in the
C function argument list.
Error handling
--------------
All errors reported by the C VS API with a SUCCESS/FAIL error code
are reported by pyhdf using the Python exception mechanism.
When the C library reports a FAIL status, pyhdf raises an HDF4Error
exception (a subclass of Exception) with a descriptive message.
Unfortunately, the C library is rarely informative about the cause of
the error. pyhdf does its best to try to document the error, but most
of the time cannot do more than saying "execution error".
VS needs support from the HDF module
------------------------------------
The VS module is not self-contained (contrary to the SD module).
It requires help from the HDF module, namely:
- the HDF.HDF class to open and close the HDF file, and initialize the
VS interface
- the HDF.HC class to provide different sorts of constants (opening modes,
data types, etc).
A program wanting to access HDF vdatas will almost always need to execute
the following minimal set of calls:
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> hdfFile = HDF(name, HC.xxx)# open HDF file
>>> vs = hdfFile.vstart() # initialize VS interface on HDF file
>>> ... # manipulate vdatas through "vs"
>>> vs.end() # terminate VS interface
>>> hdfFile.close() # close HDF file
Classes summary
---------------
pyhdf wraps the VS API using different python classes::
VS HDF VS interface
VD vdata
VDField vdata field
VDattr attribute (either at the vdata or field level)
In more detail::
VS The VS class implements the VS (Vdata) interface applied to an
HDF file. This class encapsulates the hdf instance, and all
the top-level functions of the VS API.
To create a VS instance, call the vstart() method of an
HDF instance.
methods:
constructors:
attach() open an existing vdata given its name or
reference number, or create a new one,
returning a VD instance
create() create a new vdata and define its structure,
returning a VD instance
creating and initializing a simple vdata
storedata() create a single-field vdata and initialize
its values
closing the interface
end() close the VS interface on the HDF file
searching
find() get a vdata reference number given its name
next() get the reference number of the vdata following
a given one
inquiry
vdatainfo() return info about all the vdatas in the
HDF file
VD The VD class describes a vdata. It encapsulates
the VS instance to which the vdata belongs, and the vdata
identifier.
To instantiate a VD class, call the attach() or create()
method of a VS class instance.
methods:
constructors
attr() create a VDAttr instance representing a
vdata attribute; "dot notation" can also be
used to access a vdata attribute
field() return a VDField instance representing a given
field of the vdata
closing vdata
detach() end access to the vdata
defining fields
fdefine() define the name, type and order of a new field
setfields() define the field names and field order for
the read() and write() methods; also used to
initialize the structure of a vdata previously
created with the VS.attach() method
reading and writing
note: a vdata can be indexed and sliced like a
python sequence
read() return the values of a number of records
starting at the current record position
seek() reset the current record position
seekend() seek past the last record
tell() return the current record position
write() write a number of records starting at the
current record position
inquiry
attrinfo() return info about all the vdata attributes
fexist() check if a vdata contains a given set of fields
fieldinfo() return info about all the vdata fields
findattr() locate an attribute, returning a VDAttr instance
if found
inquire() return info about the vdata
sizeof() return the size in bytes of one or more fields
VDField The VDField class represents a vdata field. It encapsulates
the VD instance to which the field belongs, and the field
index number.
To instantiate a VDField, call the field() method of a VD class
instance.
methods:
constructors:
attr() return a VDAttr instance representing an
attribute of the field; "dot notation"
can also be used to get/set an attribute.
inquiry
attrinfo() return info about all the field attributes
find() locate an attribute, returning a VDAttr
instance if found
VDAttr The VDAttr class encapsulates methods used to set and query
attributes defined at the level either of the vdata or the
vdata field.
To create an instance of this class, call the attr() or
findattr() methods of a VD instance (for vdata attributes),
or call the attr() or find() methods of a VDField instance
(for field attributes).
methods:
get / set
get() get the attribute value
set() set the attribute value
info
info() retrieve info about the attribute
Data types
----------
Data types come into play when first defining vdata fields and attributes,
and later when querying the definition of those fields and attributes.
Data types are specified using the symbolic constants defined inside the
HC class of the HDF module.
- CHAR and CHAR8 (equivalent): an 8-bit character.
- UCHAR, UCHAR8 and UINT8 (equivalent): unsigned 8-bit values (0 to 255)
- INT8: signed 8-bit values (-128 to 127)
- INT16: signed 16-bit values
- UINT16: unsigned 16 bit values
- INT32: signed 32 bit values
- UINT32: unsigned 32 bit values
- FLOAT32: 32 bit floating point values (C floats)
- FLOAT64: 64 bit floating point values (C doubles)
There is no explicit "string" type. To simulate a string, set the field or
attribute type to CHAR, and set the field or attribute "order" to
a value of 'n' > 1. This creates an "array of characters", close
to a string (except that strings will always be of length 'n', right-padded
with spaces if necessary).
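For example, a minimal sketch of defining a 5-character "string" field
(assuming 'vd' is a vdata attached in write mode):
>>> vd.fdefine('partid', HC.CHAR8, 5) # order 5 -> 5-character strings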
Attribute access: low and high level
------------------------------------
The VS API allow setting attributes on vdatas and vdata fields. Attributes
can be of many types (int, float, char) of different bit lengths (8, 16, 32,
64 bits), and can be single or multi-valued. Values of a multi-valued
attribute must all be of the same type.
Attributes can be set and queried in two different ways. First, given a
VD instance (describing a vdata object) or a VDField instance (describing a
vdata field), the attr() method of that instance is called to create a
VDAttr instance representing the wanted attribute (possibly non existent).
The set() method of this VDAttr instance is then called to define the
attribute value, creating it if it does not already exist. The get() method
returns the current attribute value. Here is an example.
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> vs = f.vstart() # init vdata interface
>>> vd = vs.attach('vtest', 1) # attach vdata 'vtest' in write mode
>>> attr = vd.attr('version') # prepare to define the 'version' attribute
# on the vdata
>>> attr.set(HC.CHAR8,'1.0') # set attribute 'version' to string '1.0'
>>> print(attr.get()) # get and print attribute value
>>> fld = vd.field('fld1') # obtain a field instance for field 'fld1'
>>> attr = fld.attr('range') # prepare to define attribute 'range' on
# this field
>>> attr.set(HC.INT32,(-10, 15)) # set attribute 'range' to a pair of ints
>>> print(attr.get()) # get and print attribute value
>>> vd.detach() # "close" the vdata
>>> vs.end() # terminate the vdata interface
>>> f.close() # close the HDF file
The second way consists of setting/querying an attribute as if it were a
normal python class attribute, using the usual dot notation. Above example
then becomes:
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> vs = f.vstart() # init vdata interface
>>> vd = vs.attach('vtest', 1) # attach vdata 'vtest' in write mode
>>> vd.version = '1.0' # create vdata attribute 'version',
# setting it to string '1.0'
>>> print(vd.version) # print attribute value
>>> fld = vd.field('fld1') # obtain a field instance for field 'fld1'
>>> fld.range = (-10, 15) # create field attribute 'range', setting
# it to the pair of ints (-10, 15)
>>> print(fld.range) # print attribute value
>>> vd.detach() # "close" the vdata
>>> vs.end() # terminate the vdata interface
>>> f.close() # close the HDF file
Note how the dot notation greatly simplifies and clarifies the code.
Some latitude is however lost by manipulating attributes in that way,
because the pyhdf package, not the programmer, is then responsible for
setting the attribute type. The attribute type is chosen to be one of:
=========== ====================================
HC.CHAR8 if the attribute value is a string
HC.INT32 if all attribute values are integers
HC.FLOAT64 otherwise
=========== ====================================
The first way of handling attribute values must be used if one wants to
define an attribute of any other type (for ex. 8 or 16 bit integers,
signed or unsigned). Also, only a VDAttr instance gives access to attribute
info, through its info() method.
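For example, given the above rule, the following assignments on a vdata
'vd' would implicitly create attributes of type HC.CHAR8, HC.INT32 and
HC.FLOAT64, respectively:
>>> vd.status = 'test' # string value -> HC.CHAR8, order 4
>>> vd.count = 12 # integer value -> HC.INT32
>>> vd.scale = 1.5 # float value -> HC.FLOAT64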
However, accessing HDF attributes as if they were python attributes raises
an important issue. There must exist a way to assign generic attributes
to the python objects without requiring those attributes to be converted
to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
the object dictionary: the python dir() function will not list them.
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
for attributes holding string values. An attribute initialized with an
'n' character string is simply a character attribute of order 'n' (eg a
character array of length 'n'). If 'vd' is a vdata and we initialize its
'a1' attribute as 'vd.a1 = "abcdef"', then a subsequent update attempt
like 'vd.a1 = "12"' will fail, because we then try to change the order
of the attribute (from 6 to 2). It is mandatory to keep the length of string
attributes constant. Examples below show simple ways how this can be done.
Predefined attributes
---------------------
The VD and VDField classes support predefined attributes to get (and
occasionally set) attribute values easily, without having to call a
class method. The names of predefined attributes all start with an
underscore ('_').
In the following tables, the RW column holds an X if the attribute
is read/write. See the HDF User's Guide for details about more
"exotic" topics like "class", "faked vdata" and "tag".
VD predefined attributes
=========== == ========================== =============================
name RW description C library routine
=========== == ========================== =============================
_class X class name VSgetclass/VSsetclass
_fields list of field names VSgetfields
_interlace X interlace mode VSgetinterlace/VSsetinterlace
_isattr true if vdata is "faked" VSisattr
by HDF to hold attributes
_name X name of the vdata VSgetname/VSsetname
_nattrs number of attributes VSfnattrs
_nfields number of fields VFnfields
_nrecs number of records VSelts
_recsize record size (bytes) VSQueryvsize
_refnum reference number VSQueryref
_tag vdata tag VSQuerytag
_tnattrs total number of vdata and VSnattrs
field attributes
=========== == ========================== =============================
VDField predefined attributes
=========== == ========================== =============================
name RW description C library routine
=========== == ========================== =============================
_esize external size (bytes) VFfieldesize
_index index number VSfindex
_isize internal size (bytes) VFfieldisize
_name name VFfieldname
_nattrs number of attributes VSfnattrs
_order order (number of values) VFfieldorder
_type field type (HC.xxx) VFfieldtype
=========== == ========================== =============================
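For example (a sketch, assuming 'vd' is an attached vdata):
>>> print(vd._name, vd._nrecs, vd._nfields) # vdata name, record/field counts
>>> fld = vd.field(0)
>>> print(fld._name, fld._type, fld._order) # field name, type and order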
Record access: low and high level
---------------------------------
vdata records can be read and written in two different ways. The first one
consists of calling the basic I/O methods of the vdata:
- seek() to set the current record position, if necessary;
- read() to retrieve a given number of records from that position;
- write() to write a given number of records starting at
that position
A second, higher level way, lets one see a vdata similarly to a python
sequence, and access its contents using the familiar indexing and slicing
notation in square brackets. Reading and writing a vdata as if it were a
python sequence may often look simpler, and improve code legibility.
Here are some examples of how a vdata 'vd' holding 3 fields could be read.
>>> print(vd[0]) # print record 0
>>> print(vd[-1]) # print last record
>>> print(vd[2:]) # print records 2 and those that follow
>>> print(vd[:]) # print all records
>>> print(vd[:,0]) # print field 0 of all records
>>> print(vd[:3,:2]) # print first 2 fields of first 3 records
As the above examples show, the usual python rules are obeyed regarding
the interpretation of indexing and slicing values. Note that the vdata
fields can be indexed and sliced, not only the records. The setfields()
method can also be used to select a subset of the vdata fields
(setfields() also lets you reorder the fields). When the vdata is
indexed (as opposed to being sliced), a single record is returned as a list
of values. When the vdata is sliced, a list of records is
always returned (thus a 2-level list), even if the slice contains only
one record.
A vdata can also be written similarly to a python sequence. When indexing
the vdata (as opposed to slicing it), a single record must be assigned,
and the record must be given as a sequence of values. It is legal to use
as an index the current number of records in the vdata: the record is then
appended to the vdata. When slicing the vdata, the records assigned to the
slice must always be given as a list of records, even
if only one record is assigned. Also, the number of records assigned must
always match the width of the slice, except if the slice includes or goes
past the last record of the vdata. In that case, the number of records
assigned can exceed the width of the slice, and the extra records are
appended to the vdata. So, to append records to vdata 'vd', simply
assign records to the slice 'vd[vd._nrecs:]'. Note that, even if the
'field' dimension can be specified in the left-hand side expression,
there is no real interest in doing so, since all fields must
be specified when assigning a record to the vdata: it is an error to
try to assign just a few of the fields.
For example, given a vdata 'vd' holding 5 records, and lists 'reca',
'recb', etc, holding record values::
vd[0] = reca # updates record 0
vd[0,:] = reca # specifying fields is OK, but useless
vd[0,1:] = reca[1:] # error: all fields must be assigned
vd[1] = [recb, recc] # error: only one record allowed
vd[5] = recc # append one record
vd[1:3] = [reca,recb] # updates second and third record
vd[1:4] = [reca, recb] # error: 3 records needed
vd[5:] = [reca,recb] # appends 2 records to the vdata
vd[4:] = [reca, recb] # updates last record, append one
Programming models
------------------
Creating and initializing a new vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following code can serve as a model for the creation and
initialization of a new vdata. It implements the INVENTORY example
described in the "Introduction" section::
from pyhdf.HDF import *
from pyhdf.VS import *
# Open HDF file and initialize the VS interface
f = HDF('inventory.hdf', # Open file 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
# Create vdata and define its structure
vd = vs.create( # create a new vdata
'INVENTORY', # name of the vdata
# fields of the vdata follow
(('partid',HC.CHAR8, 5), # 5 char string
('description',HC.CHAR8, 10), # 10 char string field
('qty',HC.INT16, 1), # 1 16 bit int field
('wght',HC.FLOAT32, 1), # 1 32 bit float
('price',HC.FLOAT32,1) # 1 32 bit float
)) # 5 fields allocated in the vdata
# Set attributes on the vdata and its fields
vd.field('wght').unit = 'lb'
vd.field('price').unit = '$'
# In order to be able to update a string attribute, it must
# always be set to the same length. This sets 'status' to a 20
# char long, left-justified string, padded with spaces on the right.
vd.status = "%-20s" % 'phase 1 done'
# Store records
vd.write(( # write 3 records
('Q1234', 'bolt',12, 0.01, 0.05), # record 1
('B5432', 'brush', 10, 0.4, 4.25), # record 2
('S7613', 'scissor', 2, 0.2, 3.75) # record 3
))
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Note that it is mandatory to always write whole records to the vdata.
Note also the comments about the initialization of the 'status'
vdata attribute. We want to be able to update this attribute (see
following examples). However, the VS API prohibits changing an attribute
type when updating its value. Since the length (order) of an attribute
is part of its type, we make sure of setting the attribute to a length
long enough to accommodate the longest possible string we might want to
assign to the attribute.
Appending records to a vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Appending records requires first seeking to the end of the vdata, to avoid
overwriting existing records. The following code can serve as a model. The
INVENTORY vdata created before is used::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list
# where number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
len = vd.attr('status').info()[2]
vd.status = '%-*s' % (len, 'phase 2 done')
vd[vd._nrecs:] = ( # append 2 records
('A4321', 'axe', 5, 1.5, 25), # first record
('C3214', 'cup', 100, 0.1, 3.25) # second record
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Note how, when updating the value of the 'status' vdata attribute,
we take care of assigning a value of the same length as that of the
original value. Otherwise, the assignment would raise an exception.
Records are written by assigning the vdata through a slicing
expression, like a python sequence. By specifying the number of records
as the start of the slice, the records are appended to the vdata.
Updating records in a vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating requires seeking to the record to update before writing the new
records. New data will overwrite this record and all records that follow,
until a new seek is performed or the vdata is closed. Note that record
numbering starts at 0.
The following code can serve as a model. The INVENTORY vdata created
before is used::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list
# where number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
len = vd.attr('status').info()[2]
vd.status = '%-*s' % (len, 'phase 3 done')
# Update record at index 1 (second record)
vd[1] = ('Z4367', 'surprise', 10, 3.1, 44.5)
# Update record at index 4, and all those that follow
vd[4:] = (
('QR231', 'toy', 12, 2.5, 45),
('R3389', 'robot', 3, 45, 2000)
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Reading a vdata
^^^^^^^^^^^^^^^
The following example shows how to read the vdata attributes and sequentially
maneuver through its records. Note how we use the exception mechanism
to break out of the reading loop when we reach the end of the vdata::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf') # open 'inventory.hdf' in read mode
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY') # attach 'INVENTORY' in read mode
# Display some vdata attributes
print "status:", vd.status
print "vdata: ", vd._name # predefined attribute: vdata name
print "nrecs: ", vd._nrecs # predefined attribute: num records
# Display value of attribute 'unit' for all fields on which
# this attribute is set
print "units: ",
for fieldName in vd._fields: # loop over all field names
try:
# instantiate field and obtain value of attribute 'unit'
v = vd.field(fieldName).unit
print "%s: %s" % (fieldName, v),
except: # no 'unit' attribute: ignore
pass
print ""
print ""
# Display table header.
header = "%-7s %-12s %3s %4s %8s" % tuple(vd._fields)
print "-" * len(header)
print header
print "-" * len(header)
# Loop over the vdata records, displaying each record as a table row.
# Current record position is 0 after attaching the vdata.
while 1:
try:
rec = vd.read() # read next record
# equivalent to:
# rec = vd[vd.tell()]
print "%-7s %-12s %3d %4.1f %8.2f" % tuple(rec[0])
except HDF4Error: # end of vdata reached
break
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
In the previous example, the reading/displaying loop can be greatly
simplified by rewriting it as follows::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf') # open 'inventory.hdf' in read mode
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY') # attach 'INVENTORY' in read mode
....
# Read all records at once, and loop over the sequence.
for rec in vd[:]:
print "%-7s %-12s %3d %4.1f %8.2f" % tuple(rec)
vd.detach() # "close" the vdata
...
The indexing expression 'vd[:]' returns the complete set of records,
which can then be looped over using a 'for' statement. This style of loop
is quite clean, and should look very familiar to python adepts.
"""
import os, sys, types
from . import hdfext as _C
from . import six
from .six.moves import xrange
from .HC import HC
from .error import HDF4Error, _checkErr
# List of names we want to be imported by an "from pyhdf.VS import *"
# statement
__all__ = ['VS', 'VD', 'VDField', 'VDAttr']
class VS(object):
"""The VS class implements the VS (Vdata) interface applied to an
HDF file.
To instantiate a VS class, call the vstart() method of an
HDF instance. """
def __init__(self, hinst):
# Not to be called directly by the user.
# A VS object is instantiated using the vstart()
# method of an HDF instance.
# Args:
# hinst HDF instance
# Returns:
# A VS instance
#
# C library equivalent : Vstart (rather: Vinitialize)
# Private attributes:
# _hdf_inst: HDF instance
# Note: Vstart is just a macro; use 'Vinitialize' instead
status = _C.Vinitialize(hinst._id)
_checkErr('VS', status, "cannot initialize VS interface")
self._hdf_inst = hinst
def __del__(self):
"""Delete the instance, first calling the end() method
if not already done. """
try:
if self._hdf_inst:
self.end()
except:
pass
def end(self):
"""Close the VS interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
_checkErr('end', _C.Vfinish(self._hdf_inst._id),
"cannot terminate VS interface")
self._hdf_inst = None
vend = end # For backward compatibility
def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdata names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
"""
mode = write and 'w' or 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd)
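# Usage sketch (illustrative, assuming an HDF instance 'f'):
#   vs = f.vstart()
#   vd = vs.attach('INVENTORY', write=1)  # open existing vdata in write mode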
def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg)
def find(self, vName):
"""Get the reference number of a vdata given its name.
The vdata can then be opened (attached) by passing this
reference number to the attach() method.
Args::
vName Name of the vdata for which the reference number
is needed. vdata names are not guaranteed to be
unique. When more than one vdata bears the same name,
find() will return the refnum of the first one found.
Returns::
vdata reference number. 0 is returned if the vdata does not exist.
C library equivalent : VSfind
"""
refNum = _C.VSfind(self._hdf_inst._id, vName)
_checkErr("find", refNum, "cannot find vdata %s" % vName)
return refNum
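# Usage sketch (illustrative):
#   ref = vs.find('INVENTORY')   # 0 is returned if the vdata does not exist
#   if ref:
#       vd = vs.attach(ref)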
def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num
def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst
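# Usage sketch (illustrative): list the name and record count of each vdata.
#   for name, cls, ref, nrecs, nflds, natts, recsz, tag, mode in vs.vdatainfo():
#       print(name, nrecs)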
def storedata(self, fieldName, values, data_type, vName, vClass):
"""Create and initialize a single field vdata, returning
the vdata reference number.
Args::
fieldName Name of the single field in the vdata to create
values Sequence of values to store in the field. Each value can
itself be a sequence, in which case the field will be
multivalued (all second-level sequences must be of
the same length)
data_type Values type (one of HC.xxx constants). All values
must be of the same type
vName Name of the vdata to create
vClass Vdata class (string)
Returns::
vdata reference number
C library equivalent : VHstoredata / VHstoredatam
"""
# See if the field is multi-valued.
nrecs = len(values)
if type(values[0]) in [list, tuple]:
order = len(values[0])
# Replace input list with a flattened list.
newValues = []
for el in values:
for e in el:
newValues.append(e)
values = newValues
else:
order = 1
n_values = nrecs * order
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("storedata: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
if order == 1:
vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass)
else:
vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass, order)
_checkErr('storedata', vd, 'cannot create vdata')
return vd
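# Usage sketch (illustrative): store 3 float measurements in a new
# single-field vdata; the field, vdata and class names are hypothetical.
#   ref = vs.storedata('temp', (1.0, 2.5, 3.75), HC.FLOAT32,
#                      'MEASUREMENTS', 'climate')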
class VD(object):
"""The VD class encapsulates the functionnality of a vdata.
To instantiate a VD class, call the attach() or the create()
method of a VS class instance."""
def __init__(self, vsinst, id):
# This constructor is not intended to be called directly
# by the user program. The attach() method of an
# VS class instance should be called instead.
# Arg:
# vsinst VS instance from which the call is made
# id vdata reference number
# Private attributes:
# _vs_inst VS instance to which the vdata belongs
# _id vdata identifier
# _offset current record offset
# _setfields last arg to setfields()
self._vs_inst = vsinst
self._id = id
self._offset = 0
self._setfields = None
def __getattr__(self, name):
"""Some vdata properties can be queried/set through the following
attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes. Most are read-only.
Only the _class, _fields, _interlace and _name can be modified.
_fields and _interlace can only be set once.
Name RO Description C library routine
----- -- ----------------- -----------------
_class class name VSgetclass
_fields X field names VSgetfields
_interlace interlace mode VSgetinterlace
_isattr X attribute vs real vdata VSisattr
_name name VSgetname
_nattrs X number of attributes VSfnattrs
_nfields X number of fields VFnfields
_nrecs X number of records VSelts
_recsize X record size VSQueryvsize
_refnum X reference number VSQueryref
_tag X tag VSQuerytag
_tnattrs X total number of attr. VSnattrs
"""
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute
elif name == "_class":
status, nm = _C.VSgetclass(self._id)
_checkErr('_class', status, 'cannot get vdata class')
return nm
elif name == "_fields":
n, fields = _C.VSgetfields(self._id)
_checkErr('_fields', n, "cannot get vdata field names")
return fields.split(',')
elif name == "_interlace":
mode = _C.VSgetinterlace(self._id)
_checkErr('_interlace', mode, "cannot get vdata interlace mode")
return mode
elif name == "_isattr":
return _C.VSisattr(self._id)
elif name == "_name":
status, nm = _C.VSgetname(self._id)
_checkErr('_name', status, 'cannot get vdata name')
return nm
elif name == "_nattrs":
n = _C.VSfnattrs(self._id, -1) # -1: vdata attributes
_checkErr("_nfields", n, "cannot retrieve number of attributes")
return n
elif name == "_nfields":
n = _C.VFnfields(self._id)
_checkErr("_nfields", n, "cannot retrieve number of fields")
return n
elif name == "_nrecs":
n = _C.VSelts(self._id)
_checkErr('_nrecs', n, 'cannot get vdata number of records')
return n
elif name == "_recsize":
return self.inquire()[3]
elif name == "_refnum":
n = _C.VSQueryref(self._id)
_checkErr('refnum', n, 'cannot get reference number')
return n
elif name == "_tag":
n = _C.VSQuerytag(self._id)
_checkErr('_tag', n, 'cannot get tag')
return n
elif name == "_tnattrs":
n = _C.VSnattrs(self._id)
_checkErr('_tnattrs', n, 'execution error')
return n
raise AttributeError
def __setattr__(self, name, value):
# A name starting with an underscore will be treated as
# a standard python attribute, and as an HDF attribute
# otherwise.
# Forbid assigning to our predefined attributes
if name in ["_fields", "_isattr", "_nattrs", "_nfields",
"_nrecs", "_recsize", "_refnum", "_tag", "_tnattrs"]:
raise AttributeError("%s: read-only attribute" % name)
# Handle the 3 VS attributes: _class, _interlace
# and _name. _interlace can only be set once.
elif name == "_class":
_checkErr(name, _C.VSsetclass(self._id, value),
'cannot set _class property')
elif name == "_interlace":
_checkErr(name, _C.VSsetinterlace(self._id, value),
'cannot set _interlace property')
elif name == "_name":
_checkErr(name, _C.VSsetname(self._id, value),
'cannot set _name property')
# Try to set the attribute.
else:
_setattr(self, name, value)
def __getitem__(self, elem):
# This method is called when the vdata is read
# like a Python sequence.
# Parse the indexing expression.
start, count = self.__buildStartCount(elem)
# Reset current position if necessary.
if self._offset != start[0]:
self.seek(start[0])
# Get records. A negative count means that an index was used.
recs = self.read(abs(count[0]))
# See if all the fields must be returned.
f0 = start[1]
if f0 == 0 and count[1] == self._nfields:
out = recs
else:
# Return only a subset of the vdata fields.
out = []
f1 = f0 + count[1]
for r in recs:
out.append(r[f0:f1])
# If an index was used (not a slice), return the record as
# a list, instead of returning it inside a 2-level list,
if count[0] < 0:
return out[0]
return out
def __setitem__(self, elem, data):
# This method is called when the vdata is written
# like a Python sequence.
#
# When indexing the vdata, 'data' must specify exactly
one record, which must be specified as a sequence. If the index is
# equal to the current number of records, the record
# is appended to the vdata.
#
# When slicing the vdata, 'data' must specify a list of records.
# The number of records in the top level-list must match the width
# of the slice, except if the slice extends past the end of the
# vdata. In that case, extra records can be specified in the list,
# which will be appended to the vdata. In other words,
# to append records to vdata 'vd', assign records to
# the slice 'vd[vd._nrecs:]'.
#
# For ex., given a vdata 'vd' holding 5 records, and lists
# 'reca', 'recb', etc holding record values:
# vd[0] = reca # updates record 0
# vd[1] = [recb, recc] # error: only one record allowed
# vd[1:3] = [reca,recb] # updates second and third record
# vd[1:4] = [reca, recb] # error: 3 records needed
# vd[5:] = [reca,recb] # appends 2 records to the vdata
# Check that arg is a list.
if not type(data) in [tuple, list]:
raise HDF4Error("record(s) must be specified as a list")
start, count = self.__buildStartCount(elem, setitem=1)
# Records cannot be partially written.
if start[1] != 0 or count[1] != self._nfields:
raise HDF4Error("each vdata field must be written")
# If an index (as opposed to a slice) was applied to the
# vdata, a single record must be passed. Since write() requires
# a 2-level list, wrap this record inside a list.
if count[0] < 0:
if len(data) != self._nfields:
raise HDF4Error("record does not specify all fields")
data = [data]
# A slice was used. The slice length must match the number of
# records, except if the end of the slice equals the number
of records. Then, extra records can be specified, which will
# be appended to the vdata.
else:
if count[0] != len(data):
if start[0] + count[0] != self._nrecs:
raise HDF4Error("illegal number of records")
# Reset current record position if necessary.
if self._offset != start[0]:
self.seek(start[0])
# Write records.
recs = self.write(data)
def __del__(self):
"""Delete the instance, first calling the detach() method
if not already done. """
try:
if self._id:
self.detach()
except:
pass
def detach(self):
"""Terminate access to the vdata.
Args::
no argument
Returns::
None
C library equivalent : VSdetach
"""
_checkErr('detach', _C.VSdetach(self._id), "cannot detach vdata")
self._id = None
def fdefine(self, name, type, order):
"""Define a field. To initialize a newly created vdata with
fields created with fdefine(), assign a tuple of field names
to the _fields attribute or call the setfields() method.
Args::
name field name
type field data type (one of HC.xxx)
order field order (number of values in the field)
Returns::
None
C library equivalent : VSfdefine
"""
_checkErr('fdefine', _C.VSfdefine(self._id, name, type, order),
'cannot define field')
def setfields(self, *fldNames):
"""Define the name and order of the fields to access
with the read() and write() methods.
Args::
fldNames variable length argument specifying one or more
vdata field names
Returns::
None
C library equivalent : VSsetfields
setfields() indicates how to perform the matching between the vdata
fields and the values passed to the write() method or returned
by the read() method.
For example, if the vdata contains fields 'a', 'b' and 'c' and
a "setfields('c','a')" call is made, read() will thereafter return
for each record the values of field 'c' and 'a', in that order.
Field 'b' will be ignored.
When writing to a vdata, setfields() has a second usage. It is used
to initialize the structure of the vdata, that is, the name and order
of the fields that it will contain. The fields must have been
previously defined by calls to the fdefine() method.
Following that first call, setfields() can be called again to
change the order in which the record values will be passed
to the write() method. However, since it is mandatory to write
whole records, subsequent calls to setfields() must specify every
field name: only the field order can be changed.
"""
_checkErr('setfields', _C.VSsetfields(self._id, ','.join(fldNames)),
'cannot execute')
self._setfields = fldNames # remember for read/write routines
def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index)
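# Usage sketch (illustrative): a field can be obtained by name or by index.
#   fld = vd.field('partid')  # same as vd.field(0) if 'partid' is the first field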
def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend()
else:
raise HDF4Error("attempt to seek past last record")
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n
def seekend(self):
"""Set the current record position past the last vdata record.
Subsequent write() calls will append records to the vdata.
Args::
no argument
Returns::
index of the last record plus 1
C library equivalent : no equivalent
"""
try:
# Seek to the next-to-last record position
n = self.seek(self._nrecs - 1) # updates _offset
# Read last record, ignoring values
self.read(1) # updates _offset
return self._nrecs
except HDF4Error:
raise HDF4Error("seekend: cannot execute")
def tell(self):
"""Return current record position in the vdata.
Args::
no argument
Returns::
current record position; 0 is at start of vdata.
C library equivalent : no equivalent
"""
return self._offset
def read(self, nRec=1):
"""Retrieve the values of a number of records, starting
at the current record position. The current record position
is advanced by the number of records read. Current position
is 0 after "opening" the vdata with the attach() method.
Args::
nRec number of records to read
Returns::
2-level list. First level is a sequence of records,
second level gives the sequence of values for each record.
The values returned for each record are those of the fields
specified in the last call to method setfields(), in that
order. The complete vdata field set is returned if
setfields() has not been called.
An exception is raised if the current record position is
already at the end of the vdata when read() is called. This
exception can be caught as an "end of vdata" indication to
exit a loop which scans each record of the vdata. Otherwise,
the number of records to be read is lowered to the number of
records remaining in the vdata, if that number is less than
the number asked for by parameter 'nRec'. Setting 'nRec' to
an arbitrarily large value can thus be used to retrieve the
remaining records in the vdata.
C library equivalent : VSread
"""
# Validate number of records to read vs the current offset.
# Return "end of vdata" exception if already at end of vdata
# otherwise "clip" the number of records if it exceeds the
# number of remaining records in the vdata.
n = self._nrecs
if self._offset == n:
raise HDF4Error("end of vdata reached")
if self._offset + nRec > n:
nRec = n - self._offset # clip to the number of remaining records
fields = self._setfields or self._fields
nFields = len(fields)
fieldList = ','.join(fields)
_checkErr('read', _C.VSsetfields(self._id, fieldList),
'error defining fields to read')
# Allocate a buffer to store the packed records.
bufSize = self.sizeof(fields) * nRec
bigBuf = _C.array_byte(bufSize)
# Read records
nRead = _C.VSread(self._id, bigBuf, nRec, 0) # 0: FULL_INTERLACE
_checkErr('read', nRead, 'read error')
self._offset += nRec
# Allocate an array to store a pointer to the field buffer.
fldArr = _C.new_array_voidp(1)
# Initialize return value
values = []
for numRec in range(nRead):
v = []
for numFld in range(nFields):
v.append(None)
values.append(v)
# Unpack each field in turn.
for numFld in range(nFields):
fld = self.field(fields[numFld])
data_type = fld._type
order = fld._order
n_values = order * nRead
# Allocate a buffer to store the field values.
if data_type in [HC.CHAR8, HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("read: illegal or unupported type %d" % \
data_type)
# Unpack the field values.
_C.array_voidp_setitem(fldArr, 0, buf)
_checkErr('read',
_C.VSfpack(self._id, 1, fieldList, bigBuf, bufSize,
nRead, fld._name, fldArr),
"cannot execute")
# Extract values from the field buffer.
k = 0
for numRec in range(nRead):
if order == 1:
values[numRec][numFld] = buf[k]
k += 1
else:
# Handle strings specially
if data_type == HC.CHAR8:
s = ''
for i in range(order):
v = buf[k]
if v != 0:
s += chr(v)
k += 1
values[numRec][numFld] = s
# Return field values as a list
else:
values[numRec][numFld] = []
for i in range(order):
values[numRec][numFld].append(buf[k])
k += 1
del buf
return values
def write(self, values):
"""Write records to the vdata. Writing starts at the current
record position, which is advanced by the number of records
written.
Args::
values: 2-level sequence. First level is a sequence of records.
The second level gives the sequence of values for each record.
It is mandatory to always write whole records. Thus
every record field must appear at the second level.
The record values are ordered according the list of
field names set in the last call to the setfields()
method. The order of the complete vdata field set is
used if setfields() has not been called.
Returns::
number of records written
To append to a vdata already holding 'n' records, it is necessary
to first move the current record position to 'n-1' with a call to
method seek(), then to call method read() for the side effect
of advancing the current record position past this last record.
Method seekend() does just that.
C library equivalent : VSwrite
"""
nFields = self._nfields
# Fields give the order of the record values, as defined in the
# last call to setfields()
fields = self._setfields or self._fields
# We must pack values using the effective field order in the vdata
fieldList = ','.join(self._fields)
# Validate the values argument.
if nFields != len(fields):
raise HDF4Error("write: must write whole records")
if type(values) not in [list, tuple]:
raise HDF4Error("write: values must be a sequence")
nRec = len(values)
for n in range(nRec):
rec = values[n]
if type(rec) not in [list, tuple]:
raise HDF4Error("write: records must be given as sequences")
# Make sure each record is complete.
if len(rec) != nFields:
raise HDF4Error("write: records must specify every field")
# Allocate a buffer to store the packed records.
bufSize = self._recsize * nRec
bigBuf = _C.array_byte(bufSize)
# Allocate an array to store a pointer to the field buffer.
fldArr = _C.new_array_voidp(1)
# Pack each field in turn.
for numFld in range(nFields):
fld = self.field(fields[numFld])
data_type = fld._type
order = fld._order
n_values = order * nRec
# Allocate a buffer to store the field values.
if data_type in [HC.CHAR8, HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("write: illegal or unupported type %d" % \
data_type)
# Load the field buffer with values.
k = 0
for numRec in range(nRec):
val = values[numRec][numFld]
# Single-valued field
if order == 1:
buf[k] = val
k += 1
# Multi-valued field
else:
# Handle strings specially.
if data_type == HC.CHAR8:
if not isinstance(val, str):
raise HDF4Error("char fields must be set with strings")
n = len(val)
for i in range(order):
buf[k] = i < n and ord(val[i]) or 0
k += 1
# Should validate field elements ...
elif type(val) not in [list, tuple]:
raise HDF4Error("multi-values fields must be given as sequences")
else:
for i in range(order):
buf[k] = val[i]
k += 1
# Store address of the field buffer in first position
# of the field array. Pack the field values.
_C.array_voidp_setitem(fldArr, 0, buf) # fldArr[0] = buf
_checkErr('write',
_C.VSfpack(self._id, 0, fieldList, bigBuf, bufSize,
nRec, fld._name, fldArr),
"cannot execute")
del buf
# Write the packed records.
n = _C.VSwrite(self._id, bigBuf, nRec, 0) # 0: FULL_INTERLACE
_checkErr('write', n, 'cannot execute')
self._offset += nRec
return n
def inquire(self):
"""Retrieve info about the vdata.
Args::
no argument
Returns::
5-element tuple with the following elements:
-number of records in the vdata
-interlace mode
-list of vdata field names
-size in bytes of the vdata record
-name of the vdata
C library equivalent : VSinquire
"""
status, nRecs, interlace, fldNames, size, vName = \
_C.VSinquire(self._id)
_checkErr('inquire', status, "cannot query vdata info")
return nRecs, interlace, fldNames.split(','), size, vName
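# Usage sketch (illustrative):
#   nrecs, mode, fields, recsize, name = vd.inquire()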
def fieldinfo(self):
"""Retrieve info about all vdata fields.
Args::
no argument
Returns::
list where each element describes a field of the vdata;
each field is described by an 7-element tuple containing
the following elements:
- field name
- field data type (one of HC.xxx constants)
- field order
- number of attributes attached to the field
- field index number
- field external size
- field internal size
C library equivalent : no equivalent
"""
lst = []
for n in range(self._nfields):
fld = self.field(n)
lst.append((fld._name,
fld._type,
fld._order,
fld._nattrs,
fld._index,
fld._esize,
fld._isize))
return lst
def sizeof(self, fields):
"""Retrieve the size in bytes of the given fields.
Args::
fields sequence of field names to query
Returns::
total size of the fields in bytes
C library equivalent : VSsizeof
"""
if type(fields) in [tuple, list]:
str = ','.join(fields)
else:
str = fields
n = _C.VSsizeof(self._id, str)
_checkErr('sizeof', n, "cannot retrieve field sizes")
return n
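# Usage sketch (illustrative; field names are hypothetical):
#   nbytes = vd.sizeof(('partid', 'qty'))  # total size of the two fields in bytes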
def fexist(self, fields):
"""Check if a vdata contains a given set of fields.
Args::
fields sequence of field names whose presence in the
vdata must be checked
Returns::
true (1) if the given fields are present
false (0) otherwise
C library equivalent : VSfexist
"""
if type(fields) in [tuple, list]:
str = ','.join(fields)
else:
str = fields
ret = _C.VSfexist(self._id, str)
if ret < 0:
return 0
else:
return 1
def attr(self, name_or_index):
"""Create a VDAttr instance representing a vdata attribute.
Args::
name_or_index attribute name or index number; if a name is
given, the attribute may not exist; in that
case, it will be created when the VSAttr
instance set() method is called
Returns::
VSAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VDAttr(self, name_or_index, -1) # -1: vdata attribute
def findattr(self, name):
"""Search the vdata for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
def attrinfo(self):
"""Return info about all the vdata attributes.
Args::
no argument
Returns::
dictionary describing each vdata attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
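# Usage sketch (illustrative): dump every vdata attribute and its value.
#   for name, (atype, order, value, size) in vd.attrinfo().items():
#       print(name, value)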
def __buildStartCount(self, elem, setitem=0):
# Called by __getitem__() and __setitem__() methods
# to parse the expression used inside square brackets to
# index/slice a vdata.
# If 'setitem' is set, the call comes from __setitem__()
# We then allow the start value to be past the last record
# so as to be able to append to the vdata.
#
# Return a 2-element tuple:
# - tuple of the start indices along the vdata dimensions
# - tuple of the count values along the vdata dimensions
# a count of -1 indicates that an index, not a slice
# was applied on the corresponding dimension.
# Make sure the indexing expression does not exceed the
# vdata number of dimensions (2).
if isinstance(elem, tuple):
if len(elem) > 2:
raise HDF4Error("illegal indexing expression")
else: # Convert single index to sequence
elem = [elem]
start = []
count = []
shape = [self._nrecs, self._nfields]
n = -1
for e in elem:
n += 1
# Simple index
if isinstance(e, int):
is_slice = False
if e < 0:
e += shape[n]
if e < 0 or e >= shape[n]:
if e == shape[n] and setitem:
pass
else:
raise HDF4Error("index out of range")
beg = e
end = e + 1
# Slice index
elif isinstance(e, slice):
is_slice = True
# None or 0 means not specified
if e.start:
beg = e.start
if beg < 0:
beg += shape[n]
else:
beg = 0
# None or maxint means not specified
if e.stop and e.stop != sys.maxsize:
end = e.stop
if end < 0:
end += shape[n]
else:
end = shape[n]
# Bug
else:
raise ValueError("invalid indexing expression")
# Clip end index and compute number of elements to get
if end > shape[n]:
end = shape[n]
if beg > end:
beg = end
if is_slice:
cnt = end - beg
else:
cnt = -1
start.append(beg)
count.append(cnt)
if n == 0:
start.append(0)
count.append(shape[1])
return start, count
class VDField(object):
"""The VDField class represents a vdata field.
To create a VDField instance, call the field() method of a
VD class instance. """
def __init__(self, vdinst, fIndex):
# This method should not be called directly by the user program.
# To create a VDField instance, obtain a VD class instance and
# call its field() method.
# Args:
# vdinst VD instance to which the field belongs
# fIndex field index
#
# Private attributes:
# _vd_inst VD instance to which the field belongs
# _idx field index
self._vd_inst = vdinst
self._idx = fIndex
def __getattr__(self, name):
"""Some field properties can be queried through the following
read-only attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes.
Name Description C library routine
----- ------------------- -----------------
_esize field external size VFfieldesize
_index field index number VSfindex
_isize field internal size VFfieldisize
_name field name VFfieldname
_nattrs number of attributes VSfnattrs
_order field order VFfieldorder
_type field type VFfieldtype
"""
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute.
elif name == "_esize":
n = _C.VFfieldesize(self._vd_inst._id, self._idx)
_checkErr('_esize', n, "execution error")
return n
elif name == "_index":
return self._idx
elif name == "_isize":
n = _C.VFfieldisize(self._vd_inst._id, self._idx)
_checkErr('_isize', n, "execution error")
return n
elif name == "_name":
n = _C.VFfieldname(self._vd_inst._id, self._idx)
_checkErr('_name', n, "execution error")
return n
elif name == "_nattrs":
n = _C.VSfnattrs(self._vd_inst._id, self._idx)
_checkErr('_nattrs', n, "execution error")
return n
elif name == "_order":
n = _C.VFfieldorder(self._vd_inst._id, self._idx)
_checkErr('_order', n, "execution error")
return n
elif name == "_type":
type = _C.VFfieldtype(self._vd_inst._id, self._idx)
_checkErr('_type', type, 'cannot retrieve field type')
return type
raise AttributeError
def __setattr__(self, name, value):
# Forbid assigning to our predefined attributes
if name in ["_esize", "_index", "_isize", "_name",
"_nattrs", "_order", "_type"]:
raise AttributeError("%s: read-only attribute" % name)
# Try to set the attribute.
else:
_setattr(self, name, value)
def attr(self, name_or_index):
"""Create a VDAttr instance representing a field attribute.
Args::
name_or_index attribute name or index number; if a name is
specified, the attribute may not exist; in that
case, it will be created when the VDAttr
instance set() method is called; if an
index number is specified, the attribute
must exist
Returns::
VSAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VDAttr(self, name_or_index, self._idx)
def find(self, name):
"""Search the field for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
def attrinfo(self):
"""Return info about all the field attributes.
Args::
no argument
Returns::
dictionary describing each field attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
class VDAttr(object):
"""The VDAttr class encapsulates methods used to set and query attributes
defined at the level either of the vdata or of the vdata field.
To create an instance of this class, call the attr() method of a VD
(vdata) or VDField (vdata field) instance. """
def __init__(self, obj, name_or_index, fIndex):
# This constructor should not be called directly by the user
# program. The attr() method of a VD (vdata) or VDField
# (vdata field) must be called to instantiate this class.
# Args:
# obj object instance (VD or VDField) to which the
# attribute belongs
# name_or_index name or index of the attribute; if a name is
# given, an attribute with that name will be
# searched, if not found, a new index number will
# be generated
# fIndex field index, or -1 if the attribute belongs
# to the vdata
# Private attributes:
# _vd_inst VD instance
# _vdf_inst VDField instance or None
# _index attribute index or None
# _name attribute name or None
# _fIndex field index, or -1 if obj is a VD instance
if isinstance(obj, VD):
self._vd_inst = obj
self._vdf_inst = None
self._fIndex = -1
else:
self._vd_inst = obj._vd_inst
self._vdf_inst = obj
self._fIndex = fIndex
# Name is given. Attribute may exist or not.
if isinstance(name_or_index, type('')):
self._name = name_or_index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
self._name);
if self._index < 0:
self._index = None
# Index is given. Attribute Must exist.
else:
self._index = name_or_index
status, self._name, data_type, n_values, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex,
self._index)
_checkErr('attr', status, 'non-existent attribute')
def get(self):
"""Retrieve the attribute value.
Args::
no argument
Returns::
attribute value(s); a list is returned if the attribute
is made up of more than one value, except in the case of a
string-valued attribute (data type HC.CHAR8) where the
values are returned as a string
C library equivalent : VSgetattr
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
# Obtain attribute type and the number of values.
status, aName, data_type, n_values, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex,
self._index)
_checkErr('get', status, 'illegal parameters')
# Get attribute value.
convert = _array_to_ret
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("get: attribute index %d has an "\
"illegal or unupported type %d" % \
(self._index, data_type))
status = _C.VSgetattr(self._vd_inst._id, self._fIndex,
self._index, buf)
_checkErr('get', status, 'illegal attribute ')
return convert(buf, n_values)
def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
C library equivalent : VSsetattr
"""
try:
n_values = len(values)
except:
values = [values]
n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
if not isinstance(values[n], int):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.VSsetattr(self._vd_inst._id, self._fIndex, self._name,
data_type, n_values, buf)
_checkErr('attr', status, 'cannot execute')
# Update the attribute index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
self._name);
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index")
def info(self):
"""Retrieve info about the attribute.
Args::
no argument
Returns::
4-element tuple with the following components:
-attribute name
-attribute data type (one of HC.xxx constants)
-attribute order (number of values)
-attribute size in bytes
C library equivalent : VSattrinfo
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
status, name, type, order, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex, self._index)
_checkErr('info', status, "execution error")
return name, type, order, size
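# Usage sketch (illustrative):
#   att = vd.attr('status')
#   name, atype, order, size = att.info()  # raises HDF4Error if non-existent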
###########################
# Support functions
###########################
def _setattr(obj, name, value):
# Called by the __setattr__ method of the VD and VDField objects.
#
# obj instance on which the attribute is set
# name attribute name
# value attribute value
if isinstance(value, six.string_types):
value = value.encode('utf8')
# Treat a name starting with an underscore as that of a
# standard python instance attribute.
if name[0] == '_':
obj.__dict__[name] = value
return
# Treat everything else as an HDF attribute.
if type(value) not in [list, tuple]:
value = [value]
typeList = []
for v in value:
t = type(v)
# Prohibit mixing numeric types and strings.
if t in [int, float] and \
not bytes in typeList:
if t not in typeList:
typeList.append(t)
# Prohibit sequence of strings or a mix of numbers and string.
elif t == bytes and not typeList:
typeList.append(t)
else:
typeList = []
break
if bytes in typeList:
xtype = HC.CHAR8
value = value[0]
# double is "stronger" than int
elif float in typeList:
xtype = HC.FLOAT64
elif int in typeList:
xtype = HC.INT32
else:
raise HDF4Error("Illegal attribute value")
# Assign value
try:
a = obj.attr(name)
a.set(xtype, value)
except HDF4Error as msg:
raise HDF4Error("cannot set attribute: %s" % msg)
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
for i in xrange(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
# Strip NULL at end
if chrs[-1] == '\0':
del chrs[-1]
return ''.join(chrs)
| mit | -6,665,755,709,785,605,000 | 35.694785 | 89 | 0.558892 | false |
OpenProvenance/python-bitcoinlib-scripting | 03-CTxIn.py | 1 | 2407 | ### Open Provenance February 2016 - https://myveryown.org
### Bitcoin Blockchain Information using python-bitcoinlib
### CTxIn & COutPoint Objects and Properties
### Donate to Open Provenance: 1opDUZQ9nsL1LJALBdV1dvqSMtcvNj9EC
## Import the modules required and setup a connection to bitcoin
import bitcoin
## Create a proxy object and connect to the bitcoin.rpc
import bitcoin.rpc
myproxy = bitcoin.rpc.Proxy()
## Get the latest CBlock data from bitcoin rpc proxy
block_info = myproxy.getblock(myproxy.getblockhash(myproxy.getblockcount()))
## From the CBlock object we are able to get the transactions
vtx = block_info.vtx
## Print the details to the screen.
print "----------------------------------------------------------------"
print "Bitcoin CTxIn Object Information: Block Height ", myproxy.getblockcount()
print "----------------------------------------------------------------"
## We need a non coinbase transaction for this demo as coinbase transactions have no inputs.
## in this example we will show the second transaction or first non "coinbase" transaction details.
if len(vtx) >= 2 :
for x in range (1, 2) :
## Each Transaction is a CTransaction Object
thetx = vtx[x]
## Now we have the object we can get info from it
print "Is Coinbase: ", thetx.is_coinbase()
print "nVersion: ", thetx.nVersion
print "nLockTime: ", thetx.nLockTime
print "TX: ", bitcoin.core.b2lx(thetx.GetHash())
## From the CTransaction Object we get the CTxIn Objects
vin = thetx.vin
## There could be more than one IN so we loop
if len(vin) >= 1 :
for i in range (0, len(vin)) :
## vi is a CTxIn Object
vi = vin[i]
print " "
## From this Object we can get info
print "is_final: ", vi.is_final()
print "nSequence : ", vi.nSequence
## the CTxIn Object also contains a COutPoint Object
vip = vi.prevout
print "COutPoint Hash: "
print bitcoin.core.b2lx(vip.hash)
print "COutPoint n: ", vip.n
print "COutPoint is_null: ", vip.is_null()
## and finally it includes a signature
print "scriptSig : "
print bitcoin.core.b2lx(vi.scriptSig)
print '----------'
print "Dump of RAW CTxIn Object:"
print vi
print " "
print "Dump of RAW COutPoint Object:"
print vip
print '----------'
else :
print "Sorry this block only has a coinbase transaction."
print "----------------------------------------------------------------"
print " "
exit()
| mit | -396,867,115,745,804,740 | 32.901408 | 99 | 0.645617 | false |
lironsc/ORange | ORange1_LoadBalancers/Project1/Controller/Split/Elcp0Table.py | 1 | 1035 | import Flow,Range
from ryu.ofproto import ofproto_v1_3
#This file contains all the logic for populating the fourth table, used for the balancing of traffic
#Creates a flow for the table, one for each range, representing the start of a range
def createThirdTableFlow(flowRange, datapath):
ofproto=ofproto_v1_3
match = datapath.ofproto_parser.OFPMatch(eth_type=0x800,ipv4_src=flowRange.getZeroELCP())
#If a match is found, send to the last table which will send the packet to the chosen server
inst = [datapath.ofproto_parser.OFPInstructionGotoTable(4),
datapath.ofproto_parser.OFPInstructionWriteMetadata(Range.fromBinary(Range.toBinary(int(flowRange.ID)) +flowRange.end), Flow.getMetaDataMask(), type_=None, len_=None)]
return Flow.createFlow(datapath,int(flowRange.ID),3,100-Range.starsInString(flowRange.zeroELCP),match,inst)
#Install all flows in table
def prepareELCP0Table(dp,ranges):
for i in range(0, len(ranges)):
dp.send_msg(createThirdTableFlow(ranges[i], dp))
| apache-2.0 | -6,491,032,691,780,397,000 | 56.5 | 187 | 0.752657 | false |
jet-code/multivariable-control-systems | cp2/cp2_method0.py | 1 | 3758 |
# coding: utf-8
# In[1]:
# Alexander Hebert
# ECE 6390
# Computer Project #2
# In[2]:
# Tested using Python v3.4 and IPython v2
##### Import libraries
# In[3]:
import numpy as np
# In[4]:
import scipy
# In[5]:
import sympy
# In[6]:
from IPython.display import display
# In[7]:
from sympy.interactive import printing
# In[8]:
np.set_printoptions(precision=6)
# In[9]:
#np.set_printoptions(suppress=True)
##### Original system:
# In[10]:
A = np.loadtxt('A_ex1.txt')
# In[11]:
A
# In[12]:
n,nc = A.shape
# In[13]:
B = np.loadtxt('B_ex1.txt')
# In[14]:
B
# In[15]:
nr,m = B.shape
##### Compute eigenvalues/poles of A to determine system stability:
# In[16]:
A_eigvals, M = np.linalg.eig(A)
# In[17]:
A_eigvals
# In[18]:
# Two poles lie in the RHP and are unstable.
# In[19]:
A_eigvals_desired = np.array([-0.2,-0.5,A_eigvals[2],A_eigvals[3]])
# In[20]:
A_eigvals_desired
# In[21]:
Lambda = np.diag(A_eigvals_desired)
# In[22]:
Lambda
##### Pole Assignment Algorithm from journal paper
# In[23]:
# Step A: Decomposition of B using SVD
# B = U*S*V.H
# In[24]:
U, s, VH = np.linalg.svd(B)
# In[25]:
U
# In[26]:
s
# In[27]:
S = np.zeros((4, 2))
S[:2, :2] = np.diag(s)
# In[28]:
S
# In[29]:
VH
# In[30]:
# Extract U_0 and U_1 from matrix U = [U_0,U_1]
# In[31]:
U_0 = U[:n,:m]
# In[32]:
U_0
# In[33]:
U_1 = U[:n,m:]
# In[34]:
U_1
# In[35]:
# B = [U_0,U_1][Z,0].T
# Compute Z from SVD of B
# In[36]:
Z = np.diag(s).dot(VH)
# In[37]:
Z
# In[38]:
# Compute the nullspace of U_1.T *(A - lambda_j*I)
# for initial eigenvectors in X
X = np.zeros((n,n))
for j in range(len(A_eigvals_desired)):
lambda_j = A_eigvals_desired[j]
# M_j is a temp matrix
exec("M_%d = np.dot(U_1.T,(A - lambda_j*np.identity(n)))" %(j+1))
# U_1.T *(A - lambda_j*I) = T_j *[Gamma_j,0]*[S_j_hat,S_j].T
exec("T_%d, gamma_%d, SH_%d = np.linalg.svd(M_%d)" %(j+1,j+1,j+1,j+1))
exec("X[:,j] = SH_%d[-2,:]" %(j+1))
# no transpose in SH_j due to 1-d vector
exec("S_hat_%d = SH_%d[:m,:].T" %(j+1,j+1))
exec("S_%d = SH_%d[m:,:].T" %(j+1,j+1))
# In[39]:
# Initial eigenvectors in X
X
# In[40]:
# Test X for full rank
X_rank = np.linalg.matrix_rank(X)
# In[41]:
X_rank == n
# In[42]:
# Step X with Method 0
maxiter = 2
v2current = 0
v2prev = np.linalg.cond(X)
eps = 10e-5
flag = 0
X_j = np.zeros((n,n-1))
cond_num = np.zeros((n,1))
for r in range(maxiter):
for j in range(n):
X_j = np.delete(X,j,1)
Q,R = np.linalg.qr(X_j,mode='complete')
        y_j = Q[:,-1].reshape((n,1))
exec("S_j = S_%d" %(j+1))
x_j = (S_j.dot(S_j.T).dot(y_j) / np.linalg.norm(np.dot(S_j.T,y_j)))
X[:,j] = x_j[:,0]
cond_num[j,0] = 1 / np.abs(np.dot(y_j.T,x_j))
v2current = np.linalg.cond(X)
if ((v2current - v2prev) < eps):
print("Tolerance met")
print("v2 = %.3f" %v2current)
flag = 1
else:
v2prev = v2current
if (flag == 0):
print("Tolerance not met")
print("v2 = %.3f" %v2current)
# In[43]:
X
# In[44]:
np.linalg.matrix_rank(X)
# In[45]:
X_inv = np.linalg.inv(X)
# In[46]:
X_inv
# In[47]:
# M defined as A + BF
M = X.dot(Lambda).dot(X_inv)
# In[48]:
M
# In[49]:
# Eigenvalues of controlled system
M_eigvals, H = np.linalg.eig(M)
M_eigvals
# In[50]:
# Compute feedback matrix F
F = np.dot(np.linalg.inv(Z),np.dot(U_0.T,(M - A)))
# In[51]:
F
# In[52]:
np.linalg.norm(F)
# In[53]:
# Compute condition number norms
# In[54]:
# Inf norm
np.linalg.norm(cond_num,np.inf)
# In[55]:
# 2 norm
np.linalg.norm(cond_num)
# In[55]:
| mit | -1,500,753,311,782,675,700 | 9.438889 | 75 | 0.530601 | false |
openstack/tacker | tacker/tests/unit/test_wsgi.py | 1 | 25815 | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import testtools
from unittest import mock
from urllib import request as urllibrequest
import webob
import webob.exc
from oslo_config import cfg
import oslo_i18n
from tacker.common import exceptions as exception
from tacker.tests import base
from tacker import wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'var'))
class TestWSGIServer(base.BaseTestCase):
"""WSGI server tests."""
def test_start_random_port(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="127.0.0.1")
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_start_random_port_with_ipv6(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="::1")
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_ipv6_listen_called_with_scope(self):
self.skipTest("Not ready yet")
server = wsgi.Server("test_app")
with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
mock_get_addr.return_value = [
(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'',
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
]
with mock.patch.object(server, 'pool') as mock_pool:
server.start(None,
1234,
host="fe80::204:acff:fe96:da87%eth0")
mock_get_addr.assert_called_once_with(
"fe80::204:acff:fe96:da87%eth0",
1234,
socket.AF_UNSPEC,
socket.SOCK_STREAM
)
mock_listen.assert_called_once_with(
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
family=socket.AF_INET6,
backlog=cfg.CONF.backlog
)
mock_pool.spawn.assert_has_calls([
mock.call(
server._run,
None,
mock_listen.return_value)
])
def test_app(self):
self.skipTest("Not ready yet")
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = urllibrequest.urlopen('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
class SerializerTest(base.BaseTestCase):
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
input_dict = {'servers': {'test': 'pass'}}
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType, serializer.serialize,
input_dict, content_type)
def test_get_deserialize_handler_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType,
serializer.get_deserialize_handler, content_type)
def test_serialize_content_type_json(self):
"""Test serialize with content type json."""
input_data = {'servers': ['test=pass']}
content_type = 'application/json'
serializer = wsgi.Serializer()
result = serializer.serialize(input_data, content_type)
self.assertEqual(b'{"servers": ["test=pass"]}', result)
def test_deserialize_raise_bad_request(self):
"""Test serialize verifies that exception is raises."""
content_type = 'application/unknown'
data_string = 'test'
serializer = wsgi.Serializer()
self.assertRaises(
webob.exc.HTTPBadRequest,
serializer.deserialize, data_string, content_type)
def test_deserialize_json_content_type(self):
"""Test Serializer.deserialize with content type json."""
content_type = 'application/json'
data_string = '{"servers": ["test=pass"]}'
serializer = wsgi.Serializer()
result = serializer.deserialize(data_string, content_type)
self.assertEqual({'body': {'servers': ['test=pass']}}, result)
class RequestDeserializerTest(testtools.TestCase):
def setUp(self):
super(RequestDeserializerTest, self).setUp()
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
self.body_deserializers = {'application/json': JSONDeserializer()}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def test_get_deserializer(self):
"""Test RequestDeserializer.get_body_deserializer."""
expected_json_serializer = self.deserializer.get_body_deserializer(
'application/json')
self.assertEqual(
expected_json_serializer,
self.body_deserializers['application/json'])
def test_get_expected_content_type(self):
"""Test RequestDeserializer.get_expected_content_type."""
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual('application/json',
self.deserializer.get_expected_content_type(request))
def test_get_action_args(self):
"""Test RequestDeserializer.get_action_args."""
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12}]}
expected = {'action': 'update', 'id': 12}
self.assertEqual(expected,
self.deserializer.get_action_args(env))
def test_deserialize(self):
"""Test RequestDeserializer.deserialize."""
with mock.patch.object(
self.deserializer, 'get_action_args') as mock_method:
mock_method.return_value = {'action': 'create'}
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/json')
self.assertEqual(expected, deserialized)
def test_get_body_deserializer_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
deserializer = wsgi.RequestDeserializer()
self.assertRaises(
exception.InvalidContentType,
deserializer.get_body_deserializer, content_type)
class ResponseSerializerTest(testtools.TestCase):
def setUp(self):
super(ResponseSerializerTest, self).setUp()
class JSONSerializer(object):
def serialize(self, data, action='default'):
return b'pew_json'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {'application/json': JSONSerializer()}
self.serializer = wsgi.ResponseSerializer(
self.body_serializers, HeadersSerializer())
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
def test_get_body_serializer(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.get_body_serializer, 'application/unknown')
def test_get_serializer(self):
"""Test ResponseSerializer.get_body_serializer."""
content_type = 'application/json'
self.assertEqual(self.body_serializers[content_type],
self.serializer.get_body_serializer(content_type))
def test_serialize_json_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'pew_json', response.body)
self.assertEqual(404, response.status_int)
def test_serialize_response_None(self):
response = self.serializer.serialize(
None, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'', response.body)
self.assertEqual(404, response.status_int)
class RequestTest(base.BaseTestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = b"fake<br />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/new-type;"
self.assertIsNone(request.get_content_type())
def test_content_type_from_accept(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, ")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/new_type"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
oslo_i18n.get_available_languages = mock.MagicMock()
oslo_i18n.get_available_languages.return_value = [
'known-language', 'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual('known-language', language)
        # If the Accept-Language is an unknown language, missing or empty,
# the best match locale should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertIsNone(language)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertIsNone(language)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertIsNone(language)
class ActionDispatcherTest(base.BaseTestCase):
def test_dispatch(self):
"""Test ActionDispatcher.dispatch."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x
self.assertEqual('pants',
serializer.dispatch('pants', action='create'))
def test_dispatch_action_None(self):
"""Test ActionDispatcher.dispatch with none action."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action=None))
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action='update'))
class ResponseHeadersSerializerTest(base.BaseTestCase):
def test_default(self):
serializer = wsgi.ResponseHeaderSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'fake')
self.assertEqual(200, response.status_int)
def test_custom(self):
class Serializer(wsgi.ResponseHeaderSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(404, response.status_int)
self.assertEqual('123', response.headers['X-Custom-Header'])
class DictSerializerTest(base.BaseTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual('',
serializer.serialize({}, 'NonExistentAction'))
class JSONDictSerializerTest(base.BaseTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = b'{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
def test_json_with_unicode(self):
input_dict = dict(servers=dict(a=(2, '\u7f51\u7edc')))
expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
class TextDeserializerTest(base.BaseTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual({},
deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(base.BaseTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1'}}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_default_raise_Malformed_Exception(self):
"""Test JsonDeserializer.default.
Test verifies JsonDeserializer.default raises exception
MalformedRequestBody correctly.
"""
data_string = ""
deserializer = wsgi.JSONDeserializer()
self.assertRaises(
exception.MalformedRequestBody, deserializer.default, data_string)
def test_json_with_utf8(self):
data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
as_dict = {'body': {'a': '\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_json_with_unicode(self):
data = b'{"a": "\u7f51\u7edc"}'
as_dict = {'body': {'a': '\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
class RequestHeadersDeserializerTest(base.BaseTestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual({},
deserializer.deserialize(req, 'nonExistent'))
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual({'a': 'b'},
deserializer.deserialize(req, 'update'))
class ResourceTest(base.BaseTestCase):
@staticmethod
def my_fault_body_function():
return 'off'
class Controller(object):
def index(self, request, index=None):
return index
def test_dispatch(self):
resource = wsgi.Resource(self.Controller())
req = wsgi.Request.blank('/')
actual = resource.dispatch(
req, 'index', action_args={'index': 'off'})
expected = 'off'
self.assertEqual(expected, actual)
def test_dispatch_unknown_controller_action(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
self.assertRaises(
AttributeError, resource.dispatch,
resource.controller, 'create', {})
def test_malformed_request_body_throws_bad_request(self):
resource = wsgi.Resource(None)
request = wsgi.Request.blank(
"/", body=b"{mal:formed", method='POST',
headers={'Content-Type': "application/json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_throws_unsupported_media_type_error(self):
resource = wsgi.Resource(None)
request = wsgi.Request.blank(
"/", body=b"{some:json}", method='POST',
headers={'Content-Type': "xxx"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_bad_request_error(self):
resource = wsgi.Resource(self.Controller())
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_call_resource_class_bad_request(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = 'body'
def method(self):
pass
def best_match_content_type(self):
return 'best_match_content_type'
resource = wsgi.Resource(self.Controller())
request = FakeRequest()
result = resource(request)
self.assertEqual(415, result.status_int)
def test_type_error(self):
resource = wsgi.Resource(self.Controller())
request = wsgi.Request.blank(
"/", method='GET', headers={'Content-Type': "json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_call_resource_class_bad_request_error(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = '{"Content-Type": "json"}'
def method(self):
pass
def best_match_content_type(self):
return 'application/json'
resource = wsgi.Resource(self.Controller())
request = FakeRequest()
result = resource(request)
self.assertEqual(400, result.status_int)
class MiddlewareTest(base.BaseTestCase):
def test_process_response(self):
def application(environ, start_response):
response = 'Success'
return response
response = application('test', 'fake')
result = wsgi.Middleware(application).process_response(response)
self.assertEqual('Success', result)
class FaultTest(base.BaseTestCase):
def test_call_fault(self):
class MyException(object):
code = 415
explanation = 'test'
my_exception = MyException()
converted_exp = exception.ConvertedException(code=my_exception.code,
explanation=my_exception.explanation)
my_fault = wsgi.Fault(converted_exp)
req = wsgi.Request.blank("/", method='POST',
headers={'Content-Type': "unknow"})
response = my_fault(req)
self.assertEqual(415, response.status_int)
class TestWSGIServerWithSSL(base.BaseTestCase):
"""WSGI server tests."""
def setUp(self):
super(TestWSGIServerWithSSL, self).setUp()
self.skip("Not ready yet")
def test_app_using_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = urllibrequest.urlopen('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ssl_combined_cert_and_key(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certandkey.pem'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = urllibrequest.urlopen('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ipv6_and_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="::1")
response = urllibrequest.urlopen('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
| apache-2.0 | 9,851,767,278,263,516 | 34.557851 | 79 | 0.595197 | false |
freeipa/freeipa-pr-ci | tasks/test_tasks.py | 1 | 2171 | import os
import pytest
from .ansible import AnsiblePlaybook
from .common import PopenTask, TimeoutException, TaskException
from .vagrant import VagrantBoxDownload
def test_timeout():
PopenTask(['sleep', '0.1'])()
PopenTask(['sleep', '0.1'], timeout=None)()
PopenTask(['sleep', '0.1'], timeout=0.2)()
task = PopenTask(['sleep', '0.1'], timeout=0.01)
with pytest.raises(TimeoutException) as exc_info:
task()
assert exc_info.value.task == task
def test_fallible_task():
task = PopenTask(['ls', '/tmp/ag34feqfdafasdf'])
with pytest.raises(TaskException) as exc_info:
task()
assert exc_info.value.task == task
assert task.returncode != 0
task = PopenTask(['ls', '/tmp/ag34feqfdafasdf'], raise_on_err=False)
task()
assert task.returncode != 0
def test_popen():
task = PopenTask(['ls', '/tmp'])
task()
assert task.returncode == 0
task = PopenTask(['ls', '/tmp/adsdasafgsag'], raise_on_err=False)
task()
assert task.returncode == 2
PopenTask('for i in `seq 3`; do echo $i; done', shell=True)()
task = PopenTask('ls /tmp/$DIR', shell=True, raise_on_err=False)
task()
assert task.returncode == 0
env = dict(DIR='gfdsgsdfgsfd')
task = PopenTask('ls /tmp/$DIR', shell=True, env=env, raise_on_err=False)
task()
assert task.returncode == 2
def test_vagrant_box_download():
path = os.path.dirname(os.path.realpath(__file__))
task = VagrantBoxDownload(
vagrantfile='Vagrantfile.mock',
path=path)
vagrantfile = task.get_vagrantfile()
assert vagrantfile.vm.box == 'freeipa/ci-master-f25'
assert vagrantfile.vm.box_version == '0.2.5'
def test_ansible_playbook():
assert ' '.join(
AnsiblePlaybook(playbook='a.yml', inventory='hosts.test').cmd
) == 'ansible-playbook -i hosts.test a.yml'
assert ' '.join(
AnsiblePlaybook(playbook='a.yml', inventory='hosts.test',
extra_vars={'a': 1, 'b': 'xyz'}, verbosity='vvv').cmd
) == 'ansible-playbook -i hosts.test -e b=xyz -e a=1 a.yml -vvv'
with pytest.raises(TaskException):
AnsiblePlaybook()
| gpl-3.0 | -5,545,033,217,916,845,000 | 27.946667 | 77 | 0.628743 | false |
Ebag333/Pyfa | gui/builtinStatsViews/rechargeViewFull.py | 1 | 5430 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import wx
from gui.statsView import StatsView
from gui.bitmapLoader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
import gui.mainFrame
import gui.builtinStatsViews.resistancesViewFull as rvf
from service.fit import Fit
class RechargeViewFull(StatsView):
name = "rechargeViewFull"
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.mainFrame.Bind(rvf.EFFECTIVE_HP_TOGGLED, self.toggleEffective)
self.effective = True
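        # "Effective" tank factors in resistances; the flag is flipped by the
        # EFFECTIVE_HP_TOGGLED event bound above (interpretation inferred from
        # the resistances view, not documented in this file).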
def getHeaderText(self, fit):
return "Recharge rates"
def getTextExtentW(self, text):
width, height = self.parent.GetTextExtent(text)
return width
def toggleEffective(self, event):
self.effective = event.effective
sFit = Fit.getInstance()
self.refreshPanel(sFit.getFit(self.mainFrame.getActiveFit()))
event.Skip()
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
self.panel = contentPanel
self.headerPanel = headerPanel
sizerTankStats = wx.FlexGridSizer(3, 5)
for i in range(4):
sizerTankStats.AddGrowableCol(i + 1)
contentSizer.Add(sizerTankStats, 0, wx.EXPAND, 0)
# Add an empty label first for correct alignment.
sizerTankStats.Add(wx.StaticText(contentPanel, wx.ID_ANY, ""), 0)
toolTipText = {"shieldPassive": "Passive shield recharge", "shieldActive": "Active shield boost",
"armorActive": "Armor repair amount", "hullActive": "Hull repair amount"}
for tankType in ("shieldPassive", "shieldActive", "armorActive", "hullActive"):
bitmap = BitmapLoader.getStaticBitmap("%s_big" % tankType, contentPanel, "gui")
tooltip = wx.ToolTip(toolTipText[tankType])
bitmap.SetToolTip(tooltip)
sizerTankStats.Add(bitmap, 0, wx.ALIGN_CENTER)
toolTipText = {"reinforced": "Reinforced", "sustained": "Sustained"}
for stability in ("reinforced", "sustained"):
bitmap = BitmapLoader.getStaticBitmap("regen%s_big" % stability.capitalize(), contentPanel, "gui")
tooltip = wx.ToolTip(toolTipText[stability])
bitmap.SetToolTip(tooltip)
sizerTankStats.Add(bitmap, 0, wx.ALIGN_CENTER)
for tankType in ("shieldPassive", "shieldActive", "armorActive", "hullActive"):
if stability == "reinforced" and tankType == "shieldPassive":
sizerTankStats.Add(wx.StaticText(contentPanel, wx.ID_ANY, ""))
continue
tankTypeCap = tankType[0].capitalize() + tankType[1:]
lbl = wx.StaticText(contentPanel, wx.ID_ANY, "0.0", style=wx.ALIGN_RIGHT)
setattr(self, "labelTank%s%s" % (stability.capitalize(), tankTypeCap), lbl)
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add(lbl, 0, wx.EXPAND)
box.Add(wx.StaticText(contentPanel, wx.ID_ANY, " HP/s"), 0, wx.EXPAND)
sizerTankStats.Add(box, 0, wx.ALIGN_CENTRE)
contentPanel.Layout()
def refreshPanel(self, fit):
        # Update the tank labels to reflect the new fit's stats
for stability in ("reinforced", "sustained"):
if stability == "reinforced" and fit is not None:
tank = fit.effectiveTank if self.effective else fit.tank
elif stability == "sustained" and fit is not None:
tank = fit.effectiveSustainableTank if self.effective else fit.sustainableTank
else:
tank = None
for name in ("shield", "armor", "hull"):
lbl = getattr(self, "labelTank%s%sActive" % (stability.capitalize(), name.capitalize()))
if tank is not None:
lbl.SetLabel("%.1f" % tank["%sRepair" % name])
else:
lbl.SetLabel("0.0")
if fit is not None:
label = getattr(self, "labelTankSustainedShieldPassive")
value = fit.effectiveTank["passiveShield"] if self.effective else fit.tank["passiveShield"]
label.SetLabel(formatAmount(value, 3, 0, 9))
else:
value = 0
label = getattr(self, "labelTankSustainedShieldPassive")
label.SetLabel("0")
label.SetToolTip(wx.ToolTip("%.3f" % value))
self.panel.Layout()
self.headerPanel.Layout()
RechargeViewFull.register()
| gpl-3.0 | -1,832,182,773,703,005,200 | 41.093023 | 110 | 0.618785 | false |
juju/juju-gui-charm | server/runserver.py | 1 | 1408 | # This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Juju GUI server entry point.
Arguments example:
--apiurl="wss://ec2-75-101-177-185.compute-1.example.com:17070"
--apiversion="go"
--sslpath="/etc/ssl/juju-gui"
--tests_root="/var/lib/juju/agents/unit-juju-gui-0/charm/juju-gui/test/"
--insecure
--sandbox
--logging=debug|info|warning|error
--charmworldurl="https://manage.jujucharms.com/"
The --sslpath option is ignored if --insecure is set.
The --apiurl and --apiversion options are ignored if --sandbox is set.
"""
from guiserver import manage
if __name__ == '__main__':
manage.setup()
manage.run()
| agpl-3.0 | 2,942,294,371,774,037,500 | 36.052632 | 79 | 0.724432 | false |
snehil/CNN_Text_Classification_Tensorflow | cnn/text_cnn.py | 1 | 4202 | import tensorflow as tf
import numpy as np
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
def __init__(
self, sequence_length, num_classes, vocab_size,
embedding_size, filter_sizes, num_filters, l2_reg_lambda = 0.0):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32 , [None, sequence_length], name = "input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes] , name = "input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
        # Embedding layer (randomly initialised here, learnt during training)
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name = "W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name = "W")
b = tf.Variable(tf.constant(0.1, shape = [num_filters]), name = "b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides = [1, 1, 1, 1],
padding = "VALID",
name = "conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name = "relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize = [1, sequence_length - filter_size + 1, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name = "pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(3, pooled_outputs)
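        # tf.concat(axis, values) is the pre-TF-1.0 argument order used in
        # this file; on TF >= 1.0 this would be tf.concat(pooled_outputs, 3)
        # (the target TF version is an assumption from the API style).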
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape = [num_filters_total, num_classes],
initializer = tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape = [num_classes]), name = "b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name = "scores")
self.softmaxScores = tf.nn.softmax(self.scores, name = "softmaxScores")
self.predictions = tf.argmax(self.softmaxScores, 1, name = "predictions")
self.topKPreds = tf.nn.top_k(self.softmaxScores, k = 1, sorted = True, name = "topKPreds")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
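            # Also pre-TF-1.0: newer releases require keyword arguments, i.e.
            # softmax_cross_entropy_with_logits(labels=self.input_y,
            #                                   logits=self.scores).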
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name = "accuracy")
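# Minimal construction sketch (hypothetical hyperparameters, not from this repo):
# cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=18000,
#               embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128,
#               l2_reg_lambda=0.1)
# A training step would then session.run() an optimizer over cnn.loss with
# {cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.dropout_keep_prob: 0.5}.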
| apache-2.0 | -3,228,817,458,301,248,500 | 47.860465 | 126 | 0.540219 | false |
gdsfactory/gdsfactory | pp/mask/merge_markdown.py | 1 | 1332 | import dataclasses
import os
from glob import glob
from pathlib import Path
from omegaconf import OmegaConf
from pp.config import CONFIG, TECH, logger
def merge_markdown(
reports_directory: Path = CONFIG["doe_directory"],
mdpath: Path = CONFIG["mask_directory"] / "report.md",
**kwargs,
) -> None:
"""Merges all individual markdown reports (.md) into a single markdown
you can add a report:[Capacitors, Diodes...] in config.yml to define the merge order
"""
logger.info("Merging Markdown files:")
configpath = mdpath.with_suffix(".yml")
tech = dataclasses.asdict(TECH)
tech.pop("factory", "")
with open(configpath, "w") as f:
tech.update(**kwargs)
tech_omegaconf = OmegaConf.create(tech)
f.write(OmegaConf.to_yaml(tech_omegaconf))
with open(mdpath, "w") as f:
def wl(line="", eol="\n"):
f.write(line + eol)
reports = sorted(glob(os.path.join(reports_directory, "*.md")))
for filename in reports:
with open(filename) as infile:
for line in infile:
f.write(line)
logger.info(f"Wrote {mdpath}")
logger.info(f"Wrote {configpath}")
if __name__ == "__main__":
reports_directory = CONFIG["samples_path"] / "mask" / "does"
merge_markdown(reports_directory)
| mit | -4,302,370,956,964,467,700 | 27.956522 | 88 | 0.626126 | false |
kronenpj/python-for-android | testapps/testapp_encryption/main.py | 1 | 10290 | print('main.py was successfully called')
import os
print('imported os')
print('this dir is', os.path.abspath(os.curdir))
print('contents of this dir', os.listdir('./'))
import sys
print('pythonpath is', sys.path)
import kivy
print('imported kivy')
print('file is', kivy.__file__)
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.popup import Popup
from kivy.clock import Clock
print('Imported kivy')
from kivy.utils import platform
print('platform is', platform)
# Test cryptography
try:
from cryptography.fernet import Fernet
key = Fernet.generate_key()
f = Fernet(key)
cryptography_encrypted = f.encrypt(
b'A really secret message. Not for prying eyes.')
cryptography_decrypted = f.decrypt(cryptography_encrypted)
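    # Fernet is authenticated symmetric encryption (AES-128-CBC plus an
    # HMAC-SHA256 tag), so decrypt() raises InvalidToken on tampered data.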
except Exception as e1:
print('**************************')
print('Error on cryptography operations:\n{}'.format(e1))
print('**************************')
cryptography_encrypted = 'Error'
cryptography_decrypted = 'Error'
# Test pycrypto
crypto_hash_message = 'A secret message'
try:
from Crypto.Hash import SHA256
    hasher = SHA256.new()  # renamed to avoid shadowing the hash() builtin
    hasher.update(crypto_hash_message.encode('utf-8'))
    crypto_hash_hexdigest = hasher.hexdigest()
except Exception as e2:
print('**************************')
print('Error on Crypto operations:\n{}'.format(e2))
print('**************************')
crypto_hash_hexdigest = 'Error'
# Test scrypt
try:
from scrypt import *
status_import_scrypt = 'Success'
except ImportError as e3:
print('**************************')
print('Unable to import scrypt:\n{}'.format(e3))
print('**************************')
status_import_scrypt = 'Error'
# Test M2Crypto
try:
from M2Crypto import *
status_import_m2crypto = 'Success'
except ImportError as e5:
print('**************************')
print('Unable to import M2Crypto:\n{}'.format(e5))
print('**************************\n')
status_import_m2crypto = 'Error'
# Test pysha3
try:
import sha3
print('Ok imported pysha3, testing some basic operations...')
k = sha3.keccak_512()
k.update(b"data")
print('Test pysha3 operation (keccak_512): {}'.format(k.hexdigest()))
status_import_pysha3 = 'Success'
except ImportError as e6:
print('**************************')
print('Unable to import/operate with pysha3:\n{}'.format(e6))
print('**************************')
status_import_pysha3 = 'Error'
# Test pycryptodome
try:
from Crypto.PublicKey import RSA
print('Ok imported pycryptodome, testing some basic operations...')
secret_code = "Unguessable"
key = RSA.generate(2048)
encrypted_key = key.export_key(passphrase=secret_code, pkcs=8,
protection="scryptAndAES128-CBC")
print('\t -> Testing key for secret code "Unguessable": {}'.format(
encrypted_key))
    file_out = open("rsa_key.bin", "wb")
    file_out.write(encrypted_key)
    file_out.close()
    print('\t -> Testing key write: {}'.format(
        'ok' if os.path.exists("rsa_key.bin") else 'fail'))
    print('\t -> Testing Public key: {}'.format(key.publickey().export_key()))
status_import_pycryptodome = 'Success (import and doing simple operations)'
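    # Round-trip sketch (assumption -- not part of the original test):
    # RSA.import_key(open("rsa_key.bin", "rb").read(), passphrase=secret_code)
    # recovers the private key only when the passphrase matches.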
except ImportError as e6:
print('**************************')
print('Unable to import/operate with pycryptodome:\n{}'.format(e6))
print('**************************')
status_import_pycryptodome = 'Error'
# Test libtorrent
try:
import libtorrent as lt
print('Imported libtorrent version {}'.format(lt.version))
status_import_libtorrent = 'Success (version is: {})'.format(lt.version)
except Exception as e4:
print('**************************')
print('Unable to import libtorrent:\n{}'.format(e4))
print('**************************')
status_import_libtorrent = 'Error'
kv = '''
#:import Metrics kivy.metrics.Metrics
#:import sys sys
<FixedSizeButton@Button>:
size_hint_y: None
height: dp(60)
<TestImport@BoxLayout>:
orientation: 'vertical'
size_hint_y: None
height: self.minimum_height
test_module: ''
test_result: ''
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: '[b]*** TEST {} MODULE ***[/b]'.format(self.parent.test_module)
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text:
'Import {}: [color=a0a0a0]{}[/color]'.format(
self.parent.test_module, self.parent.test_result)
halign: 'left'
Widget:
size_hint_y: None
height: 20
ScrollView:
GridLayout:
cols: 1
size_hint_y: None
height: self.minimum_height
FixedSizeButton:
text: 'test pyjnius'
on_press: app.test_pyjnius()
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: '[b]*** TEST CRYPTOGRAPHY MODULE ***[/b]'
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text:
'Cryptography decrypted:\\n[color=a0a0a0]%s[/color]\\n' \\
'Cryptography encrypted:\\n[color=a0a0a0]%s[/color]' % (
app.cryptography_decrypted, app.cryptography_encrypted)
halign: 'left'
Widget:
size_hint_y: None
height: 20
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: '[b]*** TEST CRYPTO MODULE ***[/b]'
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text:
'Crypto message: \\n[color=a0a0a0]%s[/color]\\n'\\
'Crypto hex: \\n[color=a0a0a0]%s[/color]' % (
app.crypto_hash_message, app.crypto_hash_hexdigest)
halign: 'left'
Widget:
size_hint_y: None
height: 20
TestImport:
test_module: 'scrypt'
test_result: app.status_import_scrypt
TestImport:
test_module: 'm2crypto'
test_result: app.status_import_m2crypto
TestImport:
test_module: 'pysha3'
test_result: app.status_import_pysha3
TestImport:
test_module: 'pycryptodome'
test_result: app.status_import_pycryptodome
TestImport:
test_module: 'libtorrent'
test_result: app.status_import_libtorrent
Image:
keep_ratio: False
allow_stretch: True
source: 'colours.png'
size_hint_y: None
height: dp(100)
Label:
height: self.texture_size[1]
size_hint_y: None
font_size: 100
text_size: self.size[0], None
markup: True
text: '[b]Kivy[/b] on [b]SDL2[/b] on [b]Android[/b]!'
halign: 'center'
Label:
height: self.texture_size[1]
size_hint_y: None
text_size: self.size[0], None
markup: True
text: sys.version
halign: 'center'
padding_y: dp(10)
Widget:
size_hint_y: None
height: 20
Label:
height: self.texture_size[1]
size_hint_y: None
font_size: 50
text_size: self.size[0], None
markup: True
text:
'dpi: [color=a0a0a0]%s[/color]\\n'\\
'density: [color=a0a0a0]%s[/color]\\n'\\
'fontscale: [color=a0a0a0]%s[/color]' % (
Metrics.dpi, Metrics.density, Metrics.fontscale)
halign: 'center'
FixedSizeButton:
text: 'test ctypes'
on_press: app.test_ctypes()
Widget:
size_hint_y: None
height: 1000
on_touch_down: print('touched at', args[-1].pos)
<ErrorPopup>:
title: 'Error'
size_hint: 0.75, 0.75
Label:
text: root.error_text
'''
class ErrorPopup(Popup):
error_text = StringProperty('')
def raise_error(error):
print('ERROR:', error)
ErrorPopup(error_text=error).open()
class TestApp(App):
cryptography_encrypted = cryptography_encrypted
cryptography_decrypted = cryptography_decrypted
crypto_hash_message = crypto_hash_message
crypto_hash_hexdigest = crypto_hash_hexdigest
status_import_scrypt = status_import_scrypt
status_import_m2crypto = status_import_m2crypto
status_import_pysha3 = status_import_pysha3
status_import_pycryptodome = status_import_pycryptodome
status_import_libtorrent = status_import_libtorrent
def build(self):
root = Builder.load_string(kv)
Clock.schedule_interval(self.print_something, 2)
# Clock.schedule_interval(self.test_pyjnius, 5)
print('testing metrics')
from kivy.metrics import Metrics
print('dpi is', Metrics.dpi)
print('density is', Metrics.density)
print('fontscale is', Metrics.fontscale)
return root
def print_something(self, *args):
print('App print tick', Clock.get_boottime())
def on_pause(self):
return True
def test_pyjnius(self, *args):
try:
from jnius import autoclass
except ImportError:
raise_error('Could not import pyjnius')
return
print('Attempting to vibrate with pyjnius')
python_activity = autoclass('org.kivy.android.PythonActivity')
activity = python_activity.mActivity
intent = autoclass('android.content.Intent')
context = autoclass('android.content.Context')
vibrator = activity.getSystemService(context.VIBRATOR_SERVICE)
vibrator.vibrate(1000)
def test_ctypes(self, *args):
import ctypes
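        # A successful import is the whole test here; a fuller smoke test
        # could call e.g. ctypes.CDLL(None) to exercise symbol loading
        # (an assumption, not part of the original app).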
TestApp().run()
| mit | 242,050,230,687,287,170 | 28.826087 | 79 | 0.568999 | false |
sadikovi/pulsar | analytics/selector/selector.py | 1 | 9133 | #!/usr/bin/env python
# import libs
from types import StringType, ListType
import warnings
# import classes
import analytics.utils.queryengine as q
import analytics.utils.misc as misc
from analytics.algorithms.algorithmsmap import AlgorithmsMap
from analytics.core.map.clustermap import ClusterMap
from analytics.core.map.elementmap import ElementMap
from analytics.core.map.pulsemap import PulseMap
from analytics.core.pulse import StaticPulse, DynamicPulse
# some of the tables to use for filtering
CLUSTERS = "CLUSTERS"
ELEMENTS = "ELEMENTS"
PULSES = "PULSES"
ALGORITHMS = "ALGORITHMS"
class FilterBlock(object):
"""
Simple class to update maps in batch.
Attributes:
_alg (AlgorithmsMap): map of algorithms
_pul (PulseMap): map of pulses
_clu (ClusterMap): map of clusters
_ele (ElementMap): map of elements
_isFiltered (bool): flag to show that filter block is filtered
"""
def __init__(self, algorithmsmap, pulsemap, clustermap, elementmap):
self._alg = algorithmsmap
self._pul = pulsemap
self._clu = clustermap
self._ele = elementmap
self._isFiltered = False
# [Public]
def filterWithBlock(queryset, flrblock):
"""
Recommended method for filtering maps with queryset. Takes care of
filtering order and overall process.
Args:
queryset (str): query set
flrblock (FilterBlock): filter block with maps
"""
# check if filter block has already been filtered
if flrblock._isFiltered:
return flrblock
# extract query blocks
blocks = parseQueryset(queryset, q.QueryEngine())
if not blocks:
return flrblock
# filter blocks to match maps
ablock = None; pblock = None; cblock = None
for block in blocks:
if block._statement._table.upper() == ALGORITHMS:
ablock = block
elif block._statement._table.upper() == PULSES:
pblock = block
elif block._statement._table.upper() == CLUSTERS:
cblock = block
# use each block to parse map
flrblock._alg = filterAlgorithms(ablock, flrblock._alg)
flrblock._pul = filterPulses(pblock, flrblock._pul)
flrblock._clu = filterClusters(cblock, flrblock._clu)
flrblock._ele = filterElements(flrblock._ele, flrblock._clu, flrblock._pul)
# finished filtering
flrblock._isFiltered = True
return flrblock
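# Usage sketch (hypothetical -- the four maps come from the application's data
# model and the queryset grammar lives in analytics.utils.queryengine, so the
# call below is only illustrative):
# block = FilterBlock(algorithmsmap, pulsemap, clustermap, elementmap)
# block = filterWithBlock(queryset, block)
# block._ele then keeps only elements whose cluster and pulse defaults survived.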
# [Public]
def parseQueryset(queryset=None, engine=None):
"""
Parsing query set. If query set is None or not a string, query set is
reset to empty string. If query set is invalid, exception is thrown.
Args:
queryset (str): query set
engine (QueryEngine): query engine to parse queryset
Returns:
list<QueryBlock>: list of query blocks
"""
if queryset is None:
queryset = ""
elif type(queryset) is not StringType:
msg = "Queryset is not a string and will be reset to empty"
warnings.warn(msg, UserWarning)
queryset = ""
else:
queryset = queryset.strip()
# query blocks
blocks = []
# check if queryset is empty, and in this case return empty list
if queryset == "":
blocks = []
else:
# return query blocks
engine = engine if type(engine) is q.QueryEngine else q.QueryEngine()
blocks = engine.parse(queryset)
return blocks
# [Public]
def filterAlgorithms(queryblock, algorithmsmap):
"""
Filters algorithms.
Args:
queryblock (QueryBlock): query block for algorithms
algorithmsmap (AlgorithmsMap): map of algorithms
Returns:
AlgorithmsMap: reference to updated algorithms map
"""
# if queryblock is None then do not filter at all
if queryblock is None:
return algorithmsmap
misc.checkTypeAgainst(type(queryblock), q.QueryBlock, __file__)
misc.checkTypeAgainst(type(algorithmsmap), AlgorithmsMap, __file__)
# get predicates
predicates = queryblock._predicates
# algorithm keys
akeys = []
for predicate in predicates:
ptype = predicate._type
parameter = predicate._parameter
# check only equal predicates with parameter "id"
if ptype == q._PREDICATE_TYPES.EQUAL and parameter.upper() == "ID":
values = predicate._values
            akeys.append(values[0])
# remove keys that are not selected
for key in algorithmsmap.keys():
if key not in akeys:
algorithmsmap.remove(key)
return algorithmsmap
# [Public]
def filterPulses(queryblock, pulsemap):
"""
Filters pulses.
Args:
queryblock (QueryBlock): query block for pulses
pulsemap (PulseMap): map of pulses
Returns:
PulseMap: reference to updated pulses map
"""
# if queryblock is None then do not filter at all
if queryblock is None:
return pulsemap
misc.checkTypeAgainst(type(queryblock), q.QueryBlock, __file__)
misc.checkTypeAgainst(type(pulsemap), PulseMap, __file__)
# get predicates
predicates = queryblock._predicates
# check assign predicates first
for predicate in predicates:
ptype = predicate._type
if ptype == q._PREDICATE_TYPES.ASSIGN:
values = predicate._values
pulse = pulsemap.get(predicate._parameter)
if pulse is not None and type(pulse) is DynamicPulse:
                pulse.setStatic(values[0].upper() != "DYNAMIC")
# check equal predicate
for predicate in predicates:
ptype = predicate._type
# check equal predicate
if ptype == q._PREDICATE_TYPES.EQUAL:
pulse = pulsemap.get(predicate._parameter)
if pulse is not None:
values = predicate._values
_passed = pulse.setDefaultValue(values[0])
# 30.03.2015 ivan.sadikov: added issue#27 fix
# reporting warning, if value is incorrect
if not _passed:
_n = pulse.name(); _v = str(values[0])
msg = "Pulse %s cannot set value %s as default" %(_n, _v)
warnings.warn(msg, UserWarning)
# return updated pulsemap
return pulsemap
# [Public]
def filterClusters(queryblock, clustermap):
"""
Filters clusters.
Args:
queryblock (QueryBlock): query block for clusters
clustermap (ClusterMap): map of clusters
Returns:
ClusterMap: reference to updated clusters map
"""
# if queryblock is None then do not filter at all
if queryblock is None:
return clustermap
misc.checkTypeAgainst(type(queryblock), q.QueryBlock, __file__)
misc.checkTypeAgainst(type(clustermap), ClusterMap, __file__)
# storing clusters
clusters = []
# get predicates
predicates = queryblock._predicates
for predicate in predicates:
ptype = predicate._type
parameter = predicate._parameter
if ptype == q._PREDICATE_TYPES.EQUAL and parameter.upper() == "ID":
values = predicate._values
if clustermap.has(values[0]):
clusters.append(values[0])
# filter clusters
updatedmap = ClusterMap()
for key in clusters:
if not updatedmap.has(key):
updatedmap.add(clustermap.get(key))
# return updated cluster map
return updatedmap
# [Public]
def filterElements(elementmap, clustermap, pulsemap):
"""
Filters elements using cluster map and pulse map.
Args:
elementmap (ElementMap): map of elements
clustermap (ClusterMap): filtered map of clusters
pulsemap (PulseMap): filtered map of pulses
Returns:
ElementMap: reference to updated element map
"""
misc.checkTypeAgainst(type(elementmap), ElementMap, __file__)
misc.checkTypeAgainst(type(clustermap), ClusterMap, __file__)
misc.checkTypeAgainst(type(pulsemap), PulseMap, __file__)
# filter by clusters
elements = elementmap._map.values()
for element in elements:
parent = element.cluster()
if parent is None or not clustermap.has(parent.id()):
elementmap.remove(element.id())
# filter by pulses
elements = elementmap._map.values()
# pulses
# "is selectable" closure
def isselectable(x):
if type(x) is DynamicPulse and x.static() is True:
return True if x.default() is not None else False
elif type(x) is StaticPulse:
return True if x.default() is not None else False
else:
return False
pulses = [x for x in pulsemap._map.values() if isselectable(x)]
for element in elements:
toRemove = False
for pulse in pulses:
feature = element._features[pulse.id()]
if feature is None or feature.value() != pulse.default():
toRemove = True
if toRemove:
elementmap.remove(element.id())
# return element map
return elementmap
| apache-2.0 | -2,103,489,210,357,787,600 | 33.464151 | 79 | 0.630461 | false |
mikf/gallery-dl | gallery_dl/extractor/bcy.py | 1 | 6802 | # -*- coding: utf-8 -*-
# Copyright 2020-2021 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://bcy.net/"""
from .common import Extractor, Message
from .. import text
import json
import re
class BcyExtractor(Extractor):
"""Base class for bcy extractors"""
category = "bcy"
directory_fmt = ("{category}", "{user[id]} {user[name]}")
filename_fmt = "{post[id]} {id}.{extension}"
archive_fmt = "{post[id]}_{id}"
root = "https://bcy.net"
def __init__(self, match):
Extractor.__init__(self, match)
self.item_id = match.group(1)
def items(self):
sub = re.compile(r"^https?://p\d+-bcy\.byteimg\.com/img/banciyuan").sub
iroot = "https://img-bcy-qn.pstatp.com"
noop = self.config("noop")
for post in self.posts():
if not post["image_list"]:
continue
multi = None
tags = post.get("post_tags") or ()
data = {
"user": {
"id" : post["uid"],
"name" : post["uname"],
"avatar" : sub(iroot, post["avatar"].partition("~")[0]),
},
"post": {
"id" : text.parse_int(post["item_id"]),
"tags" : [t["tag_name"] for t in tags],
"date" : text.parse_timestamp(post["ctime"]),
"parody" : post["work"],
"content": post["plain"],
"likes" : post["like_count"],
"shares" : post["share_count"],
"replies": post["reply_count"],
},
}
yield Message.Directory, data
for data["num"], image in enumerate(post["image_list"], 1):
data["id"] = image["mid"]
data["width"] = image["w"]
data["height"] = image["h"]
url = image["path"].partition("~")[0]
text.nameext_from_url(url, data)
if data["extension"]:
if not url.startswith(iroot):
url = sub(iroot, url)
data["filter"] = ""
yield Message.Url, url, data
else:
if not multi:
if len(post["multi"]) < len(post["image_list"]):
multi = self._data_from_post(post["item_id"])
multi = multi["post_data"]["multi"]
else:
multi = post["multi"]
image = multi[data["num"] - 1]
if image["origin"]:
data["filter"] = "watermark"
yield Message.Url, image["origin"], data
if noop:
data["extension"] = ""
data["filter"] = "noop"
yield Message.Url, image["original_path"], data
def posts(self):
"""Returns an iterable with all relevant 'post' objects"""
def _data_from_post(self, post_id):
url = "{}/item/detail/{}".format(self.root, post_id)
page = self.request(url).text
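        # The post data is embedded in the page as a JavaScript string passed
        # to JSON.parse(); the replace() calls below undo the JS escaping
        # (\\u002F and \") so the payload parses as plain JSON.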
return json.loads(
text.extract(page, 'JSON.parse("', '");')[0]
.replace('\\\\u002F', '/')
.replace('\\"', '"')
)["detail"]
class BcyUserExtractor(BcyExtractor):
"""Extractor for user timelines"""
subcategory = "user"
pattern = r"(?:https?://)?bcy\.net/u/(\d+)"
test = (
("https://bcy.net/u/1933712", {
"pattern": r"https://img-bcy-qn.pstatp.com/\w+/\d+/post/\w+/.+jpg",
"count": ">= 20",
}),
("https://bcy.net/u/109282764041", {
"pattern": r"https://p\d-bcy.byteimg.com/img/banciyuan/[0-9a-f]+"
r"~tplv-banciyuan-logo-v3:.+\.image",
"range": "1-25",
"count": 25,
}),
)
def posts(self):
url = self.root + "/apiv3/user/selfPosts"
params = {"uid": self.item_id, "since": None}
while True:
data = self.request(url, params=params).json()
try:
items = data["data"]["items"]
except KeyError:
return
if not items:
return
for item in items:
yield item["item_detail"]
params["since"] = item["since"]
class BcyPostExtractor(BcyExtractor):
"""Extractor for individual posts"""
subcategory = "post"
pattern = r"(?:https?://)?bcy\.net/item/detail/(\d+)"
test = (
("https://bcy.net/item/detail/6355835481002893070", {
"url": "301202375e61fd6e0e2e35de6c3ac9f74885dec3",
"count": 1,
"keyword": {
"user": {
"id" : 1933712,
"name" : "wukloo",
"avatar" : "re:https://img-bcy-qn.pstatp.com/Public/",
},
"post": {
"id" : 6355835481002893070,
"tags" : list,
"date" : "dt:2016-11-22 08:47:46",
"parody" : "东方PROJECT",
"content": "re:根据微博的建议稍微做了点修改",
"likes" : int,
"shares" : int,
"replies": int,
},
"id": 8330182,
"num": 1,
"width" : 3000,
"height": 1687,
"filename": "712e0780b09011e696f973c3d1568337",
"extension": "jpg",
},
}),
# only watermarked images available
("https://bcy.net/item/detail/6950136331708144648", {
"pattern": r"https://p\d-bcy.byteimg.com/img/banciyuan/[0-9a-f]+"
r"~tplv-banciyuan-logo-v3:.+\.image",
"count": 8,
"keyword": {"filter": "watermark"},
}),
# deleted
("https://bcy.net/item/detail/6780546160802143236", {
"count": 0,
}),
# only visible to logged in users
("https://bcy.net/item/detail/6747523535150783495", {
"count": 0,
}),
)
def posts(self):
try:
data = self._data_from_post(self.item_id)
except KeyError:
return ()
post = data["post_data"]
post["image_list"] = post["multi"]
post["plain"] = text.parse_unicode_escapes(post["plain"])
post.update(data["detail_user"])
return (post,)
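# Minimal usage sketch (hypothetical driver code, not gallery-dl's normal
# entry point, which itself matches `pattern` against the input URL):
#   url = "https://bcy.net/item/detail/6355835481002893070"
#   match = re.match(BcyPostExtractor.pattern, url)
#   extractor = BcyPostExtractor(match)
#   for msg in extractor.items():
#       ...  # (Message.Directory, data) / (Message.Url, url, data) tuples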
| gpl-2.0 | -6,658,767,170,279,108,000 | 33.186869 | 79 | 0.448663 | false |
ivankreso/fer-deep-learning | lab2/train_l2reg.py | 1 | 2240 | import time
from pathlib import Path
import numpy as np
from torchvision.datasets import MNIST
import nn
import layers
DATA_DIR = Path(__file__).parent / 'datasets' / 'MNIST'
SAVE_DIR = Path(__file__).parent / 'out'
config = {}
config['max_epochs'] = 8
config['batch_size'] = 50
config['save_dir'] = SAVE_DIR
config['weight_decay'] = 1e-3
config['lr_policy'] = {1:{'lr':1e-1}, 3:{'lr':1e-2}, 5:{'lr':1e-3}, 7:{'lr':1e-4}}
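# lr_policy maps epoch number -> hyperparameters for that epoch: the learning
# rate is cut 10x every two epochs, from 1e-1 at epoch 1 down to 1e-4 at 7.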
def dense_to_one_hot(y, class_count):
return np.eye(class_count)[y]
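# e.g. dense_to_one_hot(np.array([0, 2]), 3) -> [[1., 0., 0.], [0., 0., 1.]]
# (fancy indexing selects the matching rows of the identity matrix)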
#np.random.seed(100)
np.random.seed(int(time.time() * 1e6) % 2**31)
ds_train, ds_test = MNIST(DATA_DIR, train=True, download=True), MNIST(DATA_DIR, train=False)
train_x = ds_train.data.reshape([-1, 1, 28, 28]).numpy().astype(np.float64) / 255  # np.float alias was removed in NumPy 1.24
train_y = ds_train.targets.numpy()
train_x, valid_x = train_x[:55000], train_x[55000:]
train_y, valid_y = train_y[:55000], train_y[55000:]
test_x = ds_test.data.reshape([-1, 1, 28, 28]).numpy().astype(np.float64) / 255  # np.float alias was removed in NumPy 1.24
test_y = ds_test.targets.numpy()
train_mean = train_x.mean()
train_x, valid_x, test_x = (x - train_mean for x in (train_x, valid_x, test_x))
train_y, valid_y, test_y = (dense_to_one_hot(y, 10) for y in (train_y, valid_y, test_y))
weight_decay = config['weight_decay']
net = []
regularizers = []
inputs = np.random.randn(config['batch_size'], 1, 28, 28)
net += [layers.Convolution(inputs, 16, 5, "conv1")]
regularizers += [layers.L2Regularizer(net[-1].weights, weight_decay, 'conv1_l2reg')]
net += [layers.MaxPooling(net[-1], "pool1")]
net += [layers.ReLU(net[-1], "relu1")]
net += [layers.Convolution(net[-1], 32, 5, "conv2")]
regularizers += [layers.L2Regularizer(net[-1].weights, weight_decay, 'conv2_l2reg')]
net += [layers.MaxPooling(net[-1], "pool2")]
net += [layers.ReLU(net[-1], "relu2")]
## 7x7
net += [layers.Flatten(net[-1], "flatten3")]
net += [layers.FC(net[-1], 512, "fc3")]
regularizers += [layers.L2Regularizer(net[-1].weights, weight_decay, 'fc3_l2reg')]
net += [layers.ReLU(net[-1], "relu3")]
net += [layers.FC(net[-1], 10, "logits")]
data_loss = layers.SoftmaxCrossEntropyWithLogits()
loss = layers.RegularizedLoss(data_loss, regularizers)
nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
nn.evaluate("Test", test_x, test_y, net, loss, config)
| mit | 5,554,988,731,413,853,000 | 36.333333 | 92 | 0.659821 | false |
vangj/py-bbn | tests/graph/test_variable.py | 1 | 1107 | import copy
from nose import with_setup
from pybbn.graph.variable import Variable
def setup():
"""
Setup.
:return: None.
"""
pass
def teardown():
"""
Teardown.
:return: None.
"""
pass
@with_setup(setup, teardown)
def test_copy():
"""
Tests variable copy.
:return: None.
"""
lhs = Variable(0, 'a', ['t', 'f'])
rhs = copy.copy(lhs)
assert lhs.id == rhs.id
assert lhs.name == rhs.name
assert len(lhs.values) == len(rhs.values)
for lhs_v, rhs_v in zip(lhs.values, rhs.values):
assert lhs_v == rhs_v
lhs.values[0] = 'true'
assert lhs.values[0] == rhs.values[0]
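# With copy.copy the two Variable objects share a single values list, so the
# mutation above is visible through both references; deepcopy (below) clones
# the list too, which the final inequality assertion verifies.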
@with_setup(setup, teardown)
def test_deep_copy():
"""
Tests variable deepcopy.
:return: None.
"""
lhs = Variable(0, 'a', ['t', 'f'])
rhs = copy.deepcopy(lhs)
assert lhs.id == rhs.id
assert lhs.name == rhs.name
assert len(lhs.values) == len(rhs.values)
for lhs_v, rhs_v in zip(lhs.values, rhs.values):
assert lhs_v == rhs_v
lhs.values[0] = 'true'
assert lhs.values[0] != rhs.values[0]
| apache-2.0 | -6,682,240,162,407,768,000 | 17.762712 | 52 | 0.566396 | false |
suizokukan/anceps | dchars/dchars.py | 1 | 13816 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# DChars Copyright (C) 2012 Suizokukan
# Contact: suizokukan _A.T._ orange dot fr
#
# This file is part of DChars.
# DChars is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DChars is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DChars. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏DChars❏ : dchars/dchars.py
"""
# problem with Pylint :
# pylint: disable=E0611
# "No name 'errors' in module 'dchars.errors'"
from dchars.errors.errors import DCharsError
import os.path
from dchars.languages_name import LANGUAGES_NAME, \
BIBLICAL_HEBREW__NAME, \
LANGUAGES_AND_TRANSLITERATIONS
import dchars.config_ini
from dchars.config_ini_data import DATA
#...............................................................................
# CONFIG_INI : options read in the configuration file.
#...............................................................................
# problem with Pylint :
# pylint: disable=F0401
# "Unable to import 'configparser'"
import configparser, codecs
CONFIG_INI = configparser.ConfigParser()
# about the following line : why not simply CONFIG_INI.read( "dchars", "config.ini") ?
# -> once installed, DChars have to know the exact path to config.ini,
# hence the following line (idea given by Frank Zago)
CONFIG_INI_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"config.ini" )
# Something's wrong with configparser : instead of simply writing
# DATA.read( open(CONFIG_INI_FILENAME, "r", encoding="utf-8") )
# we have to use this strange hack :
CONFIG_INI.readfp( codecs.open(CONFIG_INI_FILENAME, "r", "utf-8") )
# we check the accurency of the informations stored in the config.ini file :
dchars.config_ini.check(CONFIG_INI)
#...............................................................................
# LANGUAGES : information about each language. Please use three kinds of keys
# for each language : English name, iso639-3 name and original name.
#
# (iso639-3 name,
# string type's name,
# default transliteration method,
# default options)
#
#...............................................................................
LANGUAGES = {
#...............................................................
"Ænglisc" :
("ang",
"DStringANG",
CONFIG_INI["ang"]["transliteration method"],
{DATA["ang"].get_optionname("sorting method"): \
CONFIG_INI["ang"]["sorting method"],
DATA["ang"].get_optionname("anonymize the unknown characters"): \
CONFIG_INI["ang"]["anonymize the unknown characters"],
}
),
#...............................................................
"བོད་ཡིག" :
("bod",
"DStringBOD",
CONFIG_INI["bod"]["transliteration method"],
{DATA["bod"].get_optionname("sorting method") : \
CONFIG_INI["bod"]["sorting method"],
DATA["bod"].get_optionname("expected structure") : \
CONFIG_INI["bod"]["expected structure"],
DATA["bod"].get_optionname("look up in the buffers"): \
CONFIG_INI["bod"]["look up in the buffers"],
DATA["bod"].get_optionname("fill the buffers") : \
CONFIG_INI["bod"]["fill the buffers"],
DATA["bod"].get_optionname("anonymize the unknown characters") : \
CONFIG_INI["bod"]["anonymize the unknown characters"],
},
),
#...............................................................
"Ἑλληνικὴ γλῶττα":
("grc",
"DStringGRC",
CONFIG_INI["grc"]["transliteration method"],
{DATA["grc"].get_optionname("sorting method"): \
CONFIG_INI["grc"]["sorting method"],
DATA["grc"].get_optionname("anonymize the unknown characters"): \
CONFIG_INI["grc"]["anonymize the unknown characters"],
DATA["grc"].get_optionname("ignore accents"): \
CONFIG_INI["grc.gutenberg"]["ignore accents"],
DATA["grc"].get_optionname("ignore smooth breathing"): \
CONFIG_INI["grc.gutenberg"]["ignore smooth breathing"],
DATA["grc"].get_optionname("ignore diaeresis"): \
CONFIG_INI["grc.gutenberg"]["ignore diaeresis"],
DATA["grc"].get_optionname("ignore iota subscript"): \
CONFIG_INI["grc.gutenberg"]["ignore iota subscript"],
DATA["grc"].get_optionname("transliteration for upsilon"): \
CONFIG_INI["grc.gutenberg"]["transliteration for upsilon"],
DATA["grc"].get_optionname("hh becomes h"): \
CONFIG_INI["grc.gutenberg"]["hh becomes h"],
DATA["grc"].get_optionname("ignore makron and brakhu"): \
CONFIG_INI["grc.gutenberg"]["ignore makron and brakhu"],
}
),
#...............................................................
BIBLICAL_HEBREW__NAME :
("hbo",
"DStringHBO",
CONFIG_INI["hbo"]["transliteration method"],
{DATA["hbo"].get_optionname("sorting method"): \
CONFIG_INI["hbo"]["sorting method"],
DATA["hbo"].get_optionname("anonymize the unknown characters"): \
CONFIG_INI["hbo"]["anonymize the unknown characters"],
}
),
#...............................................................
"日本語" :
("jpn",
"DStringJPN",
CONFIG_INI["jpn"]["transliteration method"],
{DATA["jpn"].get_optionname("sorting method"): \
CONFIG_INI["jpn"]["sorting method"],
DATA["jpn"].get_optionname("anonymize the unknown characters"): \
CONFIG_INI["jpn"]["anonymize the unknown characters"],
DATA["jpn"].get_optionname("long vowels written with circumflex"): \
CONFIG_INI["jpn.shepburn"]["long vowels written with circumflex"],
DATA["jpn"].get_optionname("katakanas written with upper case letters"): \
CONFIG_INI["jpn.shepburn"]["katakanas written with upper case letters"],
DATA["jpn"].get_optionname("ou becomes ō"): \
CONFIG_INI["jpn.shepburn"]["ou becomes ō"],
}
),
#...............................................................
"latīna" :
("lat",
"DStringLAT",
CONFIG_INI["lat"]["transliteration method"],
{DATA["lat"].get_optionname("sorting method"): \
CONFIG_INI["lat"]["sorting method"],
DATA["lat"].get_optionname("anonymize the unknown characters"): \
CONFIG_INI["lat"]["anonymize the unknown characters"],
}
),
#...............................................................
"संस्कृतम्" :
("san",
"DStringSAN",
CONFIG_INI["san"]["transliteration method"],
{DATA["san"].get_optionname("sorting method"): \
CONFIG_INI["san"]["sorting method"],
DATA["san"].get_optionname("anonymize the unknown characters"): \
CONFIG_INI["san"]["anonymize the unknown characters"],
}
),
}
# dict : key(language's name, iso639-3) : corresponding dstring type
# E.g. LOADED_LANGUAGES["bod"] = dchars.languages.bod.dstring.DStringBOD
LOADED_LANGUAGES = {}
#///////////////////////////////////////////////////////////////////////////////
def new_dstring(language, transliteration_method=None, options=None):
"""
Return a DString* type, e.g. DStringBOD for Tibetan.
        language                : str
        transliteration_method  : str / None to use the default transliteration
options : dict of strings
"""
#...........................................................................
# error : unknown language's name.
#...........................................................................
if not language in LANGUAGES_NAME:
msg = "unknown language : '{0}'; known languages={1}".format(
language,
list(LANGUAGES.keys() ))
raise DCharsError( context = "dchars/dchars.py",
message = msg,
)
#...........................................................................
# original language's name :
#...........................................................................
_language = LANGUAGES_NAME[language]
#...........................................................................
    # we get the information from LANGUAGES :
#...........................................................................
(language_iso639_3_name,
dstring_name,
default_trans_method,
default_options) = LANGUAGES[_language]
#...........................................................................
# we import the module linked to dstring_name :
#...........................................................................
if language_iso639_3_name not in LOADED_LANGUAGES:
# the following lines are equivalent, e.g. to :
# from dchars.languages.lat.dstring import DStringLAT
#
# (see http://docs.python.org/3.3/library/functions.html?highlight=__import__#__import__)
module_name = "dchars.languages.{0}.dstring".format(language_iso639_3_name)
module = __import__( module_name, globals(), locals(), [dstring_name,], 0)
dstring_type = getattr( module, dstring_name )
LOADED_LANGUAGES[language_iso639_3_name] = dstring_type
else:
dstring_type = LOADED_LANGUAGES[language_iso639_3_name]
#...........................................................................
# if no transliteration method specified as argument, we get the default method :
#...........................................................................
_transliteration_method = transliteration_method
if _transliteration_method is None:
_transliteration_method = default_trans_method
# error : unknown transliteration method
if not _transliteration_method in LANGUAGES_AND_TRANSLITERATIONS[_language]:
msg = "unknown transliteration method : '{0}'; known methods={1}".format(
_transliteration_method,
LANGUAGES_AND_TRANSLITERATIONS[_language]
)
raise DCharsError( context = "dchars/dchars.py",
message = msg,
)
#...........................................................................
# _options is either equal to <options> either equal to the default options :
#...........................................................................
if options is None:
_options = default_options.copy()
else:
_options = default_options.copy()
# we add the options given in the arguments :
for option in options:
_options[option] = options[option]
#...........................................................................
# return value :
#...........................................................................
dstring = type( 'DString',
(dstring_type,),
{'iso639_3_name' : language_iso639_3_name,
'transliteration_method' : _transliteration_method,
'options' : _options} )
return dstring
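# Usage sketch (hypothetical values; any key registered in LANGUAGES_NAME is
# accepted, e.g. the iso639-3 name "lat" if it maps to "latīna"):
#   DSTRING_LAT = new_dstring("lat")   # default transliteration and options
#   d = DSTRING_LAT("rosa")
# Each call builds a fresh subclass, so several DString types with different
# transliteration methods/options can coexist.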
#///////////////////////////////////////////////////////////////////////////////
def sort_a_list_of_words(words, dstring_object):
"""
sort_a_list_of_words function :
* words : iterable of (unicode) words, like ["Μῆνιν", "ἄειδε", ...]
* dstring_object, DSTRING object, like new_dstring(language="grc")
Return an object whose type is type(words), sorted.
"""
# list of (unicode) words -> list of (DString*) words
dstring_words = map(dstring_object, words)
# we sort the list :
sorted_words = sorted(dstring_words, key=dstring_object.sortingvalue)
# we return a list of (unicode) words :
return type(words)(map(dstring_object.__str__, sorted_words))
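# Example (hypothetical data): sort Ancient Greek words with grc collation.
#   DSTRING_GRC = new_dstring("grc")
#   sort_a_list_of_words(["ἄειδε", "Μῆνιν"], DSTRING_GRC)
# The result has the same container type as `words` (list, tuple, ...).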
| gpl-3.0 | -4,939,655,221,155,261,000 | 41.673913 | 97 | 0.46314 | false |
detrout/telepathy-python | examples/roomlist.py | 1 | 2757 | from __future__ import print_function
import dbus.glib
import gobject
import logging
import sys
from time import sleep
from account import connection_from_file
from telepathy.client.channel import Channel
from telepathy.constants import (
CONNECTION_HANDLE_TYPE_NONE as HANDLE_TYPE_NONE,
CONNECTION_HANDLE_TYPE_ROOM as HANDLE_TYPE_ROOM,
CONNECTION_STATUS_CONNECTED,
CHANNEL_TEXT_MESSAGE_TYPE_NORMAL)
from telepathy.interfaces import CHANNEL_TYPE_ROOM_LIST, CONN_INTERFACE
logging.basicConfig()
class RoomListExample:
def __init__(self, conn):
self.conn = conn
conn[CONN_INTERFACE].connect_to_signal('StatusChanged',
self.status_changed_cb)
def run(self):
print("main loop running")
self.loop = gobject.MainLoop()
self.loop.run()
def quit(self):
if self.loop:
self.loop.quit()
self.loop = None
def status_changed_cb(self, state, reason):
if state != CONNECTION_STATUS_CONNECTED:
return
print("connection became ready, requesting channel")
try:
channel = conn.request_channel(
CHANNEL_TYPE_ROOM_LIST, HANDLE_TYPE_NONE, 0, True)
except Exception as e:
print(e)
self.quit()
return
print("Connecting to ListingRooms")
channel[CHANNEL_TYPE_ROOM_LIST].connect_to_signal('ListingRooms',
self.listing_cb)
print("Connecting to GotRooms")
channel[CHANNEL_TYPE_ROOM_LIST].connect_to_signal('GotRooms',
self.rooms_cb)
print("Calling ListRooms")
channel[CHANNEL_TYPE_ROOM_LIST].ListRooms()
def listing_cb(self, listing):
if listing:
print("Listing rooms...")
else:
print("Finished listing rooms")
self.quit()
def rooms_cb(self, rooms):
handles = [room[0] for room in rooms]
names = self.conn[CONN_INTERFACE].InspectHandles(HANDLE_TYPE_ROOM,
handles)
for i in xrange(len(rooms)):
handle, ctype, info = rooms[i]
name = names[i]
print("Found room:", name)
print("\t", ctype)
for key in info:
print("\t", repr(str(key)), " => ", repr(info[key]))
if __name__ == '__main__':
conn = connection_from_file(sys.argv[1])
ex = RoomListExample(conn)
print("connecting")
conn[CONN_INTERFACE].Connect()
try:
ex.run()
except KeyboardInterrupt:
print("killed")
print("disconnecting")
conn[CONN_INTERFACE].Disconnect()
| lgpl-2.1 | -2,763,615,783,980,631,000 | 28.329787 | 74 | 0.574175 | false |
gregorlarson/loxodo | src/random_password.py | 1 | 3092 | #!/usr/bin/env python
"""
A simple script for generating random passwords that avoid the characters
1, l, O and 0, which are hard to tell apart in some fonts.
"""
import sys
from random import Random
class random_password(object):
def __init__(self):
self._characters = {
'righthand': 'yuiophjklnm',
'lefthand': 'qwertasdfgzxcvb',
'RIGHTHAND': 'YUIOPHJKLNM',
'LEFTHAND': 'QWERTASDFGZXCVB',
'symbols': '/@#$%^&*\|[]~`',
'simplesymbols': "?!-_'",
'numbers': '23456789',
}
self.password_length = 8
self.rng = Random()
def generate_char_list(self, password_policy=None):
"""
"""
character_list = ''
if not password_policy:
for k, v in self._characters.iteritems():
character_list = character_list + v
else:
final_characters = self._characters.copy()
for k, v in password_policy.iteritems():
if k == "L" and v is False:
if 'lefthand' in final_characters:
final_characters.pop('lefthand')
if 'LEFTHAND' in final_characters:
final_characters.pop('LEFTHAND')
if k == "R" and v is False:
if 'righthand' in final_characters:
final_characters.pop('righthand')
if 'RIGHTHAND' in final_characters:
final_characters.pop('RIGHTHAND')
if k == "U" and v is False:
if 'LEFTHAND' in final_characters:
final_characters.pop('LEFTHAND')
if 'RIGHTHAND' in final_characters:
final_characters.pop('RIGHTHAND')
if k == "l" and v is False:
if 'righthand' in final_characters:
final_characters.pop('righthand')
if 'lefthand' in final_characters:
final_characters.pop('lefthand')
if k == "2" and v is False:
if 'numbers' in final_characters:
final_characters.pop('numbers')
if k == "s" and v is False:
if 'simplesymbols' in final_characters:
final_characters.pop('simplesymbols')
if k == "S" and v is False:
if 'symbols' in final_characters:
final_characters.pop('symbols')
for k, v in final_characters.iteritems():
try:
character_list = character_list + v
except:
pass
return character_list
def generate_password(self, password_policy=None):
"""
"""
password = ""
all_chars = self.generate_char_list(password_policy)
for length in range(self.password_length):
password = password + self.rng.choice(all_chars)
return password
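# Usage sketch:
#   rp = random_password()
#   rp.password_length = 12
#   print(rp.generate_password({'S': False}))  # exclude the harder symbols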
| gpl-2.0 | -8,314,864,223,982,383,000 | 34.953488 | 71 | 0.494179 | false |
nhuntwalker/expense_tracker | expense_tracker/expense_tracker/views/default.py | 1 | 4951 | """The main views for our expense_tracker app."""
from pyramid.view import view_config, forbidden_view_config
from expense_tracker.models import Expense
from pyramid.httpexceptions import HTTPFound
from pyramid.response import Response
import datetime
from expense_tracker.security import check_credentials
from pyramid.security import remember, forget # <--- add this line
CATEGORIES = [
"rent",
"utilities",
"groceries",
"food",
"diapers",
"autoloan",
"netflix",
"booze",
"therapist"
]
@view_config(route_name="list",
renderer="../templates/list.jinja2")
def list_view(request):
"""A listing of expenses for the home page."""
if request.POST and request.POST["category"]:
return HTTPFound(request.route_url("category",
cat=request.POST["category"]))
query = request.dbsession.query(Expense)
expenses = query.order_by(Expense.date.desc()).all()
return {
"expenses": expenses,
"categories": CATEGORIES
}
@view_config(route_name="detail",
renderer="../templates/detail.jinja2")
def detail_view(request):
"""The detail page for an expense."""
the_id = int(request.matchdict["id"])
expense = request.dbsession.query(Expense).get(the_id)
if not expense:
return Response("Not Found", content_type='text/plain', status=404)
return {"expense": expense}
@view_config(
route_name="create",
renderer="../templates/add.jinja2",
permission="add"
)
def create_view(request):
"""Create a new expense."""
if request.POST:
expense = Expense(
item=request.POST["item"],
amount=float(request.POST["amount"]),
paid_to=request.POST["paid_to"],
category=request.POST["category"],
date=datetime.datetime.now(),
description=request.POST["description"]
)
request.dbsession.add(expense)
return HTTPFound(request.route_url('list'))
return {}
@view_config(
route_name="edit",
renderer="../templates/edit.jinja2",
permission="add"
)
def edit_view(request):
"""Edit an existing expense."""
the_id = int(request.matchdict["id"])
expense = request.dbsession.query(Expense).get(the_id)
if request.POST:
expense.item = request.POST["item"]
expense.amount = float(request.POST["amount"])
expense.paid_to = request.POST["paid_to"]
expense.category = request.POST["category"]
expense.description = request.POST["description"]
request.dbsession.flush()
return HTTPFound(request.route_url('list'))
form_fill = {
"item": expense.item,
"amount": expense.amount,
"paid_to": expense.paid_to,
"category": expense.category,
"description": expense.description
}
return {"data": form_fill}
@view_config(route_name="category", renderer="../templates/list.jinja2")
def category_view(request):
"""List expenses of a certain category."""
if request.POST and request.POST["category"]:
return HTTPFound(request.route_url("category",
cat=request.POST["category"]))
query = request.dbsession.query(Expense)
the_category = request.matchdict["cat"]
query = query.filter(Expense.category == the_category)
expenses = query.order_by(Expense.date.desc()).all()
return {
"expenses": expenses,
"categories": CATEGORIES,
"selected": the_category
}
@view_config(route_name="login",
renderer="../templates/login.jinja2",
require_csrf=False)
def login_view(request):
"""Authenticate the incoming user."""
if request.POST:
username = request.POST["username"]
password = request.POST["password"]
if check_credentials(username, password):
auth_head = remember(request, username)
return HTTPFound(
request.route_url("list"),
headers=auth_head
)
return {}
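# remember()/forget() (used here and in logout_view below) return lists of
# Set-Cookie headers; attaching them to the HTTPFound redirect is what
# actually creates or clears the auth ticket in the browser.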
@view_config(route_name="logout")
def logout_view(request):
"""Remove authentication from the user."""
auth_head = forget(request)
return HTTPFound(request.route_url("list"), headers=auth_head)
@view_config(route_name="delete", permission="delete")
def delete_view(request):
"""To delete individual items."""
expense = request.dbsession.query(Expense).get(request.matchdict["id"])
request.dbsession.delete(expense)
return HTTPFound(request.route_url("list"))
@view_config(route_name="api_list", renderer="string")
def api_list_view(request):
    """Return all expenses as a list of JSON-serializable dicts."""
expenses = request.dbsession.query(Expense).all()
output = [item.to_json() for item in expenses]
return output
@forbidden_view_config(renderer="../templates/forbidden.jinja2")
def not_allowed_view(request):
"""Some special stuff for the forbidden view."""
return {}
| mit | 7,871,310,019,494,615,000 | 29.561728 | 75 | 0.633407 | false |
go2school/Python-HierarchicalSVM | python/liblinear_xiao.py | 1 | 8774 | #!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
import sys
import os
# For unix the prefix 'lib' is not considered.
if find_library('linear'):
liblinear = CDLL(find_library('linear'))
elif find_library('liblinear'):
liblinear = CDLL(find_library('liblinear'))
else:
if sys.platform == 'win32':
liblinear = CDLL(os.path.join(os.path.dirname(__file__),\
'../windows/liblinear.dll'))
else:
liblinear = CDLL(os.path.join(os.path.dirname(__file__),\
'../liblinear.so.1'))
# Construct constants
SOLVER_TYPE = ['L2R_LR', 'L2R_L2LOSS_SVC_DUAL', 'L2R_L2LOSS_SVC', 'L2R_L1LOSS_SVC_DUAL',\
'MCSVM_CS', 'L1R_L2LOSS_SVC', 'L1R_LR', 'L2R_LR_DUAL', \
None, None, None, \
'L2R_L2LOSS_SVR', 'L2R_L2LOSS_SVR_DUAL', 'L2R_L1LOSS_SVR_DUAL']
for i, s in enumerate(SOLVER_TYPE):
if s is not None: exec("%s = %d" % (s , i))
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class feature_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def gen_feature_nodearray(xi, feature_max=None, issparse=True):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
xi = [0] + xi # idx should start from 1
index_range = range(1, len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if issparse:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (feature_node * (len(index_range)+2))()
    ret[-1].index = -1  # terminating sentinel expected by liblinear
    ret[-2].index = -1  # slot reserved for the bias term (see problem.set_bias)
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range :
max_idx = index_range[-1]
return ret, max_idx
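# Sketch: gen_feature_nodearray({1: 0.5, 3: 1.0}) returns a C array laid out
# as [(1, 0.5), (3, 1.0), (-1, bias slot), (-1, sentinel)] plus max_idx == 3;
# liblinear relies on the trailing index == -1 sentinel to end each row.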
class problem(Structure):
_names = ["l", "n", "y", "x", "bias"]
_types = [c_int, c_int, POINTER(c_double), POINTER(POINTER(feature_node)), c_double]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, bias = -1):
if len(y) != len(x) :
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
self.bias = -1
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_feature_nodearray(xi)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = y[i]
self.x = (POINTER(feature_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
self.set_bias(bias)
def set_bias(self, bias):
if self.bias == bias:
return
if bias >= 0 and self.bias < 0:
self.n += 1
node = feature_node(self.n, bias)
if bias < 0 and self.bias >= 0:
self.n -= 1
node = feature_node(-1, bias)
for xi in self.x_space:
xi[-2] = node
self.bias = bias
class parameter(Structure):
_names = ["solver_type", "eps", "C", "nr_weight", "weight_label", "weight", "p"]
_types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def show(self):
attrs = parameter._names + self.__dict__.keys()
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
print(' %s: %s' % (attr, val))
def set_to_default_values(self):
self.solver_type = L2R_L2LOSS_SVC_DUAL
self.eps = float('inf')
self.C = 1
self.p = 0.1
self.nr_weight = 0
self.weight_label = (c_int * 0)()
self.weight = (c_double * 0)()
self.bias = -1
self.cross_validation = False
self.nr_fold = 0
self.print_func = None
def parse_options(self, options):
argv = options.split()
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv) :
if argv[i] == "-s":
i = i + 1
self.solver_type = int(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-B":
i = i + 1
self.bias = float(argv[i])
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2 :
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
else :
raise ValueError("Wrong options")
i += 1
liblinear.set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
if self.eps == float('inf'):
if self.solver_type in [L2R_LR, L2R_L2LOSS_SVC]:
self.eps = 0.01
elif self.solver_type in [L2R_L2LOSS_SVR]:
self.eps = 0.001
elif self.solver_type in [L2R_L2LOSS_SVC_DUAL, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L2R_LR_DUAL]:
self.eps = 0.1
elif self.solver_type in [L1R_L2LOSS_SVC, L1R_LR]:
self.eps = 0.01
elif self.solver_type in [L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
self.eps = 0.1
class model(Structure):
_names = ["param", "nr_class", "nr_feature", "w", "label", "bias", "probA", "probB"]
_types = [parameter, c_int, c_int, POINTER(c_double), POINTER(c_int), c_double, c_double, c_double]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
liblinear.free_and_destroy_model(pointer(self))
def get_nr_feature(self):
return liblinear.get_nr_feature(self)
def get_nr_class(self):
return liblinear.get_nr_class(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
liblinear.get_labels(self, labels)
return labels[:nr_class]
def is_probability_model(self):
return (liblinear.check_probability_model(self) == 1)
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> model
Convert a ctypes POINTER(model) to a Python model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
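# Ownership note: model pointers returned by liblinear.train/load_model are
# allocated in C; toPyModel tags them with __createfrom__ == 'C' so that
# model.__del__ frees them via free_and_destroy_model instead of leaking.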
fillprototype(liblinear.train, POINTER(model), [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.cross_validation, None, [POINTER(problem), POINTER(parameter), c_int, POINTER(c_double)])
fillprototype(liblinear.predict_values, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.predict, c_double, [POINTER(model), POINTER(feature_node)])
fillprototype(liblinear.predict_probability, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.save_model, c_int, [c_char_p, POINTER(model)])
fillprototype(liblinear.load_model, POINTER(model), [c_char_p])
fillprototype(liblinear.get_nr_feature, c_int, [POINTER(model)])
fillprototype(liblinear.get_nr_class, c_int, [POINTER(model)])
fillprototype(liblinear.get_labels, None, [POINTER(model), POINTER(c_int)])
fillprototype(liblinear.free_model_content, None, [POINTER(model)])
fillprototype(liblinear.free_and_destroy_model, None, [POINTER(POINTER(model))])
fillprototype(liblinear.destroy_param, None, [POINTER(parameter)])
fillprototype(liblinear.check_parameter, c_char_p, [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.check_probability_model, c_int, [POINTER(model)])
fillprototype(liblinear.set_print_string_function, None, [CFUNCTYPE(None, c_char_p)])
#added by xiao for probability estimation
#for training to estimate A and B
fillprototype(liblinear.svm_binary_svc_probability, None, [POINTER(problem), POINTER(parameter), c_double, c_double, POINTER(c_double), POINTER(c_double)])
#convert score to prob
fillprototype(liblinear.sigmoid_predict, c_double, [c_double, c_double, c_double])
#estimate platt A and B
fillprototype(liblinear.estimate_platt_models, None, [POINTER(problem), POINTER(parameter), POINTER(model)])
#get platt's model A and B
fillprototype(liblinear.getPlattsA, c_double, [POINTER(model)])
fillprototype(liblinear.getPlattsB, c_double, [POINTER(model)])
| bsd-3-clause | 6,014,900,592,383,761,000 | 30.67509 | 155 | 0.658992 | false |
ronreiter/interactive-tutorials | constants.py | 1 | 12253 | IDEONE_USERNAME = "ronreiter"
IDEONE_PASSWORD = "18132ce2b97e"
CACHE_HOST = "direct.learnpython.org"
DB_HOST = "direct.learnpython.org"
SECRET_KEY = "this is a secret. really."
LEARNPYTHON_DOMAIN = "learnpython.org"
LEARNJAVA_DOMAIN = "learnjavaonline.org"
LEARNC_DOMAIN = "learn-c.org"
LEARNCPP_DOMAIN = "learn-cpp.org"
LEARNJS_DOMAIN = "learn-js.org"
LEARNRUBY_DOMAIN = "learnrubyonline.org"
LEARNSHELL_DOMAIN = "learnshell.org"
LEARNPHP_DOMAIN = "learn-php.org"
LEARNPERL_DOMAIN = "learn-perl.org"
LEARNCS_DOMAIN = "learncs.org"
LEARNHTML_DOMAIN = "learn-html.org"
LEARNGO_DOMAIN = "learn-golang.org"
LEARNSCALA_DOMAIN = "learnscala.org"
LEARNSOLIDITY_DOMAIN = "learnsolidity.org"
LEARNSQL_DOMAIN = "learnsqlonline.org"
from collections import OrderedDict
# {1: C++ [GCC] (5.1.1),
# 2: Pascal [GPC] (gpc 20070904),
# 3: Perl (perl 5.20.1),
# 4: Python 2.x [Pypy] (2.7.13),
# 5: Fortran (5.1.1),
# 6: Whitespace (wspace 0.3),
# 7: Ada (gnat 5.1.1),
# 8: Ocaml (4.01.0),
# 9: Intercal (c-intercal 28.0-r1),
# 10: Java (jdk 8u51),
# 11: C (gcc 5.1.1),
# 12: Brainf**k (1.0.6),
# 13: Assembler [NASM] (NASM 2.11.05),
# 14: CLIPS (clips 6.24),
# 15: Prolog [SWI] (swi 7.2),
# 16: Icon (icon 9.4.3),
# 17: Ruby (ruby 2.1.5),
# 18: Scheme (stalin 0.3),
# 19: Pike (pike v7.8),
# 20: D [GDC] (gdc-5 5.1.1),
# 21: Haskell (ghc 7.8),
# 22: Pascal [FPC] (fpc 2.6.4+dfsg-6),
# 23: Smalltalk (gst 3.2.4),
# 25: Nice (0.9.13),
# 26: Lua (lua 7.2),
# 27: C# [Mono] (Mono 4.0.2),
# 28: Bash (bash 4.3.33),
# 29: PHP (PHP 5.6.11-1),
# 30: Nemerle (ncc 1.2.0),
# 32: Common Lisp [CLISP] (clisk 2.49),
# 33: Scheme [Guile] (guile 2.0.11),
# 34: C99 strict (gcc-5 5.1.1),
# 35: JavaScript [Rhino] (rhino 1.7.7),
# 36: Erlang (erl 18),
# 38: Tcl (tclsh 8.6),
# 39: Scala (2.11.7),
# 40: SQL (sqlite3-3.8.7),
# 41: C++ 4.3.2 (gcc-4.3.2),
# 42: Assembler [NASM 64bit] (nasm 2.12.01),
# 43: Objective-C (gcc-5 5.1.1),
# 44: C++14 [GCC] (gcc-5 5.1.1),
# 45: Assembler [GCC] (gcc 4.9.3),
# 46: Sed (sed 4.2.2),
# 47: Kotlin (kotlin 1.0.6),
# 50: VB.NET (mono 4.0.2),
# 54: Perl 6 (perl6 2014.07),
# 56: Node.js (node 7.4.0),
# 57: TypeScript (3.4.5),
# 85: Swift (swift 3.0.2),
# 93: Rust (1.14.0),
# 97: Scheme [Chicken] (chicken 4.11.0),
# 99: Python (Pypy) (PyPy 2.6.0),
# 102: D [DMD] (dmd 2.072.2),
# 104: AWK [GAWK] (fawk 4.1.1),
# 105: AWK [MAWK] (mawk 1.3.3),
# 107: Forth (gforth 0.7.2),
# 108: Prolog [GNU] (gnu prolog 1.4.5),
# 110: bc (bc 1.06.95),
# 111: Clojure (clojure 1.7.0),
# 112: JavaScript [SpiderMonkey] (24.2.0),
# 114: Go (1.4),
# 116: Python 3.x (3.5.3),
# 117: R (3.2.2),
# 118: COBOL (1.1.0),
# 121: Groovy (2.4),
# 124: F# (1.3),
# 127: Octave (4.0.0)}
DOMAIN_DATA = OrderedDict()
DOMAIN_DATA[LEARNPYTHON_DOMAIN] = {
"language" : "python",
"language_id": 116,
"codemirror_mode": "python",
"prism_mode": "language-python",
"analytics" : "UA-22741967-1",
"language_uppercase" : "Python",
"default_code" : """# Welcome to the Interactive Python Tutorial.
# Start by choosing a chapter and
# write your code in this window.
print("Hello, World!")
"""
}
DOMAIN_DATA[LEARNJAVA_DOMAIN] = {
"language" : "java",
"language_id": 10,
"codemirror_mode": "text/x-java",
"prism_mode": "language-java",
"analytics" : "UA-22741967-4",
"language_uppercase" : "Java",
"default_code" : """// Welcome to the Interactive Java Tutorial.
// Start by choosing a chapter and
// write your code in this window.
public class Main {
public static void main(String[] args) {
System.out.println("Hello, World!");
}
}
""",
"container_word" : "class",
"container_indent" : " ",
"container" : """public class Main {
public static void main(String[] args) {
{code}
}
}
"""
}
DOMAIN_DATA[LEARNHTML_DOMAIN] = {
"language" : "html",
"codemirror_mode": "text/html",
"prism_mode": "language-html",
"analytics" : "UA-22741967-11",
"language_uppercase" : "HTML",
"default_code" : """<!-- Welcome to the Interactive HTML & CSS Tutorial.
Start by choosing a chapter and
write your code in this window.
-->
<!DOCTYPE html>
<html>
<head>
<title>Hello, World!</title>
</head>
<body>
<p>Hello, World!</p>
</body>
</html>
"""
}
DOMAIN_DATA[LEARNGO_DOMAIN] = {
"language" : "go",
"language_id": 114,
"codemirror_mode": "text/x-go",
"prism_mode": "language-go",
"analytics" : "UA-22741967-13",
"language_uppercase" : "Go",
"default_code" : """// Welcome to the Interactive Go Tutorial.
// Start by choosing a chapter, write your code in this window.
package main
import (
"fmt"
)
func main() {
fmt.Println("Hello, world!")
}
""",
"container_word" : "class",
"container_indent" : " ",
"container" : """package main
import (
"fmt"
)
func main() {
{code}
}
""",
}
DOMAIN_DATA[LEARNC_DOMAIN] = {
"language" : "c",
"language_id": 1,
"codemirror_mode": "text/x-csrc",
"prism_mode": "language-c",
"analytics" : "UA-22741967-3",
"language_uppercase" : "C",
"default_code" : """/* Welcome to the Interactive C Tutorial.
Start by choosing a chapter and
write your code in this window. */
#include <stdio.h>
int main() {
printf("Hello, World!");
return 0;
}
""",
"container_word" : "main()",
"container_indent" : " ",
"container" : """#include <stdio.h>
int main() {
{code}
return 0;
}
"""
}
DOMAIN_DATA[LEARNCPP_DOMAIN] = {
"language" : "c++11",
"language_id": 1,
"codemirror_mode": "text/x-csrc",
"prism_mode": "language-cpp",
"analytics" : "UA-22741967-12",
"language_uppercase" : "C++",
"default_code" : """// Welcome to the Interactive C++ Tutorial.
// Start by choosing a chapter and
// write your code in this window.
#include <iostream>
using namespace std;
int main() {
cout << "Hello, World!" << endl;
return 0;
}
""",
"container_word" : "main()",
"container_indent" : " ",
"container" : """#include <iostream>
using namespace std;
int main() {
{code}
return 0;
}
"""
}
DOMAIN_DATA[LEARNJS_DOMAIN] = {
"language" : "javascript",
"language_id": 35,
"codemirror_mode": "text/javascript",
"prism_mode": "language-javascript",
"analytics" : "UA-22741967-5",
"language_uppercase" : "JavaScript",
"default_code" : """// Welcome to the Interactive JavaScript Tutorial.
// Start by choosing a chapter and
// write your code in this window.
console.log("Hello, World!");
"""
}
DOMAIN_DATA[LEARNPHP_DOMAIN] = {
"language" : "php",
"language_id": 29,
"codemirror_mode": "application/x-httpd-php",
"prism_mode": "language-php",
"analytics" : "UA-22741967-9",
"language_uppercase" : "PHP",
"default_code" : """<?php
// Welcome to the Interactive PHP Tutorial.
// Start by choosing a chapter and
// write your code in this window.
echo "Hello, World!";
?>
""",
"container_word" : "<?",
"container_indent" : "",
"container" : """<?php
{code}
?>""",
}
DOMAIN_DATA[LEARNSHELL_DOMAIN] = {
"language" : "bash",
"language_id": 28,
"codemirror_mode": "text/x-sh",
"prism_mode": "language-bash",
"analytics" : "UA-22741967-7",
"language_uppercase" : "Shell",
"default_code" : """#!/bin/bash
# Welcome to the Interactive Shell Tutorial.
# Start by choosing a chapter and
# write your code in this window.
echo "Hello, World!";
"""
}
DOMAIN_DATA[LEARNCS_DOMAIN] = {
"language" : "c#",
"language_id": 27,
"codemirror_mode": "text/x-csharp",
"prism_mode": "language-csharp",
"analytics" : "UA-22741967-10",
"language_uppercase" : "C#",
"default_code" : """// Welcome to the Interactive C# Tutorial.
// Start by choosing a chapter and write your code in this window.
using System;
public class Hello
{
public static void Main()
{
Console.WriteLine("Hello, World!");
}
}
""",
"container_word" : "class",
"container_indent" : " ",
"container" : """using System;
using System.Collections.Generic;
public class Hello
{
public static void Main()
{
{code}
}
}
""",
}
DOMAIN_DATA[LEARNPERL_DOMAIN] = {
"language" : "perl",
"language_id": 3,
"codemirror_mode": "text/x-perl",
"prism_mode": "language-perl",
"analytics" : "UA-22741967-8",
"language_uppercase" : "Perl",
"default_code" : """# Welcome to the Interactive Perl Tutorial.
# Start by choosing a chapter and write your code in this window.
print 'Hello, World!';
"""
}
DOMAIN_DATA[LEARNRUBY_DOMAIN] = {
"language" : "ruby",
"language_id": 17,
"codemirror_mode": "text/x-ruby",
"prism_mode": "language-ruby",
"analytics" : "UA-22741967-6",
"language_uppercase" : "Ruby",
"default_code" : """# Welcome to the Interactive Ruby Tutorial.
# Start by choosing a chapter and
# write your code in this window.
puts 'Hello, World!'
"""
}
DOMAIN_DATA[LEARNSCALA_DOMAIN] = {
"language" : "scala",
"language_id": 39,
"codemirror_mode": "text/x-scala",
"prism_mode": "language-scala",
"analytics" : "UA-22741967-14",
"namespace" : "learnscala.org",
"language_uppercase" : "Scala",
"default_code" : """// Welcome to the Interactive Scala Tutorial.
// Start by choosing a chapter, write your code in this window.
object Main {
def main(args: Array[String]) {
println("Hello, World!\\n");
}
}
""",
"container_word": "object",
"container_indent": " ",
"container": """object Test {
def main(args: Array[String]) {
println("Hello, World!\\n");
}
}
""",
}
# DOMAIN_DATA[LEARNSOLIDITY_DOMAIN] = {
# "language" : "solidity",
# "codemirror_mode": "text/x-solidity",
# "prism_mode": "language-solidity",
# "analytics" : "UA-22741967-15",
# "language_uppercase" : "Solidity",
# "default_code" : """// Welcome to the Interactive Solidity Tutorial.
# // Start by choosing a chapter, write your code in this window.
#
# """,
# "container_word" : "",
# "container_indent" : "",
# "container" : """
# """,
#
# }
DOMAIN_DATA[LEARNSQL_DOMAIN] = {
"language" : "sql",
"language_id": 40,
"codemirror_mode": "text/x-sql",
"prism_mode": "language-sql",
"analytics" : "UA-22741967-16",
"language_uppercase" : "SQL",
"default_code" : """-- Welcome to the Interactive SQL Tutorial.
-- Start by choosing a chapter, write your code in this window.
CREATE TABLE helloworld (phrase TEXT);
INSERT INTO helloworld VALUES ("Hello, World!");
INSERT INTO helloworld VALUES ("Goodbye, World!");
SELECT * FROM helloworld WHERE phrase = "Hello, World!";
""",
"container_word" : "",
"container_indent" : "",
"container" : """
""",
}
# Runs once at import time: fill in the derived per-domain fields below and validate assets.
for domain, v in list(DOMAIN_DATA.items()):
v["namespace"] = domain
v["full_url"] = "https://www." + v["namespace"]
v["contact_email"] = "admin@" + v["namespace"]
v["support_email"] = "support@" + v["namespace"]
v["logo"] = "/static/img/logos/" + v["namespace"] + ".png"
v["share_logo"] = "/static/img/share-logos/" + v["namespace"] + ".png"
v["favicon"] = "/static/img/favicons/" + v["namespace"] + ".ico"
v["styled_domain"] = domain
v["sender"] = "%s <%s>" % (domain, v["contact_email"])
import os
if not os.path.exists(v["logo"][1:]):
raise Exception("no logo for %s - %s" % (domain, v["logo"][1:]))
if not os.path.exists(v["share_logo"][1:]):
raise Exception("no share logo for %s - %s" % (domain, v["share_logo"][1:]))
if not os.path.exists(v["favicon"][1:]):
raise Exception("no favicon for %s - %s" % (domain, v["favicon"][1:]))
| apache-2.0 | -8,052,458,547,135,455,000 | 25.350538 | 84 | 0.568106 | false |
diego-carvalho/FAiR | app/src/plotGraphs.py | 1 | 3083 | # -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def plot_graphs(listX, listY, labelX, labelY, out, t='nada'):
fig, ax = plt.subplots()
plt.plot(listX, listY, linewidth=3.0)
ax.set_xlabel(labelX, fontsize='xx-large', labelpad=25, weight='semibold')
ax.set_ylabel(labelY, fontsize='xx-large', labelpad=25, weight='semibold')
plt.tick_params(axis='both', labelsize=20, pad=25)
# for tick in ax.xaxis.get_ticklabels():
# tick.set_fontsize('x-large')
# tick.set_weight('bold')
# for tick in ax.yaxis.get_ticklabels():
# tick.set_fontsize('x-large')
# tick.set_weight('bold')
plt.tight_layout()
if t != 'nada':
plt.title(t, fontsize='xx-large', weight='semibold')
plt.savefig(out)
def plot_two_graphs(listAX, listAY, listBX, listBY, labelA, labelB, labelX, labelY, out, t='nada'):
fig, ax = plt.subplots()
plt.plot(listAX, listAY, label=labelA, linewidth=3.0)
plt.plot(listBX, listBY, label=labelB, linewidth=3.0)
ax.set_xlabel(labelX, fontsize='xx-large', labelpad=25, weight='semibold')
ax.set_ylabel(labelY, fontsize='xx-large', labelpad=25, weight='semibold')
plt.legend()
plt.tick_params(axis='both', labelsize=20, pad=25)
plt.tight_layout()
if t != 'nada':
plt.title(t, fontsize='xx-large', weight='semibold')
plt.savefig(out)
def plot_two_graphs_point(listAX, listAY, listBX, listBY, labelA, labelB, labelX, labelY, out, t='nada'):
plt.rcParams['axes.unicode_minus'] = False
fig, ax = plt.subplots()
plt.plot(listAX, listAY, 'o', label=labelA, linewidth=3.0)
plt.plot(listBX, listBY, 'o', label=labelB, linewidth=3.0)
ax.set_xlabel(labelX, fontsize='xx-large', labelpad=25, weight='semibold')
ax.set_ylabel(labelY, fontsize='xx-large', labelpad=25, weight='semibold')
plt.legend()
plt.tick_params(axis='both', labelsize=20, pad=25)
plt.tight_layout()
if t != 'nada':
plt.title(t, fontsize='xx-large', weight='semibold')
    plt.savefig(out)  # the Agg backend is non-interactive; show() would display nothing
def plot_graphs_bar_old(listX, listY, labelX, labelY, out, t='nada'):
fig, ax = plt.subplots()
plt.barh(listX, listY, 0.5, align='edge')
# plt.xticks(listX)
ax.set_xlabel(labelX, fontsize='xx-large', labelpad=25, weight='semibold')
ax.set_ylabel(labelY, fontsize='xx-large', labelpad=25, weight='semibold')
plt.tick_params(axis='both', labelsize=20, pad=25)
plt.tight_layout()
if t != 'nada':
plt.title(t, fontsize='xx-large', weight='semibold')
plt.savefig(out)
def plot_graphs_bar(listX, listY, labelX, labelY, out, t='nada'):
fig, ax = plt.subplots()
plt.rcdefaults()
y_pos = np.arange(len(listX))
with plt.style.context('fivethirtyeight'):
plt.barh(y_pos, listY, 1, align='edge', alpha=0.5)
plt.yticks(y_pos, listX, size=9)
ax.set_xlabel(labelY)
ax.set_ylabel(labelX)
plt.title(t, fontsize='xx-large', weight='semibold')
plt.savefig(out)
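# Usage sketch (hypothetical data):
#   plot_graphs([0, 1, 2], [1.0, 0.5, 0.25], 'x', 'y', 'decay.png', t='Decay')
#   plot_graphs_bar(['a', 'b'], [3, 7], 'item', 'count', 'counts.png')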
| mit | 6,083,376,086,326,227,000 | 27.027273 | 105 | 0.639961 | false |
DQE-Polytech-University/Beamplex | src/laserstructure.py | 1 | 3771 | import matplotlib.pyplot as plt
#stores information about laser structure
#saves refraction and electric field profiles in text and graphic form to HDD
class Laser:
refraction = []
field = []
gridX = []
gridN = []
field = []
def __init__(self, (wavelength, concentration, thickness)):
if isinstance(wavelength, (int, float)) == False:
raise TypeError("wavelength should be a number")
if isinstance(concentration, list) == False:
raise TypeError("concentration should be a list")
if isinstance( thickness, (list)) == False:
raise TypeError("thickness should be a list")
for i in range(5):
if isinstance(concentration[i], (int, float)) == False or isinstance( thickness[i], (int, float)) == False:
raise TypeError("concentration and thickness elements should be numbers")
if wavelength is None:
raise ValueError("wavelength is undefined")
if concentration is None:
raise ValueError("concentration is undefined")
if thickness is None:
raise ValueError("thickness is undefined")
if wavelength < 0.85 or wavelength > 1.5:
raise ValueError("wavelength out of range")
self.wavelength = wavelength
self.concentration = concentration
self.thickness = thickness
#refraction profile output
def plotRefraction(self):
if isinstance(self.gridX, list) == False:
raise TypeError("self.gridX should be a list")
if isinstance(self.gridN, list) == False:
raise TypeError("self.gridN should be a list")
if len(self.gridX) <= 20:
raise ValueError("len(self.gridX) out of range")
if len(self.gridN) <= 20:
raise ValueError("len(self.gridN) out of range")
if (len(self.gridX) == len(self.gridN)) == False:
raise IndexError("self.gridX should be the same dimension as self.gridN")
plt.plot(self.gridX, self.gridN)
plt.xlabel('position, micrometers')
plt.ylabel('refraction index, arb. units')
plt.title('Refraction Index Profile')
plt.savefig('refraction.png', format='png', dpi=100)
plt.clf()
refractionFile = open("refraction.txt", "w")
for i in range(len(self.gridN)):
refractionFile.write(str(self.gridX[i]) + ": " + str(self.gridN[i]) + "\n")
refractionFile.close()
#field profile output
def plotField(self):
if isinstance(self.gridX, list) == False:
raise TypeError("self.gridX should be a list")
if isinstance(self.field, list) == False:
raise TypeError("self.field should be a list")
if len(self.gridX) <= 20:
raise ValueError("len(self.gridX) out of range")
if len(self.field) <= 20:
raise ValueError("len(self.field) out of range")
if (len(self.gridX) == len(self.field)) == False:
raise TypeError("self.gridX should be the same dimension as self.field")
for i in range(len(self.field)):
self.field[i] = self.field[i] ** 2
plt.plot(self.gridX, self.field)
plt.xlabel('position, micrometers')
plt.ylabel('electric field, arb. units')
plt.title('Electric field in laser structure')
plt.savefig('field.png', format='png', dpi=100)
plt.clf()
fieldFile = open("field.txt", "w")
        for i in range(len(self.field)):  # write the field that was plotted, not gridN
fieldFile.write(str(self.gridX[i]) + ": " + str(self.field[i]) + "\n")
fieldFile.close()
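# Usage sketch (hypothetical 5-layer structure; Python 2 only, since the
# constructor uses tuple-parameter unpacking; grids normally come from a solver):
#   laser = Laser((1.06, [1e18, 5e17, 1e16, 5e17, 1e18],
#                  [1.0, 0.5, 0.2, 0.5, 1.0]))
#   ... fill laser.gridX, laser.gridN and laser.field (>20 points each) ...
#   laser.plotRefraction()
#   laser.plotField()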
| mit | -3,386,450,447,366,524,000 | 39.43956 | 119 | 0.581543 | false |
PhyNerd/pi-timolo | source/pi-timolo.py | 1 | 37923 | #!/usr/bin/python
# pi-timolo - Raspberry Pi Long Duration Timelapse, Motion Detection, with Low Light Capability
# written by Claude Pageau Dec-2014 (original issue)
# getStreamImage function based on utpalc code, which builds on brainflakes' lightweight motion detection code on the Raspberry Pi forum - Thanks
# Complete pi-timolo code and wiki instructions are available on my github repo at https://github.com/pageauc/pi-timolo
# 2.7 released 20-Jul-2015 added saving of exif metadata when text written to image sinc PIL does not retain this.
# 2.8 released 2-Aug-2015 updated gdrive and replaced mencoder with avconv
# 2.92 release 22-Mar-2016 fixed getCurrentCount when file contains non integer data due to a write error or corruption.
# 2.93 release 21-Jul-2016 improved getCurrentCount logic and changed default motion image size to 128x80 per picamra default
# 2.94 release 14-Aug-2016 implemented camera.rotation = cameraRotate but not yet fully tested
# 2.95 release 20-Dec-2016 Updated logging to be more pythonic and minor bug fix
# 2.96 release 26-Dec-2016 Fixed fatal bug error in logging when verbose = False
# 2.97 release 28-Dec-2016 Modified logging setup to simplify and better display messages
# 2.98 release 04-Jan-2017 Added convid.sh and associated changes. Added flip to video option
# 2.99 release 06-Jan-2017 Added sync_lock option to motion video
# 3.00 release 09-Jan-2017 Added takeVideo subprocess to convert h264
# 3.10 release 12-Jan-2017 Added takeVideo annotate datetime text using image text settings on and size.
# 4.00 release 23-Jan-2017 Added menubox.sh and sh config vars stored in conf files so upgrades won't delete settings
# 4.10 release 09-Mar-2017 Moved position of camera.exposure_mode = 'off' for night shots
# 4.20 release 13-Mar-2017 Updated takeNightImage settings
# 4.30 release 30-Mar-2017 Add variables for day camera motion and timelapse camera warmup before taking image
progVer = "ver 4.30"
import datetime
import glob
import logging
import os
import sys
import time
import subprocess
mypath = os.path.abspath(__file__) # Find the full path of this python script
baseDir = os.path.dirname(mypath) # get the path location only (excluding script name)
baseFileName = os.path.splitext(os.path.basename(mypath))[0]
progName = os.path.basename(__file__)
logFilePath = os.path.join(baseDir, baseFileName + ".log")
print("----------------------------------------------------------------------------------------------")
print("%s %s" %( progName, progVer ))
# Check for variable file to import and error out if not found.
configFilePath = os.path.join(baseDir, "config.py")
if not os.path.exists(configFilePath):
print("ERROR - Cannot Import Configuration Variables. Missing Configuration File %s" % ( configFilePath ))
quit()
else:
# Read Configuration variables from config.py file
print("Importing Configuration Variables from File %s" % ( configFilePath ))
from config import *
# Now that variable are imported from config.py Setup Logging
if logDataToFile:
print("Sending Logging Data to %s (Console Messages Disabled)" %( logFilePath ))
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(funcName)-10s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=logFilePath,
filemode='w')
elif verbose:
print("Logging to Console per Variable verbose=True")
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(funcName)-10s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
else:
print("Logging Disabled per Variable verbose=False")
logging.basicConfig(level=logging.CRITICAL,
format='%(asctime)s %(levelname)-8s %(funcName)-10s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
print("Loading Python Libraries ...")
# import remaining python libraries
import picamera
import picamera.array
import numpy as np
import pyexiv2
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from fractions import Fraction
#==================================
# System Variables
# Should not need to be customized
#==================================
SECONDS2MICRO = 1000000 # Used to convert from seconds to microseconds
nightMaxShut = int(nightMaxShut * SECONDS2MICRO) # default=5 sec IMPORTANT- 6 sec works sometimes but occasionally locks RPI and HARD reboot required to clear
nightMinShut = int(nightMinShut * SECONDS2MICRO)  # lowest shutter setting for the transition from day to night (or vice versa)
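# picamera expresses shutter_speed in microseconds, so e.g. nightMaxShut = 5
# (seconds, from config.py) becomes 5,000,000 here.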
testWidth = 128 # width of rgb image stream used for motion detection and day/night changes
testHeight = 80 # height of rgb image stream used for motion detection and day/night changes
daymode = False # default should always be False.
motionPath = os.path.join(baseDir, motionDir) # Store Motion images
motionNumPath = os.path.join(baseDir, motionPrefix + baseFileName + ".dat") # dat file to save currentCount
timelapsePath = os.path.join(baseDir, timelapseDir) # Store Time Lapse images
timelapseNumPath = os.path.join(baseDir, timelapsePrefix + baseFileName + ".dat") # dat file to save currentCount
lockFilePath = os.path.join(baseDir, baseFileName + ".sync")
#-----------------------------------------------------------------------------------------------
def userMotionCodeHere():
# Users can put code here that needs to be run prior to taking motion capture images
# Eg Notify or activate something.
# User code goes here
return
#-----------------------------------------------------------------------------------------------
def shut2Sec (shutspeed):
shutspeedSec = shutspeed/float(SECONDS2MICRO)
shutstring = str("%.3f sec") % ( shutspeedSec )
return shutstring
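# e.g. shut2Sec(1500000) -> "1.500 sec"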
#-----------------------------------------------------------------------------------------------
def showTime():
rightNow = datetime.datetime.now()
currentTime = "%04d-%02d-%02d %02d:%02d:%02d" % (rightNow.year, rightNow.month, rightNow.day, rightNow.hour, rightNow.minute, rightNow.second)
return currentTime
#-----------------------------------------------------------------------------------------------
def showDots(dotcnt):
if motionOn and verbose:
dotcnt += 1
if dotcnt > motionMaxDots + 2:
print("")
dotcnt = 0
elif dotcnt > motionMaxDots:
print("")
stime = showTime() + " ."
sys.stdout.write(stime)
sys.stdout.flush()
dotcnt = 0
else:
sys.stdout.write('.')
sys.stdout.flush()
return dotcnt
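# showDots prints one '.' per motion-scan pass and starts a fresh timestamped
# line every motionMaxDots dots, giving a console heartbeat while waiting.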
#-----------------------------------------------------------------------------------------------
def checkConfig():
if not motionOn and not timelapseOn:
logging.warning("Both Motion and Timelapse are turned OFF - motionOn=%s timelapseOn=%s", motionOn, timelapseOn)
return
#-----------------------------------------------------------------------------------------------
def takeTestImage():
# Check if any parameter was passed to this script from the command line.
# This is useful for taking a single image for aligning camera without editing script settings.
mytime=showTime()
testfilename = "takeTestImage.jpg"
testfilepath = os.path.join(baseDir, testfilename)
takeDayImage(testfilepath, timelapseCamSleep)
imagetext = "%s %s" % (mytime, testfilename)
writeTextToImage(testfilepath, imagetext, daymode)
logging.info("imageTestPrint=%s Captured Test Image to %s " % (imageTestPrint, testfilepath))
sys.exit(2)
return
#-----------------------------------------------------------------------------------------------
def displayInfo(motioncount, timelapsecount):
if verbose:
print("-------------------------------------- Settings ----------------------------------------------")
print("Config File .. Title=%s" % configTitle)
print(" config-template filename=%s" % configName)
print("Image Info ... Size=%ix%i Prefix=%s VFlip=%s HFlip=%s Preview=%s" % (imageWidth, imageHeight, imageNamePrefix, imageVFlip, imageHFlip, imagePreview))
shutStr = shut2Sec(nightMaxShut)
print(" Low Light. twilightThreshold=%i nightMaxShut=%s nightMaxISO=%i nightSleepSec=%i sec" % (twilightThreshold, shutStr, nightMaxISO, nightSleepSec))
print(" No Shots . noNightShots=%s noDayShots=%s" % (noNightShots, noDayShots))
if showDateOnImage:
print(" Img Text . On=%s Bottom=%s (False=Top) WhiteText=%s (False=Black) showTextWhiteNight=%s" % (showDateOnImage, showTextBottom, showTextWhite, showTextWhiteNight))
print(" showTextFontSize=%i px height" % (showTextFontSize))
else:
print(" No Text .. showDateOnImage=%s Text on Image Disabled" % (showDateOnImage))
print("Motion ....... On=%s Prefix=%s threshold=%i(How Much) sensitivity=%i(How Many)" % (motionOn, motionPrefix, threshold, sensitivity))
print(" forceTimer=%i min(If No Motion)" % (motionForce/60))
print(" Number of previous images to use to check for motion=%i" % (motionAverage))
print(" Use video port for motion image capture? %s" % (useVideoPort))
print(" motionPath=%s motionCamSleep=%.2f sec" % (motionPath, motionCamSleep))
if motionNumOn:
print(" Num Seq .. motionNumOn=%s current=%s numStart=%i numMax=%i numRecycle=%s" % (motionNumOn, motioncount, motionNumStart, motionNumMax, motionNumRecycle))
print(" motionNumPath=%s " % (motionNumPath))
else:
print(" Date-Time. motionNumOn=%s Image Numbering Disabled" % (motionNumOn))
if motionQuickTLOn:
print(" Quick TL . motionQuickTLOn=%s motionQuickTLTimer=%i sec motionQuickTLInterval=%i sec (0=fastest)" % (motionQuickTLOn, motionQuickTLTimer, motionQuickTLInterval))
else:
print(" Quick TL . motionQuickTLOn=%s Quick Time Lapse Disabled" % (motionQuickTLOn))
if motionVideoOn:
print(" Video .... motionVideoOn=%s motionVideoTimer=%i sec (superseded by QuickTL)" % (motionVideoOn, motionVideoTimer))
else:
print(" Video .... motionVideoOn=%s Motion Video Disabled" % (motionVideoOn))
print("Time Lapse ... On=%s Prefix=%s Timer=%i sec timeLapseExit=%i sec (0=Continuous)" % (timelapseOn, timelapsePrefix, timelapseTimer, timelapseExit))
print(" timelapsePath=%s timelapseCamSleep=%.2f sec" % (timelapsePath, timelapseCamSleep))
if timelapseNumOn:
print(" Num Seq .. On=%s current=%s numStart=%i numMax=%i numRecycle=%s" % (timelapseNumOn, timelapsecount, timelapseNumStart, timelapseNumMax, timelapseNumRecycle))
print(" numPath=%s" % (timelapseNumPath))
else:
print(" Date-Time. motionNumOn=%s Numbering Disabled" % (timelapseNumOn))
if createLockFile:
print("gdrive Sync .. On=%s Path=%s Note: syncs for motion images only." % (createLockFile, lockFilePath))
print("Logging ...... verbose=%s (True = Log To Console)" % ( verbose ))
print(" logDataToFile=%s logFilePath=%s" % ( logDataToFile, logFilePath ))
print("------------------------------------ Log Activity --------------------------------------------")
checkConfig()
return
#-----------------------------------------------------------------------------------------------
def checkImagePath():
# Checks for image folders and creates them if they do not already exist.
if motionOn:
if not os.path.isdir(motionPath):
logging.info("Creating Image Motion Detection Storage Folder %s", motionPath)
os.makedirs(motionPath)
if timelapseOn:
if not os.path.isdir(timelapsePath):
logging.info("Creating Time Lapse Image Storage Folder %s", timelapsePath)
os.makedirs(timelapsePath)
return
#-----------------------------------------------------------------------------------------------
def getCurrentCount(numberpath, numberstart):
# Create a .dat file to store currentCount or read file if it already Exists
# Create numberPath file if it does not exist
if not os.path.exists(numberpath):
logging.info("Creating New File %s numberstart= %s", numberpath, numberstart)
open(numberpath, 'w').close()
f = open(numberpath, 'w+')
f.write(str(numberstart))
f.close()
# Read the numberPath file to get the last sequence number
with open(numberpath, 'r') as f:
writeCount = f.read()
try:
numbercounter = int(writeCount)
except ValueError: # Found Corrupt dat file since cannot convert to integer
# Try to determine if this is motion or timelapse
if numberpath.find(motionPrefix) > 0:
filePath = motionPath + "/*.jpg"
fprefix = motionPath + motionPrefix + imageNamePrefix
else:
filePath = timelapsePath + "/*.jpg"
fprefix = timelapsePath + timelapsePrefix + imageNamePrefix
try:
# Scan image folder for most recent file and try to extract numbercounter
newest = max(glob.iglob(filePath), key=os.path.getctime)
writeCount = newest[len(fprefix)+1:newest.find(".jpg")]
except:
writeCount = numberstart
try:
numbercounter = int(writeCount)+1
except ValueError:
numbercounter = numberstart
logging.error("Invalid Data in File %s Reset numbercounter to %s", numberpath, numbercounter)
f = open(numberpath, 'w+')
f.write(str(numbercounter))
f.close()
f = open(numberpath, 'r')
writeCount = f.read()
    f.close()
numbercounter = int(writeCount)
return numbercounter
#-----------------------------------------------------------------------------------------------
def writeTextToImage(imagename, datetoprint, daymode):
# function to write date/time stamp directly on top or bottom of images.
if showTextWhite:
FOREGROUND = ( 255, 255, 255 ) # rgb settings for white text foreground
textColour = "White"
else:
FOREGROUND = ( 0, 0, 0 ) # rgb settings for black text foreground
textColour = "Black"
if showTextWhiteNight and ( not daymode):
        FOREGROUND = ( 255, 255, 255 ) # rgb settings for white text foreground at night
textColour = "White"
# centre text and compensate for graphics text being wider
x = int((imageWidth/2) - (len(imagename)*2))
if showTextBottom:
y = (imageHeight - 50) # show text at bottom of image
else:
y = 10 # show text at top of image
TEXT = imageNamePrefix + datetoprint
font_path = '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf'
font = ImageFont.truetype(font_path, showTextFontSize, encoding='unic')
text = TEXT.decode('utf-8')
# Read exif data since ImageDraw does not save this metadata
img = Image.open(imagename)
metadata = pyexiv2.ImageMetadata(imagename)
metadata.read()
draw = ImageDraw.Draw(img)
# draw.text((x, y),"Sample Text",(r,g,b))
draw.text(( x, y ), text, FOREGROUND, font=font)
img.save(imagename)
metadata.write() # Write previously saved exif data to image file
logging.info("Added %s Text[%s] on %s", textColour, datetoprint, imagename)
return
#-----------------------------------------------------------------------------------------------
def postImageProcessing(numberon, counterstart, countermax, counter, recycle, counterpath, filename, daymode):
# If required process text to display directly on image
if (not motionVideoOn):
rightNow = datetime.datetime.now()
if showDateOnImage:
dateTimeText = "%04d%02d%02d_%02d:%02d:%02d" % (rightNow.year, rightNow.month, rightNow.day, rightNow.hour, rightNow.minute, rightNow.second)
if numberon:
counterStr = "%i " % ( counter )
imageText = counterStr + dateTimeText
else:
imageText = dateTimeText
# Now put the imageText on the current image
writeTextToImage(filename, imageText, daymode)
if createLockFile and motionOn:
createSyncLockFile(filename)
# Process currentCount for next image if number sequence is enabled
if numberon:
counter += 1
if countermax > 0:
if (counter > counterstart + countermax):
if recycle:
counter = counterstart
else:
print("%s - Exceeded Image Count numberMax=%i" % ( progName, countermax ))
print("Exiting %s" % progName)
sys.exit(2)
# write next image counter number to dat file
currentTime = showTime()
writeCount = str(counter)
if not os.path.exists(counterpath):
logging.info("Create New Counter File writeCount=%s %s", writeCount, counterpath)
open(counterpath, 'w').close()
f = open(counterpath, 'w+')
f.write(str(writeCount))
f.close()
logging.info("Next Counter=%s %s", writeCount, counterpath)
return counter
#-----------------------------------------------------------------------------------------------
def getFileName(path, prefix, numberon, counter, video, dateSubDir):
# build image file names by number sequence or date/time
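    # e.g. sequence mode:  <path>/<prefix>42.jpg
    #      date/time mode: <path>/<prefix>20240101-120000.jpg (.h264 if video)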
ext= ".h264" if video else ".jpg"
rightNow = datetime.datetime.now()
if dateSubDir:
path = "%s/%04d-%02d-%02d" % (path, rightNow.year, rightNow.month, rightNow.day)
if not os.path.exists(path):
os.makedirs(path)
if numberon:
filename = os.path.join(path, prefix + str(counter) + ext)
else:
filename = "%s/%s%04d%02d%02d-%02d%02d%02d%s" % ( path, prefix ,rightNow.year, rightNow.month, rightNow.day, rightNow.hour, rightNow.minute, rightNow.second, ext)
return filename
#-----------------------------------------------------------------------------------------------
def takeDayImage(filename, cam_sleep_time):
# Take a Day image using exp=auto and awb=auto
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
camera.rotation = imageRotation #Note use imageVFlip and imageHFlip variables
# Day Automatic Mode
camera.exposure_mode = 'auto'
camera.awb_mode = 'auto'
time.sleep(cam_sleep_time) # sleep for a little while so camera can get adjustments
# motion is minimal to capture movement while timelapse is longer for better images
if imagePreview:
camera.start_preview()
camera.capture(filename, use_video_port=useVideoPort)
logging.info("Size=%ix%i exp=auto awb=auto %s" % (imageWidth, imageHeight, filename))
return
#-----------------------------------------------------------------------------------------------
def takeNightImage(filename):
dayStream = getStreamImage(True)
dayPixAve = getStreamPixAve(dayStream)
currentShut, currentISO = getNightCamSettings(dayPixAve)
# Take low light Night image (including twilight zones)
with picamera.PiCamera() as camera:
# Take Low Light image
# Set a framerate_range then set shutter
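        # The 1/6 fps minimum framerate allows exposures up to ~6 seconds,
        # which is why nightMaxShut is capped near 6 sec (see note at top)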
camera.resolution = (imageWidth, imageHeight)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
camera.sensor_mode = 3
camera.vflip = imageVFlip
camera.hflip = imageHFlip
camera.rotation = imageRotation #Note use imageVFlip and imageHFlip variables
camera.shutter_speed = currentShut
camera.iso = currentISO
# Give the camera a good long time to measure AWB
time.sleep(nightSleepSec)
camera.exposure_mode = 'off'
if imagePreview:
camera.start_preview()
camera.capture(filename)
shutSec = shut2Sec(currentShut)
logging.info("Size=%ix%i dayPixAve=%i ISO=%i shut=%s %s" % (imageWidth, imageHeight, dayPixAve, currentISO, shutSec, filename))
return
#-----------------------------------------------------------------------------------------------
def takeQuickTimeLapse(motionPath, imagePrefix, motionNumOn, motionNumCount, daymode, motionNumPath):
logging.info("motion Quick Time Lapse for %i sec every %i sec" % (motionQuickTLTimer, motionQuickTLInterval))
checkTimeLapseTimer = datetime.datetime.now()
keepTakingImages = True
filename = getFileName(motionPath, imagePrefix, motionNumOn, motionNumCount, False, motionDateSubDir)
while keepTakingImages:
yield filename
rightNow = datetime.datetime.now()
timelapseDiff = (rightNow - checkTimeLapseTimer).total_seconds()
if timelapseDiff > motionQuickTLTimer:
keepTakingImages=False
else:
motionNumCount = postImageProcessing(motionNumOn, motionNumStart, motionNumMax, motionNumCount, motionNumRecycle, motionNumPath, filename, daymode)
filename = getFileName(motionPath, imagePrefix, motionNumOn, motionNumCount, False, motionDateSubDir)
time.sleep(motionQuickTLInterval)
#-----------------------------------------------------------------------------------------------
def takeVideo(filename):
# Take a short motion video if required
logging.info("Size %ix%i for %i sec %s" % (imageWidth, imageHeight, motionVideoTimer, filename))
if motionVideoOn:
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
camera.rotation = imageRotation #Note use imageVFlip and imageHFlip variables
if showDateOnImage:
rightNow = datetime.datetime.now()
dateTimeText = " Started at %04d-%02d-%02d %02d:%02d:%02d " % (rightNow.year, rightNow.month, rightNow.day, rightNow.hour, rightNow.minute, rightNow.second)
camera.annotate_text_size = showTextFontSize
camera.annotate_foreground = picamera.Color('black')
camera.annotate_background = picamera.Color('white')
camera.annotate_text = dateTimeText
camera.start_recording(filename)
camera.wait_recording(motionVideoTimer)
camera.stop_recording()
# This creates a subprocess that runs convid.sh with the filename as a parameter
try:
convid = "%s/convid.sh %s" % ( baseDir, filename )
proc = subprocess.Popen(convid, shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
except IOError:
print("subprocess %s failed" %s ( convid ))
else:
print("unidentified error")
createSyncLockFile(filename)
return
#-----------------------------------------------------------------------------------------------
def createSyncLockFile(imagefilename):
# If required create a lock file to indicate file(s) to process
if createLockFile:
if not os.path.exists(lockFilePath):
open(lockFilePath, 'w').close()
logging.info("Create gdrive sync.sh Lock File %s", lockFilePath)
rightNow = datetime.datetime.now()
now = "%04d%02d%02d-%02d%02d%02d" % ( rightNow.year, rightNow.month, rightNow.day, rightNow.hour, rightNow.minute, rightNow.second )
filecontents = now + " createSyncLockFile - " + imagefilename + " Ready to sync using sudo ./sync.sh command."
f = open(lockFilePath, 'w+')
f.write(filecontents)
f.close()
return
#-----------------------------------------------------------------------------------------------
def getStreamImage(isDay):
# Capture an image stream to memory based on daymode
with picamera.PiCamera() as camera:
camera.resolution = (testWidth, testHeight)
with picamera.array.PiRGBArray(camera) as stream:
if isDay:
time.sleep(0.5)
camera.exposure_mode = 'auto'
camera.awb_mode = 'auto'
camera.capture(stream, format='rgb', use_video_port=useVideoPort)
else:
# Take Low Light image
# Set a framerate_range then set shutter
# speed to 6s
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
camera.sensor_mode = 3
camera.shutter_speed = nightMaxShut
camera.iso = nightMaxISO
# Give the camera a good long time to measure AWB
# Note sleep time is hard coded and not set by nightSleepSec
time.sleep( 10 )
camera.exposure_mode = 'off'
camera.capture(stream, format='rgb')
return stream.array
#-----------------------------------------------------------------------------------------------
def getStreamPixAve(streamData):
# Calculate the average pixel values for the specified stream (used for determining day/night or twilight conditions)
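    # Index 1 selects the green channel, which is a reasonable proxy for
    # overall scene brightness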
pixAverage = int(np.average(streamData[...,1]))
return pixAverage
#-----------------------------------------------------------------------------------------------
def getNightCamSettings(dayPixAve):
# Calculate Ratio for adjusting shutter and ISO values
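    # ratio scales linearly from 1.0 (completely dark, dayPixAve=0) down to
    # 0.0 at twilightThreshold, so shutter/ISO approach their night maximums
    # as the scene gets darker; the bounds checks below keep them within limits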
if dayPixAve <= twilightThreshold:
ratio = ((twilightThreshold - dayPixAve)/float(twilightThreshold))
outShut = int(nightMaxShut * ratio)
outISO = int(nightMaxISO * ratio)
else:
ratio = 0.0
outShut = nightMinShut
outISO = nightMinISO
# Do some Bounds Checking to avoid potential problems
if outShut < nightMinShut:
outShut = nightMinShut
if outShut > nightMaxShut:
outShut = nightMaxShut
if outISO < nightMinISO:
outISO = nightMinISO
if outISO > nightMaxISO:
outISO = nightMaxISO
logging.info("dayPixAve=%i ratio=%.3f ISO=%i shut=%i %s" % ( dayPixAve, ratio, outISO, outShut, shut2Sec(outShut)))
return outShut, outISO
#-----------------------------------------------------------------------------------------------
def checkIfDay(currentDayMode, dataStream):
# Try to determine if it is day, night or twilight.
dayPixAverage = 0
if currentDayMode:
dayPixAverage = getStreamPixAve(dataStream)
else:
dayStream = getStreamImage(True)
dayPixAverage = getStreamPixAve(dayStream)
if dayPixAverage > twilightThreshold:
currentDayMode = True
else:
currentDayMode = False
return currentDayMode
#-----------------------------------------------------------------------------------------------
def timeToSleep(currentDayMode):
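    # Decide whether to skip image capture: noNightShots sleeps at night,
    # noDayShots sleeps during the day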
if noNightShots:
if currentDayMode:
sleepMode=False
else:
sleepMode=True
elif noDayShots:
if currentDayMode:
sleepMode=True
else:
sleepMode=False
else:
sleepMode=False
return sleepMode
#-----------------------------------------------------------------------------------------------
def checkForTimelapse (timelapseStart):
# Check if timelapse timer has expired
rightNow = datetime.datetime.now()
timeDiff = ( rightNow - timelapseStart).total_seconds()
if timeDiff > timelapseTimer:
timelapseStart = rightNow
timelapseFound = True
else:
timelapseFound = False
return timelapseFound
#-----------------------------------------------------------------------------------------------
def checkForMotion(data1, data2):
# Find motion between two data streams based on sensitivity and threshold
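    # threshold = how much an individual pixel must change to count,
    # sensitivity = how many pixels must change to declare motion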
motionDetected = False
    pixColor = 3 # channel to compare: red=0 green=1 blue=2 all=3
if pixColor == 3:
pixChanges = (np.absolute(data1-data2)>threshold).sum()/3
else:
pixChanges = (np.absolute(data1[...,pixColor]-data2[...,pixColor])>threshold).sum()
if pixChanges > sensitivity:
motionDetected = True
if motionDetected:
dotCount = showDots(motionMaxDots + 2) # New Line
logging.info("Found Motion - threshold=%s sensitivity=%s changes=%s", threshold, sensitivity, pixChanges)
return motionDetected
#-----------------------------------------------------------------------------------------------
def dataLogger():
    # Runs instead of Main() (when debug=True) to log day/night pixAve values to a file.
# Note variable logDataToFile must be set to True in config.py
# You may want to delete pi-timolo.log to clear old data.
print("dataLogger - One Moment Please ....")
while True:
dayStream = getStreamImage(True)
dayPixAverage = getStreamPixAve(dayStream)
nightStream = getStreamImage(False)
nightPixAverage = getStreamPixAve(nightStream)
logging.info("nightPixAverage=%i dayPixAverage=%i twilightThreshold=%i " % (nightPixAverage, dayPixAverage, twilightThreshold))
time.sleep(1)
return
#-----------------------------------------------------------------------------------------------
def Main():
# Main program initialization and logic loop
dotCount = 0 # Counter for showDots() display if not motion found (shows system is working)
checkImagePath()
timelapseNumCount = 0
motionNumCount = 0
try: #if motionAverage hasn't been included in config file (so it works with previous versions)
global motionAverage
if motionAverage > 1:
resetSensitivity = sensitivity*150 # number of changed pixels to trigger reset of background average
if resetSensitivity > testHeight*testWidth*2:
resetSensitivity = testHeight*testWidth*2 #limit the resetSensitivity
else:
motionAverage = 1
except NameError:
motionAverage = 1
try:
global useVideoPort
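        # Self-assignment below raises NameError if useVideoPort was not
        # defined in the config file, triggering the fallback default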
useVideoPort = useVideoPort
except NameError:
useVideoPort = False
moCnt = "non"
tlCnt = "non"
if timelapseOn:
if timelapseNumOn:
timelapseNumCount = getCurrentCount(timelapseNumPath, timelapseNumStart)
tlCnt = str(timelapseNumCount)
if motionOn:
if motionNumOn:
motionNumCount = getCurrentCount(motionNumPath, motionNumStart)
moCnt = str(motionNumCount)
displayInfo(moCnt, tlCnt)
if imageTestPrint:
takeTestImage() # prints one image and exits if imageTestPrint = True in config.py
daymode = False
data1 = getStreamImage(True).astype(float) #All functions should still work with float instead of int - just takes more memory
daymode = checkIfDay(daymode, data1)
data2 = getStreamImage(daymode) # initialise data2 to use in main loop
if not daymode:
data1 = data2.astype(float)
timelapseStart = datetime.datetime.now()
checkDayTimer = timelapseStart
checkMotionTimer = timelapseStart
forceMotion = False # Used for forcing a motion image if no motion for motionForce time exceeded
logging.info("Entering Loop for Time Lapse and/or Motion Detect Please Wait ...")
dotCount = showDots(motionMaxDots) # reset motion dots
# Start main program loop here. Use Ctl-C to exit if run from terminal session.
while True:
# use data2 to check daymode as data1 may be average that changes slowly, and data1 may not be updated
if daymode != checkIfDay(daymode, data2): # if daymode has changed, reset background, to avoid false motion trigger
daymode = not daymode
data2 = getStreamImage(daymode) #get new stream
data1 = data2.astype(float) #reset background
else:
data2 = getStreamImage(daymode) # This gets the second stream of motion analysis
rightNow = datetime.datetime.now() # refresh rightNow time
if not timeToSleep(daymode): # Don't take images if noNightShots or noDayShots settings are valid
if timelapseOn:
takeTimeLapse = checkForTimelapse(timelapseStart)
if takeTimeLapse:
timelapseStart = datetime.datetime.now() # reset time lapse timer
dotCount = showDots(motionMaxDots + 2) # reset motion dots
logging.info("Scheduled Time Lapse Image - daymode=%s", daymode)
imagePrefix = timelapsePrefix + imageNamePrefix
filename = getFileName(timelapsePath, imagePrefix, timelapseNumOn, timelapseNumCount, False, timelapseDateSubDir)
if daymode:
takeDayImage(filename, timelapseCamSleep)
else:
takeNightImage(filename)
timelapseNumCount = postImageProcessing(timelapseNumOn, timelapseNumStart, timelapseNumMax, timelapseNumCount, timelapseNumRecycle, timelapseNumPath, filename, daymode)
dotCount = showDots(motionMaxDots)
if motionOn:
# IMPORTANT - Night motion detection may not work very well due to long exposure times and low light (may try checking red instead of green)
# Also may need night specific threshold and sensitivity settings (Needs more testing)
motionFound = checkForMotion(data1, data2)
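            # Update the running-average background: reset it outright on a
            # large scene change, otherwise blend data2 in at 1/motionAverage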
if motionAverage > 1 and (np.absolute(data2-data1)>threshold).sum() > resetSensitivity:
data1 = data2.astype(float)
else:
data1 = data1+(data2-data1)/motionAverage
rightNow = datetime.datetime.now()
timeDiff = (rightNow - checkMotionTimer).total_seconds()
if timeDiff > motionForce:
dotCount = showDots(motionMaxDots + 2) # New Line
logging.info("No Motion Detected for %s minutes. Taking Forced Motion Image.", (motionForce / 60))
checkMotionTimer = rightNow
forceMotion = True
if motionFound or forceMotion:
dotCount = showDots(motionMaxDots + 2) # New Line
checkMotionTimer = rightNow
if forceMotion:
forceMotion = False
imagePrefix = motionPrefix + imageNamePrefix
# check if motion Quick Time Lapse option is On. This option supersedes motionVideoOn
if motionQuickTLOn and daymode:
filename = getFileName(motionPath, imagePrefix, motionNumOn, motionNumCount, False, motionDateSubDir)
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
time.sleep(.5)
# This uses yield to loop through time lapse sequence but does not seem to be faster due to writing images
camera.capture_sequence(takeQuickTimeLapse(motionPath, imagePrefix, motionNumOn, motionNumCount, daymode, motionNumPath))
motionNumCount = getCurrentCount(motionNumPath, motionNumStart)
else:
if motionVideoOn:
                            filename = getFileName(motionPath, imagePrefix, motionNumOn, motionNumCount, True, motionDateSubDir)
takeVideo(filename)
else:
filename = getFileName(motionPath, imagePrefix, motionNumOn, motionNumCount, False, motionDateSubDir)
if daymode:
takeDayImage(filename, timelapseCamSleep)
else:
takeNightImage(filename)
motionNumCount = postImageProcessing(motionNumOn, motionNumStart, motionNumMax, motionNumCount, motionNumRecycle, motionNumPath, filename, daymode)
if motionFound:
# =========================================================================
# Put your user code in userMotionCodeHere() function at top of this script
# =========================================================================
userMotionCodeHere()
dotCount = showDots(motionMaxDots)
else:
dotCount = showDots(dotCount) # show progress dots when no motion found
return
#-----------------------------------------------------------------------------------------------
if __name__ == '__main__':
try:
if debug:
dataLogger()
else:
Main()
finally:
print("")
print("+++++++++++++++++++++++++++++++++++")
print("%s - Exiting Program" % progName)
print("+++++++++++++++++++++++++++++++++++")
print("")
| mit | -3,198,899,546,623,500,000 | 50.736698 | 188 | 0.584764 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/funnyordie.py | 1 | 4662 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
unified_timestamp,
)
class FunnyOrDieIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
_TESTS = [{
'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
'info_dict': {
'id': '0732f586d7',
'ext': 'mp4',
'title': 'Heart-Shaped Box: Literal Video Version',
'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',
'thumbnail': r're:^http:.*\.jpg$',
'uploader': 'DASjr',
'timestamp': 1317904928,
'upload_date': '20111006',
'duration': 318.3,
},
}, {
'url': 'http://www.funnyordie.com/embed/e402820827',
'info_dict': {
'id': 'e402820827',
'ext': 'mp4',
'title': 'Please Use This Song (Jon Lajoie)',
'description': 'Please use this to sell something. www.jonlajoie.com',
'thumbnail': r're:^http:.*\.jpg$',
'timestamp': 1398988800,
'upload_date': '20140502',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
if not links:
raise ExtractorError('No media links available for %s' % video_id)
links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)
m3u8_url = self._search_regex(
r'<source[^>]+src=(["\'])(?P<url>.+?/master\.m3u8[^"\']*)\1',
webpage, 'm3u8 url', group='url')
formats = []
m3u8_formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
source_formats = list(filter(
lambda f: f.get('vcodec') != 'none', m3u8_formats))
bitrates = [int(bitrate) for bitrate in re.findall(r'[,/]v(\d+)(?=[,/])', m3u8_url)]
bitrates.sort()
if source_formats:
self._sort_formats(source_formats)
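		# Pair each advertised bitrate with a source format; fall back to empty
		# dicts so plain HTTP formats are still built when no m3u8 formats exist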
for bitrate, f in zip(bitrates, source_formats or [{}] * len(bitrates)):
for path, ext in links:
ff = f.copy()
if ff:
if ext != 'mp4':
ff = dict(
[(k, v) for k, v in ff.items()
if k in ('height', 'width', 'format_id')])
ff.update({
'format_id': ff['format_id'].replace('hls', ext),
'ext': ext,
'protocol': 'http',
})
else:
ff.update({
'format_id': '%s-%d' % (ext, bitrate),
'vbr': bitrate,
})
ff['url'] = self._proto_relative_url(
'%s%d.%s' % (path, bitrate, ext))
formats.append(ff)
self._check_formats(formats, video_id)
formats.extend(m3u8_formats)
self._sort_formats(
formats, field_preference=('height', 'width', 'tbr', 'format_id'))
subtitles = {}
for src, src_lang in re.findall(r'<track kind="captions" src="([^"]+)" srclang="([^"]+)"', webpage):
subtitles[src_lang] = [{
'ext': src.split('/')[-1],
'url': 'http://www.funnyordie.com%s' % src,
}]
timestamp = unified_timestamp(self._html_search_meta(
'uploadDate', webpage, 'timestamp', default=None))
uploader = self._html_search_regex(
r'<h\d[^>]+\bclass=["\']channel-preview-name[^>]+>(.+?)</h',
webpage, 'uploader', default=None)
title, description, thumbnail, duration = [None] * 4
medium = self._parse_json(
self._search_regex(
r'jsonMedium\s*=\s*({.+?});', webpage, 'JSON medium',
default='{}'),
video_id, fatal=False)
if medium:
title = medium.get('title')
duration = float_or_none(medium.get('duration'))
if not timestamp:
timestamp = unified_timestamp(medium.get('publishDate'))
post = self._parse_json(
self._search_regex(
r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details',
default='{}'),
video_id, fatal=False)
if post:
if not title:
title = post.get('name')
description = post.get('description')
thumbnail = post.get('picture')
if not title:
title = self._og_search_title(webpage)
if not description:
description = self._og_search_description(webpage)
if not duration:
duration = int_or_none(self._html_search_meta(
('video:duration', 'duration'), webpage, 'duration', default=False))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 | 4,937,359,392,362,063,000 | 27.777778 | 112 | 0.607465 | false |
MuckRock/muckrock | muckrock/jurisdiction/filters.py | 1 | 1513 | """
Filters for jurisdiction Views
"""
# Third Party
import django_filters
from dal import forward
# MuckRock
from muckrock.core import autocomplete
from muckrock.jurisdiction.models import Exemption, Jurisdiction
LEVELS = (("", "All"), ("f", "Federal"), ("s", "State"), ("l", "Local"))
class JurisdictionFilterSet(django_filters.FilterSet):
"""Allows jurisdiction to be filtered by level of government and state."""
level = django_filters.ChoiceFilter(choices=LEVELS)
parent = django_filters.ModelChoiceFilter(
label="State",
queryset=Jurisdiction.objects.filter(level="s", hidden=False),
widget=autocomplete.ModelSelect2(
url="jurisdiction-autocomplete",
attrs={"data-placeholder": "Search for state"},
forward=(forward.Const(["s"], "levels"),),
),
)
class Meta:
model = Jurisdiction
fields = ["level", "parent"]
class ExemptionFilterSet(django_filters.FilterSet):
"""Allows exemptions to be filtered by jurisdiction"""
jurisdiction = django_filters.ModelChoiceFilter(
label="Jurisdiction",
queryset=Jurisdiction.objects.filter(level__in=("s", "f"), hidden=False),
widget=autocomplete.ModelSelect2(
url="jurisdiction-autocomplete",
attrs={"data-placeholder": "Search for jurisdiction"},
forward=(forward.Const(["s", "f"], "levels"),),
),
)
class Meta:
model = Exemption
fields = ["jurisdiction"]
| agpl-3.0 | 2,079,680,591,371,576,800 | 29.26 | 81 | 0.643093 | false |
noplay/gns3-gui | gns3/modules/dynamips/ui/atm_bridge_configuration_page_ui.py | 1 | 10574 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/grossmj/workspace/git/gns3-gui/gns3/modules/dynamips/ui/atm_bridge_configuration_page.ui'
#
# Created: Sun Mar 16 11:16:57 2014
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_atmBridgeConfigPageWidget(object):
def setupUi(self, atmBridgeConfigPageWidget):
atmBridgeConfigPageWidget.setObjectName(_fromUtf8("atmBridgeConfigPageWidget"))
atmBridgeConfigPageWidget.resize(432, 358)
self.gridLayout_2 = QtGui.QGridLayout(atmBridgeConfigPageWidget)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.uiMappingGroupBox = QtGui.QGroupBox(atmBridgeConfigPageWidget)
self.uiMappingGroupBox.setObjectName(_fromUtf8("uiMappingGroupBox"))
self.vboxlayout = QtGui.QVBoxLayout(self.uiMappingGroupBox)
self.vboxlayout.setObjectName(_fromUtf8("vboxlayout"))
self.uiMappingTreeWidget = QtGui.QTreeWidget(self.uiMappingGroupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiMappingTreeWidget.sizePolicy().hasHeightForWidth())
self.uiMappingTreeWidget.setSizePolicy(sizePolicy)
self.uiMappingTreeWidget.setRootIsDecorated(False)
self.uiMappingTreeWidget.setObjectName(_fromUtf8("uiMappingTreeWidget"))
self.vboxlayout.addWidget(self.uiMappingTreeWidget)
self.gridLayout_2.addWidget(self.uiMappingGroupBox, 0, 2, 3, 1)
self.uiEthernetGroupBox = QtGui.QGroupBox(atmBridgeConfigPageWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiEthernetGroupBox.sizePolicy().hasHeightForWidth())
self.uiEthernetGroupBox.setSizePolicy(sizePolicy)
self.uiEthernetGroupBox.setObjectName(_fromUtf8("uiEthernetGroupBox"))
self.gridlayout = QtGui.QGridLayout(self.uiEthernetGroupBox)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.uiEthernetPortLabel = QtGui.QLabel(self.uiEthernetGroupBox)
self.uiEthernetPortLabel.setObjectName(_fromUtf8("uiEthernetPortLabel"))
self.gridlayout.addWidget(self.uiEthernetPortLabel, 0, 0, 1, 1)
self.uiEthernetPortSpinBox = QtGui.QSpinBox(self.uiEthernetGroupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiEthernetPortSpinBox.sizePolicy().hasHeightForWidth())
self.uiEthernetPortSpinBox.setSizePolicy(sizePolicy)
self.uiEthernetPortSpinBox.setMinimum(0)
self.uiEthernetPortSpinBox.setMaximum(65535)
self.uiEthernetPortSpinBox.setProperty("value", 1)
self.uiEthernetPortSpinBox.setObjectName(_fromUtf8("uiEthernetPortSpinBox"))
self.gridlayout.addWidget(self.uiEthernetPortSpinBox, 0, 1, 1, 1)
self.gridLayout_2.addWidget(self.uiEthernetGroupBox, 1, 0, 1, 2)
self.uiATMGroupBox = QtGui.QGroupBox(atmBridgeConfigPageWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiATMGroupBox.sizePolicy().hasHeightForWidth())
self.uiATMGroupBox.setSizePolicy(sizePolicy)
self.uiATMGroupBox.setObjectName(_fromUtf8("uiATMGroupBox"))
self.gridlayout1 = QtGui.QGridLayout(self.uiATMGroupBox)
self.gridlayout1.setObjectName(_fromUtf8("gridlayout1"))
self.uiATMPortLabel = QtGui.QLabel(self.uiATMGroupBox)
self.uiATMPortLabel.setObjectName(_fromUtf8("uiATMPortLabel"))
self.gridlayout1.addWidget(self.uiATMPortLabel, 0, 0, 1, 1)
self.uiATMPortSpinBox = QtGui.QSpinBox(self.uiATMGroupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiATMPortSpinBox.sizePolicy().hasHeightForWidth())
self.uiATMPortSpinBox.setSizePolicy(sizePolicy)
self.uiATMPortSpinBox.setMinimum(0)
self.uiATMPortSpinBox.setMaximum(65535)
self.uiATMPortSpinBox.setProperty("value", 10)
self.uiATMPortSpinBox.setObjectName(_fromUtf8("uiATMPortSpinBox"))
self.gridlayout1.addWidget(self.uiATMPortSpinBox, 0, 1, 1, 1)
self.uiATMVPILabel = QtGui.QLabel(self.uiATMGroupBox)
self.uiATMVPILabel.setObjectName(_fromUtf8("uiATMVPILabel"))
self.gridlayout1.addWidget(self.uiATMVPILabel, 1, 0, 1, 1)
self.uiATMVPISpinBox = QtGui.QSpinBox(self.uiATMGroupBox)
self.uiATMVPISpinBox.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiATMVPISpinBox.sizePolicy().hasHeightForWidth())
self.uiATMVPISpinBox.setSizePolicy(sizePolicy)
self.uiATMVPISpinBox.setMinimum(0)
self.uiATMVPISpinBox.setMaximum(65535)
self.uiATMVPISpinBox.setSingleStep(1)
self.uiATMVPISpinBox.setProperty("value", 0)
self.uiATMVPISpinBox.setObjectName(_fromUtf8("uiATMVPISpinBox"))
self.gridlayout1.addWidget(self.uiATMVPISpinBox, 1, 1, 1, 1)
self.uiATMVCILabel = QtGui.QLabel(self.uiATMGroupBox)
self.uiATMVCILabel.setObjectName(_fromUtf8("uiATMVCILabel"))
self.gridlayout1.addWidget(self.uiATMVCILabel, 2, 0, 1, 1)
self.uiATMVCISpinBox = QtGui.QSpinBox(self.uiATMGroupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.uiATMVCISpinBox.sizePolicy().hasHeightForWidth())
self.uiATMVCISpinBox.setSizePolicy(sizePolicy)
self.uiATMVCISpinBox.setMaximum(65535)
self.uiATMVCISpinBox.setProperty("value", 100)
self.uiATMVCISpinBox.setObjectName(_fromUtf8("uiATMVCISpinBox"))
self.gridlayout1.addWidget(self.uiATMVCISpinBox, 2, 1, 1, 1)
self.gridLayout_2.addWidget(self.uiATMGroupBox, 2, 0, 1, 2)
self.uiAddPushButton = QtGui.QPushButton(atmBridgeConfigPageWidget)
self.uiAddPushButton.setObjectName(_fromUtf8("uiAddPushButton"))
self.gridLayout_2.addWidget(self.uiAddPushButton, 3, 0, 1, 1)
self.uiDeletePushButton = QtGui.QPushButton(atmBridgeConfigPageWidget)
self.uiDeletePushButton.setEnabled(False)
self.uiDeletePushButton.setObjectName(_fromUtf8("uiDeletePushButton"))
self.gridLayout_2.addWidget(self.uiDeletePushButton, 3, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(371, 121, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 4, 0, 1, 3)
self.uiGeneralGroupBox = QtGui.QGroupBox(atmBridgeConfigPageWidget)
self.uiGeneralGroupBox.setObjectName(_fromUtf8("uiGeneralGroupBox"))
self.gridLayout = QtGui.QGridLayout(self.uiGeneralGroupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.uiNameLabel = QtGui.QLabel(self.uiGeneralGroupBox)
self.uiNameLabel.setObjectName(_fromUtf8("uiNameLabel"))
self.gridLayout.addWidget(self.uiNameLabel, 0, 0, 1, 1)
self.uiNameLineEdit = QtGui.QLineEdit(self.uiGeneralGroupBox)
self.uiNameLineEdit.setObjectName(_fromUtf8("uiNameLineEdit"))
self.gridLayout.addWidget(self.uiNameLineEdit, 0, 1, 1, 1)
self.gridLayout_2.addWidget(self.uiGeneralGroupBox, 0, 0, 1, 2)
self.retranslateUi(atmBridgeConfigPageWidget)
QtCore.QMetaObject.connectSlotsByName(atmBridgeConfigPageWidget)
atmBridgeConfigPageWidget.setTabOrder(self.uiEthernetPortSpinBox, self.uiATMPortSpinBox)
atmBridgeConfigPageWidget.setTabOrder(self.uiATMPortSpinBox, self.uiATMVPISpinBox)
atmBridgeConfigPageWidget.setTabOrder(self.uiATMVPISpinBox, self.uiATMVCISpinBox)
atmBridgeConfigPageWidget.setTabOrder(self.uiATMVCISpinBox, self.uiAddPushButton)
atmBridgeConfigPageWidget.setTabOrder(self.uiAddPushButton, self.uiDeletePushButton)
def retranslateUi(self, atmBridgeConfigPageWidget):
atmBridgeConfigPageWidget.setWindowTitle(_translate("atmBridgeConfigPageWidget", "ATM Bridge", None))
self.uiMappingGroupBox.setTitle(_translate("atmBridgeConfigPageWidget", "Mapping", None))
self.uiMappingTreeWidget.headerItem().setText(0, _translate("atmBridgeConfigPageWidget", "Ethernet Port", None))
self.uiMappingTreeWidget.headerItem().setText(1, _translate("atmBridgeConfigPageWidget", "Port:VPI:VCI", None))
self.uiEthernetGroupBox.setTitle(_translate("atmBridgeConfigPageWidget", "Ethernet side", None))
self.uiEthernetPortLabel.setText(_translate("atmBridgeConfigPageWidget", "Port:", None))
self.uiATMGroupBox.setTitle(_translate("atmBridgeConfigPageWidget", "ATM side", None))
self.uiATMPortLabel.setText(_translate("atmBridgeConfigPageWidget", "Port:", None))
self.uiATMVPILabel.setText(_translate("atmBridgeConfigPageWidget", "VPI:", None))
self.uiATMVCILabel.setText(_translate("atmBridgeConfigPageWidget", "VCI:", None))
self.uiAddPushButton.setText(_translate("atmBridgeConfigPageWidget", "&Add", None))
self.uiDeletePushButton.setText(_translate("atmBridgeConfigPageWidget", "&Delete", None))
self.uiGeneralGroupBox.setTitle(_translate("atmBridgeConfigPageWidget", "General", None))
self.uiNameLabel.setText(_translate("atmBridgeConfigPageWidget", "Name:", None))
| gpl-3.0 | 2,089,908,112,776,014,600 | 61.568047 | 149 | 0.746832 | false |
WebCampZg/conference-web | ui/templatetags/webcamp.py | 1 | 3417 | import json
import math
import re
from urllib.parse import urlparse, parse_qs
from django import template
from django.utils.safestring import mark_safe
from ui.utils import get_icon_svg
register = template.Library()
@register.filter
def labelize(value):
return mark_safe(re.sub(r"\[(\w+)\]", r'<span class="yellow label">\g<1></span>', str(value)))
@register.filter
def skill_level(skill_level):
"""Given an AudienceSkillLevel object, renders a skill level label"""
icon_html = icon("solid/square")
level = skill_level.name.lower()
class_name = "skill-level {}".format(level)
return mark_safe(f'<span class="{class_name}">{icon_html} {level}</span>')
@register.filter
def embed_youtube(code):
return mark_safe("""
<div class="video-embed">
<div class="video-embed-inner">
<iframe width="640" height="360" src="https://www.youtube.com/embed/{}"
frameborder="0" allowfullscreen></iframe>
</div>
</div>""".format(code))
def embed_vimeo(code):
return mark_safe("""
<div class="video-embed">
<div class="video-embed-inner">
<iframe width="640" height="360" frameborder="0" src="https://player.vimeo.com/video/{}"
webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>
</div>
</div>""".format(code))
@register.filter
def embed_video(url):
try:
parsed_url = urlparse(url)
except:
return ""
netloc = parsed_url.netloc
path = parsed_url.path
query = parse_qs(parsed_url.query)
if netloc in ['youtube.com', 'www.youtube.com'] and path == '/watch' and 'v' in query and query['v']:
return embed_youtube(query['v'][0])
if netloc in ['youtube.com', 'www.youtube.com'] and path.startswith('/embed/'):
matches = re.match(r'^/embed/([^/]+)$', path)
if matches:
return embed_youtube(matches.group(1))
if netloc == 'youtu.be' and path.startswith('/') and '/' not in path[1:]:
return embed_youtube(path[1:])
if netloc == 'vimeo.com' and path.startswith('/') and re.match(r'^\d+$', path[1:]):
return embed_vimeo(path[1:])
return ""
@register.filter
def smaller_headings(html, level=5):
"""Reduce headings larger than h<level> to h<level>"""
tags = ["h{}".format(x) for x in range(1, level)]
search = '<(/)?({})>'.format("|".join(tags))
replace = '<\\1h{}>'.format(level)
return mark_safe(re.sub(search, replace, html))
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def batch(iterable, n):
"""Splits an iterable into batches containing upto n items."""
length = len(iterable)
for i in range(0, length, n):
yield iterable[i:i + n]
@register.filter
def split(iterable, n):
"""Splits an iterable into n chunks of equal size."""
length = len(iterable)
size = math.ceil(length / n)
return batch(iterable, size)
@register.filter
def jsonify(data):
return mark_safe(json.dumps(data))
@register.simple_tag
def icon(name, cls="", title="", scale=1):
svg = get_icon_svg(name)
title = f' title="{title}"' if title else ""
style = f' style="font-size: {scale:.2f}rem"' if scale != 1 else ""
html = f'<span class="icon {cls}"{style}{title}>{svg}</span>'
return mark_safe(html)
| bsd-3-clause | -6,631,614,206,404,134,000 | 27.475 | 105 | 0.614574 | false |
Mandalo/mandalo | mandalo/mandalo/settings.py | 1 | 3176 | """
Django settings for mandalo project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '39zcfz*f&ao)lk50ei0mk1a&wi9jk)d-z&7h(e_vfumy$b+11r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1", "localhost", "akgunter.ddns.net"]
# Application definition
INSTALLED_APPS = [
'submit.apps.SubmitConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mandalo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mandalo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | 6,032,108,704,326,601,000 | 25.247934 | 91 | 0.687343 | false |
catapult-project/catapult | third_party/typ/typ/runner.py | 3 | 46267 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import importlib
import inspect
import json
import os
import pdb
import sys
import unittest
import traceback
from collections import OrderedDict
# This ensures that absolute imports of typ modules will work when
# running typ/runner.py as a script even if typ is not installed.
# We need this entry in addition to the one in __main__.py to ensure
# that typ/runner.py works when invoked via subprocess on windows in
# _spawn_main().
path_to_file = os.path.realpath(__file__)
if path_to_file.endswith('.pyc'): # pragma: no cover
path_to_file = path_to_file[:-1]
dir_above_typ = os.path.dirname(os.path.dirname(path_to_file))
dir_cov = os.path.join(os.path.dirname(dir_above_typ), 'coverage')
for path in (dir_above_typ, dir_cov):
if path not in sys.path: # pragma: no cover
sys.path.append(path)
from typ import artifacts
from typ import json_results
from typ import result_sink
from typ.arg_parser import ArgumentParser
from typ.expectations_parser import TestExpectations, Expectation
from typ.host import Host
from typ.pool import make_pool
from typ.stats import Stats
from typ.printer import Printer
from typ.test_case import TestCase as TypTestCase
from typ.version import VERSION
Result = json_results.Result
ResultSet = json_results.ResultSet
ResultType = json_results.ResultType
def main(argv=None, host=None, win_multiprocessing=None, **defaults):
host = host or Host()
runner = Runner(host=host)
if win_multiprocessing is not None:
runner.win_multiprocessing = win_multiprocessing
return runner.main(argv, **defaults)
class TestInput(object):
def __init__(self, name, msg='', timeout=None, expected=None, iteration=0):
self.name = name
self.msg = msg
self.timeout = timeout
self.expected = expected
# Iteration makes more sense as part of the test run, not the test
# input, but since the pool used to run tests persists across
# iterations, we need to store the iteration number in something that
# gets updated each test run, such as TestInput.
self.iteration = iteration
class TestSet(object):
def __init__(self, test_name_prefix='', iteration=0):
self.test_name_prefix = test_name_prefix
self.parallel_tests = []
self.isolated_tests = []
self.tests_to_skip = []
self.iteration = iteration
def copy(self):
test_set = TestSet(self.test_name_prefix)
test_set.tests_to_skip = self.tests_to_skip[:]
test_set.isolated_tests = self.isolated_tests[:]
test_set.parallel_tests = self.parallel_tests[:]
return test_set
def _get_test_name(self, test_case):
_validate_test_starts_with_prefix(
self.test_name_prefix, test_case.id())
return test_case.id()[len(self.test_name_prefix):]
def add_test_to_skip(self, test_case, reason=''):
self.tests_to_skip.append(
TestInput(self._get_test_name(
test_case), reason, iteration=self.iteration))
def add_test_to_run_isolated(self, test_case):
self.isolated_tests.append(
TestInput(self._get_test_name(test_case), iteration=self.iteration))
def add_test_to_run_in_parallel(self, test_case):
self.parallel_tests.append(
TestInput(self._get_test_name(test_case), iteration=self.iteration))
def _validate_test_starts_with_prefix(prefix, test_name):
assert test_name.startswith(prefix), (
'The test prefix passed at the command line does not match the prefix '
'of all the tests generated')
class WinMultiprocessing(object):
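    """Strategies for multiprocessing on Windows, where child processes
    re-import __main__: 'ignore' the issue (non-Windows only), require an
    'importable' __main__, or 'spawn' the runner as a subprocess."""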
ignore = 'ignore'
importable = 'importable'
spawn = 'spawn'
values = [ignore, importable, spawn]
class _AddTestsError(Exception):
pass
class Runner(object):
def __init__(self, host=None):
self.args = None
self.classifier = None
self.cov = None
self.context = None
self.coverage_source = None
self.host = host or Host()
self.loader = unittest.loader.TestLoader()
self.printer = None
self.setup_fn = None
self.stats = None
self.teardown_fn = None
self.top_level_dir = None
self.top_level_dirs = []
self.win_multiprocessing = WinMultiprocessing.spawn
self.final_responses = []
self.has_expectations = False
self.expectations = None
self.metadata = {}
self.path_delimiter = json_results.DEFAULT_TEST_SEPARATOR
self.artifact_output_dir = None
# initialize self.args to the defaults.
parser = ArgumentParser(self.host)
self.parse_args(parser, [])
def main(self, argv=None, **defaults):
parser = ArgumentParser(self.host)
self.parse_args(parser, argv, **defaults)
if parser.exit_status is not None:
return parser.exit_status
try:
ret, _, _ = self.run()
return ret
except KeyboardInterrupt:
self.print_("interrupted, exiting", stream=self.host.stderr)
return 130
def parse_args(self, parser, argv, **defaults):
for attrname in defaults:
if not hasattr(self.args, attrname):
parser.error("Unknown default argument name '%s'" % attrname,
bailout=False)
return
parser.set_defaults(**defaults)
self.args = parser.parse_args(args=argv)
if parser.exit_status is not None:
return
def print_(self, msg='', end='\n', stream=None):
self.host.print_(msg, end, stream=stream)
def run(self, test_set=None):
ret = 0
h = self.host
if self.args.version:
self.print_(VERSION)
return ret, None, None
if self.args.write_full_results_to:
self.artifact_output_dir = os.path.join(
os.path.dirname(
self.args.write_full_results_to), 'artifacts')
should_spawn = self._check_win_multiprocessing()
if should_spawn:
return self._spawn(test_set)
ret = self._set_up_runner()
if ret:
return ret, None, None
find_start = h.time()
if self.cov: # pragma: no cover
self.cov.erase()
self.cov.start()
full_results = None
result_set = ResultSet()
if not test_set:
ret, test_set = self.find_tests(self.args)
find_end = h.time()
if not ret:
self.stats.total = (len(test_set.parallel_tests) +
len(test_set.isolated_tests) +
len(test_set.tests_to_skip)) * self.args.repeat
all_tests = [ti.name for ti in
_sort_inputs(test_set.parallel_tests +
test_set.isolated_tests +
test_set.tests_to_skip)]
self.metadata = {tup[0]:tup[1]
for tup in
[md.split('=', 1) for md in self.args.metadata]}
if self.args.test_name_prefix:
self.metadata['test_name_prefix'] = self.args.test_name_prefix
if self.args.tags:
self.metadata['tags'] = self.args.tags
if self.args.expectations_files:
self.metadata['expectations_files'] = [
os.path.basename(exp)
if not self.args.repository_absolute_path
else ('//' + os.path.relpath(
exp, self.args.repository_absolute_path).replace(
os.path.sep, '/'))
for exp in self.args.expectations_files]
if self.args.list_only:
self.print_('\n'.join(all_tests))
else:
for _ in range(self.args.repeat):
                current_ret, full_results = self._run_tests(
result_set, test_set.copy(), all_tests)
ret = ret or current_ret
if self.cov: # pragma: no cover
self.cov.stop()
self.cov.save()
test_end = h.time()
trace = self._trace_from_results(result_set)
if full_results:
self._summarize(full_results)
self._write(self.args.write_full_results_to, full_results)
upload_ret = self._upload(full_results)
if not ret:
ret = upload_ret
reporting_end = h.time()
self._add_trace_event(trace, 'run', find_start, reporting_end)
self._add_trace_event(trace, 'discovery', find_start, find_end)
self._add_trace_event(trace, 'testing', find_end, test_end)
self._add_trace_event(trace, 'reporting', test_end, reporting_end)
self._write(self.args.write_trace_to, trace)
self.report_coverage()
else:
upload_ret = 0
return ret, full_results, trace
def _check_win_multiprocessing(self):
wmp = self.win_multiprocessing
ignore, importable, spawn = WinMultiprocessing.values
if wmp not in WinMultiprocessing.values:
raise ValueError('illegal value %s for win_multiprocessing' %
wmp)
h = self.host
if wmp == ignore and h.platform == 'win32': # pragma: win32
raise ValueError('Cannot use WinMultiprocessing.ignore for '
'win_multiprocessing when actually running '
'on Windows.')
if wmp == ignore or self.args.jobs == 1:
return False
if wmp == importable:
if self._main_is_importable():
return False
raise ValueError('The __main__ module (%s) ' # pragma: no cover
'may not be importable' %
sys.modules['__main__'].__file__)
assert wmp == spawn
return True
def _main_is_importable(self): # pragma: untested
path = sys.modules['__main__'].__file__
if not path:
return False
if path.endswith('.pyc'):
path = path[:-1]
if not path.endswith('.py'):
return False
if path.endswith('__main__.py'):
# main modules are not directly importable.
return False
path = self.host.realpath(path)
for d in sys.path:
if path.startswith(self.host.realpath(d)):
return True
return False # pragma: no cover
def _spawn(self, test_set):
# TODO: Handle picklable hooks, rather than requiring them to be None.
assert self.classifier is None
assert self.context is None
assert self.setup_fn is None
assert self.teardown_fn is None
assert test_set is None
h = self.host
if self.args.write_trace_to: # pragma: untested
should_delete_trace = False
else:
should_delete_trace = True
fp = h.mktempfile(delete=False)
fp.close()
self.args.write_trace_to = fp.name
if self.args.write_full_results_to: # pragma: untested
should_delete_results = False
else:
should_delete_results = True
fp = h.mktempfile(delete=False)
fp.close()
self.args.write_full_results_to = fp.name
argv = ArgumentParser(h).argv_from_args(self.args)
ret = h.call_inline([h.python_interpreter, path_to_file] + argv)
trace = self._read_and_delete(self.args.write_trace_to,
should_delete_trace)
full_results = self._read_and_delete(self.args.write_full_results_to,
should_delete_results)
return ret, full_results, trace
def _set_up_runner(self):
h = self.host
args = self.args
self.stats = Stats(args.status_format, h.time, args.jobs)
self.printer = Printer(
self.print_, args.overwrite, args.terminal_width)
if self.args.top_level_dirs and self.args.top_level_dir:
self.print_(
'Cannot specify both --top-level-dir and --top-level-dirs',
stream=h.stderr)
return 1
self.top_level_dirs = args.top_level_dirs
if not self.top_level_dirs and args.top_level_dir:
self.top_level_dirs = [args.top_level_dir]
if not self.top_level_dirs:
for test in [t for t in args.tests if h.exists(t)]:
if h.isdir(test):
top_dir = test
else:
top_dir = h.dirname(test)
while h.exists(top_dir, '__init__.py'):
top_dir = h.dirname(top_dir)
top_dir = h.realpath(top_dir)
                if top_dir not in self.top_level_dirs:
self.top_level_dirs.append(top_dir)
if not self.top_level_dirs:
top_dir = h.getcwd()
while h.exists(top_dir, '__init__.py'):
top_dir = h.dirname(top_dir)
top_dir = h.realpath(top_dir)
self.top_level_dirs.append(top_dir)
if not self.top_level_dir and self.top_level_dirs:
self.top_level_dir = self.top_level_dirs[0]
for path in self.top_level_dirs:
h.add_to_path(path)
for path in args.path:
h.add_to_path(path)
if args.coverage: # pragma: no cover
try:
import coverage
except ImportError:
self.print_('Error: coverage is not installed.')
return 1
source = self.args.coverage_source
if not source:
source = self.top_level_dirs + self.args.path
self.coverage_source = source
self.cov = coverage.coverage(source=self.coverage_source,
data_suffix=True)
self.cov.erase()
if args.expectations_files:
ret = self.parse_expectations()
if ret:
return ret
elif args.tags:
self.print_('Error: tags require expectations files.')
return 1
return 0
def parse_expectations(self):
args = self.args
if len(args.expectations_files) != 1:
# TODO(crbug.com/835690): Fix this.
self.print_(
'Only a single expectation file is currently supported',
stream=self.host.stderr)
return 1
contents = self.host.read_text_file(args.expectations_files[0])
expectations = TestExpectations(set(args.tags), args.ignored_tags)
err, msg = expectations.parse_tagged_list(
contents, args.expectations_files[0])
if err:
self.print_(msg, stream=self.host.stderr)
return err
self.has_expectations = True
self.expectations = expectations
def find_tests(self, args):
test_set = TestSet(self.args.test_name_prefix)
orig_skip = unittest.skip
orig_skip_if = unittest.skipIf
if args.all:
unittest.skip = lambda reason: lambda x: x
unittest.skipIf = lambda condition, reason: lambda x: x
try:
names = self._name_list_from_args(args)
classifier = self.classifier or self.default_classifier
for name in names:
try:
self._add_tests_to_set(test_set, args.suffixes,
self.top_level_dirs, classifier,
name)
except (AttributeError, ImportError, SyntaxError) as e:
ex_str = traceback.format_exc()
self.print_('Failed to load "%s" in find_tests: %s' %
(name, e))
self.print_(' %s' %
'\n '.join(ex_str.splitlines()))
self.print_(ex_str)
return 1, None
except _AddTestsError as e:
self.print_(str(e))
return 1, None
# TODO: Add support for discovering setupProcess/teardownProcess?
shard_index = args.shard_index
total_shards = args.total_shards
assert total_shards >= 1
assert shard_index >= 0 and shard_index < total_shards, (
'shard_index (%d) must be >= 0 and < total_shards (%d)' %
(shard_index, total_shards))
test_set.parallel_tests = _sort_inputs(
test_set.parallel_tests)[shard_index::total_shards]
test_set.isolated_tests = _sort_inputs(
test_set.isolated_tests)[shard_index::total_shards]
test_set.tests_to_skip = _sort_inputs(
test_set.tests_to_skip)[shard_index::total_shards]
return 0, test_set
finally:
unittest.skip = orig_skip
unittest.skipIf = orig_skip_if
def _name_list_from_args(self, args):
if args.tests:
names = args.tests
elif args.file_list:
if args.file_list == '-':
s = self.host.stdin.read()
else:
s = self.host.read_text_file(args.file_list)
names = [line.strip() for line in s.splitlines()]
else:
names = self.top_level_dirs
return names
def _add_tests_to_set(self, test_set, suffixes, top_level_dirs, classifier,
name):
h = self.host
loader = self.loader
add_tests = _test_adder(test_set, classifier)
found = set()
for d in top_level_dirs:
if h.isfile(name):
rpath = h.relpath(name, d)
if rpath.startswith('..'):
continue
if rpath.endswith('.py'):
rpath = rpath[:-3]
module = rpath.replace(h.sep, '.')
if module not in found:
found.add(module)
add_tests(loader.loadTestsFromName(module))
elif h.isdir(name):
rpath = h.relpath(name, d)
if rpath.startswith('..'):
continue
for suffix in suffixes:
                    if name not in found:
found.add(name + '/' + suffix)
add_tests(loader.discover(name, suffix, d))
else:
possible_dir = name.replace('.', h.sep)
if h.isdir(d, possible_dir):
for suffix in suffixes:
path = h.join(d, possible_dir)
                        if path not in found:
found.add(path + '/' + suffix)
suite = loader.discover(path, suffix, d)
add_tests(suite)
                elif name not in found:
found.add(name)
add_tests(loader.loadTestsFromName(
self.args.test_name_prefix + name))
# pylint: disable=no-member
if hasattr(loader, 'errors') and loader.errors: # pragma: python3
# In Python3's version of unittest, loader failures get converted
# into failed test cases, rather than raising exceptions. However,
# the errors also get recorded so you can err out immediately.
raise ImportError(loader.errors)
def _run_tests(self, result_set, test_set, all_tests):
h = self.host
self.last_runs_retry_on_failure_tests = set()
def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line argument
            # is passed, then the set of test failures with the RetryOnFailure
            # expectation from the last run of tests is returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run. Otherwise, all regressions from the last run
            # are returned.
if self.args.retry_only_retry_on_failure_tests:
ret = self.last_runs_retry_on_failure_tests.copy()
self.last_runs_retry_on_failure_tests = set()
return ret
else:
return json_results.regressions(results)
if len(test_set.parallel_tests):
jobs = min(
len(test_set.parallel_tests), self.args.jobs)
else:
jobs = 1
child = _Child(self)
pool = make_pool(h, jobs, _run_one_test, child,
_setup_process, _teardown_process)
self._run_one_set(self.stats, result_set, test_set, jobs, pool)
tests_to_retry = sorted(get_tests_to_retry(result_set))
retry_limit = self.args.retry_limit
try:
# Start at 1 since we already did iteration 0 above.
for iteration in range(1, self.args.retry_limit + 1):
if not tests_to_retry:
break
if retry_limit == self.args.retry_limit:
self.flush()
self.args.overwrite = False
self.printer.should_overwrite = False
self.args.verbose = min(self.args.verbose, 1)
self.print_('')
self.print_('Retrying failed tests (attempt #%d of %d)...' %
(iteration, self.args.retry_limit))
self.print_('')
stats = Stats(self.args.status_format, h.time, 1)
stats.total = len(tests_to_retry)
test_set = TestSet(self.args.test_name_prefix)
test_set.isolated_tests = [
TestInput(name,
iteration=iteration) for name in tests_to_retry]
tests_to_retry = test_set
retry_set = ResultSet()
self._run_one_set(stats, retry_set, tests_to_retry, 1, pool)
result_set.results.extend(retry_set.results)
tests_to_retry = get_tests_to_retry(retry_set)
retry_limit -= 1
pool.close()
finally:
self.final_responses.extend(pool.join())
if retry_limit != self.args.retry_limit:
self.print_('')
full_results = json_results.make_full_results(self.metadata,
int(h.time()),
all_tests, result_set,
self.path_delimiter)
retcode = (json_results.exit_code_from_full_results(full_results)
| result_sink.result_sink_retcode_from_result_set(result_set))
return (retcode, full_results)
def _run_one_set(self, stats, result_set, test_set, jobs, pool):
self._skip_tests(stats, result_set, test_set.tests_to_skip)
self._run_list(stats, result_set,
test_set.parallel_tests, jobs, pool)
self._run_list(stats, result_set,
test_set.isolated_tests, 1, pool)
def _skip_tests(self, stats, result_set, tests_to_skip):
for test_input in tests_to_skip:
last = self.host.time()
stats.started += 1
self._print_test_started(stats, test_input)
now = self.host.time()
result = Result(test_input.name, actual=ResultType.Skip,
started=last, took=(now - last), worker=0,
expected=[ResultType.Skip],
out=test_input.msg)
result_set.add(result)
stats.finished += 1
self._print_test_finished(stats, result)
def _run_list(self, stats, result_set, test_inputs, jobs, pool):
running_jobs = set()
while test_inputs or running_jobs:
while test_inputs and (len(running_jobs) < jobs):
test_input = test_inputs.pop(0)
stats.started += 1
pool.send(test_input)
running_jobs.add(test_input.name)
self._print_test_started(stats, test_input)
result, should_retry_on_failure = pool.get()
if result.is_regression:
stats.failed += 1
if (self.args.typ_max_failures is not None
and stats.failed >= self.args.typ_max_failures):
print('\nAborting, waiting for processes to close')
pool.close()
pool.join()
raise RuntimeError(
'Encountered %d failures with max of %d set, aborting.' % (
stats.failed, self.args.typ_max_failures))
if (self.args.retry_only_retry_on_failure_tests and
result.actual == ResultType.Failure and
should_retry_on_failure):
self.last_runs_retry_on_failure_tests.add(result.name)
running_jobs.remove(result.name)
result_set.add(result)
stats.finished += 1
self._print_test_finished(stats, result)
def _print_test_started(self, stats, test_input):
if self.args.quiet:
# Print nothing when --quiet was passed.
return
# If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test is picked up to run, because
# that is handled by the child process (where we can't easily
# print things). Otherwise, only print when the test is started
# if we know we can overwrite the line, so that we do not
# get multiple lines of output as noise (in -vvv, we actually want
# the noise).
test_start_msg = stats.format() + test_input.name
if self.args.verbose > 2:
self.update(test_start_msg + ' queued', elide=False)
if self.args.overwrite:
self.update(test_start_msg, elide=(not self.args.verbose))
def _print_test_finished(self, stats, result):
stats.add_time()
assert result.actual in [ResultType.Failure, ResultType.Skip,
ResultType.Pass]
if result.actual == ResultType.Failure:
result_str = ' failed'
elif result.actual == ResultType.Skip:
result_str = ' was skipped'
elif result.actual == ResultType.Pass:
result_str = ' passed'
if result.unexpected:
result_str += ' unexpectedly'
elif result.actual == ResultType.Failure:
result_str += ' as expected'
if self.args.timing:
timing_str = ' %.4fs' % result.took
else:
timing_str = ''
suffix = '%s%s' % (result_str, timing_str)
out = result.out
err = result.err
if result.is_regression:
if out or err:
suffix += ':\n'
self.update(stats.format() + result.name + suffix, elide=False)
for l in out.splitlines():
self.print_(' %s' % l)
for l in err.splitlines():
self.print_(' %s' % l)
elif not self.args.quiet:
if self.args.verbose > 1 and (out or err):
suffix += ':\n'
self.update(stats.format() + result.name + suffix,
elide=(not self.args.verbose))
if self.args.verbose > 1:
for l in out.splitlines():
self.print_(' %s' % l)
for l in err.splitlines():
self.print_(' %s' % l)
if self.args.verbose:
self.flush()
def update(self, msg, elide):
self.printer.update(msg, elide)
def flush(self):
self.printer.flush()
def _summarize(self, full_results):
num_passes = json_results.num_passes(full_results)
num_failures = json_results.num_failures(full_results)
num_skips = json_results.num_skips(full_results)
if self.args.quiet and num_failures == 0:
return
if self.args.timing:
timing_clause = ' in %.1fs' % (self.host.time() -
self.stats.started_time)
else:
timing_clause = ''
self.update('%d test%s passed%s, %d skipped, %d failure%s.' %
(num_passes,
'' if num_passes == 1 else 's',
timing_clause,
num_skips,
num_failures,
'' if num_failures == 1 else 's'), elide=False)
self.print_()
def _read_and_delete(self, path, delete):
h = self.host
obj = None
if h.exists(path):
contents = h.read_text_file(path)
if contents:
obj = json.loads(contents)
if delete:
h.remove(path)
return obj
def _write(self, path, obj):
if path:
self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')
def _upload(self, full_results):
h = self.host
if not self.args.test_results_server:
return 0
url, content_type, data = json_results.make_upload_request(
self.args.test_results_server, self.args.builder_name,
self.args.master_name, self.args.test_type,
full_results)
try:
h.fetch(url, data, {'Content-Type': content_type})
return 0
except Exception as e:
h.print_('Uploading the JSON results raised "%s"' % str(e))
return 1
def report_coverage(self):
if self.args.coverage: # pragma: no cover
self.host.print_()
import coverage
cov = coverage.coverage(data_suffix=True)
cov.combine()
cov.report(show_missing=self.args.coverage_show_missing,
omit=self.args.coverage_omit)
if self.args.coverage_annotate:
cov.annotate(omit=self.args.coverage_omit)
def _add_trace_event(self, trace, name, start, end):
event = {
'name': name,
'ts': int((start - self.stats.started_time) * 1000000),
'dur': int((end - start) * 1000000),
'ph': 'X',
'pid': self.host.getpid(),
'tid': 0,
}
trace['traceEvents'].append(event)
def _trace_from_results(self, result_set):
trace = OrderedDict()
trace['traceEvents'] = []
trace['otherData'] = {}
if self.metadata:
trace['otherData'] = self.metadata
for result in result_set.results:
started = int((result.started - self.stats.started_time) * 1000000)
took = int(result.took * 1000000)
event = OrderedDict()
event['name'] = result.name
event['dur'] = took
event['ts'] = started
event['ph'] = 'X' # "Complete" events
event['pid'] = result.pid
event['tid'] = result.worker
args = OrderedDict()
args['expected'] = sorted(str(r) for r in result.expected)
args['actual'] = str(result.actual)
args['out'] = result.out
args['err'] = result.err
args['code'] = result.code
args['unexpected'] = result.unexpected
args['flaky'] = result.flaky
event['args'] = args
trace['traceEvents'].append(event)
return trace
def expectations_for(self, test_case):
test_name = test_case.id()[len(self.args.test_name_prefix):]
if self.has_expectations:
return self.expectations.expectations_for(test_name)
else:
return Expectation(test=test_name)
def default_classifier(self, test_set, test):
if self.matches_filter(test):
if self.should_skip(test):
test_set.add_test_to_skip(test, 'skipped by request')
elif self.should_isolate(test):
test_set.add_test_to_run_isolated(test)
else:
test_set.add_test_to_run_in_parallel(test)
def matches_filter(self, test_case):
_validate_test_starts_with_prefix(
self.args.test_name_prefix, test_case.id())
test_name = test_case.id()[len(self.args.test_name_prefix):]
if self.args.test_filter:
return any(
fnmatch.fnmatch(test_name, glob)
for glob in self.args.test_filter.split('::'))
if self.args.partial_match_filter:
return any(
substr in test_name
for substr in self.args.partial_match_filter)
return True
def should_isolate(self, test_case):
_validate_test_starts_with_prefix(
self.args.test_name_prefix, test_case.id())
test_name = test_case.id()[len(self.args.test_name_prefix):]
return any(fnmatch.fnmatch(test_name, glob)
for glob in self.args.isolate)
def should_skip(self, test_case):
_validate_test_starts_with_prefix(
self.args.test_name_prefix, test_case.id())
if self.args.all:
return False
test_name = test_case.id()[len(self.args.test_name_prefix):]
if self.has_expectations:
expected_results = self.expectations.expectations_for(test_name).results
else:
expected_results = {ResultType.Pass}
return (
ResultType.Skip in expected_results or
any(fnmatch.fnmatch(test_name, glob) for glob in self.args.skip))
def _test_adder(test_set, classifier):
def add_tests(obj):
if isinstance(obj, unittest.suite.TestSuite):
for el in obj:
add_tests(el)
elif (obj.id().startswith('unittest.loader.LoadTestsFailure') or
obj.id().startswith('unittest.loader.ModuleImportFailure')):
# Access to protected member pylint: disable=W0212
module_name = obj._testMethodName
try:
method = getattr(obj, obj._testMethodName)
method()
except Exception as e:
if 'LoadTests' in obj.id():
raise _AddTestsError('%s.load_tests() failed: %s'
% (module_name, str(e)))
else:
raise _AddTestsError(str(e))
else:
assert isinstance(obj, unittest.TestCase)
classifier(test_set, obj)
return add_tests
class _Child(object):
def __init__(self, parent):
self.host = None
self.worker_num = None
self.all = parent.args.all
self.debugger = parent.args.debugger
self.coverage = parent.args.coverage and parent.args.jobs > 1
self.coverage_source = parent.coverage_source
self.dry_run = parent.args.dry_run
self.loader = parent.loader
self.passthrough = parent.args.passthrough
self.context = parent.context
self.setup_fn = parent.setup_fn
self.teardown_fn = parent.teardown_fn
self.context_after_setup = None
self.top_level_dir = parent.top_level_dir
self.top_level_dirs = parent.top_level_dirs
self.loaded_suites = {}
self.cov = None
self.has_expectations = parent.has_expectations
self.expectations = parent.expectations
self.test_name_prefix = parent.args.test_name_prefix
self.artifact_output_dir = parent.artifact_output_dir
self.result_sink_reporter = None
self.disable_resultsink = parent.args.disable_resultsink
def _setup_process(host, worker_num, child):
child.host = host
child.result_sink_reporter = result_sink.ResultSinkReporter(
host, child.disable_resultsink)
child.worker_num = worker_num
# pylint: disable=protected-access
if child.coverage: # pragma: no cover
import coverage
child.cov = coverage.coverage(source=child.coverage_source,
data_suffix=True)
child.cov._warn_no_data = False
child.cov.start()
if child.setup_fn:
child.context_after_setup = child.setup_fn(child, child.context)
else:
child.context_after_setup = child.context
return child
def _teardown_process(child):
res = None
exc = None
if child.teardown_fn:
try:
res = child.teardown_fn(child, child.context_after_setup)
        except Exception as e:
            exc = e
if child.cov: # pragma: no cover
child.cov.stop()
child.cov.save()
return (child.worker_num, res, exc)
def _run_one_test(child, test_input):
h = child.host
pid = h.getpid()
test_name = test_input.name
started = h.time()
# It is important to capture the output before loading the test
# to ensure that
    # 1) the loader doesn't log something we haven't captured, and
    # 2) neither the loader nor the test case grabs a reference to the
    #    uncaptured stdout or stderr that is later used when the test is run.
# This comes up when using the FakeTestLoader and testing typ itself,
# but could come up when testing non-typ code as well.
h.capture_output(divert=not child.passthrough)
if child.has_expectations:
expectation = child.expectations.expectations_for(test_name)
expected_results, should_retry_on_failure = (
expectation.results, expectation.should_retry_on_failure)
else:
expected_results, should_retry_on_failure = {ResultType.Pass}, False
ex_str = ''
try:
orig_skip = unittest.skip
orig_skip_if = unittest.skipIf
if child.all:
unittest.skip = lambda reason: lambda x: x
unittest.skipIf = lambda condition, reason: lambda x: x
elif ResultType.Skip in expected_results:
h.restore_output()
return (Result(test_name, ResultType.Skip, started, 0,
child.worker_num, expected=expected_results,
unexpected=False, pid=pid), False)
test_name_to_load = child.test_name_prefix + test_name
try:
suite = child.loader.loadTestsFromName(test_name_to_load)
# From Python 3.5, AttributeError will not be thrown when calling
# LoadTestsFromName. Instead, it adds error messages in the loader.
# As a result, the original handling cannot kick in properly. We
# now check the error message and throw exception as needed.
if hasattr(child.loader, 'errors') and child.loader.errors:
raise AttributeError(child.loader.errors)
except Exception as e:
ex_str = ('loadTestsFromName("%s") failed: %s\n%s\n' %
(test_name_to_load, e, traceback.format_exc()))
try:
suite = _load_via_load_tests(child, test_name_to_load)
ex_str += ('\nload_via_load_tests(\"%s\") returned %d tests\n' %
(test_name_to_load, len(list(suite))))
except Exception as e: # pragma: untested
suite = []
ex_str += ('\nload_via_load_tests("%s") failed: %s\n%s\n' %
(test_name_to_load, e, traceback.format_exc()))
finally:
unittest.skip = orig_skip
unittest.skipIf = orig_skip_if
tests = list(suite)
if len(tests) != 1:
err = 'Failed to load "%s" in run_one_test' % test_name
if ex_str: # pragma: untested
err += '\n ' + '\n '.join(ex_str.splitlines())
h.restore_output()
return (Result(test_name, ResultType.Failure, started, took=0,
worker=child.worker_num, unexpected=True, code=1,
err=err, pid=pid), False)
art = artifacts.Artifacts(
child.artifact_output_dir, h, test_input.iteration, test_name)
test_case = tests[0]
if isinstance(test_case, TypTestCase):
test_case.child = child
test_case.context = child.context_after_setup
test_case.set_artifacts(art)
test_result = unittest.TestResult()
out = ''
err = ''
try:
if child.dry_run:
pass
elif child.debugger: # pragma: no cover
_run_under_debugger(h, test_case, suite, test_result)
else:
suite.run(test_result)
finally:
out, err = h.restore_output()
# Clear the artifact implementation so that later tests don't try to
# use a stale instance.
if isinstance(test_case, TypTestCase):
test_case.set_artifacts(None)
took = h.time() - started
result = _result_from_test_result(test_result, test_name, started, took, out,
err, child.worker_num, pid,
expected_results, child.has_expectations,
art.artifacts)
test_location = inspect.getsourcefile(test_case.__class__)
test_method = getattr(test_case, test_case._testMethodName)
# Test methods are often wrapped by decorators such as @mock. Try to get to
# the actual test method instead of the wrapper.
if hasattr(test_method, '__wrapped__'):
test_method = test_method.__wrapped__
# Some tests are generated and don't have valid line numbers. Such test
# methods also have a source location different from module location.
if inspect.getsourcefile(test_method) == test_location:
test_line = inspect.getsourcelines(test_method)[1]
else:
test_line = None
result.result_sink_retcode =\
child.result_sink_reporter.report_individual_test_result(
child.test_name_prefix, result, child.artifact_output_dir,
child.expectations, test_location, test_line)
return (result, should_retry_on_failure)
def _run_under_debugger(host, test_case, suite,
test_result): # pragma: no cover
# Access to protected member pylint: disable=W0212
test_func = getattr(test_case, test_case._testMethodName)
fname = inspect.getsourcefile(test_func)
lineno = inspect.getsourcelines(test_func)[1] + 1
dbg = pdb.Pdb(stdout=host.stdout.stream)
dbg.set_break(fname, lineno)
dbg.runcall(suite.run, test_result)
def _result_from_test_result(test_result, test_name, started, took, out, err,
worker_num, pid, expected_results,
has_expectations, artifacts):
if test_result.failures:
actual = ResultType.Failure
code = 1
err = err + test_result.failures[0][1]
unexpected = actual not in expected_results
elif test_result.errors:
actual = ResultType.Failure
code = 1
err = err + test_result.errors[0][1]
unexpected = actual not in expected_results
elif test_result.skipped:
actual = ResultType.Skip
err = err + test_result.skipped[0][1]
code = 0
if has_expectations:
unexpected = actual not in expected_results
else:
unexpected = False
expected_results = {ResultType.Skip}
elif test_result.expectedFailures:
actual = ResultType.Failure
code = 1
err = err + test_result.expectedFailures[0][1]
unexpected = False
elif test_result.unexpectedSuccesses:
actual = ResultType.Pass
code = 0
unexpected = True
else:
actual = ResultType.Pass
code = 0
unexpected = actual not in expected_results
flaky = False
return Result(test_name, actual, started, took, worker_num,
expected_results, unexpected, flaky, code, out, err, pid,
artifacts)
def _load_via_load_tests(child, test_name):
# If we couldn't import a test directly, the test may be only loadable
# via unittest's load_tests protocol. See if we can find a load_tests
# entry point that will work for this test.
loader = child.loader
comps = test_name.split('.')
new_suite = unittest.TestSuite()
while comps:
name = '.'.join(comps)
module = None
suite = None
if name not in child.loaded_suites:
try:
module = importlib.import_module(name)
except ImportError:
pass
if module:
suite = loader.loadTestsFromModule(module)
child.loaded_suites[name] = suite
suite = child.loaded_suites[name]
if suite:
for test_case in suite:
assert isinstance(test_case, unittest.TestCase)
if test_case.id() == test_name: # pragma: untested
new_suite.addTest(test_case)
break
comps.pop()
return new_suite
def _sort_inputs(inps):
return sorted(inps, key=lambda inp: inp.name)
if __name__ == '__main__': # pragma: no cover
sys.modules['__main__'].__file__ = path_to_file
sys.exit(main(win_multiprocessing=WinMultiprocessing.importable))
| bsd-3-clause | -6,846,329,629,762,986,000 | 37.079835 | 84 | 0.557136 | false |
ardi69/pyload-0.4.10 | pyload/plugin/crypter/DlProtectCom.py | 1 | 2395 | # -*- coding: utf-8 -*-
import base64
import re
import time
from pyload.plugin.internal.SimpleCrypter import SimpleCrypter
class DlProtectCom(SimpleCrypter):
__name = "DlProtectCom"
__type = "crypter"
__version = "0.03"
__pattern = r'https?://(?:www\.)?dl-protect\.com/((en|fr)/)?\w+'
__config = [("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description = """Dl-protect.com decrypter plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "[email protected]")]
COOKIES = [("dl-protect.com", "l", "en")]
OFFLINE_PATTERN = r'Unfortunately, the link you are looking for is not found'
def getLinks(self):
# Direct link with redirect
if not re.match(r"https?://(?:www\.)?dl-protect\.com/.+", self.req.http.lastEffectiveURL):
return [self.req.http.lastEffectiveURL]
post_req = {'key' : re.search(r'name="key" value="(.+?)"', self.html).group(1),
'submitform': ""}
if "Please click on continue to see the content" in self.html:
post_req['submitform'] = "Continue"
self.wait(2)
else:
mstime = int(round(time.time() * 1000))
b64time = "_" + base64.urlsafe_b64encode(str(mstime)).replace("=", "%3D")
post_req.update({'i' : b64time,
'submitform': "Decrypt+link"})
if "Password :" in self.html:
post_req['pwd'] = self.getPassword()
if "Security Code" in self.html:
captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
post_req['secure'] = captcha_code
self.html = self.load(self.pyfile.url, post=post_req)
for errmsg in ("The password is incorrect", "The security code is incorrect"):
if errmsg in self.html:
                self.fail(_(errmsg))
return re.findall(r'<a href="([^/].+?)" target="_blank">', self.html)
| gpl-3.0 | 7,238,583,991,759,091,000 | 35.846154 | 98 | 0.546138 | false |
SUSE/azurectl | test/unit/defaults_test.py | 1 | 3658 | from .test_helper import argv_kiwi_tests
from mock import patch
import mock
from azurectl.defaults import Defaults
class TestDefaults:
def __set_account_type_docopts(self):
self.account_type_docopts = {
'--locally-redundant': False,
'--zone-redundant': False,
'--geo-redundant': False,
'--read-access-geo-redundant': False
}
def __host_caching_docopts(self, selection=None):
docopts = {
'--no-cache': False,
'--read-only-cache': False,
'--read-write-cache': False
}
if selection:
docopts[selection] = True
return docopts
def test_set_attribute(self):
class X:
def __init__(self):
self.name = 'value'
instance = X()
Defaults.set_attribute(instance, 'name', 'foo')
assert instance.name == 'foo'
def test_get_attribute(self):
class X:
def __init__(self):
self.name = 'value'
instance = X()
Defaults.get_attribute(instance, 'name')
assert instance.name == 'value'
def test_account_type_for_docopts(self):
self.__set_account_type_docopts()
self.account_type_docopts['--locally-redundant'] = True
result = Defaults.account_type_for_docopts(self.account_type_docopts)
assert result == 'Standard_LRS'
self.__set_account_type_docopts()
self.account_type_docopts['--zone-redundant'] = True
result = Defaults.account_type_for_docopts(self.account_type_docopts)
assert result == 'Standard_ZRS'
self.__set_account_type_docopts()
self.account_type_docopts['--geo-redundant'] = True
result = Defaults.account_type_for_docopts(self.account_type_docopts)
assert result == 'Standard_GRS'
self.__set_account_type_docopts()
self.account_type_docopts['--read-access-geo-redundant'] = True
result = Defaults.account_type_for_docopts(self.account_type_docopts)
assert result == 'Standard_RAGRS'
def test_default_account_type_for_docopts(self):
self.__set_account_type_docopts()
result = Defaults.account_type_for_docopts(self.account_type_docopts)
assert result == 'Standard_GRS'
def test_docopt_for_account_type(self):
result = Defaults.docopt_for_account_type('Standard_LRS')
assert result == '--locally-redundant'
result = Defaults.docopt_for_account_type('Standard_ZRS')
assert result == '--zone-redundant'
result = Defaults.docopt_for_account_type('Standard_GRS')
assert result == '--geo-redundant'
result = Defaults.docopt_for_account_type('Standard_RAGRS')
assert result == '--read-access-geo-redundant'
def test_host_caching_for_docopts(self):
# No cache
host_caching_docopts = self.__host_caching_docopts('--no-cache')
assert Defaults.host_caching_for_docopts(host_caching_docopts) == 'None'
# read-only cache
host_caching_docopts = self.__host_caching_docopts('--read-only-cache')
assert Defaults.host_caching_for_docopts(host_caching_docopts) == \
'ReadOnly'
# read-write cache
host_caching_docopts = self.__host_caching_docopts('--read-write-cache')
assert Defaults.host_caching_for_docopts(host_caching_docopts) == \
'ReadWrite'
def test_default_host_caching_for_docopts(self):
host_caching_docopts = self.__host_caching_docopts()
assert Defaults.host_caching_for_docopts(host_caching_docopts) == \
'ReadOnly'
| apache-2.0 | 7,178,612,053,280,598,000 | 35.949495 | 80 | 0.614543 | false |
makelove/OpenCV-Python-Tutorial | ch21-轮廓Contours/凸包-凸性检测-边界矩形-最小外接圆-拟合.py | 1 | 3210 | # -*- coding: utf-8 -*-
# @Time : 2017/7/12 下午8:28
# @Author : play4fun
# @File : 凸包-凸性检测-边界矩形-最小外接圆-拟合.py
# @Software: PyCharm
"""
凸包-凸性检测-边界矩形-最小外接圆-拟合.py:
convex hull, convexity check, bounding rectangle, minimum enclosing circle,
and shape fitting.
"""
import cv2
import numpy as np
img=cv2.imread('../data/lightning.png',0)
image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt=contours[0]
'''
The function cv2.convexHull() can be used to check a curve for convexity
defects and to correct them. Generally speaking, a convex curve always bulges
outward, or is at least flat; wherever it caves inward instead, that is
called a convexity defect.
For example, consider the hand in the image below: the red curve shows the
hand's convex hull, and the convexity defects are marked with double arrows.
'''
# Signature: cv2.convexHull(points[, hull[, clockwise[, returnPoints]]])
'''
• points: the contour we pass in.
• hull: the output; usually not needed.
• clockwise: orientation flag. If set to True, the returned convex hull is
  ordered clockwise; otherwise it is ordered counter-clockwise.
• returnPoints: True by default, in which case the coordinates of the hull
  points are returned. If set to False, the indices of the contour points
  that correspond to the hull points are returned instead.
'''
hull = cv2.convexHull(cnt)
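# A brief sketch added for illustration (not part of the original tutorial):
# cv2.convexityDefects() locates the convexity defects described above. It
# requires hull *indices*, so the hull is recomputed with returnPoints=False.
# The result may be None for a fully convex contour; otherwise each row is
# (start_index, end_index, farthest_point_index, approximate_depth).
hull_indices = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull_indices)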
# Convexity check
# The function cv2.isContourConvex() checks whether a curve is convex.
# It simply returns True or False; nothing more to it.
k = cv2.isContourConvex(cnt)
# Bounding rectangle
'''
Straight bounding rectangle: an upright rectangle, i.e. one that is not
rotated. It does not take the object's rotation into account, so its area is
not minimal. It can be found with the function cv2.boundingRect().
x, y are the coordinates of the rectangle's top-left corner; w, h are the
rectangle's width and height.
'''
x, y, w, h = cv2.boundingRect(cnt)
img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
'''
Rotated rectangle
Here the bounding rectangle is drawn with minimum area, so rotation is taken
into account as well. The function used is cv2.minAreaRect(). It returns a
Box2D structure containing: (center (x, y), (width, height), angle of
rotation). To draw this rectangle we need its four corners, which are
obtained with the function cv2.boxPoints().
'''
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img,[box],0,(0,0,255),2)
# Minimum enclosing circle
# The function cv2.minEnclosingCircle() finds an object's circumcircle:
# of all circles that can enclose the object, it is the one with minimal area.
(x, y), radius = cv2.minEnclosingCircle(cnt)
center = (int(x), int(y))
radius = int(radius)
img = cv2.circle(img, center, radius, (0, 255, 0), 2)
# Ellipse fitting
# Drawing is done with cv2.ellipse(); the fitted ellipse returned here is in
# fact the inscribed ellipse of the rotated bounding rectangle.
ellipse = cv2.fitEllipse(cnt)
#((135.34278869628906, 134.22764587402344),(57.018402099609375, 166.91265869140625),136.8311767578125)
angle=ellipse[2]
im = cv2.ellipse(img, ellipse, (0, 255, 0), 2)
# Line fitting
# We can fit a straight line to a set of points; in the same way, we can fit
# a line to the white points in an image.
rows, cols = img.shape[:2]
[vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx) + y)
righty = int(((cols - x) * vy / vx) + y)
cv2.line(img, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
| mit | 3,029,899,284,800,181,000 | 28.108108 | 136 | 0.687558 | false |
tiancj/emesene | emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/xep_0191/blocking.py | 1 | 2536 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp import Iq
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin, JID
from sleekxmpp.plugins.xep_0191 import stanza, Block, Unblock, BlockList
log = logging.getLogger(__name__)
class XEP_0191(BasePlugin):
name = 'xep_0191'
description = 'XEP-0191: Simple Communications Blocking'
dependencies = set(['xep_0030'])
stanza = stanza
def plugin_init(self):
register_stanza_plugin(Iq, BlockList)
register_stanza_plugin(Iq, Block)
register_stanza_plugin(Iq, Unblock)
self.xmpp.register_handler(
Callback('Blocked Contact',
StanzaPath('iq@type=set/block'),
self._handle_blocked))
self.xmpp.register_handler(
Callback('Unblocked Contact',
StanzaPath('iq@type=set/unblock'),
self._handle_unblocked))
def plugin_end(self):
self.xmpp.remove_handler('Blocked Contact')
self.xmpp.remove_handler('Unblocked Contact')
def get_blocked(self, ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'get'
        iq['from'] = ifrom
iq.enable('blocklist')
return iq.send(block=block, timeout=timeout, callback=callback)
def block(self, jids, ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
if not isinstance(jids, (set, list)):
jids = [jids]
iq['block']['items'] = jids
return iq.send(block=block, timeout=timeout, callback=callback)
def unblock(self, jids=None, ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
if jids is None:
jids = []
if not isinstance(jids, (set, list)):
jids = [jids]
iq['unblock']['items'] = jids
return iq.send(block=block, timeout=timeout, callback=callback)
def _handle_blocked(self, iq):
self.xmpp.event('blocked', iq)
def _handle_unblocked(self, iq):
self.xmpp.event('unblocked', iq)
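# Illustrative usage sketch (not part of the plugin itself); it assumes `xmpp`
# is a connected sleekxmpp.ClientXMPP instance with this plugin registered:
#
#     xmpp.register_plugin('xep_0191')
#     xmpp['xep_0191'].block('[email protected]')      # add a JID to the blocklist
#     blocklist_iq = xmpp['xep_0191'].get_blocked() # fetch the current blocklist
#     xmpp['xep_0191'].unblock()                    # no JIDs given = unblock all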
| gpl-3.0 | 5,357,166,866,941,608,000 | 29.554217 | 86 | 0.616325 | false |
tylertian/Openstack | openstack F/python-glanceclient/tests/v2/test_images.py | 1 | 19273 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import testtools
import warlock
from glanceclient.v2 import images
from tests import utils
_CHKSUM = '93264c3edf5972c9f1cb309543d38a5c'
_CHKSUM1 = '54264c3edf5972c9f1cb309453d38a46'
_BOGUS_ID = '63e7f218-29de-4477-abdc-8db7c9533188'
_EVERYTHING_ID = '802cbbb7-0379-4c38-853f-37302b5e3d29'
_OWNED_IMAGE_ID = 'a4963502-acc7-42ba-ad60-5aa0962b7faf'
_OWNER_ID = '6bd473f0-79ae-40ad-a927-e07ec37b642f'
_PRIVATE_ID = 'e33560a7-3964-4de5-8339-5a24559f99ab'
_PUBLIC_ID = '857806e7-05b6-48e0-9d40-cb0e6fb727b9'
_SHARED_ID = '331ac905-2a38-44c5-a83d-653db8f08313'
_STATUS_REJECTED_ID = 'f3ea56ff-d7e4-4451-998c-1e3d33539c8e'
fixtures = {
'/v2/images?limit=%d' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images?limit=1': {
'GET': (
{},
{
'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
],
'next': ('/v2/images?limit=1&'
'marker=3a4560a1-e585-443e-9b39-553b46ec92d1'),
},
),
},
('/v2/images?limit=1&marker=3a4560a1-e585-443e-9b39-553b46ec92d1'): {
'GET': (
{},
{'images': [
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images/3a4560a1-e585-443e-9b39-553b46ec92d1': {
'GET': (
{},
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
),
'PATCH': (
{},
'',
),
},
'/v2/images/e7e59ff6-fa2e-4075-87d3-1a1398a07dc3': {
'GET': (
{},
{
'id': 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3',
'name': 'image-3',
'barney': 'rubble',
'george': 'jetson',
},
),
'PATCH': (
{},
'',
),
},
'/v2/images': {
'POST': (
{},
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
),
},
'v2/images/87b634c1-f893-33c9-28a9-e5673c99239a': {
'DELETE': (
{},
{
'id': '87b634c1-f893-33c9-28a9-e5673c99239a',
},
),
},
'/v2/images/606b0e88-7c5a-4d54-b5bb-046105d4de6f/file': {
'PUT': (
{},
'',
),
},
'/v2/images/5cc4bebc-db27-11e1-a1eb-080027cbe205/file': {
'GET': (
{},
'A',
),
},
'/v2/images/66fb18d6-db27-11e1-a1eb-080027cbe205/file': {
'GET': (
{
'content-md5': 'wrong'
},
'BB',
),
},
'/v2/images/1b1c6366-dd57-11e1-af0f-02163e68b1d8/file': {
'GET': (
{
'content-md5': 'defb99e69a9f1f6e06f15006b1f166ae'
},
'CCC',
),
},
'/v2/images?limit=%d&visibility=public' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _PUBLIC_ID,
'harvey': 'lipshitz',
},
]},
),
},
'/v2/images?limit=%d&visibility=private' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _PRIVATE_ID,
},
]},
),
},
'/v2/images?limit=%d&visibility=shared' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _SHARED_ID,
},
]},
),
},
'/v2/images?limit=%d&member_status=rejected' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': [
{
'id': _STATUS_REJECTED_ID,
},
]},
),
},
'/v2/images?limit=%d&member_status=pending' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': []},
),
},
'/v2/images?owner=%s&limit=%d' % (_OWNER_ID, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': [
{
'id': _OWNED_IMAGE_ID,
},
]},
),
},
'/v2/images?owner=%s&limit=%d' % (_BOGUS_ID, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': []},
),
},
'/v2/images?owner=%s&limit=%d&member_status=pending&visibility=shared'
% (_BOGUS_ID, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': [
{
'id': _EVERYTHING_ID,
},
]},
),
},
'/v2/images?checksum=%s&limit=%d' % (_CHKSUM, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': [
{
'id': '3a4560a1-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
}
]},
),
},
'/v2/images?checksum=%s&limit=%d' % (_CHKSUM1, images.DEFAULT_PAGE_SIZE): {
'GET': (
{},
{'images': [
{
'id': '2a4560b2-e585-443e-9b39-553b46ec92d1',
'name': 'image-1',
},
{
'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810',
'name': 'image-2',
},
]},
),
},
'/v2/images?checksum=wrong&limit=%d' % images.DEFAULT_PAGE_SIZE: {
'GET': (
{},
{'images': []},
),
},
}
fake_schema = {
'name': 'image',
'properties': {'id': {}, 'name': {}},
'additionalProperties': {'type': 'string'}
}
FakeModel = warlock.model_factory(fake_schema)
class TestController(testtools.TestCase):
def setUp(self):
super(TestController, self).setUp()
self.api = utils.FakeAPI(fixtures)
self.controller = images.Controller(self.api, FakeModel)
def test_list_images(self):
#NOTE(bcwaldon): cast to list since the controller returns a generator
images = list(self.controller.list())
self.assertEqual(images[0].id, '3a4560a1-e585-443e-9b39-553b46ec92d1')
self.assertEqual(images[0].name, 'image-1')
self.assertEqual(images[1].id, '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810')
self.assertEqual(images[1].name, 'image-2')
def test_list_images_paginated(self):
#NOTE(bcwaldon): cast to list since the controller returns a generator
images = list(self.controller.list(page_size=1))
self.assertEqual(images[0].id, '3a4560a1-e585-443e-9b39-553b46ec92d1')
self.assertEqual(images[0].name, 'image-1')
self.assertEqual(images[1].id, '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810')
self.assertEqual(images[1].name, 'image-2')
def test_list_images_visibility_public(self):
filters = {'filters': dict([('visibility', 'public')])}
images = list(self.controller.list(**filters))
self.assertEqual(images[0].id, _PUBLIC_ID)
def test_list_images_visibility_private(self):
filters = {'filters': dict([('visibility', 'private')])}
images = list(self.controller.list(**filters))
self.assertEqual(images[0].id, _PRIVATE_ID)
def test_list_images_visibility_shared(self):
filters = {'filters': dict([('visibility', 'shared')])}
images = list(self.controller.list(**filters))
self.assertEqual(images[0].id, _SHARED_ID)
def test_list_images_member_status_rejected(self):
filters = {'filters': dict([('member_status', 'rejected')])}
images = list(self.controller.list(**filters))
self.assertEqual(images[0].id, _STATUS_REJECTED_ID)
def test_list_images_for_owner(self):
filters = {'filters': dict([('owner', _OWNER_ID)])}
images = list(self.controller.list(**filters))
self.assertEqual(images[0].id, _OWNED_IMAGE_ID)
def test_list_images_for_checksum_single_image(self):
fake_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
filters = {'filters': dict([('checksum', _CHKSUM)])}
images = list(self.controller.list(**filters))
self.assertEquals(1, len(images))
self.assertEqual(images[0].id, '%s' % fake_id)
def test_list_images_for_checksum_multiple_images(self):
fake_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1'
fake_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'
filters = {'filters': dict([('checksum', _CHKSUM1)])}
images = list(self.controller.list(**filters))
self.assertEquals(2, len(images))
self.assertEqual(images[0].id, '%s' % fake_id1)
self.assertEqual(images[1].id, '%s' % fake_id2)
def test_list_images_for_wrong_checksum(self):
filters = {'filters': dict([('checksum', 'wrong')])}
images = list(self.controller.list(**filters))
self.assertEquals(0, len(images))
def test_list_images_for_bogus_owner(self):
filters = {'filters': dict([('owner', _BOGUS_ID)])}
images = list(self.controller.list(**filters))
self.assertEqual(images, [])
def test_list_images_for_bunch_of_filters(self):
filters = {'filters': dict([('owner', _BOGUS_ID),
('visibility', 'shared'),
('member_status', 'pending')])}
images = list(self.controller.list(**filters))
self.assertEqual(images[0].id, _EVERYTHING_ID)
def test_list_images_filters_encoding(self):
filters = {"owner": u"ni\xf1o"}
try:
list(self.controller.list(filters=filters))
except KeyError:
# NOTE(flaper87): It raises KeyError because there's
# no fixture supporting this query:
# /v2/images?owner=ni%C3%B1o&limit=20
# We just want to make sure filters are correctly encoded.
pass
self.assertEqual(filters["owner"], "ni\xc3\xb1o")
def test_get_image(self):
image = self.controller.get('3a4560a1-e585-443e-9b39-553b46ec92d1')
self.assertEqual(image.id, '3a4560a1-e585-443e-9b39-553b46ec92d1')
self.assertEqual(image.name, 'image-1')
def test_create_image(self):
properties = {
'name': 'image-1'
}
image = self.controller.create(**properties)
self.assertEqual(image.id, '3a4560a1-e585-443e-9b39-553b46ec92d1')
self.assertEqual(image.name, 'image-1')
def test_create_bad_additionalProperty_type(self):
properties = {
'name': 'image-1',
'bad_prop': True,
}
with testtools.ExpectedException(TypeError):
self.controller.create(**properties)
def test_delete_image(self):
self.controller.delete('87b634c1-f893-33c9-28a9-e5673c99239a')
expect = [
('DELETE',
'v2/images/87b634c1-f893-33c9-28a9-e5673c99239a',
{},
None)]
self.assertEqual(self.api.calls, expect)
def test_data_upload(self):
image_data = 'CCC'
image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f'
self.controller.upload(image_id, image_data)
expect = [('PUT', '/v2/images/%s/file' % image_id,
{'Content-Type': 'application/octet-stream'},
image_data)]
self.assertEqual(self.api.calls, expect)
def test_data_without_checksum(self):
body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205',
do_checksum=False)
body = ''.join([b for b in body])
self.assertEqual(body, 'A')
body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205')
body = ''.join([b for b in body])
self.assertEqual(body, 'A')
def test_data_with_wrong_checksum(self):
body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205',
do_checksum=False)
body = ''.join([b for b in body])
self.assertEqual(body, 'BB')
body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205')
try:
body = ''.join([b for b in body])
self.fail('data did not raise an error.')
except IOError as e:
self.assertEqual(errno.EPIPE, e.errno)
msg = 'was 9d3d9048db16a7eee539e93e3618cbe7 expected wrong'
self.assertTrue(msg in str(e))
def test_data_with_checksum(self):
body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8',
do_checksum=False)
body = ''.join([b for b in body])
self.assertEqual(body, 'CCC')
body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8')
body = ''.join([b for b in body])
self.assertEqual(body, 'CCC')
def test_update_replace_prop(self):
image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
params = {'name': 'pong'}
image = self.controller.update(image_id, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.0-json-patch',
}
expect_body = '[{"path": "/name", "value": "pong", "op": "replace"}]'
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(image.id, image_id)
#NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual(image.name, 'image-1')
def test_update_add_prop(self):
image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1'
params = {'finn': 'human'}
image = self.controller.update(image_id, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.0-json-patch',
}
expect_body = '[{"path": "/finn", "value": "human", "op": "add"}]'
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(image.id, image_id)
#NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual(image.name, 'image-1')
def test_update_remove_prop(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
remove_props = ['barney']
image = self.controller.update(image_id, remove_props)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.0-json-patch',
}
expect_body = '[{"path": "/barney", "op": "remove"}]'
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(image.id, image_id)
#NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual(image.name, 'image-3')
def test_update_replace_remove_same_prop(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
# Updating a property takes precedence over removing a property
params = {'barney': 'miller'}
remove_props = ['barney']
image = self.controller.update(image_id, remove_props, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.0-json-patch',
}
expect_body = '[{"path": "/barney", "value": "miller", ' \
'"op": "replace"}]'
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(image.id, image_id)
#NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual(image.name, 'image-3')
def test_update_add_remove_same_prop(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
# Adding a property takes precedence over removing a property
params = {'finn': 'human'}
remove_props = ['finn']
image = self.controller.update(image_id, remove_props, **params)
expect_hdrs = {
'Content-Type': 'application/openstack-images-v2.0-json-patch',
}
expect_body = '[{"path": "/finn", "value": "human", "op": "add"}]'
expect = [
('GET', '/v2/images/%s' % image_id, {}, None),
('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body),
('GET', '/v2/images/%s' % image_id, {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(image.id, image_id)
#NOTE(bcwaldon): due to limitations of our fake api framework, the name
# will not actually change - yet in real life it will...
self.assertEqual(image.name, 'image-3')
def test_update_bad_additionalProperty_type(self):
image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3'
params = {'name': 'pong', 'bad_prop': False}
with testtools.ExpectedException(TypeError):
self.controller.update(image_id, **params)
| apache-2.0 | -4,692,995,391,996,027,000 | 34.756957 | 79 | 0.522493 | false |
cstein/fmo-ie-analyzer | src/fie_ui.py | 1 | 2694 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'fie.ui'
#
# Created: Fri May 31 09:50:27 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_main(object):
def setupUi(self, main):
main.setObjectName(_fromUtf8("main"))
main.resize(809, 727)
self.frame = QtGui.QFrame(main)
self.frame.setGeometry(QtCore.QRect(10, 10, 671, 671))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.btn1File = QtGui.QPushButton(main)
self.btn1File.setGeometry(QtCore.QRect(690, 10, 114, 32))
self.btn1File.setObjectName(_fromUtf8("btn1File"))
self.IEslider = QtGui.QSlider(main)
self.IEslider.setGeometry(QtCore.QRect(290, 690, 171, 28))
self.IEslider.setMinimum(1)
self.IEslider.setMaximum(50)
self.IEslider.setProperty("value", 1)
self.IEslider.setOrientation(QtCore.Qt.Horizontal)
self.IEslider.setObjectName(_fromUtf8("IEslider"))
self.label = QtGui.QLabel(main)
self.label.setGeometry(QtCore.QRect(10, 690, 271, 28))
self.label.setObjectName(_fromUtf8("label"))
self.lbIEValue = QtGui.QLabel(main)
self.lbIEValue.setGeometry(QtCore.QRect(480, 690, 121, 28))
self.lbIEValue.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbIEValue.setObjectName(_fromUtf8("lbIEValue"))
self.btn2Files = QtGui.QPushButton(main)
self.btn2Files.setGeometry(QtCore.QRect(690, 50, 114, 32))
self.btn2Files.setObjectName(_fromUtf8("btn2Files"))
self.retranslateUi(main)
QtCore.QMetaObject.connectSlotsByName(main)
main.setTabOrder(self.btn1File, self.IEslider)
def retranslateUi(self, main):
main.setWindowTitle(QtGui.QApplication.translate("main", "FMO Interaction Energies Analyser", None, QtGui.QApplication.UnicodeUTF8))
self.btn1File.setText(QtGui.QApplication.translate("main", "1 File", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("main", "Interaction Energy Threshold:", None, QtGui.QApplication.UnicodeUTF8))
self.lbIEValue.setText(QtGui.QApplication.translate("main", "1 kcal/mol", None, QtGui.QApplication.UnicodeUTF8))
self.btn2Files.setText(QtGui.QApplication.translate("main", "2 Files", None, QtGui.QApplication.UnicodeUTF8))
| mit | -2,192,095,076,984,438,500 | 46.263158 | 140 | 0.698589 | false |
ttreeagency/PootleTypo3Org | pootle/apps/pootle_app/management/commands/__init__.py | 1 | 9386 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2012 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import logging
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, NoArgsCommand
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_translationproject.models import TranslationProject
class PootleCommand(NoArgsCommand):
"""Base class for handling recursive pootle store management commands."""
shared_option_list = (
make_option('--directory', dest='directory',
help='Directory to refresh relative to po directory'),
make_option('--project', action='append', dest='projects',
help='Project to refresh'),
make_option('--language', action='append', dest='languages',
help='Language to refresh'),
make_option('--path-prefix', action='store', dest='path',
help='Path prefix relative to translation project of '
'files to refresh'),
)
option_list = NoArgsCommand.option_list + shared_option_list
def do_translation_project(self, tp, pootle_path, **options):
if hasattr(self, "handle_translation_project"):
logging.info(u"Running %s over %s", self.name, tp)
try:
self.handle_translation_project(tp, **options)
except Exception, e:
logging.error(u"Failed to run %s over %s:\n%s",
self.name, tp, e)
return
if not pootle_path and hasattr(self, "handle_all_stores"):
logging.info(u"Running %s over %s's files", self.name, tp)
try:
self.handle_all_stores(tp, **options)
except Exception, e:
logging.error(u"Failed to run %s over %s's files\n%s",
self.name, tp, e)
return
elif hasattr(self, "handle_store"):
store_query = tp.stores.all()
if pootle_path:
pootle_path = tp.pootle_path + pootle_path
store_query = store_query.filter(
pootle_path__startswith=pootle_path
)
for store in store_query.iterator():
logging.info(u"Running %s over %s",
self.name, store.pootle_path)
try:
self.handle_store(store, **options)
except Exception, e:
logging.error(u"Failed to run %s over %s:\n%s",
self.name, store.pootle_path, e)
def handle_noargs(self, **options):
# adjust debug level to the verbosity option
verbosity = int(options.get('verbosity', 1))
debug_levels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.DEBUG}
debug_level = debug_levels.get(verbosity, logging.DEBUG)
logging.getLogger().setLevel(debug_level)
# reduce size of parse pool early on
self.name = self.__class__.__module__.split('.')[-1]
from pootle_store.fields import TranslationStoreFieldFile
TranslationStoreFieldFile._store_cache.maxsize = 2
TranslationStoreFieldFile._store_cache.cullsize = 2
TranslationProject._non_db_state_cache.maxsize = 2
TranslationProject._non_db_state_cache.cullsize = 2
directory = options.get('directory', '')
if directory:
languages = []
projects = []
path = ''
path_parts = directory.split('/')
if path_parts and path_parts[0]:
projects = [path_parts[0]]
if len(path_parts) > 1 and path_parts[1]:
if Language.objects.filter(code=path_parts[1]).count():
languages = [path_parts[1]]
if len(path_parts) > 2:
path = '/'.join(path_parts[2:])
else:
path = '/'.join(path_parts[1:])
else:
projects = options.get('projects', [])
languages = options.get('languages', [])
path = options.get('path', '')
if languages and hasattr(self, "handle_language"):
lang_query = Language.objects.all()
if languages:
lang_query = lang_query.filter(code__in=languages)
for lang in lang_query.iterator():
logging.info(u"Running %s over %s", self.name, lang)
try:
self.handle_language(lang, **options)
except Exception, e:
logging.error(u"Failed to run %s over %s:\n%s",
self.name, lang, e)
project_query = Project.objects.all()
if projects:
project_query = project_query.filter(code__in=projects)
for project in project_query.iterator():
if hasattr(self, "handle_project"):
logging.info(u"Running %s over %s", self.name, project)
try:
self.handle_project(project, **options)
except Exception, e:
logging.error(u"Failed to run %s over %s:\n%s",
self.name, project, e)
continue
template_tp = project.get_template_translationproject()
tp_query = project.translationproject_set.order_by('language__code')
if languages:
if template_tp and template_tp.language.code not in languages:
template_tp = None
tp_query = tp_query.filter(language__code__in=languages)
# update the template translation project first
if template_tp:
self.do_translation_project(template_tp, path, **options)
for tp in tp_query.iterator():
if tp == template_tp:
continue
self.do_translation_project(tp, path, **options)
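class _ExampleStoreCountCommand(PootleCommand):
    """Illustrative sketch only (not part of the original module): a concrete
    command built on PootleCommand. The base class walks the selected
    projects and languages and invokes this hook once per translation
    project."""
    def handle_translation_project(self, tp, **options):
        logging.info(u"%s contains %d stores", tp, tp.stores.count())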
class NoArgsCommandMixin(NoArgsCommand):
"""Intermediary class to allow multiple inheritance from
:class:`NoArgsCommand` and mixins that implement :func:`handle_noargs`.
Classes derived from this will provide the implementation for
:func:`handle_noargs`.
"""
def handle_noargs(self, **options):
pass
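# Hypothetical sketch (not in the original source) of how these mixins are
# meant to compose: ModifiedSinceMixin (below) calls super().handle_noargs(),
# and the chain terminates in NoArgsCommandMixin's no-op implementation.
#
# class Command(ModifiedSinceMixin, NoArgsCommandMixin):
#     def handle_noargs(self, **options):
#         super(Command, self).handle_noargs(**options)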
class ModifiedSinceMixin(object):
option_modified_since = (
make_option('--modified-since', action='store', dest='modified_since',
default=0, type=int,
help="Only process translations newer than CHANGE_ID "
"(as given by latest_change_id)"),
)
def __init__(self, *args, **kwargs):
super(ModifiedSinceMixin, self).__init__(*args, **kwargs)
self.__class__.option_list += self.__class__.option_modified_since
def handle_noargs(self, **options):
change_id = options.get('modified_since', 0)
if change_id == 0:
logging.info(u"Change ID is zero, ignoring altogether.")
options.pop('modified_since')
elif change_id < 0:
logging.error(u"Change IDs must be positive integers.")
sys.exit(1)
else:
from pootle_statistics.models import Submission
latest_change_id = Submission.objects.values_list('id', flat=True) \
.select_related('').latest()
if change_id > latest_change_id:
logging.warning(u"The given change ID is higher than the "
u"latest known change.\nAborting.")
sys.exit(1)
super(ModifiedSinceMixin, self).handle_noargs(**options)
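# Hedged usage sketch (editor addition, not part of Pootle): how a concrete
# command combines the mixins; the class name below is hypothetical.
class ExampleNoopCommand(ModifiedSinceMixin, NoArgsCommandMixin):
    """Via the MRO, ModifiedSinceMixin.handle_noargs() validates
    --modified-since first, then the no-op NoArgsCommandMixin body runs."""
    pass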
class BaseRunCommand(BaseCommand):
"""Base class to build new server runners.
Based on code from `django-shoes
<https://bitbucket.org/mlzboy/django-shoes/>`_."""
hostport_option_list = (
make_option('--host', action='store', dest='host', default='127.0.0.1',
help='Hostname to listen on.'),
make_option('--port', action='store', dest='port', default=8000,
type=int, help='The TCP port to listen on.'),
)
option_list = BaseCommand.option_list + hostport_option_list
def handle(self, *args, **options):
return self.serve_forever(*args, **options)
def get_app(self):
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.handlers.wsgi import WSGIHandler
app = StaticFilesHandler(WSGIHandler())
return app
def serve_forever(self, *args, **kwargs):
raise NotImplementedError
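# Hedged sketch (editor addition, not part of Pootle): a minimal concrete
# runner built on the stdlib wsgiref server; it assumes the --host/--port
# options parsed by handle() above.
class ExampleWSGIRefRunCommand(BaseRunCommand):
    def serve_forever(self, *args, **options):
        from wsgiref.simple_server import make_server
        httpd = make_server(options['host'], int(options['port']),
                            self.get_app())
        httpd.serve_forever()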
| gpl-2.0 | 7,473,940,043,474,504,000 | 39.808696 | 80 | 0.575112 | false |
MaikeMota/bulk-downloader | bulk-downloader.py | 1 | 3289 | import requests
import sys, getopt, os
import time
import datetime
CHUNK_SIZE = 1024
MB_SIZE = 1048576
links = None
outputdir = None
def main():
    # Initialise locally: the assignments in the option loop below make these
    # names local to main(), so without this the `is None` checks would raise
    # UnboundLocalError when -f or -o is omitted.
    links = None
    outputdir = None
    try:
opts, args = getopt.getopt(sys.argv[1:],"hf:o:",["file=","outdir="])
except getopt.GetoptError:
print('usage: bulk-downloader.py -f <link.txt> -o <output_dir>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('usage: bulk-downloader.py -f <link.txt> -o <output_dir>')
sys.exit()
elif opt in ("-f", "--file"):
links = arg
elif opt in ("-o", "--outdir"):
outputdir = arg
if links is None:
print('Missing links.txt parameter.')
sys.exit(2)
if outputdir is None:
print('Missing output_dir parameter.')
sys.exit(2)
print('Output dir: ' + outputdir)
if not os.path.exists(outputdir):
print(outputdir + " does not exists... creating...")
os.makedirs(outputdir)
print(outputdir + " created!")
print('Opening ' + links + "...")
with open(links) as links_file:
for url in links_file.readlines():
url = url.replace('\n', '')
last_slash_index = url.rindex('/')
file_name = url[last_slash_index+1 : len(url)]
res = requests.get(url, stream=True)
total_length = res.headers.get('content-length')
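            # (editor note) servers may omit Content-Length (e.g. chunked
            # transfer); int(total_length) below would then fail on None.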
print("downloading " + file_name)
dl = 0
total_length = int(total_length)
loops = 0
speeds = 0
with open(outputdir + "/" + file_name, 'wb') as file:
total_length_mb = total_length / MB_SIZE
start_time = time.mktime(time.localtime())
for chunk in res.iter_content(CHUNK_SIZE):
file.write(chunk)
elapsed_time = time.mktime(time.localtime()) - start_time
if elapsed_time == 0:
elapsed_time = 1
dl = dl + len(chunk)
done = int(25 * dl / total_length)
total_mb_downloaded = float(dl / MB_SIZE)
remaining_size = total_length_mb - total_mb_downloaded
speed = float(total_mb_downloaded / elapsed_time)
speeds = speeds + speed;
loops = loops + 1
sys.stdout.write('\r[%s%s] %.2f Mb of %.2f Mb %.2f Mb/s ETA: %s' %
(
'=' * done, ' ' * (25-done),
total_mb_downloaded,
float(total_length_mb),
speed,
str(datetime.timedelta(seconds=int(remaining_size/speed)))
)
)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.write("\n")
sys.stdout.flush()
print("Elapsed time: %s, Avg Speed: %.2f Mb/s" %
(
str(datetime.timedelta(seconds= elapsed_time)), float(speeds/loops))
)
print(file_name + " saved to " + outputdir + " folder")
if __name__ == "__main__":
main()
| mit | 1,124,491,160,564,913,800 | 35.966292 | 87 | 0.472484 | false |
talon-one/talon_one.py | test/test_role_assign.py | 1 | 2130 | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.role_assign import RoleAssign # noqa: E501
from talon_one.rest import ApiException
class TestRoleAssign(unittest.TestCase):
"""RoleAssign unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test RoleAssign
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.role_assign.RoleAssign() # noqa: E501
if include_optional :
return RoleAssign(
users = [
56
],
roles = [
56
]
)
else :
return RoleAssign(
users = [
56
],
roles = [
56
],
)
def testRoleAssign(self):
"""Test RoleAssign"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | 8,608,335,731,823,112,000 | 32.809524 | 647 | 0.604225 | false |
lobnek/pyutil | source/conf.py | 1 | 1730 | #!/usr/bin/env python3
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
#sys.path.insert(0, "/pylobnek/pylobnek")
sys.path.insert(0, "/pyutil/")
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.viewcode'
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyutil'
copyright = '2017, Lobnek Wealth Management'
author = 'Lobnek Wealth Management'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = '3.2.0'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyutil'
| mit | 3,677,162,227,022,036,000 | 25.212121 | 79 | 0.684971 | false |
NickRuiz/wikitrans-pootle | local_apps/wt_articles/utils.py | 1 | 3867 | from goopytrans import translate as gtranslate
from apyrtium import translate as atranslate
import nltk.data
from django.utils.safestring import SafeUnicode
from wt_languages.models import TARGET_LANGUAGE, SOURCE_LANGUAGE, BOTH
from wt_languages.models import LanguageCompetancy
from wt_articles.models import SourceArticle, SourceSentence, TranslatedArticle, TranslatedSentence
from wt_articles import GOOGLE,APERTIUM
from wt_articles import MECHANICAL_TURK,HUMAN,DEFAULT_TRANNY
class Translator:
"""
A container class for various translation methods
"""
def __init__(self, name, func):
self.name = name
self.translate = func
    def translate(self, text, source, target):
        # NB: __init__ rebinds `self.translate` to the wrapped function, so
        # instances never reach this method; it simply forwards and returns.
        return self.translate(text, source=source, target=target)
def google_translator():
return Translator(GOOGLE, gtranslate)
def apertium_translator():
return Translator(APERTIUM, atranslate)
def _group_sentences(sentences):
p_groups = []
prev_s = None
for s in sentences:
if prev_s == None or prev_s.end_of_paragraph:
cur_list = []
p_groups.append(cur_list)
cur_list.append(s)
prev_s = s
return p_groups
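# Hedged illustration (editor addition): with sentences a, b, c where only
# a.end_of_paragraph is True, _group_sentences([a, b, c]) -> [[a], [b, c]];
# a new group starts after each paragraph-ending sentence.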
def _format_sentences(sentences, fun):
sentence_groups = _group_sentences(sentences)
formatted = ''
for s_list in sentence_groups:
raw_text = [(s.text) for s in s_list]
formatted = formatted + fun(' '.join(raw_text))
formatted = SafeUnicode(formatted)
return formatted
def sentences_as_text(sentences):
format_p = lambda s: '%s\n\n' % (s)
text = _format_sentences(sentences, format_p)
return text
def sentences_as_html(sentences):
format_p = lambda s: '<p>%s</p>' % (s)
html = _format_sentences(sentences, format_p)
return html
def sentences_as_html_span(sentences):
format_span = lambda sid, text: u"<span id='ss_%d'>%s</span>" % (sid, text)
# span_sentences = [ format_span(s.segment_id, s.text) for s in sentences ]
for s in sentences:
s.text = format_span(s.segment_id, s.text)
html = sentences_as_html(sentences)
return html
def _all_articles(article_model):
articles = set(article_model.objects.order_by('title'))
return articles
def all_source_articles():
return _all_articles(SourceArticle)
def all_translated_articles():
return _all_articles(TranslatedArticle)
def all_articles():
source_articles = all_source_articles()
translated_articles = all_translated_articles()
return translated_articles.union(source_articles)
def _user_compatible_articles(user, article_model, language_direction):
profile = user.get_profile()
languages = set([lc.language for lc in
user.languagecompetancy_set.exclude(translation_options=language_direction)])
languages.add(profile.native_language)
languages.add(profile.display_language)
articles = set(article_model.objects.filter(language__in=languages))
return articles
def user_compatible_source_articles(user):
return _user_compatible_articles(user, SourceArticle, TARGET_LANGUAGE)
def user_compatible_target_articles(user):
return _user_compatible_articles(user, TranslatedArticle, SOURCE_LANGUAGE)
def user_compatible_articles(user):
source_articles = user_compatible_source_articles(user)
target_articles = user_compatible_target_articles(user)
articles = target_articles.union(source_articles)
return articles
def target_pairs_by_user(user, source):
target_languages = set([lc.language for lc in
user.languagecompetancy_set.exclude(translation_options=SOURCE_LANGUAGE)])
# Exclude identical source/target pairs
target_languages.discard(source)
st_pair_builder = lambda t: (t, '%s-%s' % (source, t))
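    # e.g. (editor illustration) source 'en', target 'es' -> ('es', 'en-es')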
pairs = map(st_pair_builder, target_languages)
return pairs | gpl-2.0 | -6,224,162,047,502,436,000 | 32.059829 | 102 | 0.698733 | false |
globocom/GloboNetworkAPI-client-python | networkapiclient/ApiVlan.py | 1 | 5003 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids
class ApiVlan(ApiGenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(ApiVlan, self).__init__(
networkapi_url,
user,
password,
user_ldap
)
def acl_remove_draft(self, id_vlan, type_acl):
"""
Remove Acl draft by type
:param id_vlan: Identity of Vlan
:param type_acl: Acl type v4 or v6
:return: None
:raise VlanDoesNotExistException: Vlan Does Not Exist.
:raise InvalidIdVlanException: Invalid id for Vlan.
:raise NetworkAPIException: Failed to access the data source.
"""
parameters = dict(id_vlan=id_vlan, type_acl=type_acl)
uri = 'api/vlan/acl/remove/draft/%(id_vlan)s/%(type_acl)s/' % parameters
return super(ApiVlan, self).get(uri)
def acl_save_draft(self, id_vlan, type_acl, content_draft):
"""
Save Acl draft by type
:param id_vlan: Identity of Vlan
:param type_acl: Acl type v4 or v6
:return: None
:raise VlanDoesNotExistException: Vlan Does Not Exist.
:raise InvalidIdVlanException: Invalid id for Vlan.
:raise NetworkAPIException: Failed to access the data source.
"""
parameters = dict(id_vlan=id_vlan, type_acl=type_acl)
data = dict(content_draft=content_draft)
uri = 'api/vlan/acl/save/draft/%(id_vlan)s/%(type_acl)s/' % parameters
return super(ApiVlan, self).post(uri, data=data)
def search(self, **kwargs):
"""
        Method to search vlans based on extended search.
        :param search: Dict containing QuerySets used to find vlans.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing vlans.
"""
return super(ApiVlan, self).get(self.prepare_url('api/v3/vlan/',
kwargs))
def get(self, ids, **kwargs):
"""
        Method to get vlans by their ids.
        :param ids: List containing identifiers of vlans.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing vlans.
"""
url = build_uri_with_ids('api/v3/vlan/%s/', ids)
return super(ApiVlan, self).get(self.prepare_url(url, kwargs))
def delete(self, ids):
"""
        Method to delete vlans by their ids.
        :param ids: Identifiers of vlans.
:return: None
"""
url = build_uri_with_ids('api/v3/vlan/%s/', ids)
return super(ApiVlan, self).delete(url)
def update(self, vlans):
"""
        Method to update vlans.
        :param vlans: List containing the vlans to be updated.
:return: None
"""
data = {'vlans': vlans}
vlans_ids = [str(vlan.get('id')) for vlan in vlans]
return super(ApiVlan, self).put('api/v3/vlan/%s/' %
';'.join(vlans_ids), data)
def create(self, vlans):
"""
        Method to create vlans.
        :param vlans: List containing the vlans to be created in the database.
:return: None
"""
data = {'vlans': vlans}
return super(ApiVlan, self).post('api/v3/vlan/', data)
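# Hedged usage sketch (editor addition): the host, credentials and search
# fields below are illustrative, not a documented payload.
def _example_vlan_search():
    client = ApiVlan('https://networkapi.example.com/', 'user', 'password')
    search = {'start_record': 0,
              'end_record': 25,
              'extends_search': [{'environment': 1}]}
    return client.search(search=search, fields=['id', 'num_vlan', 'name'])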
| apache-2.0 | 4,708,255,731,514,050,000 | 34.232394 | 88 | 0.620228 | false |
eek6/squeakspace | www/proxy/scripts/proxy/last_message_time.py | 1 | 1171 | import squeakspace.common.util as ut
import squeakspace.common.util_http as ht
import squeakspace.proxy.server.db_sqlite3 as db
import squeakspace.common.squeak_ex as ex
import config
def get_handler(environ):
query = ht.parse_get_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
public_key_hash = ht.get_required(query, 'public_key_hash')
passphrase = ht.get_optional(query, 'passphrase')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
resp = db.read_last_message_time(c, user_id, session_id, node_name, public_key_hash, passphrase)
raise ht.ok_json({'status' : 'ok', 'resp' : resp})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def main_handler(environ):
ht.dispatch_on_method(environ, {
'GET' : get_handler})
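# (editor note) only GET is mapped here; other verbs are left to
# ht.dispatch_on_method's own (library-defined) fallback behaviour.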
def application(environ, start_response):
return ht.respond_with_handler(environ, start_response, main_handler)
| gpl-3.0 | 1,875,072,872,462,335,700 | 29.815789 | 104 | 0.678907 | false |
ethanbao/artman | artman/tasks/format_tasks.py | 1 | 4309 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks related to format"""
import os
import subprocess
from artman.tasks import task_base
from artman.tasks.requirements import go_requirements
from artman.tasks.requirements import php_requirements
from artman.utils import task_utils
from artman.utils.logger import logger
# TODO: Store both intermediate and final output in all format tasks.
class JavaFormatTask(task_base.TaskBase):
def execute(self, gapic_code_dir, toolkit_path):
logger.info('Formatting files in %s.' %
os.path.abspath(gapic_code_dir))
# TODO(shinfan): Move gradle task into requirement
path = task_utils.get_gradle_task_output(
'showJavaFormatterPath', toolkit_path)
targetFiles = []
for root, dirs, files in os.walk(gapic_code_dir):
for filename in files:
if filename.endswith('.java'):
targetFile = os.path.abspath(os.path.join(root, filename))
targetFiles.append(targetFile)
self.exec_command(
['java', '-jar', path, '--replace'] + targetFiles)
def validate(self):
return []
class PythonFormatTask(task_base.TaskBase):
def execute(self, gapic_code_dir):
logger.info('Formatting files in %s.' %
os.path.abspath(gapic_code_dir))
targetFiles = []
for root, dirs, files in os.walk(gapic_code_dir):
for filename in files:
if filename.endswith('.py'):
targetFile = os.path.abspath(os.path.join(root, filename))
targetFiles.append(targetFile)
# yapf returns code 2 when it formats, so we can't use `check_call`.
exit_code = subprocess.call(['yapf', '-i'] + targetFiles)
if exit_code not in [0, 2]:
raise subprocess.CalledProcessError(exit_code, 'yapf')
# yapf is installed by tox for the entire pipeline project's virtualenv,
# so we shouldn't need a separate validation task.
def validate(self):
return []
class GoFormatTask(task_base.TaskBase):
def execute(self, gapic_code_dir):
logger.info('Formatting files in %s.' %
os.path.abspath(gapic_code_dir))
self.exec_command(['gofmt', '-w', gapic_code_dir])
def validate(self):
return [go_requirements.GoFormatRequirements]
class PhpFormatTask(task_base.TaskBase):
def execute(self, gapic_code_dir):
abs_code_dir = os.path.abspath(gapic_code_dir)
logger.info('Formatting file using php-cs-fixer in %s.' % abs_code_dir)
subprocess.call(['php-cs-fixer', 'fix',
'--rules=@Symfony,-phpdoc_annotation_without_dot',
gapic_code_dir])
# We require a second call to php-cs-fixer because instances of @type
# have been converted to @var. We cannot disable this conversion in
# the first call without affecting other aspects of the formatting.
subprocess.call(['php-cs-fixer',
'fix',
'--rules={"phpdoc_no_alias_tag" : {"replacements" : '
'{"var" : "type"}}}',
gapic_code_dir])
logger.info('Formatting file using phpcbf in %s.' % abs_code_dir)
subprocess.call(['phpcbf', '--standard=PSR2', '--no-patch',
gapic_code_dir])
def validate(self):
return [php_requirements.PhpFormatRequirements]
_FORMAT_TASK_DICT = {
'java': JavaFormatTask,
'python': PythonFormatTask,
'go': GoFormatTask,
'php': PhpFormatTask,
}
def get_format_task(language):
return _FORMAT_TASK_DICT.get(language, task_base.EmptyTask)
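# Hedged usage note (editor addition): unknown languages fall back to the
# no-op EmptyTask, so callers need no special-casing, e.g.
#     get_format_task('python')  # -> PythonFormatTask
#     get_format_task('ruby')    # -> task_base.EmptyTask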
| apache-2.0 | -1,544,038,701,614,378,200 | 37.473214 | 79 | 0.629148 | false |
bninja/rump | setup.py | 1 | 1748 | import re
import setuptools
import sys
install_requires = [
'netaddr >=0.7,<0.8',
'pilo >=0.5.2,<0.6',
'pyparsing >=2.0.1,<3.0',
'coid >=0.1,<0.2',
'ohmr >=0.1,<0.2',
'wsgim-rip >=0.1,<0.2',
]
if sys.version_info[0:2] < (2, 7):
install_requires.append('ordereddict')
extras_require = {
'kazoo': ['kazoo >=1.3.1,<2.0'],
'redis': ['redis >=2.10,<3'],
'etcd': ['python-etcd >=0.3,<0.4'],
'gunicorn': [
'gevent ==1.0',
'gunicorn',
'setproctitle >=1.1.8,<2.0',
],
}
extras_require['tests'] = [
'mock >=1,<2',
'pytest >=2.5.2,<3',
'pytest-cov >=1.7,<2',
'requests >=2.0,<3',
] + (
extras_require['kazoo'] +
extras_require['redis'] +
extras_require['gunicorn']
)
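# (editor note) the optional feature sets above install as extras, e.g.
# `pip install rump[redis]` or, for development, `pip install -e .[tests]`.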
setuptools.setup(
name='rump',
version=(
re
.compile(r".*__version__ = '(.*?)'", re.S)
.match(open('rump/__init__.py').read())
.group(1)
),
url='https://github.com/bninja/rump/',
author='Rump Us',
author_email='[email protected]',
license='MIT',
description='Upstream selection.',
long_description=open('README.rst').read(),
platforms='any',
install_requires=install_requires,
extras_require=extras_require,
tests_require=extras_require['tests'],
packages=setuptools.find_packages('.', exclude=('test',)),
scripts=['bin/rump', 'bin/rumpd'],
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: ISC License (ISCL)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
test_suite='nose.collector',
)
| isc | 5,402,368,327,795,054,000 | 24.333333 | 62 | 0.544622 | false |
ladybug-tools/honeybee | honeybee_plus/radiance/factory.py | 1 | 4428 | """Material utility."""
import honeybee_plus.radiance.material.bsdf
import honeybee_plus.radiance.material.glass
import honeybee_plus.radiance.material.glow
import honeybee_plus.radiance.material.light
import honeybee_plus.radiance.material.metal
import honeybee_plus.radiance.material.mirror
import honeybee_plus.radiance.material.plastic
import honeybee_plus.radiance.material.spotlight
import honeybee_plus.radiance.primitive as primitive
import honeybee_plus.radiance.radparser as radparser
material_mapper = {
'BSDF': honeybee_plus.radiance.material.bsdf,
'glass': honeybee_plus.radiance.material.glass,
'glow': honeybee_plus.radiance.material.glow,
'light': honeybee_plus.radiance.material.light,
'metal': honeybee_plus.radiance.material.metal,
'mirror': honeybee_plus.radiance.material.mirror,
'plastic': honeybee_plus.radiance.material.plastic,
'spotlight': honeybee_plus.radiance.material.spotlight
}
def primitive_from_json(prm_json):
"""
Args:
prm_json: A radiance modifier as a dictionary.
Returns:
        A Honeybee Radiance primitive.
"""
# parse input json
if not prm_json or prm_json == 'void':
return primitive.Void()
type = prm_json['type']
if type in primitive.Primitive.MATERIALTYPES:
return material_from_json(prm_json)
else:
raise NotImplementedError(
            'Parsing for {} primitives is not implemented!'.format(type)
)
def material_from_json(mat_json):
"""Create Honeybee Radiance material from string.
Args:
        mat_json: A radiance modifier as a dictionary.
Returns:
        A Honeybee Radiance material.
"""
# parse input json
if not mat_json or mat_json == 'void':
return primitive.Void()
type = mat_json['type']
assert type in primitive.Primitive.MATERIALTYPES, \
'{} is not a Radiance material:\n{}'.format(
type, '\n'.join(primitive.Primitive.MATERIALTYPES)
)
# create a Radiance material based on the input
try:
matcls = getattr(material_mapper[type], type.capitalize())
return matcls.from_json(mat_json)
except AttributeError:
# BSDF
matcls = getattr(material_mapper[type], type)
return matcls.from_json(mat_json)
def primitive_from_string(prm_string):
"""Create Honeybee Radiance primitives from string.
Args:
        prm_string: A radiance modifier string. The input can be a multi-line string.
Returns:
        A Honeybee Radiance primitive.
"""
# parse input json
if not prm_string or prm_string == 'void':
return primitive.Void()
# run the initial parsing
materials = radparser.parse_from_string(prm_string)
type = materials[-1].split()[1]
if type in primitive.Primitive.MATERIALTYPES:
return material_from_string(prm_string)
else:
raise NotImplementedError(
            'Parsing for {} primitives is not implemented!'.format(type)
)
def material_from_string(mat_string):
"""Create Honeybee Radiance material from string.
Args:
mat_string: A radiance modifier string. The input can be a multi-line string.
Returns:
        A Honeybee Radiance material.
"""
# parse input json
if not mat_string or mat_string == 'void':
return primitive.Void()
# run the initial parsing
materials = radparser.parse_from_string(mat_string)
type = materials[-1].split()[1]
assert type in primitive.Primitive.MATERIALTYPES, \
'{} is not a Radiance material:\n{}'.format(
type, '\n'.join(primitive.Primitive.MATERIALTYPES)
)
# create a Radiance material based on the input
try:
matcls = getattr(material_mapper[type], type.capitalize())
return matcls.from_string(mat_string)
except AttributeError:
# BSDF
matcls = getattr(material_mapper[type], type)
return matcls.from_string(mat_string)
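# Hedged usage sketch (editor addition): parse a minimal plastic modifier
# written in standard Radiance syntax.
def _example_material_from_string():
    mat_str = ('void plastic red_paint\n'
               '0\n'
               '0\n'
               '5 0.5 0.1 0.1 0.0 0.0')
    return material_from_string(mat_str)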
| gpl-3.0 | -8,126,225,587,974,622,000 | 32.293233 | 86 | 0.681798 | false |
greenpau/PyEwsClient | pyewsclient/ews_helper.py | 1 | 2656 | # PyEwsClient - Microsoft Office 365 EWS (Exchange Web Services) Client Library
# Copyright (C) 2013 Paul Greenberg <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os;
import sys;
import requests;
import datetime;
import traceback;
from requests.auth import HTTPBasicAuth;
from lxml import etree;
import pprint;
import re;
class EWSXmlSchemaValidator:
    '''Represents the Microsoft Office 365 EWS XML Schema Validation function.'''
def __init__(self, xmlreq, xmlsch=None):
''' XML Schema Validation '''
self.valid = False;
self.logs = [];
if xmlsch is None:
xmlsch = 'xml/messages.xsd';
else:
xmlsch = 'xml/' + xmlsch;
if not isinstance(xmlreq, bytes):
xmlreq = bytes(xmlreq, 'utf-8');
try:
msg_schema_xsd = os.path.join('/'.join(os.path.abspath(__file__).split('/')[:-1]), xmlsch);
msg_schema = etree.XMLSchema(file=msg_schema_xsd);
except Exception as err:
self.logs.append((str(err), 'ERROR'));
self.logs.append((str(traceback.format_exc()), 'ERROR'));
return;
try:
            xmlreq_valid = msg_schema.validate(etree.fromstring(xmlreq));
            # validate() returns a bool rather than raising, so record it here;
            # the assertValid() block below refines self.valid with details.
            self.valid = bool(xmlreq_valid);
except Exception as err:
self.logs.append((str(err), 'ERROR'));
self.logs.append((str(traceback.format_exc()), 'ERROR'));
self.valid = False;
try:
msg_schema.assertValid(etree.fromstring(xmlreq));
self.valid = True;
except Exception as err:
self.logs.append((str(err), 'ERROR'));
self.logs.append((str(traceback.format_exc()), 'ERROR'));
self.valid = False;
if self.valid is not True:
self.logs.append(('XML document failed XML schema validation', 'ERROR'));
return;
self.logs.append(('XML document passed XML schema validation', 'INFO'));
self.valid = True;
return;
| gpl-3.0 | -2,420,592,681,874,634,000 | 33.947368 | 103 | 0.623117 | false |
novoid/Memacs | memacs/tests/kodi_test.py | 1 | 5212 | # -*- coding: utf-8 -*-
import os
import unittest
from memacs.kodi import Kodi
class TestKodi(unittest.TestCase):
def setUp(self):
log_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data',
'kodi_audio.log')
argv = []
argv.append("-f")
argv.append(log_file)
argv.append("--fieldnames")
argv.append(
'timestamp,action,position,length,path,album,artist,title,')
argv.append("--timestamp-field")
argv.append("timestamp")
argv.append("--action-field")
argv.append("action")
argv.append("--identification-fields")
argv.append("artist,title")
argv.append("--output-format")
argv.append("{artist} - {title}")
argv.append("--properties")
argv.append("album,artist,title")
self.argv = argv
def test_audio_log(self):
memacs = Kodi(argv=self.argv)
data = memacs.test_get_entries()
# Test Simple Play and Paused
self.assertEqual(
data[0],
"** <2018-10-01 Mon 21:58>--<2018-10-01 Mon 21:59> Clueso - So sehr dabei"
)
self.assertEqual(data[1], " :PROPERTIES:")
self.assertEqual(data[2], " :ALBUM: Barfuss")
self.assertEqual(data[3], " :ARTIST: Clueso")
self.assertEqual(data[4], " :TITLE: So sehr dabei")
self.assertEqual(
data[5],
" :ID: 332b5cd71e335d2cf55f681a3a1fc26161465069")
self.assertEqual(data[6], " :END:")
#Test started one track and switched to another
self.assertEqual(
data[7],
"** <2018-10-01 Mon 22:03>--<2018-10-01 Mon 22:08> Clueso - Chicago"
)
self.assertEqual(data[8], " :PROPERTIES:")
self.assertEqual(data[9], " :ALBUM: Barfuss")
self.assertEqual(data[10], " :ARTIST: Clueso")
self.assertEqual(data[11], " :TITLE: Chicago")
self.assertEqual(
data[12],
" :ID: 13b38e428bb4d8c9e55183877096c921bee871e5")
self.assertEqual(data[13], " :END:")
self.assertEqual(
data[14],
"** <2018-10-01 Mon 22:08>--<2018-10-01 Mon 22:15> Clueso - So sehr dabei"
)
self.assertEqual(data[15], " :PROPERTIES:")
self.assertEqual(data[16], " :ALBUM: Barfuss")
self.assertEqual(data[17], " :ARTIST: Clueso")
self.assertEqual(data[18], " :TITLE: So sehr dabei")
self.assertEqual(
data[19],
" :ID: 4ed907d4337faaca7b2fd059072fc5046e80dc11")
self.assertEqual(data[20], " :END:")
# Pause is logged
self.assertEqual(
data[21],
"** <2018-10-01 Mon 22:16>--<2018-10-01 Mon 22:26> Clueso - So sehr dabei"
)
self.assertEqual(data[22], " :PROPERTIES:")
self.assertEqual(data[23], " :ALBUM: Barfuss")
self.assertEqual(data[24], " :ARTIST: Clueso")
self.assertEqual(data[25], " :TITLE: So sehr dabei")
self.assertEqual(
data[26],
" :ID: 9e504573886f483fa8f84fb5a8bc5d9e05be7bab")
self.assertEqual(data[27], " :END:")
def test_audio_log_with_minimal_duration(self):
self.argv.append('--minimal-pause-duration')
self.argv.append('120')
memacs = Kodi(argv=self.argv)
data = memacs.test_get_entries()
# pause is ignored
self.assertEqual(
data[0],
"** <2018-10-01 Mon 21:58>--<2018-10-01 Mon 21:59> Clueso - So sehr dabei"
)
self.assertEqual(data[1], " :PROPERTIES:")
self.assertEqual(data[2], " :ALBUM: Barfuss")
self.assertEqual(data[3], " :ARTIST: Clueso")
self.assertEqual(data[4], " :TITLE: So sehr dabei")
self.assertEqual(
data[5],
" :ID: 332b5cd71e335d2cf55f681a3a1fc26161465069")
self.assertEqual(data[6], " :END:")
self.assertEqual(
data[7],
"** <2018-10-01 Mon 22:03>--<2018-10-01 Mon 22:08> Clueso - Chicago"
)
self.assertEqual(data[8], " :PROPERTIES:")
self.assertEqual(data[9], " :ALBUM: Barfuss")
self.assertEqual(data[10], " :ARTIST: Clueso")
self.assertEqual(data[11], " :TITLE: Chicago")
self.assertEqual(
data[12],
" :ID: 13b38e428bb4d8c9e55183877096c921bee871e5")
self.assertEqual(data[13], " :END:")
self.assertEqual(
data[14],
"** <2018-10-01 Mon 22:08>--<2018-10-01 Mon 22:26> Clueso - So sehr dabei"
)
self.assertEqual(data[15], " :PROPERTIES:")
self.assertEqual(data[16], " :ALBUM: Barfuss")
self.assertEqual(data[17], " :ARTIST: Clueso")
self.assertEqual(data[18], " :TITLE: So sehr dabei")
self.assertEqual(
data[19],
" :ID: 9e504573886f483fa8f84fb5a8bc5d9e05be7bab")
self.assertEqual(data[20], " :END:")
| gpl-3.0 | -3,444,619,362,779,928,600 | 38.484848 | 86 | 0.533193 | false |
pcmagic/stokes_flow | codeStore/support_fun_table.py | 1 | 139054 | # coding: utf-8
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 11:05:23 2017
@author: zhangji
"""
import matplotlib
import subprocess
import os
devnull = open(os.devnull, 'w')
latex_installed = not subprocess.call(['which', 'latex'], stdout=devnull, stderr=devnull)
matplotlib.use('agg')
font = {'size': 20,
'family': 'sans-serif'}
# matplotlib.rc('font', **font)
if latex_installed:
matplotlib.rc('text', usetex=True)
# matplotlib.rc('text', usetex=True)
import numpy as np
import pandas as pd
from scipy.io import loadmat
from scipy import interpolate, integrate, spatial, signal
from scipy.optimize import leastsq, curve_fit
from src import jeffery_model as jm
from src.objComposite import *
from src.support_class import *
from matplotlib import animation
from matplotlib import pyplot as plt
# from mpl_toolkits.axes_grid1 import colorbar
from matplotlib import colorbar
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes
import matplotlib.ticker as mtick
from matplotlib import colors as mcolors
import importlib
import inspect
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
import glob
import natsort
from time import time
import pickle
import re
from codeStore import support_fun as spf
import shutil
import multiprocessing
import warnings
markerstyle_list = ['^', 'v', 'o', 's', 'p', 'd', 'H',
'1', '2', '3', '4', '8', 'P', '*',
'h', '+', 'x', 'X', 'D', '|', '_', ]
PWD = os.getcwd()
if latex_installed:
params = {'text.latex.preamble': [r'\usepackage{bm}', r'\usepackage{amsmath}']}
plt.rcParams.update(params)
# params = {'text.latex.preamble': [r'\usepackage{bm}', r'\usepackage{amsmath}']}
# plt.rcParams.update(params)
def read_data_lookup_table(psi_dir, tcenter):
ecoli_U_list = []
ecoli_norm_list = []
ecoli_center_list = []
ecoli_nodes_list = []
ecoli_u_list = []
ecoli_f_list = []
ecoli_lateral_norm_list = []
norm_phi_list = []
norm_psi_list = []
norm_theta_list = []
planeShearRate = None
file_handle = os.path.basename(psi_dir)
mat_names = natsort.natsorted(glob.glob('%s/%s_*.mat' % (psi_dir, file_handle)))
for mati in mat_names:
mat_contents = loadmat(mati)
ecoli_U = mat_contents['ecoli_U'].flatten()
ecoli_norm = mat_contents['ecoli_norm'].flatten()
ecoli_center = mat_contents['ecoli_center'].flatten()
ecoli_nodes = mat_contents['ecoli_nodes']
ecoli_u = mat_contents['ecoli_u']
ecoli_f = mat_contents['ecoli_f']
planeShearRate = mat_contents['planeShearRate'].flatten()
norm_phi = mat_contents['norm_phi'].flatten()
norm_psi = mat_contents['norm_psi'].flatten()
norm_theta = mat_contents['norm_theta'].flatten()
ecoli_U_list.append(ecoli_U)
ecoli_norm_list.append(ecoli_norm)
ecoli_center_list.append(ecoli_center)
norm_phi_list.append(norm_phi)
norm_psi_list.append(norm_psi)
norm_theta_list.append(norm_theta)
r0 = ecoli_nodes[-1] - ecoli_center
n0 = np.dot(r0, ecoli_norm) * ecoli_norm / np.dot(ecoli_norm, ecoli_norm)
t0 = r0 - n0
ecoli_lateral_norm_list.append(t0 / np.linalg.norm(t0))
ecoli_U = np.vstack(ecoli_U_list)
ecoli_norm = np.vstack(ecoli_norm_list)
ecoli_center = np.vstack(ecoli_center_list)
ecoli_lateral_norm = np.vstack(ecoli_lateral_norm_list)
norm_phi = np.hstack(norm_phi_list)
norm_psi = np.hstack(norm_psi_list)
norm_theta = np.hstack(norm_theta_list)
norm_tpp = np.vstack((norm_theta, norm_phi, norm_psi)).T
    # velocity u000(t) of the material point that initially sits at the origin (0, 0, 0)
n_u000 = -np.linalg.norm(ecoli_center[0] - tcenter) * ecoli_norm
ecoli_u000 = ecoli_U[:, :3] + np.cross(ecoli_U[:, 3:], n_u000)
    # trajectory center000(t) of the material point that initially sits at the origin (0, 0, 0)
ecoli_center000 = ecoli_center + n_u000
using_U = ecoli_U
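    # (editor note) split the angular velocity into its component along the
    # swimming direction (omega_norm, the spin about the axis) and the
    # remainder (omega_tang): omega_norm = (omega . P) P / (P . P).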
omega_norm = np.array(
[np.dot(t1, t2) * t2 / np.dot(t2, t2) for t1, t2 in zip(using_U[:, 3:], ecoli_norm)])
omega_tang = using_U[:, 3:] - omega_norm
return ecoli_U, ecoli_norm, ecoli_center, ecoli_lateral_norm, norm_tpp, \
ecoli_u000, ecoli_center000, omega_norm, omega_tang, planeShearRate, file_handle
def get_ecoli_table(tnorm, lateral_norm, tcenter, max_iter, eval_dt=0.001, update_order=1,
planeShearRate=np.array((1, 0, 0))):
ellipse_kwargs = {'name': 'ecoli_torque',
'center': tcenter,
'norm': tnorm / np.linalg.norm(tnorm),
'lateral_norm': lateral_norm / np.linalg.norm(lateral_norm),
'speed': 0,
'lbd': np.nan,
'omega_tail': 193.66659814,
'table_name': 'planeShearRatex_1d', }
fileHandle = 'ShearTableProblem'
ellipse_obj = jm.TableEcoli(**ellipse_kwargs)
ellipse_obj.set_update_para(fix_x=False, fix_y=False, fix_z=False, update_order=update_order)
problem = jm.ShearTableProblem(name=fileHandle, planeShearRate=planeShearRate)
problem.add_obj(ellipse_obj)
t0 = time()
for idx in range(1, max_iter + 1):
problem.update_location(eval_dt, print_handle='%d / %d' % (idx, max_iter))
t1 = time()
Table_X = np.vstack(ellipse_obj.center_hist)
Table_U = np.vstack(ellipse_obj.U_hist)
Table_P = np.vstack(ellipse_obj.norm_hist)
Table_t = np.arange(max_iter) * eval_dt + eval_dt
Table_theta, Table_phi, Table_psi = ellipse_obj.theta_phi_psi
t1U = np.array([np.dot(t1, t2) for t1, t2 in zip(Table_U[:, :3], Table_P)]).reshape((-1, 1))
t1W = np.array([np.dot(t1, t2) for t1, t2 in zip(Table_U[:, 3:], Table_P)]).reshape((-1, 1))
# Table_U_horizon = np.hstack((Table_P * t1U, Table_P * t1W))
# Table_U_vertical = Table_U - Table_U_horizon
omega = Table_U[:, 3:]
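    # (editor note) with P = (sin(t)cos(p), sin(t)sin(p), cos(t)) and
    # dP/dt = omega x P, the z-component yields d(theta)/dt = -dPz/sin(theta),
    # and the x, y components combine to give
    # d(phi)/dt = (dPy*cos(phi) - dPx*sin(phi))/sin(theta).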
dP = np.vstack([np.cross(t1, t2) for t1, t2 in zip(omega, Table_P)])
Table_dtheta = -dP[:, 2] / np.sin(np.abs(Table_theta))
Table_dphi = (dP[:, 1] * np.cos(Table_phi) - dP[:, 0] * np.sin(Table_phi)) / np.sin(Table_theta)
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
# print('%s: run %d loops using %f' % (fileHandle, max_iter, (t1 - t0)))
return Table_t, Table_theta, Table_phi, Table_psi, Table_eta, Table_dtheta, Table_dphi, \
Table_X, Table_U, Table_P
def _do_calculate_prepare_v1(norm):
importlib.reload(jm)
norm = norm / np.linalg.norm(norm)
planeShearRate = np.array((1, 0, 0))
tcenter = np.zeros(3)
# print('dbg do_calculate_prepare')
tlateral_norm = np.array((np.pi, np.e, np.euler_gamma))
# tlateral_norm = np.random.sample(3)
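    # (editor note) Gram-Schmidt step: strip the component of the trial
    # vector along `norm`, leaving a unit lateral direction orthogonal to P0.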
tlateral_norm = tlateral_norm / np.linalg.norm(tlateral_norm)
tlateral_norm = tlateral_norm - norm * np.dot(norm, tlateral_norm)
tlateral_norm = tlateral_norm / np.linalg.norm(tlateral_norm)
P0 = norm / np.linalg.norm(norm)
P20 = tlateral_norm / np.linalg.norm(tlateral_norm)
fileHandle = 'ShearTableProblem'
problem = jm.ShearTableProblem(name=fileHandle, planeShearRate=planeShearRate)
return P0, P20, tcenter, problem
def _do_calculate_prepare_v2(norm):
importlib.reload(jm)
t_theta = np.arccos(norm[2] / np.linalg.norm(norm))
t_phi = np.arctan2(norm[1], norm[0])
tfct = 2 if t_phi < 0 else 0
t_phi = t_phi + tfct * np.pi # (-pi,pi) -> (0, 2pi)
rotM = Rloc2glb(t_theta, t_phi, 0)
P0 = rotM[:, 2]
P20 = rotM[:, 1]
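    # (editor note) Rloc2glb(theta, phi, 0) rotates the body frame into the
    # lab frame; its third column is the unit director P0 and its second
    # column supplies an orthogonal lateral direction P20 directly.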
planeShearRate = np.array((1, 0, 0))
tcenter = np.zeros(3)
fileHandle = 'ShearTableProblem'
problem = jm.ShearTableProblem(name=fileHandle, planeShearRate=planeShearRate)
return P0, P20, tcenter, problem
def do_calculate_prepare(norm):
return _do_calculate_prepare_v2(norm)
def do_calculate(problem, obj, ini_t, max_t, update_fun, rtol, atol, eval_dt, save_every, tqdm_fun):
obj.set_update_para(fix_x=False, fix_y=False, fix_z=False, update_fun=update_fun,
rtol=rtol, atol=atol, save_every=save_every, tqdm_fun=tqdm_fun)
problem.add_obj(obj)
Table_t, Table_dt, Table_X, Table_P, Table_P2 = \
obj.update_self(t0=ini_t, t1=max_t, eval_dt=eval_dt)
Table_theta, Table_phi, Table_psi = obj.theta_phi_psi
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta
def do_ellipse_kwargs(tcenter, P0, P20, ini_psi, table_name):
ellipse_kwargs = {'name': 'ellipse',
'center': tcenter,
'norm': P0,
'lateral_norm': P20,
'speed': 0,
'lbd': np.nan,
'ini_psi': ini_psi,
'omega_tail': 0,
'table_name': table_name, }
return ellipse_kwargs
def do_ecoli_kwargs(tcenter, P0, P20, ini_psi, omega_tail, table_name,
flow_strength=0, name='ecoli_torque'):
ecoli_kwargs = {'name': name,
'center': tcenter,
'norm': P0,
'lateral_norm': P20,
'speed': 0,
'lbd': np.nan,
'ini_psi': ini_psi,
'omega_tail': omega_tail,
'flow_strength': flow_strength,
'table_name': table_name, }
return ecoli_kwargs
def do_ecoli_passive_kwargs(tcenter, P0, P20, ini_psi, table_name):
ecoli_passive_kwargs = {'name': 'ecoli_passive',
'center': tcenter,
'norm': P0,
'lateral_norm': P20,
'speed': 0,
'lbd': np.nan,
'ini_psi': ini_psi,
'omega_tail': 0,
'table_name': table_name, }
return ecoli_passive_kwargs
def do_helix_kwargs(tcenter, P0, P20, ini_psi, table_name):
helix_kwargs = {'name': 'helix',
'center': tcenter,
'norm': P0,
'lateral_norm': P20,
'speed': 0,
'lbd': np.nan,
'ini_psi': ini_psi,
'omega_tail': 0,
'table_name': table_name, }
return helix_kwargs
def do_calculate_helix_Petsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='hlxB01_tau1a', tqdm_fun=tqdm_notebook,
omega_tail=0):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail NOT 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
helix_kwargs = do_helix_kwargs(tcenter, P0, P20, ini_psi, table_name=table_name)
helix_obj = jm.TablePetsc4nEcoli(**helix_kwargs)
return do_calculate(problem, helix_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def do_calculate_helix_AvrPetsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='hlxB01_tau1a_avr',
tqdm_fun=tqdm_notebook, omega_tail=0):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail NOT 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
helix_kwargs = do_helix_kwargs(tcenter, P0, P20, ini_psi, table_name=table_name)
helix_obj = jm.TableAvrPetsc4nEcoli(**helix_kwargs)
return do_calculate(problem, helix_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def do_calculate_ellipse_Petsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='ellipse_alpha3', tqdm_fun=tqdm_notebook,
omega_tail=0):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail NOT 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ellipse_kwargs = do_ellipse_kwargs(tcenter, P0, P20, ini_psi, table_name=table_name)
ellipse_obj = jm.TablePetsc4nEcoli(**ellipse_kwargs)
return do_calculate(problem, ellipse_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
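def _example_ellipse_run():
    """Hedged usage sketch (editor addition): one short passive-ellipse
    integration; assumes the 'ellipse_alpha3' lookup table is available."""
    norm = np.array((1.0, 1.0, 1.0))  # normalised inside the call chain
    return do_calculate_ellipse_Petsc4n(norm, ini_psi=0, max_t=1,
                                        table_name='ellipse_alpha3',
                                        tqdm_fun=tqdm)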
def do_calculate_ellipse_AvrPetsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='ellipse_alpha3_avr',
tqdm_fun=tqdm_notebook,
omega_tail=0):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail NOT 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ellipse_kwargs = do_ellipse_kwargs(tcenter, P0, P20, ini_psi, table_name=table_name)
ellipse_obj = jm.TableAvrPetsc4nEcoli(**ellipse_kwargs)
return do_calculate(problem, ellipse_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def do_calculate_ecoli_Petsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='planeShearRatex_1d',
tqdm_fun=tqdm_notebook,
omega_tail=193.66659814):
# fun_name = inspect.stack()[0][3]
# err_msg = '%s: omega_tail IS 0 (now omega_tail=%f)' % (fun_name, omega_tail)
# assert not np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_kwargs = do_ecoli_kwargs(tcenter, P0, P20, ini_psi, omega_tail, table_name)
ecoli_obj = jm.TablePetsc4nEcoli(**ecoli_kwargs)
return do_calculate(problem, ecoli_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def do_calculate_ecoli_Petsc4nPsi(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='planeShearRatex_1d',
tqdm_fun=tqdm_notebook,
omega_tail=193.66659814):
# fun_name = inspect.stack()[0][3]
# err_msg = '%s: omega_tail IS 0 (now omega_tail=%f)' % (fun_name, omega_tail)
# assert not np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_kwargs = do_ecoli_kwargs(tcenter, P0, P20, ini_psi, omega_tail, table_name)
ecoli_obj = jm.TablePetsc4nPsiEcoli(**ecoli_kwargs)
obj = ecoli_obj
obj.set_update_para(fix_x=False, fix_y=False, fix_z=False, update_fun=update_fun,
rtol=rtol, atol=atol, save_every=save_every, tqdm_fun=tqdm_fun)
problem.add_obj(obj)
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_psi = \
obj.update_self(t0=ini_t, t1=max_t, eval_dt=eval_dt)
Table_theta, Table_phi, Table_psib = obj.theta_phi_psi
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
# return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
# Table_theta, Table_phi, Table_psib, Table_eta, Table_psi
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta,
def do_ShearFlowPetsc4nPsiObj(norm, ini_psi, max_t, table_name, update_fun='3bs',
rtol=1e-6, atol=1e-9, eval_dt=0.001, ini_t=0, save_every=1,
tqdm_fun=tqdm_notebook, omega_tail=0, flow_strength=0,
return_psi_body=False):
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_kwargs = do_ecoli_kwargs(tcenter, P0, P20, ini_psi, omega_tail, table_name,
flow_strength=flow_strength, name='ShearFlowPetsc4nPsi')
obj = jm.ShearFlowPetsc4nPsiObj(**ecoli_kwargs)
obj.set_update_para(fix_x=False, fix_y=False, fix_z=False, update_fun=update_fun,
rtol=rtol, atol=atol, save_every=save_every, tqdm_fun=tqdm_fun)
problem.add_obj(obj)
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_psi = \
obj.update_self(t0=ini_t, t1=max_t, eval_dt=eval_dt)
Table_theta, Table_phi, Table_psib = obj.theta_phi_psi
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
if return_psi_body:
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta, Table_psib,
else:
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta,
def do_ShearFlowPetsc4nPsiObj_dbg(norm, ini_psi, max_t, table_name, update_fun='3bs',
rtol=1e-6, atol=1e-9, eval_dt=0.001, ini_t=0, save_every=1,
tqdm_fun=tqdm_notebook, omega_tail=0, flow_strength=0,
return_psi_body=False):
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_kwargs = do_ecoli_kwargs(tcenter, P0, P20, ini_psi, omega_tail, table_name,
flow_strength=flow_strength, name='ShearFlowPetsc4nPsi')
obj = jm.ShearFlowPetsc4nPsiObj_dbg(**ecoli_kwargs)
obj.set_update_para(fix_x=False, fix_y=False, fix_z=False, update_fun=update_fun,
rtol=rtol, atol=atol, save_every=save_every, tqdm_fun=tqdm_fun)
problem.add_obj(obj)
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_psi = \
obj.update_self(t0=ini_t, t1=max_t, eval_dt=eval_dt)
Table_theta, Table_phi, Table_psib = obj.theta_phi_psi
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
if return_psi_body:
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta, Table_psib,
else:
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta,
def do_calculate_ecoli_AvrPetsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0,
save_every=1, table_name='planeShearRatex_1d_avr',
tqdm_fun=tqdm_notebook,
omega_tail=193.66659814):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail IS 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert not np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_kwargs = do_ecoli_kwargs(tcenter, P0, P20, ini_psi, omega_tail, table_name)
ecoli_obj = jm.TableAvrPetsc4nEcoli(**ecoli_kwargs)
return do_calculate(problem, ecoli_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def do_calculate_ecoli_passive_Petsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6, atol=1e-9,
eval_dt=0.001, ini_t=0, save_every=1,
table_name='planeShearRatex_1d_passive',
tqdm_fun=tqdm_notebook,
omega_tail=0):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail NOT 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_passive_kwargs = do_ecoli_passive_kwargs(tcenter, P0, P20, ini_psi, table_name)
ecoli_passive_obj = jm.TablePetsc4nEcoli(**ecoli_passive_kwargs)
return do_calculate(problem, ecoli_passive_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def do_calculate_ecoli_passive_AvrPetsc4n(norm, ini_psi, max_t, update_fun='3bs', rtol=1e-6,
atol=1e-9, eval_dt=0.001, ini_t=0,
save_every=1, table_name='planeShearRatex_1d_passive_avr',
tqdm_fun=tqdm_notebook, omega_tail=0):
fun_name = inspect.stack()[0][3]
err_msg = '%s: omega_tail NOT 0 (now omega_tail=%f)' % (fun_name, omega_tail)
assert np.isclose(omega_tail, 0), err_msg
P0, P20, tcenter, problem = do_calculate_prepare(norm)
ecoli_passive_kwargs = do_ecoli_passive_kwargs(tcenter, P0, P20, ini_psi, table_name)
ecoli_passive_obj = jm.TableAvrPetsc4nEcoli(**ecoli_passive_kwargs)
return do_calculate(problem, ecoli_passive_obj, ini_t, max_t, update_fun, rtol, atol, eval_dt,
save_every, tqdm_fun)
def core_show_table_theta_phi_list(theta_phi_list, job_dir, Table_t_range=(-np.inf, np.inf),
figsize=np.array((20, 20)), dpi=100, fast_mode=0):
cmap_list = ['Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']
def _get_ax():
# fig = plt.figure(figsize=figsize, dpi=dpi)
# fig.tight_layout(rect=[0, 0, 1, 0.8])
# ax0 = fig.add_subplot(111)
# ax0.set_xlim(-np.pi * 1.1, np.pi * 1.1)
# ax0.set_ylim(-np.pi * 1.1, np.pi * 1.1)
# ax0.axis('off')
# ax0.set_aspect('equal')
# fig.tight_layout(rect=[0, 0, 1, 0.8])
# ax1 = fig.add_axes(ax0.get_position(), projection='polar')
# ax1.patch.set_alpha(0)
# plt.sca(ax1)
# ax1.set_ylim(0, np.pi)
# ax1.xaxis.set_ticklabels(['$\dfrac{%d}{8}2\pi$' % i0 for i0 in np.arange(8)])
# ax1.yaxis.set_ticklabels([])
fig, ax1 = plt.subplots(1, 1, figsize=np.ones(2) * np.min(figsize), dpi=dpi,
subplot_kw=dict(polar=True))
plt.sca(ax1)
ax1.set_ylim(0, np.pi)
# ax1.xaxis.set_ticklabels(['$\dfrac{%d}{8}2\pi$' % i0 for i0 in np.arange(8)])
ax1.yaxis.set_ticklabels([])
return fig, ax1
if fast_mode:
fig, ax1 = _get_ax()
fig2, ax2 = _get_ax()
fig3, ax3 = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
# fig3.patch.set_facecolor('white')
for theta, phi in theta_phi_list:
# print(theta, phi)
tpick, _ = load_table_date_pickle(job_dir, theta, phi)
Table_t = tpick['Table_t']
# Table_dt = tpick['Table_dt']
# Table_X = tpick['Table_X']
# Table_P = tpick['Table_P']
# Table_P2 = tpick['Table_P2']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
Table_psi = tpick['Table_psi']
# Table_eta = tpick['Table_eta']
idx = np.logical_and(Table_t >= Table_t_range[0], Table_t <= Table_t_range[1])
if not np.any(idx):
continue
ax1.plot(Table_phi[idx], Table_theta[idx], '.', markersize=0.1)
ax1.scatter(Table_phi[idx][0], Table_theta[idx][0], c='k', marker='*')
ax2.plot(Table_psi[idx], Table_theta[idx], '.', markersize=0.1)
ax2.scatter(Table_psi[idx][0], Table_theta[idx][0], c='k', marker='*')
# tidx = Table_phi > 1.5 * np.pi
tidx = Table_phi > 15 * np.pi
t1 = Table_phi.copy()
t1[tidx] = Table_phi[tidx] - 2 * np.pi
ax3.plot(t1[idx] / np.pi, Table_psi[idx] / np.pi, '.', markersize=0.1)
ax3.scatter(t1[idx][0] / np.pi, Table_psi[idx][0] / np.pi, c='k', marker='*')
fig.suptitle('$\\theta - \\phi$')
fig2.suptitle('$\\theta - \\psi$')
ax3.set_xlabel('$\\phi / \\pi$')
ax3.set_ylabel('$\\psi / \\pi$')
fig.tight_layout(rect=[0, 0, 1, 0.95])
fig2.tight_layout(rect=[0, 0, 1, 0.95])
fig3.tight_layout()
else:
fig, ax1 = _get_ax()
for (theta, phi), cmap in zip(theta_phi_list, cmap_list):
tpick, _ = load_table_date_pickle(job_dir, theta, phi)
Table_t = tpick['Table_t']
# Table_dt = tpick['Table_dt']
# Table_X = tpick['Table_X']
# Table_P = tpick['Table_P']
# Table_P2 = tpick['Table_P2']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
# Table_psi = tpick['Table_psi']
# Table_eta = tpick['Table_eta']
idx = np.logical_and(Table_t >= Table_t_range[0], Table_t <= Table_t_range[1])
t1 = Table_t[idx].max() - Table_t[idx].min()
norm = plt.Normalize(Table_t[idx].min() - 0.3 * t1, Table_t[idx].max())
ax1.scatter(Table_phi[idx][0], Table_theta[idx][0], c='k', marker='*')
spf.colorline(Table_phi[idx], Table_theta[idx], z=Table_t[idx], cmap=plt.get_cmap(cmap),
norm=norm, linewidth=1, alpha=1.0, ax=ax1)
return fig
def show_table_theta_phi_list(*args, **kwargs):
core_show_table_theta_phi_list(*args, **kwargs)
return True
def core_show_pickle_theta_phi_list(pickle_path_list, Table_t_range=(-np.inf, np.inf),
figsize=np.array((20, 20)), dpi=100, fast_mode=0,
markersize=3, linewidth=1, alpha=0.5):
cmap_list = ['Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']
# cmap_list = ['jet'] * len(pickle_path_list)
def _get_ax():
fig, ax1 = plt.subplots(1, 1, figsize=np.ones(2) * np.min(figsize), dpi=dpi,
subplot_kw=dict(polar=True))
plt.sca(ax1)
ax1.set_ylim(0, np.pi)
# ax1.xaxis.set_ticklabels(['$\dfrac{%d}{8}2\pi$' % i0 for i0 in np.arange(8)])
ax1.yaxis.set_ticklabels([])
return fig, ax1
if fast_mode:
fig, ax1 = _get_ax()
fig2, ax2 = _get_ax()
fig3, ax3 = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
# fig3.patch.set_facecolor('white')
for pickle_path in pickle_path_list:
with open(pickle_path, 'rb') as handle:
tpick = pickle.load(handle)
Table_t = tpick['Table_t']
# Table_dt = tpick['Table_dt']
# Table_X = tpick['Table_X']
# Table_P = tpick['Table_P']
# Table_P2 = tpick['Table_P2']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
Table_psi = tpick['Table_psi']
# Table_eta = tpick['Table_eta']
idx = np.logical_and(Table_t >= Table_t_range[0], Table_t <= Table_t_range[1])
if not np.any(idx):
continue
ax1.plot(Table_phi[idx], Table_theta[idx], '-', markersize=0.1, alpha=0.5)
ax1.scatter(Table_phi[idx][0], Table_theta[idx][0], c='k', marker='*', s=markersize)
ax2.plot(Table_psi[idx], Table_theta[idx], '-', markersize=0.1, alpha=0.5)
ax2.scatter(Table_psi[idx][0], Table_theta[idx][0], c='k', marker='*', s=markersize)
# tidx = Table_phi > 1.5 * np.pi
tidx = Table_phi > 15 * np.pi
t1 = Table_phi.copy()
t1[tidx] = Table_phi[tidx] - 2 * np.pi
ax3.plot(t1[idx] / np.pi, Table_psi[idx] / np.pi, '-', markersize=0.1, alpha=0.5)
ax3.scatter(t1[idx][0] / np.pi, Table_psi[idx][0] / np.pi, c='k', marker='*',
s=markersize)
fig.suptitle('$\\theta - \\phi$')
fig2.suptitle('$\\theta - \\psi$')
ax3.set_xlabel('$\\phi / \\pi$')
ax3.set_ylabel('$\\psi / \\pi$')
fig.tight_layout(rect=[0, 0, 1, 0.95])
fig2.tight_layout(rect=[0, 0, 1, 0.95])
fig3.tight_layout()
else:
fig, ax1 = _get_ax()
start_list = []
for pickle_path, cmap in zip(pickle_path_list, cmap_list):
with open(pickle_path, 'rb') as handle:
tpick = pickle.load(handle)
Table_t = tpick['Table_t']
# Table_dt = tpick['Table_dt']
# Table_X = tpick['Table_X']
# Table_P = tpick['Table_P']
# Table_P2 = tpick['Table_P2']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
# Table_psi = tpick['Table_psi']
# Table_eta = tpick['Table_eta']
idx = np.logical_and(Table_t >= Table_t_range[0], Table_t <= Table_t_range[1])
t1 = Table_t[idx].max() - Table_t[idx].min()
norm = plt.Normalize(Table_t[idx].min() - 0.3 * t1, Table_t[idx].max())
spf.colorline(Table_phi[idx], Table_theta[idx], z=Table_t[idx], cmap=plt.get_cmap(cmap),
norm=norm, linewidth=linewidth, alpha=alpha, ax=ax1)
start_list.append((Table_phi[idx][0], Table_theta[idx][0]))
for tx, ty in start_list:
ax1.scatter(tx, ty, c='k', marker='*', s=markersize)
return fig
def show_pickle_theta_phi_list(*args, **kwargs):
core_show_pickle_theta_phi_list(*args, **kwargs)
return True
def core_show_table_result_list(theta_phi_list, job_dir, label_list=None,
Table_t_range=(-np.inf, np.inf),
figsize=np.array((20, 20)), dpi=100):
if label_list is None:
label_list = [None] * len(theta_phi_list)
fig = plt.figure(figsize=figsize, dpi=dpi)
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=3, ncols=2)
for (theta, phi), tlabel in zip(theta_phi_list, label_list):
tpick, _ = load_table_date_pickle(job_dir, theta, phi)
Table_t = tpick['Table_t']
idx = np.logical_and(Table_t >= Table_t_range[0], Table_t <= Table_t_range[1])
Table_t = tpick['Table_t'][idx]
Table_X = tpick['Table_X'][idx]
Table_theta = tpick['Table_theta'][idx]
Table_phi = tpick['Table_phi'][idx]
Table_psi = tpick['Table_psi'][idx]
for _ in zip(axs,
(Table_X[:, 0], Table_X[:, 1], Table_X[:, 2]),
(Table_theta, Table_phi, Table_psi),
('$x - x_{mean}$', '$y - y_{mean}$', '$z - z_{mean}$'),
                     ('$\\theta / \\pi$', '$\\phi / \\pi$', '$\\psi / \\pi$'), ):
(ax1, ax2), ty1, ty2, ylab1, ylab2 = _
if tlabel is None:
ax1.plot(Table_t, ty1 - np.mean(ty1), '-')
ax2.plot(Table_t, ty2 / np.pi, '-')
else:
ax1.plot(Table_t, ty1 - np.mean(ty1), '-', label=tlabel)
ax2.plot(Table_t, ty2 / np.pi, '-', label=tlabel)
ax1.legend()
ax2.legend()
ax1.set_ylabel(ylab1)
ax2.set_ylabel(ylab2)
axs[0, 0].xaxis.set_ticklabels([])
axs[0, 1].xaxis.set_ticklabels([])
axs[1, 0].xaxis.set_ticklabels([])
axs[1, 1].xaxis.set_ticklabels([])
axs[2, 0].set_xlabel('$t$')
axs[2, 1].set_xlabel('$t$')
plt.tight_layout()
return fig
def show_table_result_list(*args, **kwargs):
core_show_table_result_list(*args, **kwargs)
return True
def core_show_table_theta_phi_psi_fft_list(theta_phi_list, job_dir, label_list,
figsize=np.array((20, 20)), dpi=100,
resampling_fct=2, use_welch=False):
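    """Compare frequency spectra of cos(theta), cos(phi) and cos(psi) across cases.

    Data are first resampled onto a uniform time grid (resampling_angle);
    the spectrum is taken over roughly the last ten periods of the major
    theta frequency, using either a plain rFFT or Welch's method, and
    shown on semi-log and log-log panels.
    """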
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=figsize, dpi=dpi)
fig.patch.set_facecolor('white')
for (theta, phi), tlabel in zip(theta_phi_list, label_list):
tpick, _ = load_table_date_pickle(job_dir, theta, phi)
Table_t = tpick['Table_t']
# Table_dt = tpick['Table_dt']
# Table_X = tpick['Table_X']
# Table_P = tpick['Table_P']
# Table_P2 = tpick['Table_P2']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
Table_psi = tpick['Table_psi']
Table_eta = tpick['Table_eta']
Table_t, Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_angle(Table_t, Table_theta, Table_phi, Table_psi, Table_eta, resampling_fct)
for (ax1, ax2), ty1, ylab in zip(axs,
(Table_theta, Table_phi, Table_psi),
('\\theta', '\\phi', '\\psi')):
            # find the major frequency and display it
tmin = np.max((0, Table_t.max() - 1000))
idx = Table_t > tmin
freq_pk = get_major_fre(Table_t[idx], np.cos(Table_theta[idx]))
idx = Table_t > (Table_t.max() - 1 / freq_pk * 10)
if use_welch:
fs = ty1[idx].size / (Table_t[idx].max() - Table_t[idx].min())
                nperseg = int(fs / freq_pk * 8)  # welch() expects an integer segment length
tfreq, tfft = signal.welch(np.cos(ty1)[idx], fs=fs, nperseg=nperseg)
else:
tfft = np.fft.rfft(np.cos(ty1[idx]))
# noinspection PyTypeChecker
tfreq = np.fft.rfftfreq(Table_t[idx].size, np.mean(np.diff(Table_t[idx])))
tfft_abs = np.abs(tfft)
ax1.semilogx(tfreq[:], tfft_abs[:], '-', label=tlabel)
ax2.loglog(tfreq[:], tfft_abs[:], '-', label=tlabel)
ax1.set_title('FFT of $\\cos %s$' % ylab)
ax2.set_title('FFT of $\\cos %s$' % ylab)
ax1.legend()
axs[0, 0].xaxis.set_ticklabels([])
axs[0, 1].xaxis.set_ticklabels([])
axs[1, 0].xaxis.set_ticklabels([])
axs[1, 1].xaxis.set_ticklabels([])
axs[2, 0].set_xlabel('$Hz$')
axs[2, 1].set_xlabel('$Hz$')
# fig.tight_layout()
return fig
def show_table_theta_phi_psi_fft_list(*args, **kwargs):
core_show_table_theta_phi_psi_fft_list(*args, **kwargs)
return True
def core_show_table_result(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z=False,
planeShearRate=np.array((1, 0, 0)), fig=None,
save_every=1, resampling=False, resampling_fct=2):
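    """Draw an overview figure of one simulation table.

    Panels: time-colored polar theta-phi plot, time series of the Euler
    angles, center coordinates, time step dt, and the norms |P1|, |P2|
    and |P1.P2| (an orthogonality check). If move_z, the mean z and the
    shear-induced x drift are subtracted first; data can be resampled.
    """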
fontsize = 40
figsize = np.array((20, 15))
if move_z:
z_mean = np.mean(Table_X[:, 2])
Table_X[:, 2] = Table_X[:, 2] - z_mean
ux_shear = z_mean * planeShearRate[0]
Xz_mean = (Table_t - Table_t[0]) * ux_shear
Table_X[:, 0] = Table_X[:, 0] - Xz_mean
if resampling:
Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, resampling_fct)
# show table results.
if fig is None:
fig = plt.figure(figsize=figsize)
else:
fig.clf()
fig.patch.set_facecolor('white')
ax0 = plt.subplot2grid((7, 6), (0, 0), rowspan=3, colspan=3, polar=True)
ax4 = plt.subplot2grid((7, 6), (3, 3), colspan=3)
ax1 = plt.subplot2grid((7, 6), (0, 3), colspan=3, sharex=ax4)
ax2 = plt.subplot2grid((7, 6), (1, 3), colspan=3, sharex=ax4)
ax3 = plt.subplot2grid((7, 6), (2, 3), colspan=3, sharex=ax4)
axdt = plt.subplot2grid((7, 6), (3, 0), colspan=3)
axP = plt.subplot2grid((7, 6), (6, 0), colspan=2)
axP2 = plt.subplot2grid((7, 6), (6, 2), colspan=2)
axPdotP2 = plt.subplot2grid((7, 6), (6, 4), colspan=2)
ax5 = plt.subplot2grid((7, 6), (4, 0), rowspan=2, colspan=2, sharex=axP)
ax6 = plt.subplot2grid((7, 6), (4, 2), rowspan=2, colspan=2, sharex=axP2)
ax7 = plt.subplot2grid((7, 6), (4, 4), rowspan=2, colspan=2, sharex=axPdotP2)
# polar version
norm = plt.Normalize(Table_t.min(), Table_t.max())
cmap = plt.get_cmap('jet')
ax0.plot(Table_phi, Table_theta, '-', alpha=0.2)
ax0.plot(Table_phi[0], Table_theta[0], '*k')
lc = ax0.scatter(Table_phi, Table_theta, c=Table_t, cmap=cmap, norm=norm, s=fontsize * 0.1)
clb = fig.colorbar(lc, ax=ax0, orientation="vertical")
clb.ax.tick_params(labelsize=fontsize * 0.5)
clb.ax.set_title('time', size=fontsize * 0.5)
# ax0.set_xlabel('$\\phi / \pi$', size=fontsize*0.7)
# ax0.set_ylabel('$\\theta / \pi$', size=fontsize*0.7)
ax0.set_ylim(0, np.pi)
plt.sca(ax0)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
# # phase map version
# norm=plt.Normalize(Table_t.min(), Table_t.max())
# cmap=plt.get_cmap('jet')
# ax0.plot(Table_phi / np.pi, Table_theta / np.pi, ' ')
# lc = spf.colorline(Table_phi / np.pi, Table_theta / np.pi, Table_t,
# ax=ax0, cmap=cmap, norm=norm, linewidth=3)
# clb = fig.colorbar(lc, ax=ax0, orientation="vertical")
# clb.ax.tick_params(labelsize=fontsize*0.5)
# clb.ax.set_title('time', size=fontsize*0.5)
# ax0.set_xlabel('$\\phi / \pi$', size=fontsize*0.7)
# ax0.set_ylabel('$\\theta / \pi$', size=fontsize*0.7)
# plt.sca(ax0)
# plt.xticks(fontsize=fontsize*0.5)
# plt.yticks(fontsize=fontsize*0.5)
xticks = np.around(np.linspace(Table_t.min(), Table_t.max(), 21), decimals=2)[1::6]
for axi, ty, axyi in zip((ax1, ax2, ax3, ax4, ax5, ax6, ax7, axdt, axP, axP2, axPdotP2),
(Table_theta / np.pi, Table_phi / np.pi, Table_psi / np.pi,
Table_eta / np.pi,
Table_X[:, 0], Table_X[:, 1], Table_X[:, 2], Table_dt,
np.linalg.norm(Table_P, axis=1),
np.linalg.norm(Table_P2, axis=1),
np.abs(np.einsum('ij,ij->i', Table_P, Table_P2))),
                             ('$\\theta / \\pi$', '$\\phi / \\pi$', '$\\psi / \\pi$', '$\\eta / \\pi$',
                              '$center_x$', '$center_y$', '$center_z$', '$dt$',
                              '$\\|P_1\\|$', '$\\|P_2\\|$', '$\\|P_1 \\cdot P_2\\|$')):
plt.sca(axi)
axi.plot(Table_t, ty, '-*', label='Table')
# axi.set_xlabel('t', size=fontsize)
# axi.legend()
axi.set_ylabel('%s' % axyi, size=fontsize * 0.7)
axi.set_xticks(xticks)
axi.set_xticklabels(xticks)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
for axi in (ax4, axdt, axP, axP2, axPdotP2):
axi.set_xlabel('$t$', size=fontsize * 0.7)
for axi in (axP, axP2):
axi.set_ylim(0.9, 1.1)
axdt.axes.set_yscale('log')
plt.tight_layout()
return fig
def show_table_result(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z=False,
planeShearRate=np.array((1, 0, 0)), fig=None,
save_every=1, resampling=False, resampling_fct=2):
core_show_table_result(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z,
planeShearRate, fig, save_every, resampling, resampling_fct)
return True
def save_table_result(filename, Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z=False,
planeShearRate=np.array((1, 0, 0)), fig=None,
save_every=1, resampling=False, resampling_fct=2):
fig = core_show_table_result(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z,
planeShearRate, fig, save_every, resampling, resampling_fct)
fig.savefig(filename, dpi=100)
return fig
def core_show_table_result_v2(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z=False,
planeShearRate=np.array((1, 0, 0)), fig=None,
save_every=1, resampling=False, resampling_fct=2,
figsize=np.array((16, 9)) * 1.5, dpi=100):
markersize = 10
fontsize = 10
norm = plt.Normalize(Table_t.min(), Table_t.max())
cmap = plt.get_cmap('jet')
def _plot_polar(ax0, Table_angle, title):
# polar version
ax0.plot(Table_angle, Table_theta, '-', alpha=0.2)
ax0.plot(Table_angle[0], Table_theta[0], '*k', markersize=markersize * 1.5)
ax0.scatter(Table_angle, Table_theta, c=Table_t, cmap=cmap, norm=norm, s=markersize)
ax0.set_ylim(0, np.pi)
ax0.set_title(title, size=fontsize * 0.8)
plt.sca(ax0)
plt.xticks(fontsize=fontsize * 0.8)
plt.yticks(fontsize=fontsize * 0.8)
return True
if move_z:
z_mean = np.mean(Table_X[:, 2])
Table_X[:, 2] = Table_X[:, 2] - z_mean
ux_shear = z_mean * planeShearRate[0]
Xz_mean = (Table_t - Table_t[0]) * ux_shear
Table_X[:, 0] = Table_X[:, 0] - Xz_mean
if resampling:
Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, resampling_fct)
# show table results.
if fig is None:
fig = plt.figure(figsize=figsize, dpi=dpi)
else:
fig.clf()
fig.patch.set_facecolor('white')
cax = plt.subplot2grid((19, 32), (0, 0), rowspan=18, colspan=1)
ax0 = plt.subplot2grid((19, 32), (0, 2), rowspan=8, colspan=8, polar=True)
ax1 = plt.subplot2grid((19, 32), (10, 2), rowspan=8, colspan=8, polar=True)
ax2 = plt.subplot2grid((19, 32), (0, 11), rowspan=8, colspan=8)
ax3 = plt.subplot2grid((19, 32), (10, 11), rowspan=8, colspan=8, projection='3d')
ax9 = plt.subplot2grid((19, 32), (15, 21), rowspan=3, colspan=12)
ax4 = plt.subplot2grid((19, 32), (0, 21), rowspan=3, colspan=12)
ax5 = plt.subplot2grid((19, 32), (3, 21), rowspan=3, colspan=12)
ax6 = plt.subplot2grid((19, 32), (6, 21), rowspan=3, colspan=12)
ax7 = plt.subplot2grid((19, 32), (9, 21), rowspan=3, colspan=12)
ax8 = plt.subplot2grid((19, 32), (12, 21), rowspan=3, colspan=12)
_plot_polar(ax0, Table_phi, '$\\theta - \\phi$')
_plot_polar(ax1, Table_psi, '$\\theta - \\psi$')
ax2.plot(Table_phi / np.pi, Table_psi / np.pi, '-', alpha=0.2)
ax2.plot(Table_phi[0] / np.pi, Table_psi[0] / np.pi, '*k', markersize=markersize * 1.5)
ax2.scatter(Table_phi / np.pi, Table_psi / np.pi, c=Table_t, cmap=cmap, norm=norm, s=markersize)
ax2.set_xlabel('$\\phi / \\pi$')
ax2.set_ylabel('$\\psi / \\pi$')
plt.sca(ax2)
plt.xticks(fontsize=fontsize * 0.8)
plt.yticks(fontsize=fontsize * 0.8)
ax3.set_title('$P_1$', size=fontsize)
points = Table_P.reshape(-1, 1, 3)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(Table_t)
ax3.add_collection3d(lc, zs=points[:, :, 2].flatten(), zdir='z')
ax3.set_xlim(points[:, :, 0].min(), points[:, :, 0].max())
ax3.set_ylim(points[:, :, 1].min(), points[:, :, 1].max())
ax3.set_zlim(points[:, :, 2].min(), points[:, :, 2].max())
spf.set_axes_equal(ax3)
ax3.plot(np.ones_like(points[:, :, 0].flatten()) * ax3.get_xlim()[0], points[:, :, 1].flatten(),
points[:, :, 2].flatten())
ax3.plot(points[:, :, 0].flatten(), np.ones_like(points[:, :, 1].flatten()) * ax3.get_ylim()[1],
points[:, :, 2].flatten())
ax3.plot(points[:, :, 0].flatten(), points[:, :, 1].flatten(),
np.ones_like(points[:, :, 2].flatten()) * ax3.get_zlim()[0])
plt.sca(ax3)
ax3.set_xlabel('$x$', size=fontsize)
ax3.set_ylabel('$y$', size=fontsize)
ax3.set_zlabel('$z$', size=fontsize)
plt.xticks(fontsize=fontsize * 0.8)
plt.yticks(fontsize=fontsize * 0.8)
for t in ax3.zaxis.get_major_ticks():
t.label.set_fontsize(fontsize * 0.8)
for spine in ax3.spines.values():
spine.set_visible(False)
clb = fig.colorbar(lc, cax=cax)
clb.ax.tick_params(labelsize=fontsize)
clb.ax.set_title('time', size=fontsize)
for _ in zip(((ax4, ax7), (ax5, ax8), (ax6, ax9)),
(Table_X[:, 0], Table_X[:, 1], Table_X[:, 2]),
(Table_theta, Table_phi, Table_psi),
('$x - x_{mean}$', '$y - y_{mean}$', '$z - z_{mean}$'),
('x_{mean}', 'y_{mean}', 'z_{mean}'),
                 ('$\\theta / \\pi$', '$\\phi / \\pi$', '$\\psi / \\pi$'), ):
(ax1, ax2), ty1, ty2, ylab1, txt1, ylab2 = _
ax1.plot(Table_t, ty1 - np.mean(ty1), '-')
t1 = '$%s = %.2e$' % (txt1, np.mean(ty1))
ax1.text(Table_t.min(), (ty1 - np.mean(ty1)).max() / 2, t1, fontsize=fontsize)
for i0, i1 in separate_angle_idx(ty2):
ax2.plot(Table_t[i0:i1], ty2[i0:i1] / np.pi, '-', color='#1f77b4')
# ax2.plot(Table_t, ty2 / np.pi, '-')
ax1.set_ylabel(ylab1)
ax2.set_ylabel(ylab2)
for axi in (ax4, ax5, ax6, ax7, ax8):
axi.set_xticklabels([])
plt.sca(ax9)
ax9.set_xlabel('$t$')
plt.xticks(fontsize=fontsize * 0.8)
plt.yticks(fontsize=fontsize * 0.8)
plt.tight_layout()
return fig
def show_table_result_v2(*args, **kwargs):
core_show_table_result_v2(*args, **kwargs)
return True
def save_table_result_v2(filename, *args, dpi=100, **kwargs):
fig = core_show_table_result_v2(*args, **kwargs)
fig.savefig(fname=filename, dpi=dpi)
return fig
def core_show_theta_phi(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig=None, show_back_direction=True):
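    """Polar theta-phi plot plus a second figure with the 3D trace of the director P1.

    If show_back_direction, small 3D insets with arrows are drawn behind
    the polar axes to indicate the body orientation corresponding to each
    (theta, phi) region. Returns (fig, fig2).
    """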
def add_axs_psi_theta(ax0, psi_list, theta_list, ax_size_fct=0.1, alpha=0.0):
for tphi in psi_list:
for ttheta in theta_list:
tx = ttheta * np.cos(tphi)
ty = ttheta * np.sin(tphi)
bbox = (tx - ax_size_fct / 2 * np.pi, ty - ax_size_fct / 2 * np.pi,
ax_size_fct * np.pi, ax_size_fct * np.pi)
axin = spf.add_inset(ax0, bbox, projection='3d')
for spine in axin.spines.values():
spine.set_visible(False)
axin.xaxis.set_major_locator(plt.NullLocator())
axin.yaxis.set_major_locator(plt.NullLocator())
axin.zaxis.set_major_locator(plt.NullLocator())
axin.set_xlim(-1, 1)
axin.set_ylim(-1, 1)
axin.set_zlim(-1, 1)
axin.patch.set_alpha(alpha)
axin.quiver(0, 0, 0,
np.sin(ttheta) * np.cos(tphi),
np.sin(ttheta) * np.sin(tphi),
np.cos(ttheta),
arrow_length_ratio=0.5, colors='k', linewidth=fontsize * 0.1)
# background
fontsize = 30
if fig is None:
fig = plt.figure(figsize=(20, 20))
else:
fig.clf()
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(111)
ax0.set_xlim(-np.pi * 1.1, np.pi * 1.1)
ax0.set_ylim(-np.pi * 1.1, np.pi * 1.1)
ax0.axis('off')
cax0 = colorbar.make_axes(ax0, orientation='vertical', aspect=20, shrink=0.6)[0]
ax0.set_aspect('equal')
# norms of different directions
if show_back_direction:
# 1
psi_list = (0,)
theta_list = (0,)
add_axs_psi_theta(ax0, psi_list, theta_list, ax_size_fct=0.2, alpha=0.3)
# 2
psi_list = np.linspace(0, 2 * np.pi, 8, endpoint=False)
theta_list = np.linspace(0.2 * np.pi, np.pi, 4)
add_axs_psi_theta(ax0, psi_list, theta_list, ax_size_fct=0.2, alpha=0.3)
# 3
psi_list = np.linspace(0, 2 * np.pi, 16, endpoint=False)[1::2]
theta_list = np.linspace(0.25 * np.pi, np.pi, 8)[1::2]
add_axs_psi_theta(ax0, psi_list, theta_list, ax_size_fct=0.2, alpha=0.3)
# 4
psi_list = np.linspace(0, 2 * np.pi, 32, endpoint=False)[1::2]
t1 = np.linspace(0.25 * np.pi, np.pi, 8)[1::2]
theta_list = (np.mean((t1[2], t1[3])), np.mean((t1[1], t1[2])))
add_axs_psi_theta(ax0, psi_list, theta_list, ax_size_fct=0.2, alpha=0.3)
# polar version of theta-phi
ax1 = fig.add_axes(ax0.get_position(), projection='polar')
ax1.patch.set_alpha(0)
plt.sca(ax1)
ax1.set_ylim(0, np.pi)
    ax1.xaxis.set_ticklabels([r'$\dfrac{%d}{8}2\pi$' % i0 for i0 in np.arange(8)])
ax1.yaxis.set_ticklabels([])
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
norm = plt.Normalize(Table_t.min(), Table_t.max())
cmap = plt.get_cmap('jet')
ax1.plot(Table_phi, Table_theta, '-', alpha=0.2)
ax1.scatter(Table_phi[0], Table_theta[0], c='k', s=fontsize * 6, marker='*')
lc = ax1.scatter(Table_phi, Table_theta, c=Table_t, cmap=cmap, norm=norm, s=fontsize * 0.2)
clb = fig.colorbar(lc, cax=cax0, orientation="vertical")
clb.ax.tick_params(labelsize=fontsize * 0.6)
clb.ax.set_title('time', size=fontsize * 0.6)
fig2 = plt.figure(figsize=(20, 20))
fig2.patch.set_facecolor('white')
ax0 = fig2.add_subplot(1, 1, 1, projection='3d')
ax0.set_title('$P_1$', size=fontsize)
cax0 = inset_axes(ax0, width="80%", height="5%", bbox_to_anchor=(0, 0.1, 1, 1),
loc=1, bbox_transform=ax0.transAxes, borderpad=0, )
norm = plt.Normalize(Table_t.min(), Table_t.max())
cmap = plt.get_cmap('jet')
# Create the 3D-line collection object
points = Table_P.reshape(-1, 1, 3)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(Table_t)
ax0.add_collection3d(lc, zs=points[:, :, 2].flatten(), zdir='z')
ax0.set_xlim(points[:, :, 0].min(), points[:, :, 0].max())
ax0.set_ylim(points[:, :, 1].min(), points[:, :, 1].max())
ax0.set_zlim(points[:, :, 2].min(), points[:, :, 2].max())
spf.set_axes_equal(ax0)
ax0.plot(np.ones_like(points[:, :, 0].flatten()) * ax0.get_xlim()[0], points[:, :, 1].flatten(),
points[:, :, 2].flatten())
ax0.plot(points[:, :, 0].flatten(), np.ones_like(points[:, :, 1].flatten()) * ax0.get_ylim()[1],
points[:, :, 2].flatten())
ax0.plot(points[:, :, 0].flatten(), points[:, :, 1].flatten(),
np.ones_like(points[:, :, 2].flatten()) * ax0.get_zlim()[0])
clb = fig2.colorbar(lc, cax=cax0, orientation="horizontal")
clb.ax.tick_params(labelsize=fontsize)
clb.ax.set_title('Sim, time', size=fontsize)
plt.sca(ax0)
ax0.set_xlabel('$x$', size=fontsize)
ax0.set_ylabel('$y$', size=fontsize)
ax0.set_zlabel('$z$', size=fontsize)
plt.xticks(fontsize=fontsize * 0.8)
plt.yticks(fontsize=fontsize * 0.8)
for t in ax0.zaxis.get_major_ticks():
t.label.set_fontsize(fontsize * 0.8)
for spine in ax0.spines.values():
spine.set_visible(False)
plt.tight_layout()
return fig, fig2
def show_theta_phi(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, fig=None,
show_back_direction=True):
core_show_theta_phi(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, fig,
show_back_direction)
return True
def save_theta_phi(filename, Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, fig=None,
show_back_direction=True):
fig, fig2 = core_show_theta_phi(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, fig,
show_back_direction)
fig.savefig(filename + '_1', dpi=100)
fig2.savefig(filename + '_2', dpi=100)
return fig, fig2
def core_light_show_theta_phi(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig=None, show_colorbar=True, title=''):
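    """Lightweight time-colored polar theta-phi plot with an optional colorbar and title."""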
fontsize = 30
if fig is None:
fig = plt.figure(figsize=(10, 10), dpi=200)
else:
pass
fig.clf()
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(111)
ax0.set_xlim(-np.pi * 1.1, np.pi * 1.1)
ax0.set_ylim(-np.pi * 1.1, np.pi * 1.1)
ax0.axis('off')
if show_colorbar:
cax0 = colorbar.make_axes(ax0, orientation='vertical', aspect=20, shrink=0.6)[0]
ax0.set_aspect('equal')
# polar version of theta-phi
ax1 = fig.add_axes(ax0.get_position(), projection='polar')
ax1.patch.set_alpha(0)
plt.sca(ax1)
ax1.set_ylim(0, np.pi)
    ax1.xaxis.set_ticklabels([r'$\dfrac{%d}{8}2\pi$' % i0 for i0 in np.arange(8)])
ax1.yaxis.set_ticklabels([])
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
norm = plt.Normalize(Table_t.min(), Table_t.max())
cmap = plt.get_cmap('jet')
ax1.plot(Table_phi, Table_theta, '-', alpha=0.2)
ax1.scatter(Table_phi[0], Table_theta[0], c='k', s=fontsize * 6, marker='*')
if show_colorbar:
lc = ax1.scatter(Table_phi, Table_theta, c=Table_t, cmap=cmap, norm=norm, s=fontsize * 0.2)
clb = fig.colorbar(lc, cax=cax0, orientation="vertical")
clb.ax.tick_params(labelsize=fontsize * 0.6)
clb.ax.set_title('time', size=fontsize * 0.6)
else:
ax1.scatter(Table_phi, Table_theta, cmap=cmap, norm=norm, s=fontsize * 0.2)
# plt.sca(ax1)
# plt.tight_layout()
ax1.set_title(title, y=1.1, size=fontsize * 0.6)
# plt.tight_layout()
return fig
def light_show_theta_phi(*args, **kwargs):
core_light_show_theta_phi(*args, **kwargs)
return True
def light_save_theta_phi(filename, *args, **kwargs):
fig = core_light_show_theta_phi(*args, **kwargs)
fig.savefig(filename, dpi=300)
return fig
def core_show_pickle_results(job_dir, theta, phi, table_name, fast_mode=0):
tpick, _ = load_table_date_pickle(job_dir, theta, phi)
Table_t = tpick['Table_t']
Table_dt = tpick['Table_dt']
Table_X = tpick['Table_X']
Table_P = tpick['Table_P']
Table_P2 = tpick['Table_P2']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
Table_psi = tpick['Table_psi']
Table_eta = tpick['Table_eta']
print('-ini_theta %f -ini_phi %f -ini_psi %f' %
(tpick['Table_theta'][0], tpick['Table_phi'][0], tpick['Table_psi'][0]))
freq_pk = get_major_fre(Table_t, Table_theta)
idx = Table_t > Table_t.max() - 1 / freq_pk * 10
if fast_mode == 0:
show_theta_phi(Table_t[idx], Table_dt[idx], Table_X[idx], Table_P[idx], Table_P2[idx],
Table_theta[idx], Table_phi[idx], Table_psi[idx], Table_eta[idx],
show_back_direction=False)
show_theta_phi_psi_eta(Table_t[idx], Table_dt[idx], Table_X[idx],
Table_P[idx], Table_P2[idx],
Table_theta[idx], Table_phi[idx], Table_psi[idx], Table_eta[idx])
show_center_X(Table_t[idx], Table_dt[idx], Table_X[idx], Table_P[idx], Table_P2[idx],
Table_theta[idx], Table_phi[idx], Table_psi[idx], Table_eta[idx],
table_name=table_name)
elif fast_mode == 1:
show_table_result(Table_t[idx], Table_dt[idx], Table_X[idx], Table_P[idx], Table_P2[idx],
Table_theta[idx], Table_phi[idx], Table_psi[idx], Table_eta[idx],
save_every=1)
elif fast_mode == 2:
light_show_theta_phi(Table_t[idx], Table_dt[idx], Table_X[idx], Table_P[idx], Table_P2[idx],
Table_theta[idx], Table_phi[idx], Table_psi[idx], Table_eta[idx], )
return True
def show_pickle_results(job_dir, theta, phi, table_name, fast_mode=0):
core_show_pickle_results(job_dir, theta, phi, table_name, fast_mode=fast_mode)
return True
def core_show_theta_phi_psi_eta(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig=None, resampling_fct=2, fft_full_mode=False,
show_prim_freq=3, dpi=100):
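    """Time series of theta, phi, psi, eta and the spectra of their cosines.

    Left column: angle / pi versus t, split at 2*pi jumps; right column:
    rFFT amplitude with the show_prim_freq strongest peaks starred and
    their frequencies printed. Unless fft_full_mode, only the last 20000
    samples enter the FFT.
    """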
fontsize = 40
figsize = (20, 15)
Table_t, Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_angle(Table_t, Table_theta, Table_phi, Table_psi, Table_eta, resampling_fct)
if fig is None:
fig = plt.figure(figsize=figsize, dpi=dpi)
else:
fig.clf()
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=4, ncols=2)
for (ax0, ax1), ty1, ylab in zip(axs,
(Table_theta, Table_phi, Table_psi, Table_eta),
                                     ('$\\theta / \\pi$', '$\\phi / \\pi$',
                                      '$\\psi / \\pi$', '$\\eta / \\pi$')):
for i0, i1 in separate_angle_idx(ty1):
ax0.plot(Table_t[i0:i1], ty1[i0:i1] / np.pi, '-', color='#1f77b4')
ax0.set_ylabel(ylab, size=fontsize * 0.7)
plt.sca(ax0)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
        # find the major frequency and display it
idx = np.ones_like(Table_t, dtype=bool)
if not fft_full_mode:
idx[:-20000] = False
tfft = np.fft.rfft(np.cos(ty1[idx]))
# tfft = signal.stft(np.cos(ty1[idx]))
tfft_abs = np.abs(tfft)
# noinspection PyTypeChecker
tfreq = np.fft.rfftfreq(Table_t[idx].size, np.mean(np.diff(Table_t[idx])))
ax1.loglog(tfreq, tfft_abs, '.')
tpk = signal.find_peaks(tfft_abs)[0]
if tpk.size > 0:
fft_abs_pk = tfft_abs[tpk]
freq_pk = tfreq[tpk]
tidx = np.argsort(fft_abs_pk)[-show_prim_freq:]
# ax1.text(freq_pk[tidx] / 5, fft_abs_pk[tidx], '$%.5f$' % freq_pk[tidx],
# fontsize=fontsize * 0.7)
ax1.loglog(freq_pk[tidx], fft_abs_pk[tidx], '*', ms=fontsize * 0.5)
t1 = 'starred freq: \n' + '\n'.join(['$%.5f$' % freq_pk[ti] for ti in tidx])
ax1.text(ax1.get_xlim()[0] * 1.1, ax1.get_ylim()[0] * 1.1,
t1, fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
axs[-1, 0].set_xlabel('$t$', size=fontsize * 0.7)
axs[-1, 1].set_xlabel('$Hz$', size=fontsize * 0.7)
plt.tight_layout()
return fig
def show_theta_phi_psi_eta(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig=None, resampling_fct=2, fft_full_mode=False,
show_prim_freq=3, dpi=100):
core_show_theta_phi_psi_eta(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig, resampling_fct, fft_full_mode,
show_prim_freq, dpi)
return True
def save_theta_phi_psi_eta(filename, Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig=None, resampling_fct=2, fft_full_mode=False,
show_prim_freq=3, dpi=100):
fig = core_show_theta_phi_psi_eta(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
fig, resampling_fct, fft_full_mode,
show_prim_freq, dpi)
fig.savefig(filename, dpi=100)
return fig
def core_show_center_X(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, table_name,
move_z=False, planeShearRate=np.array((1, 0, 0)), fig=None,
resampling=False, resampling_fct=2):
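    """Compare the stored center trajectory with table-interpolated velocities.

    A TableObj is rebuilt from table_name, the velocity at each stored pose
    is evaluated and the background flow is subtracted; panels show x, y, z
    against the relative velocity components, plus u.p, omega.p and their
    ratio along the director.
    """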
fontsize = 40
figsize = (20, 15)
if move_z:
z_mean = np.mean(Table_X[:, 2])
Table_X[:, 2] = Table_X[:, 2] - z_mean
ux_shear = z_mean * planeShearRate[0]
Xz_mean = (Table_t - Table_t[0]) * ux_shear
Table_X[:, 0] = Table_X[:, 0] - Xz_mean
if resampling:
Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, resampling_fct)
# get velocity from table
norm = np.array((0, 0, 1))
P0, P20, tcenter, problem = do_calculate_prepare(norm)
tkwargs = do_ellipse_kwargs(tcenter=tcenter, P0=P0, P20=P20, ini_psi=0, table_name=table_name)
tobj = jm.TableObj(**tkwargs)
problem.add_obj(tobj)
Table_dX_rel = []
for X, theta, phi, psi in zip(Table_X, Table_theta, Table_phi, Table_psi):
# ref_U = tobj.get_velocity_at(X, P, P2, check_orthogonality=False)
ref_U = tobj.get_velocity_at3(X, theta, phi, psi)
Ub = problem.flow_velocity(X)
rel_U = ref_U - np.hstack((Ub, np.zeros(3)))
Table_dX_rel.append(rel_U)
Table_dX_rel = np.vstack(Table_dX_rel)
    # relative translational and rotational velocities along the norm direction
up_rel = np.array([np.dot(P, U[:3]) for (P, U) in zip(Table_P, Table_dX_rel)])
wp_rel = np.array([np.dot(P, U[3:]) for (P, U) in zip(Table_P, Table_dX_rel)])
if fig is None:
fig = plt.figure(figsize=figsize)
else:
fig.clf()
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=5, ncols=1)
# center and velocity
for ax0, ty1, ty2, ylab1, ylab2 in zip(axs, Table_X.T, Table_dX_rel.T,
('$x$', '$y$', '$z$'),
('$u_x-u_{fx}$', '$u_y-u_{fy}$', '$u_z-u_{fz}$')):
color = 'tab:red'
ax0.plot(Table_t, ty1, '-', color=color)
ax0.set_ylabel(ylab1, size=fontsize * 0.7, color=color)
ax0.tick_params(axis='y', labelcolor=color)
plt.sca(ax0)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
ax1 = ax0.twinx()
color = 'tab:blue'
ax1.plot(Table_t, ty2, '-', color=color)
ax1.set_ylabel(ylab2, size=fontsize * 0.7, color=color)
ax1.tick_params(axis='y', labelcolor=color)
plt.sca(ax1)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
    # translational and rotational velocity along the norm direction
ax0 = axs[3]
color = 'tab:red'
ax0.plot(Table_t, up_rel, '-', color=color)
ax0.set_ylabel('$\\bm{u}_p = \\bm{u} \\cdot \\bm{p}$', size=fontsize * 0.7, color=color)
ax0.tick_params(axis='y', labelcolor=color)
plt.sca(ax0)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
ax1 = ax0.twinx()
color = 'tab:blue'
ax1.plot(Table_t, wp_rel, '-', color=color)
    ax1.set_ylabel('$\\bm{\\omega}_{bp} = \\bm{\\omega}_b \\cdot \\bm{p}$',
size=fontsize * 0.7, color=color)
ax1.tick_params(axis='y', labelcolor=color)
plt.sca(ax1)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
ax0 = axs[4]
ax0.plot(Table_t, wp_rel / up_rel, '.')
    ax0.set_ylabel('$\\bm{\\omega}_{bp} / \\bm{u}_p$', size=fontsize * 0.7)
ax0.set_yscale('symlog', linthreshy=0.01)
t1 = np.max((1, ax0.get_yticks().size // 4))
tticks = ax0.get_yticks()[::t1]
ax0.set_yticks(tticks)
ax0.set_yticklabels(tticks)
ax0.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
fig.tight_layout()
ax0.set_xlabel('t', size=fontsize * 0.7)
plt.sca(ax0)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
plt.tight_layout()
return fig
def show_center_X(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, table_name,
move_z=False, planeShearRate=np.array((1, 0, 0)), fig=None,
resampling=False, resampling_fct=2):
core_show_center_X(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, table_name,
move_z, planeShearRate, fig, resampling, resampling_fct)
return True
def save_center_X(filename, Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, table_name,
move_z=False, planeShearRate=np.array((1, 0, 0)), fig=None,
resampling=False, resampling_fct=2):
fig = core_show_center_X(Table_t, Table_dt, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, table_name,
move_z, planeShearRate, fig, resampling, resampling_fct)
fig.savefig(filename, dpi=100)
return fig
def get_increase_angle(ty1):
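    """Unwrap an angle series: remove the 2*pi jumps so the angle varies continuously."""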
ty = ty1.copy()
for i0, dt in enumerate(np.diff(ty)):
if dt > np.pi:
ty[i0 + 1:] = ty[i0 + 1:] - 2 * np.pi
elif dt < -np.pi:
ty[i0 + 1:] = ty[i0 + 1:] + 2 * np.pi
return ty
def get_continue_angle(tx, ty1, t_use=None):
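    """Unwrap, interpolate (quadratic) onto t_use, and wrap back to [0, 2*pi).

    t_use=None means twice the input resolution; a scalar t_use is treated
    as a resampling factor.
    """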
if t_use is None:
t_use = np.linspace(tx.min(), tx.max(), 2 * tx.size)
if np.array(t_use).size == 1:
        t_use = np.linspace(tx.min(), tx.max(), int(t_use * tx.size))
ty = get_increase_angle(ty1)
intp_fun1d = interpolate.interp1d(tx, ty, kind='quadratic', copy=False, axis=0,
bounds_error=True)
return intp_fun1d(t_use) % (2 * np.pi)
def separate_angle_idx(ty):
    # split into contiguous segments so plots do not draw lines across the 0 <-> 2*pi jumps
idx_list = []
dty = np.diff(ty)
idx_list.append(np.argwhere(dty > np.pi).flatten())
idx_list.append(np.argwhere(dty < -np.pi).flatten())
    idx_list.append(-1)  # -1 so that, after the +1 below, the first segment starts at index 0
    idx_list.append(ty.size - 1)  # the last segment ends at index (size - 1)
t1 = np.sort(np.hstack(idx_list))
return np.vstack((t1[:-1] + 1, t1[1:])).T
def get_major_fre(tx, ty1, fft_full_mode=False):
freq_pk = get_primary_fft_fre(tx, ty1, fft_full_mode=fft_full_mode)
return freq_pk[-1]
def get_primary_fft_fre(tx, ty1, continue_angle=True, sub_mean=False,
cos_mode=False, fft_full_mode=False):
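    """Return FFT peak frequencies of an angle series, sorted by rising amplitude.

    The signal is optionally unwrapped onto a uniform grid, mean-subtracted
    and/or cosine-transformed; unless fft_full_mode, only the last 20000
    samples are used. The dominant frequency is therefore the last element.
    """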
idx = np.ones_like(tx, dtype=bool)
if not fft_full_mode:
idx[:-20000] = False
if continue_angle:
t_use = np.linspace(tx[idx].min(), tx[idx].max(), tx[idx].size)
ty = get_continue_angle(tx[idx], ty1[idx], t_use)
else:
t_use = tx
ty = ty1
if sub_mean:
ty = ty - np.mean(ty)
if cos_mode:
tfft = np.fft.rfft(np.cos(ty))
else:
tfft = np.fft.rfft(ty)
tfft_abs = np.abs(tfft)
# noinspection PyTypeChecker
tfreq = np.fft.rfftfreq(t_use.size, np.mean(np.diff(t_use)))
tpk = signal.find_peaks(tfft_abs)[0]
fft_abs_pk = tfft_abs[tpk]
freq_pk = tfreq[tpk]
tidx = np.argsort(fft_abs_pk)
return freq_pk[tidx]
def get_primary_autocorrelate_fft_fre(tx, ty1, continue_angle=True, sub_mean=False, sin_mode=False,
fft_full_mode=False, strength_threshold=0):
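    """Like get_primary_fft_fre, but the FFT is taken of the autocorrelation.

    Peaks weaker than strength_threshold times the strongest peak are
    discarded; frequencies are returned sorted by rising amplitude.
    """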
idx = np.ones_like(tx, dtype=bool)
if not fft_full_mode:
idx[:-20000] = False
if continue_angle:
t_use = np.linspace(tx[idx].min(), tx[idx].max(), tx[idx].size)
ty = get_continue_angle(tx[idx], ty1[idx], t_use)
else:
t_use = tx
ty = ty1
if sub_mean:
ty = ty - np.mean(ty)
if sin_mode:
ty = np.sin(ty)
sampling_rate = ty.size / (t_use.max() - t_use.min())
tfft = np.fft.rfft(np.correlate(ty, ty, mode='full')[ty.size - 1:])
tfft = tfft / ty.size / sampling_rate * 2
tfft_abs = np.abs(tfft)
# noinspection PyTypeChecker
tfreq = np.fft.rfftfreq(t_use.size, np.mean(np.diff(t_use)))
tpk = signal.find_peaks(tfft_abs)[0]
fft_abs_pk = tfft_abs[tpk]
freq_pk = tfreq[tpk]
freq_pk = freq_pk[fft_abs_pk > (fft_abs_pk.max() * strength_threshold)]
fft_abs_pk = fft_abs_pk[fft_abs_pk > (fft_abs_pk.max() * strength_threshold)]
tidx = np.argsort(fft_abs_pk)
return freq_pk[tidx]
# return freq_pk[tidx], fft_abs_pk[tidx]
def get_primary_autocorrelate_fft_fre_v2(tx, ty1, continue_angle=True, fft_full_mode=False):
idx = np.ones_like(tx, dtype=bool)
if not fft_full_mode:
idx[:-20000] = False
if continue_angle:
t_use = np.linspace(tx[idx].min(), tx[idx].max(), tx[idx].size)
ty = get_continue_angle(tx[idx], ty1[idx], t_use)
else:
t_use = tx
ty = ty1
ty = np.cos(ty - np.mean(ty) + np.pi / 2)
sampling_rate = ty.size / (t_use.max() - t_use.min())
tfft = np.fft.rfft(np.correlate(ty, ty, mode='full')[ty.size - 1:])
tfft = tfft / ty.size / sampling_rate * 2
tfft_abs = np.abs(tfft)
# noinspection PyTypeChecker
tfreq = np.fft.rfftfreq(t_use.size, np.mean(np.diff(t_use)))
# tfft_abs = tfft_abs[:-1]
# tfreq = tfreq[:-1]
# plt.plot(t_use, ty)
# plt.loglog(tfreq, tfft_abs)
tpk = signal.find_peaks(tfft_abs)[0]
fft_abs_pk = tfft_abs[tpk]
tidx = np.argsort(fft_abs_pk)
fft_abs_pk = fft_abs_pk[tidx]
freq_pk = tfreq[tpk][tidx]
low_fft_abs_pk = fft_abs_pk[freq_pk < freq_pk[-1]]
low_freq_pk = freq_pk[freq_pk < freq_pk[-1]]
if low_fft_abs_pk.size > 0:
tidx2 = np.argmax(low_fft_abs_pk)
pk_fre = np.hstack((freq_pk[-1], low_freq_pk[tidx2]))
pk_fft = np.hstack((fft_abs_pk[-1], low_fft_abs_pk[tidx2]))
else:
pk_fre = np.hstack((freq_pk[-1], freq_pk[-1],))
pk_fft = np.hstack((fft_abs_pk[-1], fft_abs_pk[-1]))
return pk_fre, pk_fft
def resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi,
Table_psi, Table_eta, resampling_fct=2, t_use=None):
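    """Resample all table quantities onto a uniform time grid.

    Vector quantities are interpolated quadratically; angles go through
    get_continue_angle to avoid 2*pi jumps, and eta is recomputed from
    theta and phi. An explicit t_use overrides resampling_fct.
    """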
def intp_fun(ty):
intp_fun1d = interpolate.interp1d(Table_t, ty, kind='quadratic', copy=False, axis=0,
bounds_error=True)
return intp_fun1d(t_use)
    # resample the data onto a uniform time grid
# noinspection PyTypeChecker
if t_use is None:
        t_use = np.linspace(Table_t.min(), Table_t.max(), int(np.around(Table_t.size * resampling_fct)))
else:
war_msg = 'size of t_use is %d, resampling_fct is IGNORED' % t_use.size
warnings.warn(war_msg)
Table_X = intp_fun(Table_X)
Table_P = intp_fun(Table_P)
Table_P2 = intp_fun(Table_P2)
Table_dt = intp_fun(Table_dt)
Table_theta = get_continue_angle(Table_t, Table_theta, t_use)
Table_phi = get_continue_angle(Table_t, Table_phi, t_use)
Table_psi = get_continue_angle(Table_t, Table_psi, t_use)
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
Table_t = t_use
return Table_t, Table_dt, Table_X, Table_P, Table_P2, \
Table_theta, Table_phi, Table_psi, Table_eta
def resampling_angle(Table_t, Table_theta, Table_phi, Table_psi, Table_eta, resampling_fct=2):
    # resample the data onto a uniform time grid
# noinspection PyTypeChecker
    t_use = np.linspace(Table_t.min(), Table_t.max(), int(np.around(Table_t.size * resampling_fct)))
tidx = np.isfinite(Table_t)
if Table_t[1] - Table_t[0] <= 0:
tidx[0] = False
if Table_t[-1] - Table_t[-2] <= 0:
tidx[-1] = False
Table_theta = get_continue_angle(Table_t[tidx], Table_theta[tidx], t_use)
Table_phi = get_continue_angle(Table_t[tidx], Table_phi[tidx], t_use)
Table_psi = get_continue_angle(Table_t[tidx], Table_psi[tidx], t_use)
Table_eta = np.arccos(np.sin(Table_theta) * np.sin(Table_phi))
Table_t = t_use
return Table_t, Table_theta, Table_phi, Table_psi, Table_eta
def make_table_video(Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
zm_fct=1, stp=1, interval=50, trange=None, resampling_fct=2):
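    """Animate the body frame (P1, P2, P1 x P2) in 3D next to theta-phi and time-series panels.

    Data are resampled first; every stp-th sample becomes a frame and a
    matplotlib FuncAnimation object is returned (progress via tqdm).
    """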
fontsize = 35
figsize = (25, 15)
def update_fun(num, tl1, tl2, tl3, scs, Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, zm_fct):
num = num * stp
tqdm_fun.update(1)
# print('update_fun', num)
        # left, 3d trajectory
tX = Table_X[num]
tP1 = Table_P[num]
tP2 = Table_P2[num]
tP1 = tP1 / np.linalg.norm(tP1) * zm_fct
tP2 = tP2 / np.linalg.norm(tP2) * zm_fct
tP3 = np.cross(tP1, tP2) / zm_fct
t1 = np.vstack([tX, tX + tP1])
tl1.set_data(t1[:, 0], t1[:, 1])
tl1.set_3d_properties(t1[:, 2])
t2 = np.vstack([tX, tX + tP2])
tl2.set_data(t2[:, 0], t2[:, 1])
tl2.set_3d_properties(t2[:, 2])
t3 = np.vstack([tX, tX + tP3])
tl3.set_data(t3[:, 0], t3[:, 1])
tl3.set_3d_properties(t3[:, 2])
# right, theta-phi
scs[0].set_data(Table_phi[num], Table_theta[num])
# right, other 2d plots
for axi, ty, sci, in zip((ax3, ax4, ax5, ax6),
(Table_psi / np.pi, Table_X[:, 0], Table_X[:, 1], Table_X[:, 2]),
scs[1:]):
sci.set_data(Table_t[num], ty[num])
return tl1, tl2, tl3, scs
fig = plt.figure(figsize=figsize)
fig.patch.set_facecolor('white')
ax0 = plt.subplot2grid((6, 8), (0, 0), rowspan=6, colspan=6, projection='3d')
ax6 = plt.subplot2grid((6, 8), (5, 6), colspan=2) # Table_X[:, 2]
# ax1 = plt.subplot2grid((6, 8), (0, 6), colspan=2, sharex=ax6) #Table_theta
# ax2 = plt.subplot2grid((6, 8), (1, 6), colspan=2, sharex=ax6) #Table_phi
axth_ph = plt.subplot2grid((6, 8), (0, 6), rowspan=2, colspan=2,
projection='polar') # Table_theta-#Table_phi
ax3 = plt.subplot2grid((6, 8), (2, 6), colspan=2, sharex=ax6) # Table_psi
ax4 = plt.subplot2grid((6, 8), (3, 6), colspan=2, sharex=ax6) # Table_X[:, 0]
ax5 = plt.subplot2grid((6, 8), (4, 6), colspan=2, sharex=ax6) # Table_X[:, 1]
for spine in ax0.spines.values():
spine.set_visible(False)
    # left part: animation of the body axes (which represent the object, e.g. helix, ecoli, ...)
tX = Table_X[0]
tP1 = Table_P[0]
tP2 = Table_P2[0]
tP1 = tP1 / np.linalg.norm(tP1) * zm_fct
tP2 = tP2 / np.linalg.norm(tP2) * zm_fct
tP3 = np.cross(tP1, tP2) / zm_fct
tmp_line1 = ax0.plot([tX[0], tX[0] + tP1[0]],
[tX[1], tX[1] + tP1[1]],
[tX[2], tX[2] + tP1[2]], color='k', lw=fontsize * 0.1)[0]
tmp_line2 = ax0.plot([tX[0], tX[0] + tP2[0]],
[tX[1], tX[1] + tP2[1]],
[tX[2], tX[2] + tP2[2]], color='r')[0]
tmp_line3 = ax0.plot([tX[0], tX[0] + tP3[0]],
[tX[1], tX[1] + tP3[1]],
[tX[2], tX[2] + tP3[2]], color='b')[0]
if trange is None:
trange = np.max(Table_X.max(axis=0) - Table_X.min(axis=0))
print('trange=', trange)
tmid = (Table_X.max(axis=0) + Table_X.min(axis=0)) / 2
ax0.set_xlim3d([tmid[0] - trange, tmid[0] + trange])
tticks = np.around(np.linspace(tmid[0] - trange, tmid[0] + trange, 21), decimals=2)[1::6]
ax0.set_xticks(tticks)
ax0.set_xticklabels(tticks)
ax0.set_xlabel('$X_1$')
ax0.set_ylim3d([tmid[1] - trange, tmid[1] + trange])
tticks = np.around(np.linspace(tmid[1] - trange, tmid[1] + trange, 21), decimals=2)[1::6]
    ax0.set_yticks(tticks)
    ax0.set_yticklabels(tticks)
ax0.set_ylabel('$X_2$')
ax0.set_zlim3d([tmid[2] - trange, tmid[2] + trange])
tticks = np.around(np.linspace(tmid[2] - trange, tmid[2] + trange, 21), decimals=2)[1::6]
    ax0.set_zticks(tticks)
    ax0.set_zticklabels(tticks)
ax0.set_zlabel('$X_3$')
# right part, standard part
# theta-phi
plt.sca(axth_ph)
axth_ph.plot(Table_phi, Table_theta, '-.', alpha=0.5)
axth_ph.set_ylim(0, np.pi)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
xticks = np.around(np.linspace(Table_t.min(), Table_t.max(), 21), decimals=2)[1::6]
# xticks = np.linspace(Table_t.min(), Table_t.max(), 3)
# other variables
for axi, ty, axyi in zip((ax3, ax4, ax5, ax6),
(Table_psi / np.pi, Table_X[:, 0], Table_X[:, 1], Table_X[:, 2]),
                             ('$\\psi / \\pi$', '$X_1$', '$X_2$', '$X_3$')):
plt.sca(axi)
axi.plot(Table_t, ty, '-.', label='Table')
axi.set_ylabel('%s' % axyi, size=fontsize * 0.7)
axi.set_xticks(xticks)
axi.set_xticklabels(xticks)
plt.xticks(fontsize=fontsize * 0.5)
plt.yticks(fontsize=fontsize * 0.5)
for axi in (ax6,):
axi.set_xlabel('t', size=fontsize * 0.7)
plt.tight_layout()
    # right part, moving markers indicate the current time.
scs = []
scs.append(axth_ph.plot(Table_phi[0], Table_theta[0], 'or', markersize=fontsize * 0.3)[0])
for axi, ty, in zip((ax3, ax4, ax5, ax6),
(Table_psi / np.pi, Table_X[:, 0], Table_X[:, 1], Table_X[:, 2])):
plt.sca(axi)
scs.append(axi.plot(Table_t[0], ty[0], 'or', markersize=fontsize * 0.3)[0])
Table_dt = np.hstack((np.diff(Table_t), 0))
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi, Table_psi, Table_eta \
= resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi,
Table_psi, Table_eta, resampling_fct)
frames = Table_t.size // stp
tqdm_fun = tqdm_notebook(total=frames + 2)
anim = animation.FuncAnimation(fig, update_fun, frames, interval=interval, blit=False,
fargs=(tmp_line1, tmp_line2, tmp_line3, scs,
Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,
zm_fct), )
return anim
def make_table_video_geo(Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z=False,
zm_fct=1, stp=1, interval=50, trange_geo=None, trange_trj=None,
create_obj_at_fun=get_tail_nodes_split_at, resampling_fct=2,
dbg_mode=False, figsize=np.array((8, 6)), dpi=100,
**problem_kwargs):
assert figsize[0] > figsize[1]
assert Table_t.size > 3
if move_z:
z_mean = np.mean(Table_X[:, 2])
Table_X[:, 2] = Table_X[:, 2] - z_mean
planeShearRate = problem_kwargs['planeShearRate'][0]
ux_shear = z_mean * planeShearRate[0]
Xz_mean = (Table_t - Table_t[0]) * ux_shear
Table_X[:, 0] = Table_X[:, 0] - Xz_mean
def update_fun(num, tmp_line, tmp_trj, scs, Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, zm_fct):
num = num * stp
tqdm_fun.update(1)
# print('update_fun', num)
# left, 3d orientation
ttheta = Table_theta[num]
tphi = Table_phi[num]
tpsi = Table_psi[num]
tnodes = create_obj_at_fun(ttheta, tphi, tpsi, now_center=np.zeros(3), **problem_kwargs)
for tnodei, tmp_linei in zip(tnodes, tmp_line):
tmp_linei.set_data(tnodei[:, 0], tnodei[:, 1])
tmp_linei.set_3d_properties(tnodei[:, 2])
# left, 3d trajectory
tX = Table_X[num]
tmp_trj.set_data(tX[0], tX[1])
tmp_trj.set_3d_properties(tX[2])
# right, theta-phi
scs[0].set_data(Table_phi[num], Table_theta[num])
# right, other 2d plots
for axi, ty, sci, in zip((ax3, ax4, ax5, ax6),
(Table_psi / np.pi, Table_X[:, 0], Table_X[:, 1], Table_X[:, 2]),
scs[1:]):
sci.set_data(Table_t[num], ty[num])
# return tmp_line, tmp_trj, scs
fig = plt.figure(figsize=figsize, dpi=dpi)
fig.patch.set_facecolor('white')
ax0 = plt.subplot2grid((6, 8), (0, 0), rowspan=6, colspan=6, projection='3d')
axtrj = fig.add_axes((0, 0.7, 0.3 * figsize[1] / figsize[0], 0.3), projection='3d', )
ax6 = plt.subplot2grid((6, 8), (5, 6), colspan=2) # Table_X[:, 2]
axth_ph = plt.subplot2grid((6, 8), (0, 6), rowspan=2, colspan=2,
projection='polar') # theta-phi
ax3 = plt.subplot2grid((6, 8), (2, 6), colspan=2, sharex=ax6) # Table_psi
ax4 = plt.subplot2grid((6, 8), (3, 6), colspan=2, sharex=ax6) # Table_X[:, 0]
ax5 = plt.subplot2grid((6, 8), (4, 6), colspan=2, sharex=ax6) # Table_X[:, 1]
for spine in ax0.spines.values():
spine.set_visible(False)
for spine in axtrj.spines.values():
spine.set_visible(False)
axtrj.patch.set_alpha(0.2)
    # left part: animation of the body axes (which represent the object, e.g. helix, ecoli, ...)
# object orientation
ttheta = Table_theta[0]
tphi = Table_phi[0]
tpsi = Table_psi[0]
tnodes = create_obj_at_fun(ttheta, tphi, tpsi, now_center=np.zeros(3), **problem_kwargs)
tmp_line = []
for tnodei in tnodes:
tmp_line.append(ax0.plot(tnodei[:, 0], tnodei[:, 1], tnodei[:, 2])[0])
if trange_geo is None:
tnode = np.vstack(tnodes)
trange_geo = np.linalg.norm(tnode.max(axis=0) - tnode.min(axis=0))
print('trange_geo=', trange_geo)
tmid = np.zeros(3)
ax0.set_xlim3d([tmid[0] - trange_geo, tmid[0] + trange_geo])
tticks = np.around(np.linspace(tmid[0] - trange_geo, tmid[0] + trange_geo, 21),
decimals=2)[1::6]
ax0.set_xticks(tticks)
ax0.set_xticklabels(tticks)
ax0.set_xlabel('$X_1$')
ax0.set_ylim3d([tmid[1] - trange_geo, tmid[1] + trange_geo])
tticks = np.around(np.linspace(tmid[1] - trange_geo, tmid[1] + trange_geo, 21),
decimals=2)[1::6]
ax0.set_yticks(tticks)
ax0.set_yticklabels(tticks)
ax0.set_ylabel('$X_2$')
ax0.set_zlim3d([tmid[2] - trange_geo, tmid[2] + trange_geo])
tticks = np.around(np.linspace(tmid[2] - trange_geo, tmid[2] + trange_geo, 21),
decimals=2)[1::6]
ax0.set_zticks(tticks)
ax0.set_zticklabels(tticks)
ax0.set_zlabel('$X_3$')
# object trajectory
tX = Table_X[0]
axtrj.plot(Table_X[:, 0], Table_X[:, 1], Table_X[:, 2], '-.') # stable part
tmp_trj = axtrj.plot((tX[0],), (tX[1],), (tX[2],), 'or')[0]
if trange_trj is None:
trange_trj = np.max(Table_X.max(axis=0) - Table_X.min(axis=0))
print('trange_trj=', trange_trj)
tmid = (Table_X.max(axis=0) + Table_X.min(axis=0)) / 2
axtrj.set_xlim3d([tmid[0] - trange_trj, tmid[0] + trange_trj])
tticks = np.around(np.linspace(tmid[0] - trange_trj, tmid[0] + trange_trj, 8),
decimals=2)[[1, -2]]
axtrj.set_xticks(tticks)
axtrj.set_xticklabels(tticks)
# axtrj.set_xlabel('$X_1$')
axtrj.set_ylim3d([tmid[1] - trange_trj, tmid[1] + trange_trj])
tticks = np.around(np.linspace(tmid[1] - trange_trj, tmid[1] + trange_trj, 8),
decimals=2)[[1, -2]]
axtrj.set_yticks(tticks)
axtrj.set_yticklabels(tticks)
# axtrj.set_ylabel('$X_2$')
axtrj.set_zlim3d([tmid[2] - trange_trj, tmid[2] + trange_trj])
tticks = np.around(np.linspace(tmid[2] - trange_trj, tmid[2] + trange_trj, 8),
decimals=2)[[1, -2]]
axtrj.set_zticks(tticks)
axtrj.set_zticklabels(tticks)
# axtrj.set_zlabel('$X_3$')
# right part, standard part
# theta-phi
plt.sca(axth_ph)
axth_ph.plot(Table_phi, Table_theta, '-.', alpha=0.5)
axth_ph.set_ylim(0, np.pi)
xticks = np.around(np.linspace(Table_t.min(), Table_t.max(), 8), decimals=2)[1::6]
# other variables
for axi, ty, axyi in zip((ax3, ax4, ax5, ax6),
(Table_psi / np.pi, Table_X[:, 0], Table_X[:, 1], Table_X[:, 2]),
                             ('$\\psi / \\pi$', '$X_1$', '$X_2$', '$X_3$')):
plt.sca(axi)
axi.plot(Table_t, ty, '-.', label='Table')
axi.set_ylabel('%s' % axyi)
axi.set_xticks(xticks)
axi.set_xticklabels(xticks)
ax6.set_xlabel('t')
    # right part, moving markers indicate the current time.
scs = []
scs.append(axth_ph.plot(Table_phi[0], Table_theta[0], 'or')[0])
for axi, ty, in zip((ax3, ax4, ax5, ax6),
(Table_psi / np.pi, Table_X[:, 0], Table_X[:, 1], Table_X[:, 2])):
plt.sca(axi)
scs.append(axi.plot(Table_t[0], ty[0], 'or')[0])
plt.tight_layout()
Table_dt = np.hstack((np.diff(Table_t), 0))
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi, Table_psi, Table_eta \
= resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi,
Table_psi, Table_eta, resampling_fct)
fargs = (tmp_line, tmp_trj, scs, Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, zm_fct)
if dbg_mode:
tqdm_fun = tqdm_notebook(total=3)
anim = animation.FuncAnimation(fig, update_fun, 1, interval=interval,
blit=False, fargs=fargs, )
else:
frames = Table_t.size // stp
tqdm_fun = tqdm_notebook(total=frames + 2)
anim = animation.FuncAnimation(fig, update_fun, frames, interval=interval,
blit=False, fargs=fargs, )
return anim
def ext_simple_shear_flow(axi, n_arrow=6, taus=1, **problem_kwargs):
# background simple shear flow.
xmin, xmax = axi.get_xlim3d()
ymin, ymax = axi.get_ylim3d()
zmin, zmax = axi.get_zlim3d()
xmean = np.mean((xmin, xmax))
zmean = np.mean((zmin, zmax))
x = np.zeros(n_arrow)
y = np.ones(n_arrow) * ymax
z = np.linspace(zmin, zmax, n_arrow)
dx = (z - zmean) * taus
dy = np.zeros(n_arrow)
dz = np.zeros(n_arrow)
axi.plot((xmean + dx.min(), xmean + dx.max()), (ymax, ymax), (zmin, zmax), '-k')
axi.plot((xmean, xmean), (ymax, ymax), (zmin, zmax), '-k')
for tx, ty, tz, tdx, tdy, tdz in zip(x, y, z, dx, dy, dz):
axi.arrow3D(tx, ty, tz, tdx, tdy, tdz, arrowstyle="->", linestyle='dashed',
mutation_scale=10, )
return True
def ext_simple_shear_flow_2D(axi, n_arrow=6, taus=1, **problem_kwargs):
# background simple shear flow.
xmin, xmax = axi.get_xlim()
ymin, ymax = axi.get_ylim()
xmean = np.mean((xmin, xmax))
ymean = np.mean((ymin, ymax))
x = np.zeros(n_arrow)
y = np.linspace(ymin, ymax, n_arrow)
dx = (y - ymean) * taus
dy = np.zeros(n_arrow)
axi.plot((xmean + dx.min(), xmean + dx.max()), (ymin, ymax), '-k')
axi.plot((xmean, xmean), (ymin, ymax), '-k')
for tx, ty, tdx, tdy, in zip(x, y, dx, dy):
axi.arrow(tx, ty, tdx, tdy, linestyle='dashed', width=0.003)
return True
def make_table_video_geo_v2(Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta, move_z=False,
stp=1, interval=50, trange_geo=None,
create_obj_at_fun=get_tail_nodes_split_at, resampling_fct=2,
dbg_mode=False, figsize=np.array((16, 9)) * 0.5, dpi=100,
suptitle='', extFlow=ext_simple_shear_flow,
video_duration=None, total_frame=None, head_center=False,
add_info=False, **problem_kwargs):
assert figsize[0] > figsize[1]
assert Table_t.size > 3
def update_fun(num, tmp_geo, scs, Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta):
num = num * stp
tqdm_fun.update(1)
# orientation
ttheta = Table_theta[num]
tphi = Table_phi[num]
tpsi = Table_psi[num]
tnodes = create_obj_at_fun(ttheta, tphi, tpsi, now_center=np.zeros(3), **problem_kwargs)
for tnodei, tmp_geoi in zip(tnodes, tmp_geo):
tmp_geoi.set_data(tnodei[:, 0], tnodei[:, 1])
tmp_geoi.set_3d_properties(tnodei[:, 2])
# other variables
scs[0].set_data(Table_phi[num], Table_theta[num])
scs[1].set_data(Table_X[num, 1], Table_X[num, 2])
        # scs[2:] holds the (x1, eta, psi) marker points, in the same order as the data below
        for ty, sci in zip((Table_X[:, 0], Table_eta / np.pi, Table_psi / np.pi),
                           scs[2:]):
            sci.set_data(Table_t[num], ty[num])
setattr(spf.Axes3D, 'arrow3D', spf._arrow3D)
if move_z:
z_mean = np.mean(Table_X[:, 2])
Table_X[:, 2] = Table_X[:, 2] - z_mean
planeShearRate = problem_kwargs['planeShearRate'][0]
ux_shear = z_mean * planeShearRate[0]
Xz_mean = (Table_t - Table_t[0]) * ux_shear
Table_X[:, 0] = Table_X[:, 0] - Xz_mean
if head_center:
dc = (problem_kwargs['dist_hs'] + problem_kwargs['ch'] * problem_kwargs['ph']) / 2
Table_X = Table_X + dc * Table_P
Table_dt = np.hstack((np.diff(Table_t), 0))
if total_frame is None:
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi, Table_psi, Table_eta \
= resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi,
Table_psi, Table_eta, resampling_fct)
else:
war_msg = 'total_frame is %d, resampling_fct is IGNORED' % total_frame
warnings.warn(war_msg)
t_use = np.linspace(Table_t.min(), Table_t.max(), total_frame)
Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi, Table_psi, Table_eta \
= resampling_data(Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi,
Table_psi, Table_eta, t_use=t_use)
fig = plt.figure(figsize=figsize, dpi=dpi)
fig.patch.set_facecolor('white')
nrow_ncol = (9, 17)
axorin = plt.subplot2grid(nrow_ncol, (1, 0), rowspan=5, colspan=5,
projection='3d') # 3D orientation
axx2x3 = plt.subplot2grid(nrow_ncol, (1, 6), rowspan=4, colspan=5) # x_2 x_3
axthph = plt.subplot2grid(nrow_ncol, (1, 12), rowspan=5, colspan=5,
projection='polar') # theta - phi
axeta = plt.subplot2grid(nrow_ncol, (6, 0), rowspan=3, colspan=5) # Table_eta
ax_x1 = plt.subplot2grid(nrow_ncol, (6, 6), rowspan=3, colspan=5) # Table_X[:, 0]
axpsi = plt.subplot2grid(nrow_ncol, (6, 12), rowspan=3, colspan=5) # Table_psi
for spine in axorin.spines.values():
spine.set_visible(False)
axorin.set_xlabel('$\\textbf{X}_1$')
axorin.set_ylabel('$\\textbf{X}_2$')
axorin.set_zlabel('$\\textbf{X}_3$')
axx2x3.set_xlabel('$x_2$')
axx2x3.set_ylabel('$x_3$')
# axthph.set_xlabel('$\\phi$')
# axthph.set_ylabel('$\\theta$')
axeta.set_xlabel('$t$')
axeta.set_ylabel('$\\eta / \\pi$')
ax_x1.set_xlabel('$t$')
ax_x1.set_ylabel('$x_1$')
axpsi.set_xlabel('$t$')
axpsi.set_ylabel('$\\psi\' / \\pi$')
axthph.set_title('$\\theta$ (radial coordinate) $-$ $\\phi$ (angular coordinate)', y=1.1)
fig.suptitle(suptitle, fontsize='xx-large')
# axx2x3.set_title(suptitle, fontsize='xx-large')
# axorin.grid(False)
axorin.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axorin.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axorin.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
plt.tight_layout()
# object orientation
ttheta = Table_theta[0]
tphi = Table_phi[0]
tpsi = Table_psi[0]
tnodes = create_obj_at_fun(ttheta, tphi, tpsi, now_center=np.zeros(3), **problem_kwargs)
if trange_geo is None:
tnode = np.vstack(tnodes)
trange_geo = np.linalg.norm(tnode.max(axis=0) - tnode.min(axis=0)) * 0.4
print('trange_geo=', trange_geo)
tmid = np.zeros(3)
axorin.set_xlim3d([tmid[0] - trange_geo, tmid[0] + trange_geo])
tticks = np.around(np.linspace(tmid[0] - trange_geo, tmid[0] + trange_geo, 21),
decimals=2)[1::6]
axorin.set_xticks(tticks)
axorin.set_xticklabels(tticks)
axorin.set_ylim3d([tmid[1] - trange_geo, tmid[1] + trange_geo])
tticks = np.around(np.linspace(tmid[1] - trange_geo, tmid[1] + trange_geo, 21),
decimals=2)[1::6]
axorin.set_yticks(tticks)
axorin.set_yticklabels(tticks)
axorin.set_zlim3d([tmid[2] - trange_geo, tmid[2] + trange_geo])
tticks = np.around(np.linspace(tmid[2] - trange_geo, tmid[2] + trange_geo, 21),
decimals=2)[1::6]
axorin.set_zticks(tticks)
axorin.set_zticklabels(tticks)
extFlow(axorin, trange_geo=trange_geo, **problem_kwargs)
tmp_geo = []
for tnodei in tnodes:
tmp_geo.append(axorin.plot(tnodei[:, 0], tnodei[:, 1], tnodei[:, 2])[0])
# Jeffery sphere
u, v = np.mgrid[0:2 * np.pi:100j, 0:np.pi:100j]
tr = np.linalg.norm(np.vstack(tnodes), axis=-1).max()
x = np.cos(u) * np.sin(v) * tr
y = np.sin(u) * np.sin(v) * tr
z = np.cos(v) * tr
color1 = plt.get_cmap('gray')(np.linspace(0.2, 0.8, 256))
cmap = mcolors.LinearSegmentedColormap.from_list('my_colormap', color1)
axorin.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cmap, edgecolor='none', alpha=0.1)
axorin.plot(Table_P[:, 0] * tr, Table_P[:, 1] * tr, Table_P[:, 2] * tr, 'k')
# other variables
scs = []
axthph.plot(Table_phi, Table_theta, '-')
axthph.set_ylim(0, np.pi)
axthph.set_yticklabels([])
scs.append(axthph.plot(Table_phi[0], Table_theta[0], 'or')[0])
axx2x3.plot(Table_X[:, 1], Table_X[:, 2], '-')
scs.append(axx2x3.plot(Table_X[0, 1], Table_X[0, 2], 'or')[0])
ax_x1.plot(Table_t, Table_X[:, 0], '-', color='#1f77b4')
scs.append(ax_x1.plot(Table_t[0], Table_X[0, 0], 'or')[0])
for axi, ty, in zip((axeta, axpsi),
(Table_eta, Table_psi)):
for i0, i1 in separate_angle_idx(ty):
axi.plot(Table_t[i0:i1], ty[i0:i1] / np.pi, '-', color='#1f77b4')
scs.append(axi.plot(Table_t[0], ty[0] / np.pi, 'or')[0])
# make movie
fargs = (tmp_geo, scs, Table_t, Table_X, Table_P, Table_P2,
Table_theta, Table_phi, Table_psi, Table_eta,)
frames = Table_t.size // stp
if total_frame is not None:
assert Table_t.size == total_frame
stp = 1
war_msg = 'size of Table_t is %d, total_frame is %d, stp is set to %d' % \
(Table_t.size, total_frame, stp)
warnings.warn(war_msg)
frames = total_frame
if video_duration is not None:
interval = video_duration / frames
war_msg = 'video_duration is %f, interval is set to %f' % (video_duration, interval)
warnings.warn(war_msg)
if np.isclose(dbg_mode, 1):
frames = 1
tqdm_fun = tqdm_notebook(total=frames + 1)
anim = animation.FuncAnimation(fig, update_fun, frames, interval=interval,
blit=False, fargs=fargs, )
elif np.isclose(dbg_mode, 2):
anim = None
else:
tqdm_fun = tqdm_notebook(total=frames + 1)
anim = animation.FuncAnimation(fig, update_fun, frames, interval=interval,
blit=False, fargs=fargs, )
if add_info:
t1 = (frames,)
return anim, t1
else:
return anim
def load_problem_kwargs(pickle_name):
pickle_name = check_file_extension(pickle_name, extension='.pickle')
t_path = os.path.dirname(os.path.abspath(__file__))
full_path = os.path.normpath(t_path + '/' + pickle_name)
with open(full_path, 'rb') as handle:
problem_kwargs = pickle.load(handle)
return problem_kwargs
def load_table_date_pickle(job_dir, theta, phi):
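    """Load the result pickle matching 'th%5.3f_ph%5.3f_*.pickle' in job_dir.

    Adds Table_dt (from np.diff(Table_t)) if it is missing and returns
    (pickle_dict, filename). Raises IndexError if no file matches.
    """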
t_headle = 'th%5.3f_ph%5.3f_(.*?).pickle' % (theta, phi)
filename = [filename for filename in os.listdir(job_dir)
if re.match(t_headle, filename) is not None][0]
with open(os.path.join(PWD, job_dir, filename), 'rb') as handle:
tpick = pickle.load(handle)
if 'Table_dt' not in tpick.keys():
Table_dt = np.hstack((np.diff(tpick['Table_t']), 0))
tpick['Table_dt'] = Table_dt
return tpick, filename
def load_table_data_pickle_dir(t_dir, t_headle='(.*?).pickle'):
t_path = os.listdir(t_dir)
filename_list = [filename for filename in t_path if re.match(t_headle, filename) is not None]
ini_theta_list = []
ini_phi_list = []
lst_eta_list = []
theta_max_fre_list = []
phi_max_fre_list = []
psi_max_fre_list = []
eta_max_fre_list = []
pickle_path_list = []
idx_list = []
for i0, tname in enumerate(tqdm_notebook(filename_list[:])):
tpath = os.path.join(t_dir, tname)
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
ini_theta_list.append(tpick['ini_theta'])
ini_phi_list.append(tpick['ini_phi'])
lst_eta_list.append(tpick['Table_eta'][-1])
pickle_path_list.append(tpath)
idx_list.append(i0)
# fft rule
tx = tpick['Table_t']
tmin = np.max((0, tx.max() - 1000))
idx = tx > tmin
freq_pk = get_major_fre(tx[idx], tpick['Table_theta'][idx])
idx = tx > (tx.max() - 1 / freq_pk * 10)
theta_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_theta'][idx]))
phi_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_phi'][idx]))
psi_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_psi'][idx]))
eta_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_eta'][idx]))
data0 = pd.DataFrame({'ini_theta': np.around(ini_theta_list, 3),
'ini_phi': np.around(ini_phi_list, 3),
'lst_eta': np.around(lst_eta_list, 3),
'theta_max_fre': theta_max_fre_list,
'phi_max_fre': phi_max_fre_list,
'psi_max_fre': psi_max_fre_list,
'eta_max_fre': eta_max_fre_list,
'data_idx': idx_list})
data = data0.pivot_table(index=['ini_theta'], columns=['ini_phi'])
# lst_eta = data.lst_eta
# theta_max_fre = data.theta_max_fre
# phi_max_fre = data.phi_max_fre
# psi_max_fre = data.psi_max_fre
# eta_max_fre = data.eta_max_fre
# data_idx = data.data_idx.fillna(-1).astype(int)
return data
def load_rand_data_pickle_dir(t_dir, t_headle='(.*?).pickle', n_load=None, rand_mode=False):
t_path = os.listdir(t_dir)
filename_list = [filename for filename in t_path if re.match(t_headle, filename) is not None]
ini_theta_list = []
ini_phi_list = []
ini_psi_list = []
theta_max_fre_list = []
phi_max_fre_list = []
psi_max_fre_list = []
pickle_path_list = []
n_load = len(filename_list) if n_load is None else n_load
assert n_load <= len(filename_list)
if rand_mode:
tidx = np.random.choice(len(filename_list), n_load, replace=False)
else:
tidx = np.arange(n_load)
use_filename_list = np.array(filename_list)[tidx]
for i0, tname in enumerate(tqdm_notebook(use_filename_list)):
tpath = os.path.join(t_dir, tname)
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
ini_theta_list.append(tpick['ini_theta'])
ini_phi_list.append(tpick['ini_phi'])
ini_psi_list.append(tpick['ini_psi'])
pickle_path_list.append(tpath)
# fft rule
tx = tpick['Table_t']
tmin = np.max((0, tx.max() - 1000))
idx = tx > tmin
freq_pk = get_major_fre(tx[idx], tpick['Table_theta'][idx])
idx = tx > (tx.max() - 1 / freq_pk * 10)
theta_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_theta'][idx]))
phi_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_phi'][idx]))
psi_max_fre_list.append(get_major_fre(tx[idx], tpick['Table_psi'][idx]))
ini_theta_list = np.hstack(ini_theta_list)
ini_phi_list = np.hstack(ini_phi_list)
ini_psi_list = np.hstack(ini_psi_list)
theta_max_fre_list = np.hstack(theta_max_fre_list)
phi_max_fre_list = np.hstack(phi_max_fre_list)
psi_max_fre_list = np.hstack(psi_max_fre_list)
pickle_path_list = np.hstack(pickle_path_list)
return ini_theta_list, ini_phi_list, ini_psi_list, \
theta_max_fre_list, phi_max_fre_list, psi_max_fre_list, \
pickle_path_list
def load_rand_data_pickle_dir_v2(t_dir, t_headle='(.*?).pickle', n_load=None, rand_mode=False):
def _get_primary_autocorrelate_fft_fre_v2(tx, ty1, continue_angle=True, fft_full_mode=False):
idx = np.ones_like(tx, dtype=bool)
if not fft_full_mode:
idx[:-20000] = False
if continue_angle:
t_use = np.linspace(tx[idx].min(), tx[idx].max(), tx[idx].size)
ty = get_continue_angle(tx[idx], ty1[idx], t_use)
else:
t_use = tx
ty = ty1
ty = np.cos(ty - np.mean(ty) + np.pi / 2)
sampling_rate = ty.size / (t_use.max() - t_use.min())
tfft = np.fft.rfft(np.correlate(ty, ty, mode='full')[ty.size - 1:])
tfft = tfft / ty.size / sampling_rate * 2
tfft_abs = np.abs(tfft)
# noinspection PyTypeChecker
tfreq = np.fft.rfftfreq(t_use.size, np.mean(np.diff(t_use)))
# tfft_abs = tfft_abs[:-1]
# tfreq = tfreq[:-1]
# plt.plot(t_use, ty)
# plt.loglog(tfreq, tfft_abs)
tpk = signal.find_peaks(tfft_abs)[0]
fft_abs_pk = tfft_abs[tpk]
tidx = np.argsort(fft_abs_pk)
fft_abs_pk = fft_abs_pk[tidx]
freq_pk = tfreq[tpk][tidx]
low_fft_abs_pk = fft_abs_pk[freq_pk < freq_pk[-1]]
low_freq_pk = freq_pk[freq_pk < freq_pk[-1]]
if low_fft_abs_pk.size > 0:
tidx2 = np.argmax(low_fft_abs_pk)
pk_fre = np.hstack((freq_pk[-1], low_freq_pk[tidx2]))
pk_fft = np.hstack((fft_abs_pk[-1], low_fft_abs_pk[tidx2]))
else:
pk_fre = np.hstack((freq_pk[-1], freq_pk[-1],))
pk_fft = np.hstack((fft_abs_pk[-1], fft_abs_pk[-1]))
return pk_fre, pk_fft
t_path = os.listdir(t_dir)
filename_list = [filename for filename in t_path if re.match(t_headle, filename) is not None]
ini_theta_list = []
ini_phi_list = []
ini_psi_list = []
std_eta_list = []
# theta_primary_fre_list = []
# phi_primary_fre_list = []
# psi_primary_fre_list = []
# eta_primary_fre_list = []
theta_autocorrelate_fre_list = []
phi_autocorrelate_fre_list = []
psi_autocorrelate_fre_list = []
eta_autocorrelate_fre_list = []
psi_max_phi_list = []
dx_list = []
dy_list = []
dz_list = []
pickle_path_list = []
idx_list = []
n_load = len(filename_list) if n_load is None else n_load
assert n_load <= len(filename_list)
if rand_mode:
tidx = np.random.choice(len(filename_list), n_load, replace=False)
else:
tidx = np.arange(n_load)
use_filename_list = np.array(filename_list)[tidx]
for i0, tname in enumerate(tqdm_notebook(use_filename_list)):
tpath = os.path.join(t_dir, tname)
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
ini_theta_list.append(tpick['ini_theta'])
ini_phi_list.append(tpick['ini_phi'])
ini_psi_list.append(tpick['ini_psi'])
pickle_path_list.append(tpath)
idx_list.append(i0)
# fft rule
tx = tpick['Table_t']
tmin = np.max((0, tx.max() - 1000))
idx = tx > tmin
freq_pk = get_major_fre(tx[idx], tpick['Table_theta'][idx])
idx = tx > (tx.max() - 1 / freq_pk * 10)
psi_max_phi_list.append(tpick['Table_psi'][idx][np.argmax(tpick['Table_phi'][idx])])
# theta_primary_fre_list.append(spf_tb.get_primary_fft_fre(tx[idx], tpick['Table_theta'][idx], cos_mode=True)[-10:])
# phi_primary_fre_list.append(spf_tb.get_primary_fft_fre(tx[idx], tpick['Table_phi'][idx], cos_mode=True)[-10:])
# psi_primary_fre_list.append(spf_tb.get_primary_fft_fre(tx[idx], tpick['Table_psi'][idx], cos_mode=True)[-10:])
# eta_primary_fre_list.append(spf_tb.get_primary_fft_fre(tx[idx], tpick['Table_eta'][idx], cos_mode=True)[-10:])
theta_autocorrelate_fre_list.append(
_get_primary_autocorrelate_fft_fre_v2(tx[idx], tpick['Table_theta'][idx]))
phi_autocorrelate_fre_list.append(
_get_primary_autocorrelate_fft_fre_v2(tx[idx], tpick['Table_phi'][idx]))
psi_autocorrelate_fre_list.append(
_get_primary_autocorrelate_fft_fre_v2(tx[idx], tpick['Table_psi'][idx]))
eta_autocorrelate_fre_list.append(
_get_primary_autocorrelate_fft_fre_v2(tx[idx], tpick['Table_eta'][idx]))
std_eta_list.append((np.mean(tpick['Table_eta'][idx]), np.std(tpick['Table_eta'][idx])))
for i0, tlist in enumerate((dx_list, dy_list, dz_list)):
tpoly = np.polyfit(tx[idx], tpick['Table_X'][idx, i0], 1, w=np.blackman(idx.sum()))
tlist.append(tpoly[0])
ini_theta_list = np.hstack(ini_theta_list)
ini_phi_list = np.hstack(ini_phi_list)
std_eta_list = np.array(std_eta_list)
psi_max_phi_list = np.array(psi_max_phi_list)
# theta_primary_fre_list = np.array(theta_primary_fre_list)
# phi_primary_fre_list = np.array(phi_primary_fre_list)
# psi_primary_fre_list = np.array(psi_primary_fre_list)
# eta_primary_fre_list = np.array(eta_primary_fre_list)
theta_autocorrelate_fre_list = np.array(theta_autocorrelate_fre_list)
phi_autocorrelate_fre_list = np.array(phi_autocorrelate_fre_list)
psi_autocorrelate_fre_list = np.array(psi_autocorrelate_fre_list)
eta_autocorrelate_fre_list = np.array(eta_autocorrelate_fre_list)
dx_list = np.hstack(dx_list)
dy_list = np.hstack(dy_list)
dz_list = np.hstack(dz_list)
pickle_path_list = np.array(pickle_path_list)
return ini_theta_list, ini_phi_list, ini_psi_list, std_eta_list, psi_max_phi_list, \
theta_autocorrelate_fre_list, phi_autocorrelate_fre_list, psi_autocorrelate_fre_list, \
eta_autocorrelate_fre_list, dx_list, dy_list, dz_list, pickle_path_list
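# Hedged sketch (my addition, not from the original source): the
# autocorrelation-FFT rule in _get_primary_autocorrelate_fft_fre_v2 can be
# sanity-checked on a synthetic cosine; the strongest nonzero peak of the
# autocorrelation spectrum should sit at the input frequency (numpy only,
# values illustrative).
def _example_autocorrelate_frequency(freq=0.05, n=20000):
    tx = np.linspace(0, 1000, n)
    ty = np.cos(2 * np.pi * freq * tx)
    ac = np.correlate(ty, ty, mode='full')[ty.size - 1:]
    tfft_abs = np.abs(np.fft.rfft(ac))
    tfreq = np.fft.rfftfreq(tx.size, np.mean(np.diff(tx)))
    return tfreq[np.argmax(tfft_abs[1:]) + 1]  # ~= freq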
def load_rand_data_pickle_dir_instant(t_dir, t_headle='(.*?).pickle', n_load=None, rand_mode=False,
t_start=0, t_stop=None, t_step=1):
t_path = os.listdir(t_dir)
filename_list = [filename for filename in t_path if re.match(t_headle, filename) is not None]
n_load = len(filename_list) if n_load is None else n_load
assert n_load <= len(filename_list)
if rand_mode:
tidx = np.random.choice(len(filename_list), n_load, replace=False)
else:
tidx = np.arange(n_load)
use_filename_list = np.array(filename_list)[tidx]
if t_stop is None:
tname = use_filename_list[0]
tpath = os.path.join(t_dir, tname)
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
Table_t = tpick['Table_t'][1:]
t_stop = Table_t.max()
pickle_path_list = []
idx_list = []
intp_X_list = []
intp_t = np.arange(t_start, t_stop, t_step)
for i0, tname in enumerate(tqdm_notebook(use_filename_list)):
tpath = os.path.join(t_dir, tname)
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
pickle_path_list.append(tpath)
idx_list.append(i0)
Table_t = tpick['Table_t'][1:]
Table_X = tpick['Table_X'][1:]
int_fun_X = interpolate.interp1d(Table_t, Table_X, kind='quadratic', axis=0)
intp_X = int_fun_X(intp_t)
intp_X_list.append(intp_X)
pickle_path_list = np.array(pickle_path_list)
idx_list = np.hstack(idx_list)
intp_X_list = np.dstack(intp_X_list) # (time, coord, caseid)
return pickle_path_list, idx_list, intp_t, intp_X_list
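# Hedged sketch (my addition, an assumption rather than source code): a minimal
# standalone version of the quadratic trajectory resampling done above, applied
# to a synthetic 3D path.
def _example_interp_trajectory():
    t = np.linspace(0, 10, 101)
    X = np.stack((np.cos(t), np.sin(t), 0.1 * t), axis=1)  # (time, coord)
    int_fun_X = interpolate.interp1d(t, X, kind='quadratic', axis=0)
    return int_fun_X(np.arange(0, 10, 0.5))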
def load_lookup_table_pickle(pickle_name):
with open('%s.pickle' % pickle_name, 'rb') as handle:
pickle_data = pickle.load(handle)
ttheta_all, tphi_all = pickle_data[0][1][0][:2]
if tphi_all[-1] < (2 * np.pi):
tphi_all = np.hstack((tphi_all, 2 * np.pi))
if ttheta_all[-1] < (np.pi):
ttheta_all = np.hstack((ttheta_all, np.pi))
tpsi_all = np.array([ti[0] for ti in pickle_data])
U_all = [[] for i in range(6)]
for _, table_psi_data in pickle_data:
for (ttheta, tphi, tU), Ui in zip(table_psi_data, U_all):
if tphi[-1] < (2 * np.pi):
tU[2 * np.pi] = tU[0]
if ttheta[-1] < (np.pi):
tU = tU.append(tU.loc[0].rename(np.pi))
Ui.append(tU)
return U_all, ttheta_all, tphi_all, tpsi_all
def phase_map_show_idx(type_fre, tipical_th_ph_list, iidx, job_dir, table_name, fast_mode=0):
theta = type_fre.index.values[iidx[0][0]]
phi = type_fre.columns.values[iidx[1][0]]
print('-ini_theta %f -ini_phi %f' % (theta, phi))
tipical_th_ph_list.append((theta, phi))
show_pickle_results(job_dir, theta, phi, table_name, fast_mode=fast_mode)
return tipical_th_ph_list
def phase_map_show_idx_list(type_fre, iidx, job_dir, nshow=5, Table_t_range1=np.array((0, np.inf)),
Table_t_range2=np.array((0, np.inf)), fast_mode=0,
figsize=np.array((16, 9)) * 0.5, dpi=200):
nshow = int(np.min((nshow, iidx[0].size)))
tidx = np.random.choice(iidx[0].size, nshow, replace=False)
theta = type_fre.index.values[iidx[0][tidx]]
phi = type_fre.columns.values[iidx[1][tidx]]
theta_phi_list = np.vstack((theta, phi)).T
show_table_theta_phi_list(theta_phi_list, job_dir, Table_t_range=Table_t_range1,
figsize=figsize, dpi=dpi, fast_mode=fast_mode)
show_table_result_list(theta_phi_list, job_dir, Table_t_range=Table_t_range2,
figsize=figsize, dpi=dpi)
return True
def _do_plot_process(args):
job_dir, dirpath, filename, theta, phi, pick_fre = args
pick_name = os.path.join(job_dir, filename)
with open(pick_name, 'rb') as handle:
tpick = pickle.load(handle)
if 'Table_dt' not in tpick.keys():
tpick['Table_dt'] = np.hstack((np.diff(tpick['Table_t']), 0))
# print('%s, Fth=%.6f' % (filename, pick_fre))
tname = os.path.splitext(os.path.basename(filename))[0]
filename = os.path.join(dirpath, tname)
tmin = tpick['Table_t'].max() - 1 / pick_fre * 10
idx = tpick['Table_t'] > tmin
fig0 = save_table_result('%s_1.jpg' % filename,
tpick['Table_t'][idx], tpick['Table_dt'][idx],
tpick['Table_X'][idx],
tpick['Table_P'][idx], tpick['Table_P2'][idx],
tpick['Table_theta'][idx], tpick['Table_phi'][idx],
tpick['Table_psi'][idx], tpick['Table_eta'][idx])
fig1 = save_theta_phi_psi_eta('%s_2.jpg' % filename,
tpick['Table_t'][idx], tpick['Table_dt'][idx],
tpick['Table_X'][idx],
tpick['Table_P'][idx], tpick['Table_P2'][idx],
tpick['Table_theta'][idx], tpick['Table_phi'][idx],
tpick['Table_psi'][idx], tpick['Table_eta'][idx])
plt.close(fig0)
plt.close(fig1)
return True
def _save_separate_angle_fft(job_dir, dirpath, tfre, tidx):
# clear dir
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
print('remove folder %s' % dirpath)
os.makedirs(dirpath)
print('make folder %s' % dirpath)
pickle_info_list = []
tfre_shape = tfre.values.shape
tfre_idx_list = tfre.unstack().index.to_numpy().reshape(tfre_shape[1], tfre_shape[0])
for phi, theta in tfre_idx_list[tidx]:
t_headle = 'th%5.3f_ph%5.3f_(.*?).pickle' % (theta, phi)
filenames = [filename for filename in os.listdir(job_dir)
if re.match(t_headle, filename) is not None]
pick_fre = tfre.loc[theta].loc[phi]
for filename in filenames:
pickle_info_list.append((job_dir, dirpath, filename, theta, phi, pick_fre))
    # # multi-process version; disabled because it sometimes raises unknown errors.
# pool = multiprocessing.Pool()
# for _ in tqdm_notebook(pool.imap_unordered(_do_plot_process, pickle_info_list),
# total=len(pickle_info_list)):
# pass
# single process version
for pickle_info in tqdm_notebook(pickle_info_list):
# print(pickle_info)
_do_plot_process(pickle_info)
return True
def save_separate_angle_fft(job_dir, tfre, check_fre, atol_fre):
use_idx = np.isclose(tfre, check_fre, rtol=0, atol=atol_fre).T
fre_subdir = 'fre_%f' % check_fre
dirpath = os.path.join(job_dir, 'fre_separate', fre_subdir)
print('frequency in the range (%f, %f)' % (check_fre - atol_fre, check_fre + atol_fre))
_save_separate_angle_fft(job_dir, dirpath, tfre, use_idx)
return use_idx
def save_separate_angleList_fft(job_dir, tfre, check_fre_list, atol_fre_list):
remaind_idx = np.ones_like(tfre, dtype=bool).T
for check_fre, atol_fre in zip(check_fre_list, atol_fre_list):
use_idx = save_separate_angle_fft(job_dir, tfre, check_fre, atol_fre)
# use_idx = np.isclose(tfre, check_fre, rtol=0, atol=atol_fre).T
remaind_idx[use_idx] = False
# process the remainders
if np.any(remaind_idx):
dirpath = os.path.join(job_dir, 'fre_separate', 'remainders')
_save_separate_angle_fft(job_dir, dirpath, tfre, remaind_idx)
return True
def separate_fre_path(check_fre_list, atol_list, data0, pickle_path_list):
for i0, (check_fre, atol) in enumerate(zip(check_fre_list, atol_list)):
        print('%dth frequency range: (%f, %f)' % (i0, check_fre - atol, check_fre + atol))
case_path_list = [[] for ti in check_fre_list]
for i0 in data0.index:
datai = data0.loc[i0]
tdata_idx = int(datai.data_idx)
tmax_fre = datai.use_max_fre
tpath = pickle_path_list[tdata_idx]
n_match = 0
for check_fre, atol, case_path in zip(check_fre_list, atol_list, case_path_list):
if np.isclose(tmax_fre, check_fre, rtol=0, atol=atol):
case_path.append(tpath)
n_match = n_match + 1
if not np.isclose(n_match, 1):
print('tmax_fre=%f, n_match=%d' % (tmax_fre, n_match), tpath)
return case_path_list
def draw_phase_map_theta(case_path, color, psi_lim, axs=None,
resampling=False, resampling_fct=2, thandle=''):
fontsize = 40
# color = np.array(color)
if axs is None:
n_xticks = 32
xticks = np.arange(n_xticks)
fig = plt.figure(figsize=(20, 20))
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(221, polar=True)
ax0.set_xticks(xticks / n_xticks * 2 * np.pi)
ax0.set_xticklabels(['$\dfrac{%d}{%d}2\pi$' % (i0, n_xticks) for i0 in xticks])
ax0.set_yticklabels([])
ax0.set_ylim(0, np.pi)
plt.tight_layout()
axs = (ax0,)
if np.array(case_path).size > 0:
th_all = []
ph_all = []
for tpath in tqdm_notebook(case_path[:], desc=thandle):
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
Table_t = tpick['Table_t']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
Table_psi = tpick['Table_psi']
Table_eta = tpick['Table_eta']
if resampling:
Table_t, Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_angle(Table_t, Table_theta, Table_phi, Table_psi, Table_eta,
resampling_fct)
tidx = np.logical_and(Table_psi >= psi_lim[0], Table_psi < psi_lim[1])
th_all.append(Table_theta[tidx])
ph_all.append(Table_phi[tidx])
for ax0 in tube_flatten((axs,)):
ax0.scatter(np.hstack(ph_all), np.hstack(th_all), c=color, s=fontsize * 0.2)
return axs
def draw_phase_map_theta_bck(case_path, color, psi_lim, axs=None,
resampling=False, resampling_fct=2, thandle=''):
fontsize = 40
# color = np.array(color)
if axs is None:
n_xticks = 32
xticks = np.arange(n_xticks)
fig = plt.figure(figsize=(20, 20))
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(221, polar=True)
ax0.set_xticks(xticks / n_xticks * 2 * np.pi)
ax0.set_xticklabels(['$\dfrac{%d}{%d}2\pi$' % (i0, n_xticks) for i0 in xticks])
ax0.set_yticklabels([])
ax0.set_ylim(0, np.pi)
plt.tight_layout()
axs = (ax0,)
for tpath in tqdm_notebook(case_path[:], desc=thandle):
with open(tpath, 'rb') as handle:
tpick = pickle.load(handle)
Table_t = tpick['Table_t']
Table_theta = tpick['Table_theta']
Table_phi = tpick['Table_phi']
Table_psi = tpick['Table_psi']
Table_eta = tpick['Table_eta']
if resampling:
Table_t, Table_theta, Table_phi, Table_psi, Table_eta = \
resampling_angle(Table_t, Table_theta, Table_phi, Table_psi, Table_eta,
resampling_fct)
tidx = np.logical_and(Table_psi >= psi_lim[0], Table_psi < psi_lim[1])
for ax0 in tube_flatten((axs,)):
ax0.scatter(Table_phi[tidx], Table_theta[tidx], c=color, s=fontsize * 0.2)
return axs
# show phase map of the final trajectory in theta-phi space, colored by frequency.
def show_traj_phase_map_fre(tuse):
fontsize = 40
fig = plt.figure(figsize=(20, 12))
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(111, polar=True)
n_xticks = 32
xticks = np.arange(n_xticks)
ax0.set_xticks(xticks / n_xticks * 2 * np.pi)
ax0.set_xticklabels(['$\dfrac{%d}{%d}2\pi$' % (i0, n_xticks) for i0 in xticks])
ax0.set_yticklabels([])
ax0.set_ylim(0, np.pi)
tdata = tuse.values
im = ax0.pcolor(tuse.columns.values, tuse.index.values, tdata,
cmap=plt.get_cmap('Set2'))
fig.colorbar(im, ax=ax0, orientation='vertical').ax.tick_params(labelsize=fontsize)
return True
# show phase map of the final trajectory in theta-phi space, colored by precomputed type.
def show_traj_phase_map_type(tuse, ticklabels=None, figsize=(12, 12), dpi=100, n_xticks=32):
fig = plt.figure(figsize=figsize, dpi=dpi)
fig.patch.set_facecolor('white')
ax0 = fig.add_subplot(111, polar=True)
# xticks = np.arange(n_xticks)
# ax0.set_xticks(xticks / n_xticks * 2 * np.pi)
# ax0.set_xticklabels(['$\dfrac{%d}{%d}2\pi$' % (i0, n_xticks) for i0 in xticks])
ax0.set_yticklabels([])
ax0.set_ylim(0, np.pi)
tdata = tuse.values
im = ax0.pcolor(tuse.columns.values, tuse.index.values, tdata,
cmap=plt.get_cmap('tab20', int(np.nanmax(tdata)) + 1),
vmin=np.nanmin(tdata) - .5, vmax=np.nanmax(tdata) + .5)
ticks = np.arange(np.nanmin(tdata), np.nanmax(tdata) + 1)
if ticklabels is None:
ticklabels = np.arange(np.nanmin(tdata), np.nanmax(tdata) + 1)
cbar = fig.colorbar(im, ax=ax0, orientation='vertical')
# cbar.ax.tick_params(labelsize=fontsize)
cbar.set_ticks(ticks)
cbar.ax.set_yticklabels(ticklabels)
plt.tight_layout()
return True
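# Hedged standalone sketch (my addition) of the discrete-colormap trick used
# above: shifting vmin/vmax by 0.5 around the integer categories, together with
# a discretized colormap, centers every category in its own color bin so the
# colorbar ticks fall mid-bin. The data here is random and purely illustrative.
def _example_discrete_colorbar(n_type=4):
    data = np.random.randint(0, n_type, (10, 10))
    fig, ax = plt.subplots()
    im = ax.pcolor(data, cmap=plt.get_cmap('tab20', n_type),
                   vmin=-0.5, vmax=n_type - 0.5)
    cbar = fig.colorbar(im, ax=ax, ticks=np.arange(n_type))
    return fig, cbar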
# The following code is used to do a 2D FFT and 2D IFFT of \omega(\theta, \phi, \psi)
# of a microswimmer in shear flow, along \theta and \phi.
def do_fft_major(tw, tktl_list):
    # do an FFT of the velocity component, pick the major frequencies, then IFFT.
tw_fft = np.fft.fft2(tw)
ntk, ntl = tw_fft.shape
idx = np.ones_like(tw_fft) * 1e-30
for tk1, tl1 in tktl_list:
tk2 = ntk - tk1 if tk1 > 0 else tk1
tl2 = ntl - tl1 if tl1 > 0 else tl1
idx[tk1, tl1] = 1
idx[tk2, tl2] = 1
tf1 = tw_fft[tk1, tl1]
tf2 = tw_fft[tk2, tl2]
if tk1 > 0 or tl1 > 0:
            print('use frequency pairs %f%+fi and %f%+fi at (%d, %d) and (%d, %d)' % (
                tf1.real, tf1.imag, tf2.real, tf2.imag, tk1, tl1, tk2, tl2))
        else:
            print('use frequency %f%+fi at (%d, %d)' % (tf1.real, tf1.imag, tk1, tl1))
tw_fft2 = tw_fft * idx
tw2 = np.fft.ifft2(tw_fft2)
    print('max absolute value of the imaginary part is', np.abs(tw2.imag).max())
return tw_fft, tw2.real, tw_fft2
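# Hedged self-check (my addition, not in the original source): for a field
# containing a single Fourier mode, keeping that (k, l) pair in do_fft_major
# should reproduce the input almost exactly.
def _example_do_fft_major():
    nth, nph = 32, 64
    tm, tn = np.meshgrid(np.arange(nth), np.arange(nph), indexing='ij')
    tw = np.sin(2 * np.pi * (2 * tm / nth + 3 * tn / nph))
    _, tw2, _ = do_fft_major(tw, ((2, 3),))
    return np.allclose(tw, tw2)  # expected: True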
def factor_wpi_kl(tw, tktl):
# see decouplingIdea.tex for detail.
# \omega_{pi}^{kl}(\theta, \phi, \psi) = \dfrac{2}{n_\theta n_\phi}
# \left(\Re(\Omega_{pi}(k,l, \psi)) \cos(2k\theta + l\phi) -
# \Im(\Omega_{pi}(k,l, \psi)) \sin(2k\theta + l\phi) \right)
# \omega_{pi}^{kl}(\theta, \phi, \psi) = \dfrac{2}{n_\theta n_\phi}
# \norm{\Omega_{pi}(k,l, \psi)} \sin(\alpha_0 + 2k\theta + l\phi)
# Amp_use = \dfrac{2}{n_\theta n_\phi}\norm{\Omega_{pi}(k,l, \psi)}
# w_th_use = 2k
# w_ph_use = l
# alpha_use = \alpha_0
tk1, tl1 = tktl
nth, nph = tw.shape
tw_fft = np.fft.fft2(tw)
Akl1 = tw_fft[tk1, tl1]
Aklr = Akl1.real
Akli = Akl1.imag
k_sign = 1 if tk1 < (nth / 2) else -1
l_sign = 1 if tl1 < (nph / 2) else -1
Amp_use = 2 * np.abs(Akl1) / tw.size * k_sign * l_sign
w_th_use = 2 * tk1 if tk1 < (nth / 2) else -2 * (nth - tk1)
w_ph_use = tl1 if tl1 < (nph / 2) else -1 * (nph - tl1)
alpha_use = -np.arctan(Aklr / Akli)
return Akl1, Amp_use, w_th_use, w_ph_use, alpha_use
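# Hedged self-check of the factorization above (my addition; assumes
# tk1 < nth / 2 and tl1 < nph / 2 so both sign factors are +1): the returned
# amplitude/phase factors should rebuild the single-mode field.
def _example_factor_wpi_kl(tk1=1, tl1=2, nth=16, nph=32):
    ttheta = np.arange(nth) / nth * np.pi
    tphi = np.arange(nph) / nph * 2 * np.pi
    TH, PH = np.meshgrid(ttheta, tphi, indexing='ij')
    tw = 0.7 * np.sin(2 * tk1 * TH + tl1 * PH + 0.3)
    _, Amp_use, w_th_use, w_ph_use, alpha_use = factor_wpi_kl(tw, (tk1, tl1))
    tw_rec = Amp_use * np.sin(w_th_use * TH + w_ph_use * PH + alpha_use)
    return np.allclose(tw, tw_rec)  # expected: True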
def show_fft_major(tw, tktl_list, ttheta, tphi):
tw_fft, tw2, tw_fft2 = do_fft_major(tw, tktl_list)
th_freq, ph_freq = np.meshgrid(np.fft.fftshift(np.fft.fftfreq(ttheta.size, 1 / ttheta.size)),
np.fft.fftshift(np.fft.fftfreq(tphi.size, 1 / tphi.size)),
indexing='ij')
tw_fft = np.fft.fftshift(tw_fft)
tw_fft2 = np.fft.fftshift(tw_fft2)
fig = plt.figure(figsize=(13, 11), dpi=300)
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=2, ncols=2)
twmax = np.max(np.abs(tw)) * 1.2
tw_levels = np.linspace(-twmax, twmax, 10)
fft_max = np.max(np.abs(tw_fft))
log_fft_max = np.ceil(np.log10(fft_max))
log_fft_step = 3
log_fft_min = log_fft_max - log_fft_step
fft_ticks = 10 ** np.linspace(log_fft_min, log_fft_max, log_fft_step + 1)
fft_formatter = mtick.LogFormatter(10, labelOnlyBase=False)
ax = axs[0, 0]
im = ax.contourf(tphi / np.pi, ttheta / np.pi, tw, tw_levels, cmap=plt.get_cmap('RdBu'))
fig.colorbar(im, ax=ax, orientation='vertical')
ax.set_title('original data')
ax.set_xlabel('$\\phi / \pi$')
ax.set_ylabel('$\\theta / \pi$')
ax = axs[0, 1]
im = ax.pcolor(ph_freq, th_freq, np.abs(tw_fft), cmap=plt.get_cmap('Greys'),
norm=mcolors.LogNorm(vmin=10 ** log_fft_min, vmax=10 ** log_fft_max))
fig.colorbar(im, ax=ax, orientation='vertical', ticks=fft_ticks, format=fft_formatter)
    ax.set_title('original frequency')
ax.set_xlabel('$f_\\phi$')
ax.set_ylabel('$f_\\theta$')
# ax.set_xlim(0, ax.get_xlim()[1])
# ax.set_ylim(0, ax.get_ylim()[1])
ax = axs[1, 0]
im = ax.contourf(tphi / np.pi, ttheta / np.pi, tw2, tw_levels, cmap=plt.get_cmap('RdBu'))
fig.colorbar(im, ax=ax, orientation='vertical')
    ax.set_title('filtered data')
ax.set_xlabel('$\\phi / \pi$')
ax.set_ylabel('$\\theta / \pi$')
ax = axs[1, 1]
im = ax.pcolor(ph_freq, th_freq, np.abs(tw_fft2), cmap=plt.get_cmap('Greys'),
norm=mcolors.LogNorm(vmin=10 ** log_fft_min, vmax=10 ** log_fft_max))
fig.colorbar(im, ax=ax, orientation='vertical', ticks=fft_ticks, format=fft_formatter)
    ax.set_title('filtered frequency')
ax.set_xlabel('$f_\\phi$')
ax.set_ylabel('$f_\\theta$')
# ax.set_xlim(0, ax.get_xlim()[1])
# ax.set_ylim(0, ax.get_ylim()[1])
plt.tight_layout()
return True
def show_fft_fit(tw, tktl, ttheta, tphi):
def fit_fun(tx, Amp, w_th, w_ph, alpha):
theta, phi = tx
return Amp * np.sin(w_th * theta + w_ph * phi + alpha)
    # analytical via IFFT. The input index must contain exactly one pair of conjugate frequencies.
tk1, tl1 = tktl
tw_fft, tw2, tw_fft2 = do_fft_major(tw, ((tk1, tl1),))
ntk, ntl = tw_fft.shape
Akl1 = tw_fft[tk1, tl1]
tk2 = ntk - tk1 if tk1 > 0 else tk1
tl2 = ntl - tl1 if tl1 > 0 else tl1
Akl2 = tw_fft[tk2, tl2]
Aklr = Akl1.real
Akli = Akl1.imag
th_freq, ph_freq = np.meshgrid(np.fft.fftshift(np.fft.fftfreq(ttheta.size, 1 / ttheta.size)),
np.fft.fftshift(np.fft.fftfreq(tphi.size, 1 / tphi.size)),
indexing='ij')
theta_all, phi_all = np.meshgrid(ttheta, tphi, indexing='ij')
tw_fft = np.fft.fftshift(tw_fft)
tw_fft2 = np.fft.fftshift(tw_fft2)
# fit
Amp_ini = 0
w_th_ini = 2 * tk1 if tk1 < (ttheta.size / 2) else -2 * (ttheta.size - tk1)
w_ph_ini = tl1 if tl1 < (tphi.size / 2) else -1 * (tphi.size - tl1)
alpha_ini = 0
p0 = (Amp_ini, w_th_ini, w_ph_ini, alpha_ini)
popt, pcov = curve_fit(fit_fun, (theta_all.ravel(), phi_all.ravel()), tw2.ravel(), p0=p0)
tw_fit = fit_fun((theta_all, phi_all), *popt)
    # analytical solution
k_sign = 1 if tk1 < (ttheta.size / 2) else -1
l_sign = 1 if tl1 < (tphi.size / 2) else -1
Amp_use = (np.abs(Akl1) + np.abs(Akl2)) / tw.size * k_sign * l_sign
w_th_use = w_th_ini
w_ph_use = w_ph_ini
alpha_use = np.arctan(Aklr / -Akli)
tw_ana = fit_fun((theta_all, phi_all), Amp_use, w_th_use, w_ph_use, alpha_use)
fig = plt.figure(figsize=(13, 11), dpi=300)
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=2, ncols=2)
twmax = np.max(np.abs(tw)) * 1.2
tw_levels = np.linspace(-twmax, twmax, 10)
fft_max = np.max(np.abs(tw_fft))
log_fft_max = np.ceil(np.log10(fft_max))
log_fft_step = 3
log_fft_min = log_fft_max - log_fft_step
fft_ticks = 10 ** np.linspace(log_fft_min, log_fft_max, log_fft_step + 1)
fft_formatter = mtick.LogFormatter(10, labelOnlyBase=False)
ax = axs[0, 0]
im = ax.contourf(tphi / np.pi, ttheta / np.pi, tw2, tw_levels, cmap=plt.get_cmap('RdBu'))
fig.colorbar(im, ax=ax, orientation='vertical')
    ax.set_title('filtered data')
ax.set_xlabel('$\\phi / \pi$')
ax.set_ylabel('$\\theta / \pi$')
ax = axs[0, 1]
im = ax.pcolor(ph_freq, th_freq, np.abs(tw_fft2), cmap=plt.get_cmap('Greys'),
norm=mcolors.LogNorm(vmin=10 ** log_fft_min, vmax=10 ** log_fft_max))
fig.colorbar(im, ax=ax, orientation='vertical', ticks=fft_ticks, format=fft_formatter)
    ax.set_title('filtered frequency')
ax.set_xlabel('$f_\\phi$')
ax.set_ylabel('$f_\\theta$')
# ax.set_xlim(0, ax.get_xlim()[1])
# ax.set_ylim(0, ax.get_ylim()[1])
ax.text(tphi.size * -0.4, ttheta.size * +0.3,
'$A(%d, %d) = %f %+fi$' % (tk1, tl1, Akl1.real, Akl1.imag), fontsize='x-small')
ax.text(tphi.size * -0.4, ttheta.size * -0.3,
'$A(%d, %d) = %f %+fi$' % (tk2, tl2, Akl2.real, Akl2.imag), fontsize='x-small')
ax = axs[1, 0]
im = ax.contourf(tphi / np.pi, ttheta / np.pi, tw_fit, tw_levels, cmap=plt.get_cmap('RdBu'))
fig.colorbar(im, ax=ax, orientation='vertical')
    ax.set_title('filtered and fitted data')
ax.set_xlabel('$\\phi / \pi$')
ax.set_ylabel('$\\theta / \pi$')
ax.text(0.1, 0.8, '$%5.3f \sin(%5.3f \\theta %+5.3f \\phi %+5.3f)$' % (
popt[0], popt[1], popt[2], popt[3]), fontsize='x-small')
ax = axs[1, 1]
im = ax.contourf(tphi / np.pi, ttheta / np.pi, tw_ana, tw_levels, cmap=plt.get_cmap('RdBu'))
fig.colorbar(im, ax=ax, orientation='vertical')
    ax.set_title('analytical solution')
ax.set_xlabel('$\\phi / \pi$')
ax.set_ylabel('$\\theta / \pi$')
ax.text(0.1, 0.8, '$%5.3f \sin(%5.3f \\theta %+5.3f \\phi %+5.3f)$' % (
Amp_use, w_th_use, w_ph_use, alpha_use), fontsize='x-small')
plt.tight_layout()
return True
# The following code is used to do a 3D FFT and 3D IFFT of \omega(\theta, \phi, \psi)
# of a microswimmer in shear flow.
def do_3dfft_major(tw, tktltj_list, print_info=True):
    # do an FFT of the velocity component, pick the major frequencies, then IFFT.
tw_fft = np.fft.fftn(tw)
ntk, ntl, ntj = tw_fft.shape
idx = np.ones_like(tw_fft) * 1e-30
for tk1, tl1, tj1 in tktltj_list:
tk2 = ntk - tk1 if tk1 > 0 else tk1
tl2 = ntl - tl1 if tl1 > 0 else tl1
tj2 = ntj - tj1 if tj1 > 0 else tj1
idx[tk1, tl1, tj1] = 1
idx[tk2, tl2, tj2] = 1
tf1 = tw_fft[tk1, tl1, tj1]
tf2 = tw_fft[tk2, tl2, tj2]
if print_info:
if tk1 > 0 or tl1 > 0 or tj1 > 0:
                print('use frequency pairs %f%+fi and %f%+fi at (%d, %d, %d) and (%d, %d, %d)' % (
                    tf1.real, tf1.imag, tf2.real, tf2.imag, tk1, tl1, tj1, tk2, tl2, tj2))
            else:
                print('use frequency %f%+fi at (%d, %d, %d)' % (tf1.real, tf1.imag, tk1, tl1, tj1))
tw_fft2 = tw_fft * idx
tw2 = np.fft.ifftn(tw_fft2)
    print('max absolute value of the imaginary part is', np.abs(tw2.imag).max())
return tw_fft, tw2.real, tw_fft2
def do_3dfft_major_conj(tw, tktltj_list, print_info=True):
    # do an FFT of the velocity component, pick the major frequencies, then IFFT.
tw_fft = np.fft.fftn(tw)
tM, tN, tO = tw_fft.shape
tw2 = np.zeros_like(tw)
tm, tn, to = np.meshgrid(np.arange(tM), np.arange(tN), np.arange(tO), indexing='ij')
ttheta = tm / tM * np.pi
tphi = tn / tN * 2 * np.pi
tpsi = to / tO * 2 * np.pi
idx = np.ones_like(tw_fft) * 1e-30
for tk1, tl1, tj1 in tktltj_list:
tk2 = tM - tk1 if tk1 > 0 else tk1
tl2 = tN - tl1 if tl1 > 0 else tl1
tj2 = tO - tj1 if tj1 > 0 else tj1
idx[tk1, tl1, tj1] = 1
idx[tk2, tl2, tj2] = 1
tf1 = tw_fft[tk1, tl1, tj1]
tf2 = tw_fft[tk2, tl2, tj2]
if print_info:
if tk1 > 0 or tl1 > 0 or tj1 > 0:
                print('use frequency pairs %f%+fi and %f%+fi at (%d, %d, %d) and (%d, %d, %d)' % (
                    tf1.real, tf1.imag, tf2.real, tf2.imag, tk1, tl1, tj1, tk2, tl2, tj2))
            else:
                print('use frequency %f%+fi at (%d, %d, %d)' % (tf1.real, tf1.imag, tk1, tl1, tj1))
tfct = 1 if np.allclose(np.array((tk1, tl1, tj1)), np.zeros(3)) else 2
tw2 = tw2 + tfct / (tM * tN * tO) * \
(np.real(tf1) * np.cos(2 * tk1 * ttheta + tl1 * tphi + tj1 * tpsi) -
np.imag(tf1) * np.sin(2 * tk1 * ttheta + tl1 * tphi + tj1 * tpsi))
tw_fft2 = tw_fft * idx
return tw_fft, tw2, tw_fft2
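# Hedged consistency check (my addition): for a single 3D mode, the closed-form
# conjugate-pair reconstruction above should agree with the IFFT route in
# do_3dfft_major.
def _example_3dfft_conj_consistency():
    tM, tN, tO = 8, 16, 12
    tm, tn, to = np.meshgrid(np.arange(tM), np.arange(tN), np.arange(tO),
                             indexing='ij')
    tw = np.cos(2 * np.pi * (tm / tM + 2 * tn / tN + 3 * to / tO))
    _, tw2a, _ = do_3dfft_major(tw, ((1, 2, 3),), print_info=False)
    _, tw2b, _ = do_3dfft_major_conj(tw, ((1, 2, 3),), print_info=False)
    return np.allclose(tw2a, tw2b)  # expected: True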
def factor_wpi_klj(tw, tktltj):
# see decouplingIdea.tex for detail.
# \omega_{pi}^{kl}(\theta, \phi, \psi) = \dfrac{2}{n_\theta n_\phi}
# \left(\Re(\Omega_{pi}(k,l, \psi)) \cos(2k\theta + l\phi) -
# \Im(\Omega_{pi}(k,l, \psi)) \sin(2k\theta + l\phi) \right)
# \omega_{pi}^{kl}(\theta, \phi, \psi) = \dfrac{2}{n_\theta n_\phi}
# \norm{\Omega_{pi}(k,l, \psi)} \sin(\alpha_0 + 2k\theta + l\phi)
# Amp_use = \dfrac{2}{n_\theta n_\phi}\norm{\Omega_{pi}(k,l, \psi)}
# w_th_use = 2k
# w_ph_use = l
# alpha_use = \alpha_0
    err_msg = 'this function has not been tested yet. '
    raise NotImplementedError(err_msg)
tk1, tl1, tj1 = tktltj
nth, nph, nps = tw.shape
tw_fft = np.fft.fftn(tw)
Akl1 = tw_fft[tk1, tl1, tj1]
Aklr = Akl1.real
Akli = Akl1.imag
k_sign = 1 if tk1 < (nth / 2) else -1
l_sign = 1 if tl1 < (nph / 2) else -1
    j_sign = 1 if tj1 < (nps / 2) else -1
    Amp_use = 2 * np.abs(Akl1) / tw.size * k_sign * l_sign * j_sign
w_th_use = 2 * tk1 if tk1 < (nth / 2) else -2 * (nth - tk1)
w_ph_use = tl1 if tl1 < (nph / 2) else -1 * (nph - tl1)
w_ps_use = tj1 if tj1 < (nps / 2) else -1 * (nps - tj1)
alpha_use = -np.arctan(Aklr / Akli)
return Akl1, Amp_use, w_th_use, w_ph_use, w_ps_use, alpha_use
def fill_Ui(ttheta, tphi, use_U):
if tphi[-1] < (2 * np.pi):
tphi = np.hstack((tphi, 2 * np.pi))
use_U = np.vstack((use_U.T, use_U[:, 0])).T
if ttheta[-1] < (np.pi):
ttheta = np.hstack((ttheta, np.pi))
use_U = np.vstack((use_U, use_U[0]))
return ttheta, tphi, use_U
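# Hedged illustration (my addition): fill_Ui closes the half-open angular grid
# by copying the periodic boundary values, adding one row and one column.
def _example_fill_Ui():
    ttheta = np.linspace(0, np.pi, 5, endpoint=False)
    tphi = np.linspace(0, 2 * np.pi, 8, endpoint=False)
    use_U = np.add.outer(np.cos(ttheta), np.sin(tphi))
    ttheta2, tphi2, use_U2 = fill_Ui(ttheta, tphi, use_U)
    return use_U.shape, use_U2.shape  # (5, 8) -> (6, 9)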
def _get_fig_axs_ui_psi(tw, dpi=100, polar=False):
if tw.shape[-1] == 15:
fig = plt.figure(figsize=np.array((16, 9)) * 2, dpi=dpi)
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=3, ncols=5, subplot_kw=dict(polar=polar))
elif tw.shape[-1] == 16:
fig = plt.figure(figsize=np.array((16, 9)) * 2, dpi=dpi)
fig.patch.set_facecolor('white')
axs = fig.subplots(nrows=4, ncols=4, subplot_kw=dict(polar=polar))
elif tw.shape[-1] == 2:
fig = plt.figure(figsize=np.array((16, 9)) * 2, dpi=dpi)
fig.patch.set_facecolor('white')
axs = np.array(fig.subplots(nrows=1, ncols=1, subplot_kw=dict(polar=polar))).reshape((1, 1))
else:
raise ValueError("currently, amount of psi is either 15 or 16. ")
return fig, axs
def core_show_ui_psi(tw, ttheta0, tphi0, tpsi, dpi=100, polar=False):
fig, axs = _get_fig_axs_ui_psi(tw, dpi=dpi, polar=polar)
cmap = plt.get_cmap('RdBu')
t1 = np.nanmax(np.abs(tw))
n_polar_xticks = 8
# noinspection PyTypeChecker
levels = np.linspace(-t1, t1, 10)
for i0, ax0 in zip(range(tw.shape[-1]), axs.flatten()):
ttheta, tphi, use_U = fill_Ui(ttheta0.copy(), tphi0.copy(), tw[..., i0])
if polar:
im = ax0.contourf(tphi, ttheta, use_U, levels, cmap=cmap)
xticks = np.arange(n_polar_xticks)
ax0.set_xticks(xticks / n_polar_xticks * 2 * np.pi)
ax0.set_xticklabels(['$\dfrac{%d}{%d}2\pi$' % (i0, n_polar_xticks) for i0 in xticks])
ax0.set_yticklabels([])
ax0.set_ylim(0, np.pi)
else:
im = ax0.contourf(tphi / np.pi, ttheta / np.pi, use_U, levels, cmap=cmap)
ax0.set_xlabel('$\\phi / \pi$')
ax0.set_ylabel('$\\theta / \pi$')
ax0.set_title('$\\psi=%f \pi$' % (tpsi[i0] / np.pi))
fig.colorbar(im, ax=ax0, orientation='vertical')
plt.tight_layout()
return fig
def show_ui_psi(tw, ttheta, tphi, tpsi, dpi=100, polar=False):
core_show_ui_psi(tw, ttheta, tphi, tpsi, dpi=dpi, polar=polar)
return True
def show_3dfft_major(tw, tktltj_list, ttheta, tphi, tpsi, dpi=100, polar=False):
tw_fft, tw2, tw_fft2 = do_3dfft_major(tw, tktltj_list)
core_show_ui_psi(tw, ttheta, tphi, tpsi, dpi=dpi, polar=polar)
core_show_ui_psi(tw2, ttheta, tphi, tpsi, dpi=dpi, polar=polar)
return True
def Rloc2glb(theta, phi, psi):
Rloc2glb = np.array(
((np.cos(phi) * np.cos(psi) * np.cos(theta) - np.sin(phi) * np.sin(psi),
-(np.cos(psi) * np.sin(phi)) - np.cos(phi) * np.cos(theta) * np.sin(psi),
np.cos(phi) * np.sin(theta)),
(np.cos(psi) * np.cos(theta) * np.sin(phi) + np.cos(phi) * np.sin(psi),
np.cos(phi) * np.cos(psi) - np.cos(theta) * np.sin(phi) * np.sin(psi),
np.sin(phi) * np.sin(theta)),
(-(np.cos(psi) * np.sin(theta)),
np.sin(psi) * np.sin(theta),
np.cos(theta))))
return Rloc2glb
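# Hedged numerical check (my addition): Rloc2glb composes three elementary
# rotations, so it should be orthogonal with determinant +1 for any angles.
def _example_Rloc2glb_is_rotation(theta=0.4, phi=1.1, psi=2.3):
    R = Rloc2glb(theta, phi, psi)
    return np.allclose(R.dot(R.T), np.eye(3)) and np.isclose(np.linalg.det(R), 1)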
def Eij_loc(theta, phi, psi):
Eij_loc = np.array(
((np.cos(psi) * (-(np.cos(phi) * np.cos(psi) * np.cos(theta)) +
np.sin(phi) * np.sin(psi)) * np.sin(theta),
(2 * np.cos(2 * psi) * np.sin(phi) * np.sin(theta) +
np.cos(phi) * np.sin(2 * psi) * np.sin(2 * theta)) / 4.,
(np.cos(phi) * np.cos(psi) * np.cos(2 * theta) -
np.cos(theta) * np.sin(phi) * np.sin(psi)) / 2.),
((2 * np.cos(2 * psi) * np.sin(phi) * np.sin(theta) +
np.cos(phi) * np.sin(2 * psi) * np.sin(2 * theta)) / 4.,
-(np.sin(psi) * (np.cos(psi) * np.sin(phi) +
np.cos(phi) * np.cos(theta) * np.sin(psi)) * np.sin(theta)),
(-(np.cos(psi) * np.cos(theta) * np.sin(phi)) -
np.cos(phi) * np.cos(2 * theta) * np.sin(psi)) / 2.),
((np.cos(phi) * np.cos(psi) * np.cos(2 * theta) -
np.cos(theta) * np.sin(phi) * np.sin(psi)) / 2.,
(-(np.cos(psi) * np.cos(theta) * np.sin(phi)) -
np.cos(phi) * np.cos(2 * theta) * np.sin(psi)) / 2.,
np.cos(phi) * np.cos(theta) * np.sin(theta))))
return Eij_loc
def Sij_loc(theta, phi, psi):
Sij_loc = np.array(
((0,
-(np.sin(phi) * np.sin(theta)) / 2.,
(np.cos(phi) * np.cos(psi) - np.cos(theta) * np.sin(phi) * np.sin(psi)) / 2.),
((np.sin(phi) * np.sin(theta)) / 2.,
0,
(-(np.cos(psi) * np.cos(theta) * np.sin(phi)) - np.cos(phi) * np.sin(psi)) / 2.),
((-(np.cos(phi) * np.cos(psi)) + np.cos(theta) * np.sin(phi) * np.sin(psi)) / 2.,
(np.cos(psi) * np.cos(theta) * np.sin(phi) + np.cos(phi) * np.sin(psi)) / 2.,
0)))
return Sij_loc
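# Hedged numerical check (my addition): the local rate-of-strain tensor should
# be symmetric and the local spin tensor antisymmetric for any orientation.
def _example_Eij_Sij_symmetry(theta=0.7, phi=0.2, psi=1.9):
    E = Eij_loc(theta, phi, psi)
    S = Sij_loc(theta, phi, psi)
    return np.allclose(E, E.T) and np.allclose(S, -S.T)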
| mit | -917,454,301,623,532,400 | 43.856129 | 128 | 0.557158 | false |
daveol/Fedora-Test-Laptop | tests/wifi_connect_ap.py | 1 | 1432 | #!/usr/bin/env python
# Copyright 2017 Nick Dekker, Marthe Veldhuis.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see LICENSE.txt.
from avocado import Test
from utils import internet, utils
import time
class WifiConnectAP(Test):
"""
Uses the first access point from internet_data to ping the default
gateway using internet utils.
"""
def setUp(self):
wifidata = utils.load_yaml(self, "data/internet_data.yaml")
if 'access_point_1' not in wifidata:
self.skip("No AP found in the yaml config")
if ('ssid' not in wifidata['access_point_1'] or
'pass' not in wifidata['access_point_1']):
self.skip("No AP found in the yaml config")
self.ap_ssid = wifidata['access_point_1']['ssid']
self.ap_pass = wifidata['access_point_1']['pass']
def test(self):
wifi_dev = internet.get_active_device('wifi', self)
self.wireless_interface = wifi_dev.get_iface()
self.log.debug(self.wireless_interface)
self.connect_and_check()
def connect_and_check(self):
internet.connect(self.ap_ssid, self.ap_pass, self)
time.sleep(10)
gateway = internet.get_gateway(self.wireless_interface, self)
pingResult = internet.pingtest_hard(gateway, self.wireless_interface, self)
self.log.debug("Internet is working on network {0}".format(self.ap_ssid))
| mit | 6,230,230,109,215,572,000 | 31.545455 | 83 | 0.652235 | false |
pagea/unstyle | experimental/feature_extraction.py | 1 | 7752 | # -*- coding: utf-8 -*-
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import defaultdict
from constants import *
def rvd(numbers):
"""
rvd - rich vector descriptor.
Input: a list of at least 3 numbers.
Returns a list with the following:
- mean
- median
- median - mean
- sigma
"""
from math import sqrt
numbers = sorted(numbers)
mean = float(sum(numbers)) / len(numbers)
sigma = sqrt(sum([ (mean - n) ** 2 for n in numbers ]) / len(numbers))
if len(numbers) % 2 == 1:
median = numbers[len(numbers)/2]
else:
median = (numbers[(len(numbers)-1)/2] + numbers[len(numbers)/2]) / 2.0
return [mean, median, median - mean, sigma]
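# Hedged worked example (my addition, not part of the original module): for
# [1, 2, 3, 4] the descriptor is mean 2.5, median 2.5, difference 0.0 and
# sigma = sqrt(5)/2 ~= 1.118.
def _rvd_example():
    return rvd([1, 2, 3, 4])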
def get_features(words, sentences, tags, chunks):
"""
Extracts features from words, sentences, tags, chunks triplet.
Returns a dictionary with real vectors
"""
from itertools import chain
flatten = lambda x : list(chain(*x))
def get_legomena(words):
        # Returns the counts of n-legomena for n = 2..6, relative to the hapax count
        # https://en.wikipedia.org/wiki/Hapax_legomenon
freqs = defaultdict(int)
for word in words:
freqs[word] += 1
hapax = float(len([ w for (w, c) in freqs.items() if c == 1 ]))
return [ len([ w for (w, c) in freqs.items() if c == i ]) / hapax for i in xrange(2,7) ]
def get_readability(words, sentences):
# Returns two readability scores
# Calculate ARI - https://en.wikipedia.org/wiki/Automated_Readability_Index
char_count = float(sum( [len(w) for w in words]))
word_count = float(len(words))
sentence_count = float(len(sentences))
ARI = 4.71 * char_count / word_count + 0.5 * word_count / sentence_count - 21.43
# Calculate LIX - https://en.wikipedia.org/wiki/LIX
        long_word_count = float(len([ w for w in words if len(w) > 6]))
LIX = word_count / sentence_count + 100 * long_word_count / word_count
return [ARI, LIX]
def get_word_length_distribution(words):
        # Returns the word-length distribution.
        # Word lengths larger than 12 are clipped to 12.
max_len = 12
freqs = dict( [ (i, 0) for i in xrange(1,max_len+1) ])
for word in words:
l = len(word)
if max_len < l:
l = max_len
freqs[l] += 1
total = float(len(words))
return [ freqs[i] / total for i in xrange(1,max_len+1) ] + rvd([len(x) for x in words])
def get_char_distribution(words):
"""
        This function reports on character distributions.
Reports rel. frequencies of:
- sum special characters
- sum normal characters
- upper characters
- all individual characters
- common character bigrams
"""
special_char_set = set(SPECIAL_CHARS)
normal_char_set = set(NORMAL_CHARS)
letters = flatten([w + " " for w in words])
special = 0
normal = 0
upper = 0
char_dist = dict([ (char, 0) for char in ALL_CHARS ])
bi_char_dist = dict([ (char, 0) for char in BI_CHARS ])
tri_char_dist = dict([ (char, 0) for char in TRI_CHARS ])
bigram = (None, None)
trigram = (None, None, None)
for l in letters:
bigram = (bigram[1], l.lower())
trigram = (trigram[1], trigram[2], l.lower())
if bigram in bi_char_dist:
bi_char_dist[bigram] += 1
if trigram in tri_char_dist:
tri_char_dist[trigram] += 1
if l.isupper():
upper += 1
if l.lower() in normal_char_set:
normal += 1
elif l in special_char_set:
special += 1
if l.lower() in char_dist:
char_dist[l.lower()] += 1
lc = float(len(letters))
specials = [special / lc, normal / lc, upper / float(len(words))]
lc = float(sum(char_dist.values()))
char_dist = [ char_dist[char] / lc for char in ALL_CHARS ]
lc = float(sum(bi_char_dist.values()))
bi_char_dist = [ bi_char_dist[char] / lc for char in BI_CHARS ]
lc = float(sum(tri_char_dist.values()))
tri_char_dist = [ tri_char_dist[char] / lc for char in TRI_CHARS ]
return specials + char_dist, bi_char_dist, tri_char_dist
def get_tag_distribution(tags):
"""
Gives POS-tag distribution.
Measures rel. frequencies of:
- POS-tags
- common POS-tag bigrams
"""
tags = flatten([ ['<s>'] + ts + ['</s>'] for ts in tags ])
tag_bi_dist = dict([ (t, 0) for t in BI_TAGS ])
tag_dist = dict([ (t, 0) for t in SIMPLE_TAGS ])
bigram = (None, None)
for t in tags:
bigram = (bigram[1], t)
if bigram in tag_bi_dist:
tag_bi_dist[bigram] += 1
if t in tag_dist:
tag_dist[t] += 1
tc = float(sum(tag_dist.values()))
mono = [ tag_dist[tag] / tc for tag in SIMPLE_TAGS ]
tc = float(sum(tag_bi_dist.values()))
bi = [ tag_bi_dist[tag] / tc for tag in BI_TAGS ]
return mono, bi
def get_chunk_distribution(chunks):
chunks = flatten([ ['<s>'] + cs + ['</s>'] for cs in chunks ])
chunk_bi_dist = dict([ (c, 0) for c in BI_CHUNKS ])
chunk_dist = dict([ (c, 0) for c in CHUNKS ])
bigram = (None, None)
for c in chunks:
bigram = (bigram[1], c)
if bigram in chunk_bi_dist:
chunk_bi_dist[bigram] += 1
if c in chunk_dist:
chunk_dist[c] += 1
cc = float(sum(chunk_dist.values()))
mono = [ chunk_dist[chunk] / cc for chunk in CHUNKS ]
cc = float(sum(chunk_bi_dist.values()))
bi = [ chunk_bi_dist[chunk] / cc for chunk in BI_CHUNKS ]
return mono, bi
features = []
feature_dic = dict()
feature_dic_names = []
def append_features(vector, name, features=features, feature_dic=feature_dic, feature_dic_names=feature_dic_names):
features += vector
feature_dic[name] = vector
feature_dic_names.append(name)
# Sentence length distribution
sentence_length_f = rvd([len(x) for x in sentences])
append_features(sentence_length_f, "sentence_length")
# Word length distribution
word_length_f = get_word_length_distribution(words)
append_features(word_length_f, "word_length")
# char distribution
mono_char_dist, bi_char_dist, tri_char_dist = get_char_distribution(words)
append_features(mono_char_dist, "mono_char_dist")
append_features(bi_char_dist, "bi_char_dist")
append_features(tri_char_dist, "tri_char_dist")
# Tag distribution
mono, bi = get_tag_distribution(tags)
append_features(mono, "mono_tag_dist")
append_features(bi, "bi_tag_dist")
# Chunk distribution
mono, bi = get_chunk_distribution(chunks)
append_features(mono, "mono_chunk_dist")
append_features(bi, "bi_chunk_dist")
# Readability
readability_f = get_readability(words, sentences)
append_features(readability_f, "readability")
# Legomena
legomena_f = get_legomena(words)
append_features(legomena_f, "legomena")
return features, feature_dic, feature_dic_names
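# Hedged worked example (my addition): the ARI formula used inside
# get_features, evaluated on toy counts (21 characters, 5 words, 1 sentence):
# 4.71 * 21 / 5 + 0.5 * 5 / 1 - 21.43 ~= 0.85
def _ari_example(char_count=21.0, word_count=5.0, sentence_count=1.0):
    return 4.71 * char_count / word_count \
        + 0.5 * word_count / sentence_count - 21.43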
def create_cached_features(data, filename="Cached_Features.py"):
dataset = dict()
for author in data.keys():
print "Working on:", author
dataset[author] = dict()
for storyname, info in data[author].items():
dataset[author][storyname] = get_features(*info)
f = open(filename, 'w')
f.write("# -*- coding: utf-8 -*-\n")
f.write("data = " + str(dataset) + "\n")
f.close()
def demo():
from Dataset import data
info = data[data.keys()[2]][data[data.keys()[2]].keys()[1]]
features, feature_dic, feature_dic_names = get_features(*info)
for key in feature_dic_names:
print key, feature_dic[key]
print
print len(features)
if __name__ == '__main__':
from Dataset import data
create_cached_features(data)
| mit | 5,541,036,737,737,882,000 | 29.761905 | 116 | 0.660604 | false |
Unfocused/Sublime-DXR | DXR.py | 1 | 2850 | import sublime, sublime_plugin
import os
from urllib.parse import urlencode
def open_dxr(query):
base_url = "http://dxr.mozilla.org/mozilla-central/search?"
params = {"tree": "mozilla-central", "q": query }
query_string = urlencode(params)
sublime.active_window().run_command('open_url', { "url": base_url + query_string })
def get_sel_or_word(view = None):
if view == None:
view = sublime.active_window().active_view()
region = view.sel()[0]
if not region.empty():
return view.substr(region).strip()
else:
return view.substr(view.word(region))
def get_repo_root_dir(filename):
path = filename
if not os.path.isdir(filename):
path = os.path.dirname(filename)
while True:
hg_dir = os.path.join(path, ".hg")
if os.path.exists(hg_dir) and os.path.isdir(hg_dir):
return path
git_dir = os.path.join(path, ".git")
if os.path.exists(git_dir) and os.path.isdir(git_dir):
return path
parent_path = os.path.dirname(path)
if path == parent_path:
break
path = parent_path
return None
def split_path(path):
head, tail = os.path.split(path)
if tail == "":
return []
return split_path(head) + [tail]
def convert_native_path(path):
return "/".join(split_path(path))
class DxrFreeform(sublime_plugin.ApplicationCommand):
def run(self):
window = sublime.active_window()
window.show_input_panel("DXR query", "", self.on_done, None, None)
def on_done(self, result):
open_dxr(result.strip())
class DxrRegexp(sublime_plugin.ApplicationCommand):
def run(self):
window = sublime.active_window()
window.show_input_panel("DXR regexp query", "//", self.on_done, None, None)
def on_done(self, result):
open_dxr("regexp:%s" % result.strip())
class DxrTextCommand(sublime_plugin.ApplicationCommand):
def run(self):
query = get_sel_or_word()
open_dxr(query)
class DxrFunctionCommand(sublime_plugin.ApplicationCommand):
def run(self):
query = get_sel_or_word()
open_dxr("function:%s" % query)
class DxrPath(sublime_plugin.ApplicationCommand):
def run(self):
window = sublime.active_window()
view = window.active_view()
full_path = view.file_name()
filename = os.path.basename(full_path)
window.show_input_panel("DXR path query", filename, self.on_done, None, None)
def on_done(self, result):
open_dxr("path:%s" % result.strip())
class DxrFilename(sublime_plugin.ApplicationCommand):
def run(self):
view = sublime.active_window().active_view()
full_path = view.file_name()
filename = os.path.basename(full_path)
open_dxr("path:%s" % filename)
class DxrParentDirectory(sublime_plugin.ApplicationCommand):
def run(self):
file_name = sublime.active_window().active_view().file_name()
repo_root = get_repo_root_dir(file_name)
repo_path = os.path.relpath(os.path.dirname(file_name), repo_root)
open_dxr("path:%s" % convert_native_path(repo_path))
| mpl-2.0 | -6,245,010,098,013,954,000 | 24 | 84 | 0.697895 | false |
dani-i/bachelor-project | graphics/gui/test/new_k_fold_cv_sess_gui.py | 1 | 10264 | from graphics.input.train_sess_details_input_f import TrainSessDetailsInputF
from graphics.input.data_augmentation_input_f import DataAugmentationInputF
from graphics.input.file_save_details_input_f import FileSaveDetailsInputF
from graphics.output.train_sess.train_sess_output_f import TrainSessOutputF
from graphics.output.test_sess.test_sess_output_f import TestSessOutputF
from graphics.widgets.scrollable_canvas_c import ScrollableCanvasC
from graphics.widgets.session_buttons_f import SessionButtonsF
from graphics.widgets.combobox_input_f import ComboboxInputF
from utils.train.train_sess_message import TrainSessMessage
import constants.gui_constants as const
import tkinter as tk
class NewKFoldCVSessGUI(tk.Frame):
def __init__(self,
parent,
enable_k_fold_cv_sess_buttons,
disable_k_fold_cv_sess_buttons):
"""
:param parent:
:param enable_k_fold_cv_sess_buttons:
:param disable_k_fold_cv_sess_buttons:
"""
tk.Frame.__init__(self,
parent)
self._disable_k_fold_cv_sess_buttons = disable_k_fold_cv_sess_buttons
self._enable_k_fold_cv_sess_buttons = enable_k_fold_cv_sess_buttons
self._valid_train_session_details_input = False
self._valid_train_sess_save_details = False
self._sc_scrollable = None
self._f_output = None
self._display_options = []
self._first_start = True
self._create_widgets()
self._place_widgets()
#########################################################################
# Widget creation and placement
def _create_and_place_output_frame_and_canvas(self):
if self._sc_scrollable:
self._sc_scrollable.destroy()
del self._sc_scrollable
if self._f_output:
self._f_output.destroy()
del self._f_output
self._f_output = tk.Frame(
self,
)
self._sc_scrollable = ScrollableCanvasC(
parent=self._f_output,
)
self._sc_scrollable.pack(side='top',
fill='both',
expand=True)
self._f_output.pack(side='top',
fill='both',
expand=True)
def _create_widgets(self):
self._create_and_place_output_frame_and_canvas()
self._data_augmentation = DataAugmentationInputF(
parent=self._sc_scrollable.f_main_frame,
selection_eh=self._data_augmentation_selection_eh,
disabled=True
)
self._train_session_details_input = TrainSessDetailsInputF(
parent=self._sc_scrollable.f_main_frame,
valid_input_eh=self._valid_session_details_eh,
invalid_input_eh=self._invalid_session_details_eh,
k_fold_cv_session=True,
disabled=True
)
self._train_sess_save_details = FileSaveDetailsInputF(
parent=self._sc_scrollable.f_main_frame,
file_extension=const.NTSG_EXTENSION,
valid_input_eh=self._valid_save_details_eh,
invalid_input_eh=self._invalid_save_details_eh,
disabled=False
)
self._session_buttons = SessionButtonsF(
parent=self._sc_scrollable.f_main_frame,
start_eh=self._start_btn_eh,
pause_eh=self._pause_btn_eh,
stop_eh=self._stop_btn_eh,
cancel_eh=self._cancel_btn_eh,
disabled=False
)
def _place_widgets(self):
self._sc_scrollable.pack(side='top',
fill='both',
expand=True)
self._train_sess_save_details.pack(side='top',
fill='both',
expand=True)
self._train_session_details_input.pack(side='top',
fill='both',
expand=True)
self._data_augmentation.pack(side='top',
fill='both',
expand=True)
self._session_buttons.pack(side='top',
fill='both',
expand=True)
#########################################################################
# Event handling
# ~~~~~~~~~~~~~~~~~~~~~Data augmentation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _data_augmentation_selection_eh(self):
# TODO
pass
# ~~~~~~~~~~~~~~~~~~~~~Data augmentation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fold_number_selected_eh(
self,
selected_value):
# TODO
if selected_value != 'Overall':
self._train_session_output.pack_forget()
self._test_sess_output.pack_forget()
self._train_session_output.pack(side='top',
fill='both',
expand=True)
self._test_sess_output.pack(side='top',
fill='both',
expand=True)
else:
self._train_session_output.pack_forget()
print('_fold_number_selected_eh ' + selected_value)
# ~~~~~~~~~~~~~~~~~~~~~Save details~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _valid_save_details_eh(
self,
save_details):
self._valid_train_sess_save_details = True
print('_valid_save_details_eh ' + str(save_details))
self._check_form_validity()
def _invalid_save_details_eh(self):
self._valid_train_sess_save_details = False
print('_invalid_save_details_eh')
self._check_form_validity()
# ~~~~~~~~~~~~~~~~~~~~~Session details~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _valid_session_details_eh(self):
session_details = self._train_session_details_input.get_input()
self._valid_train_session_details_input = True
self._display_options = ["Overall"]
for i in range(int(session_details.number_of_folds)):
self._display_options.append(str(i + 1))
print('_valid_session_details_eh')
self._check_form_validity()
def _invalid_session_details_eh(self):
self._valid_train_session_details_input = False
print('_invalid_session_details_eh')
self._check_form_validity()
# ~~~~~~~~~~~~~~~~~~~~~Session buttons~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _start_btn_eh(self):
if self._first_start:
self._first_start = False
self._train_session_details_input.destroy()
self._train_sess_save_details.destroy()
self._data_augmentation.destroy()
self._create_and_place_output_frame_and_canvas()
self._fold_number_input = ComboboxInputF(
parent=self._sc_scrollable.f_main_frame,
user_instruction=const.KFCVESG_K_TEXT,
user_options=self._display_options,
selection_eh=self._fold_number_selected_eh,
)
self._fold_number_input.config(
pady=30
)
self._train_session_output = TrainSessOutputF(
parent=self._sc_scrollable.f_main_frame,
)
self._test_sess_output = TestSessOutputF(
parent=self._sc_scrollable.f_main_frame
)
self._test_sess_output.progress_bar.pack_forget()
self._fold_number_input.pack(side='top',
fill='both',
expand=True)
self._train_session_output.pack(side='top',
fill='both',
expand=True)
self._test_sess_output.pack(side='top',
fill='both',
expand=True)
# TODO -> Call the controller to start the training session.
from utils.call_method_in_new_thread import CallMethodInNewThread
CallMethodInNewThread.call_method(
function_to_call=self.mock_data_set_creation,
)
def _pause_btn_eh(self):
# TODO
print(str(self._train_session_details_input.get_input()))
print(str(self._train_sess_save_details.get_new_file_details()))
print(str(self._data_augmentation.get_input()))
def _stop_btn_eh(self):
# TODO
pass
def _cancel_btn_eh(self):
# TODO
pass
#########################################################################
# Auxiliary methods
def _check_form_validity(self):
if self._valid_train_sess_save_details:
self._train_session_details_input.enable()
self._data_augmentation.enable()
if self._valid_train_session_details_input:
self._session_buttons.enable()
else:
self._session_buttons.disable()
self._data_augmentation.disable()
else:
self._train_session_details_input.disable()
self._data_augmentation.disable()
self._session_buttons.disable()
#########################################################################
# Public methods
#########################################################################
# Temporary methods
def mock_data_set_creation(self):
from random import random
from time import sleep
for i in range(25):
message = TrainSessMessage()
message.step = i
message.loss = random() * 100
message.seconds_per_batch = random() * 100
message.examples_per_sec = random() * 100
self._train_session_output.new_message(
message=message
)
sleep(0.2)
#########################################################################
| apache-2.0 | 522,798,749,803,514,600 | 28.579251 | 77 | 0.497564 | false |
nightrune/ola | python/ola/UID.py | 1 | 2958 | # This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# UID.py
# Copyright (C) 2010 Simon Newton
"""The UID class."""
__author__ = '[email protected] (Simon Newton)'
class Error(Exception):
"""Base Error Class."""
class UIDOutOfRangeException(Error):
"""Returned when a UID would be out of range."""
class UID(object):
"""Represents a UID."""
def __init__(self, manufacturer_id, device_id):
self._manufacturer_id = manufacturer_id
self._device_id = device_id
@property
def manufacturer_id(self):
return self._manufacturer_id
@property
def device_id(self):
return self._device_id
def IsBroadcast(self):
return self._device_id == 0xffffffff
def __str__(self):
return '%04x:%08x' % (self._manufacturer_id, self._device_id)
def __hash__(self):
return hash(str(self))
def __repr__(self):
return self.__str__()
def __cmp__(self, other):
if other is None:
return 1
if self._manufacturer_id == other._manufacturer_id:
return cmp(self._device_id, other._device_id)
return cmp(self.manufacturer_id, other.manufacturer_id)
@staticmethod
def AllDevices():
return UID(0xffff, 0xffffffff)
@staticmethod
def VendorcastAddress(manufacturer_id):
return UID(manufacturer_id, 0xffffffff)
@staticmethod
def FromString(uid_str):
"""Create a new UID from a string.
Args:
uid_str: The string representation of the UID, e.g. 00f0:12345678.
"""
parts = uid_str.split(':')
if len(parts) != 2:
return None
try:
manufacturer_id = int(parts[0], 16)
device_id = int(parts[1], 16)
except ValueError:
return None
if manufacturer_id > 0xffff or device_id > 0xffffffff:
return None
return UID(manufacturer_id, device_id)
@staticmethod
def NextUID(uid):
if uid == UID.AllDevices():
raise UIDOutOfRangeException(uid)
if uid.IsBroadcast():
return UID(uid.manufacturer_id + 1, 0)
else:
return UID(uid.manufacturer_id, uid.device_id + 1)
@staticmethod
def PreviousUID(uid):
if uid == UID(0, 0):
raise UIDOutOfRangeException(uid)
if uid.device_id == 0:
return UID(uid.manufacturer_id - 1, 0xffffffff)
else:
return UID(uid.manufacturer_id, uid.device_id - 1)
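# Hedged usage sketch (my addition, not part of the original module): parsing a
# UID string and stepping through the UID space.
def _uid_example():
  uid = UID.FromString('7a70:00000001')  # manufacturer 0x7a70, device 1
  return uid, UID.NextUID(uid), UID.PreviousUID(uid)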
| lgpl-2.1 | 6,715,555,315,331,315,000 | 25.890909 | 78 | 0.675118 | false |
tkzeng/molecular-design-toolkit | moldesign/helpers/helpers.py | 1 | 4716 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains various helper functions used by MDT internally.
"""
import collections
import numpy as np
import webcolors
from moldesign import units as u
class VolumetricGrid(object):
"""
Helper object for preparing gaussian CUBE files
"""
UNITS = u.angstrom
def __init__(self, positions, padding=2.5*u.angstrom, npoints=25):
mins = positions.min(axis=0) - padding
maxes = positions.max(axis=0) + padding
self.npoints = npoints
self.xr = (mins[0], maxes[0])
self.yr = (mins[1], maxes[1])
self.zr = (mins[2], maxes[2])
self._origin = mins.value_in(self.UNITS)
self.dx = (self.xr[1] - self.xr[0]).value_in(self.UNITS) / (float(npoints) - 1)
self.dy = (self.yr[1] - self.yr[0]).value_in(self.UNITS) / (float(npoints) - 1)
self.dz = (self.zr[1] - self.zr[0]).value_in(self.UNITS) / (float(npoints) - 1)
self.fxyz = None
def xyzlist(self):
stride = self.npoints * 1j
grids = np.mgrid[self.xr[0]:self.xr[1]:stride,
self.yr[0]:self.yr[1]:stride,
self.zr[0]:self.zr[1]:stride]
return grids * self.UNITS
def origin(self):
return tuple(self._origin)
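# Illustrative use of VolumetricGrid (``positions`` is a hypothetical
# unit-carrying (N, 3) array):
#   grid = VolumetricGrid(positions, npoints=32)
#   xgrid, ygrid, zgrid = grid.xyzlist()
#   x0, y0, z0 = grid.origin()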
def get_all_atoms(*objects):
""" Given Atoms, AtomContainers, lists of Atoms, and lists of AtomContainers,
return a flat list of all atoms contained therein.
A given atom is only returned once, even if it's found more than once.
Args:
*objects (moldesign.Atom OR moldesign.AtomContainer OR List[moldesign.Atom] OR
List[moldesign.AtomContainer]): objects to take atoms from
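    Example (illustrative; ``mol`` is a hypothetical Molecule):
        atoms = get_all_atoms(mol.atoms[0], mol.residues, [mol.chains[0]])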
"""
from moldesign import molecules
atoms = collections.OrderedDict()
for obj in objects:
if isinstance(obj, molecules.Atom):
atoms[obj] = None
elif hasattr(obj, 'atoms'):
atoms.update((x,None) for x in obj.atoms)
else:
for item in obj:
if isinstance(item, molecules.Atom):
atoms[item] = None
elif hasattr(item, 'atoms'):
atoms.update((x, None) for x in item.atoms)
return molecules.AtomList(atoms.iterkeys())
def kinetic_energy(momenta, masses):
return 0.5 * (momenta*momenta/masses).sum()
def kinetic_temperature(ke, dof):
from moldesign.units import k_b
t = (2.0*ke)/(k_b*dof)
return t.defunits()
# def get_residues(obj, **queries):
# """
#
# Args:
# obj ():
# **queries ():
#
# Returns:
#
# """
# for residue in obj.residues:
# pass
#
DEF_CATEGORICAL = 'Paired'
DEF_SEQUENTIAL = None # should be inferno, but that's only MPL >1.5
def colormap(cats, mplmap='auto'):
# should make it easy to choose one for:
# categorical data
# sequential (low, high important)
# diverging data (low, mid, high important)
# Can deal with numerical and categorical data
# we'll treat ints as categories for now
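    # Illustrative (hypothetical inputs):
    #   colormap(['a', 'b', 'a'])    -> three hex colors from a categorical map
    #   colormap([0.1, 0.5, 0.9])    -> three hex colors from a sequential map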
global DEF_SEQUENTIAL
from matplotlib import cm
if hasattr(cm, 'inferno'):
DEF_SEQUENTIAL = 'inferno'
else:
DEF_SEQUENTIAL = 'BrBG'
# strip units
units = None
if hasattr(cats[0], 'magnitude'):
arr = u.array(cats)
units = arr.units
cats = arr.magnitude
    if not isinstance(cats, np.ndarray) and not isinstance(cats[0], float):
        # treat as categorical
        values = np.zeros(len(cats), dtype='float')
to_int = collections.OrderedDict()
for i, item in enumerate(cats):
if item not in to_int:
to_int[item] = len(to_int)
values[i] = to_int[item]
if mplmap == 'auto':
mplmap = DEF_CATEGORICAL
else: # it's numerical
values = np.array(cats, dtype='float')
if mplmap == 'auto':
mplmap = DEF_SEQUENTIAL
cmap = getattr(cm, mplmap)
mx = values.max()
mn = values.min()
r = (values - mn) / (mx - mn) # rescale to [0.0,1.0]
rgb = cmap(r)
    # scale [0, 1] floats to integer [0, 255] triplets for webcolors
    hexcolors = [webcolors.rgb_to_hex(tuple((np.array(c[:3]) * 255).astype(int)))
                 for c in rgb]
return hexcolors | apache-2.0 | -513,517,205,098,998,400 | 28.85443 | 87 | 0.609415 | false |
myhro/myhronet | myhronet/migrations/0002_auto_20140501_1545.py | 1 | 1268 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
(b'myhronet', b'0001_initial'),
]
operations = [
migrations.AddField(
model_name=b'url',
name=b'ip',
field=models.GenericIPAddressField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name=b'url',
name=b'longurl',
field=models.CharField(max_length=1024, unique=True, null=True, db_index=True),
preserve_default=True,
),
migrations.AddField(
model_name=b'url',
name=b'data',
field=models.DateTimeField(auto_now_add=True, null=True),
preserve_default=True,
),
migrations.AddField(
model_name=b'url',
name=b'views',
field=models.IntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name=b'url',
name=b'hashcode',
field=models.CharField(max_length=10, unique=True, null=True, db_index=True),
preserve_default=True,
),
]
| mit | 2,269,109,178,985,048,300 | 27.818182 | 91 | 0.544953 | false |
memray/seq2seq-keyphrase | emolga/layers/recurrent.py | 1 | 22901 | # -*- coding: utf-8 -*-
from abc import abstractmethod
from .core import *
class Recurrent(MaskedLayer):
"""
Recurrent Neural Network
"""
@staticmethod
def get_padded_shuffled_mask(mask, pad=0):
"""
change the order of dims of mask, to match the dim of inputs outside
[1] change the 2D matrix into 3D, (nb_samples, max_sent_len, 1)
[2] dimshuffle to (max_sent_len, nb_samples, 1)
        each value in the mask is either 0 (padding) or 1 (real token)
:param: mask, shows x is a word (!=0) or not(==0), shape=(n_samples, max_sent_len)
"""
# mask is (n_samples, time)
assert mask, 'mask cannot be None'
# pad a dim of 1 to the right, (nb_samples, max_sent_len, 1)
mask = T.shape_padright(mask)
# mask = T.addbroadcast(mask, -1), make the new dim broadcastable
mask = T.addbroadcast(mask, mask.ndim-1)
# change the order of dims, to match the dim of inputs outside
mask = mask.dimshuffle(1, 0, 2) # (max_sent_len, nb_samples, 1)
if pad > 0:
# left-pad in time with 0
padding = alloc_zeros_matrix(pad, mask.shape[1], 1)
mask = T.concatenate([padding, mask], axis=0)
return mask.astype('int8')
class GRU(Recurrent):
"""
Gated Recurrent Unit - Cho et al. 2014
Acts as a spatio-temporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
        z_t = sigmoid(W_z*x + U_z*h_tm1 + b_z)
        r_t = sigmoid(W_r*x + U_r*h_tm1 + b_r)
        hh_t = tanh(W_h*x + U_h*(r_t*h_tm1) + b_h)
        h_t = z_t * h_tm1 + (1 - z_t) * hh_t
        The dot-product computations involving x are independent of time,
        so they can be done outside the recurrent loop (in advance):
        x_z = dot(X, self.W_z, self.b_z)
        x_r = dot(X, self.W_r, self.b_r)
        x_h = dot(X, self.W_h, self.b_h)
References:
On the Properties of Neural Machine Translation: Encoder–Decoder Approaches
http://www.aclweb.org/anthology/W14-4012
Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling
http://arxiv.org/pdf/1412.3555v1.pdf
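    Illustrative usage (a sketch; dimensions and names are assumed):
        gru = GRU(input_dim=100, output_dim=128, name='encoder')
        H = gru(X, mask=mask, return_sequence=True)
        # H has shape (nb_samples, max_sent_len, 128)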
"""
def __init__(self,
input_dim,
output_dim=128,
context_dim=None,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='sigmoid',
name=None, weights=None):
super(GRU, self).__init__()
"""
Standard GRU.
"""
self.input_dim = input_dim
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
# W is a matrix to map input x_t
self.W_z = self.init((self.input_dim, self.output_dim))
self.W_r = self.init((self.input_dim, self.output_dim))
self.W_h = self.init((self.input_dim, self.output_dim))
# U is a matrix to map hidden state of last time h_t-1
self.U_z = self.inner_init((self.output_dim, self.output_dim))
self.U_r = self.inner_init((self.output_dim, self.output_dim))
self.U_h = self.inner_init((self.output_dim, self.output_dim))
# bias terms
self.b_z = shared_zeros(self.output_dim)
self.b_r = shared_zeros(self.output_dim)
self.b_h = shared_zeros(self.output_dim)
# set names
self.W_z.name, self.U_z.name, self.b_z.name = 'Wz', 'Uz', 'bz'
self.W_r.name, self.U_r.name, self.b_r.name = 'Wr', 'Ur', 'br'
self.W_h.name, self.U_h.name, self.b_h.name = 'Wh', 'Uh', 'bh'
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
]
"""
GRU with context inputs.
"""
if context_dim is not None:
self.context_dim = context_dim
self.C_z = self.init((self.context_dim, self.output_dim))
self.C_r = self.init((self.context_dim, self.output_dim))
self.C_h = self.init((self.context_dim, self.output_dim))
self.C_z.name, self.C_r.name, self.C_h.name = 'Cz', 'Cr', 'Ch'
self.params += [self.C_z, self.C_r, self.C_h]
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def _step(self,
xz_t, xr_t, xh_t, mask_t,
h_tm1,
u_z, u_r, u_h):
"""
One step computation of GRU for a batch of data at time t
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_h,
non_sequences=[self.U_z, self.U_r, self.U_h]
:param xz_t, xr_t, xh_t:
value of x of time t after gate z/r/h (computed beforehand)
shape=(n_samples, output_emb_dim)
:param mask_t: mask of time t, indicates whether t-th token is a word, shape=(n_samples, 1)
:param h_tm1: hidden value (output) of last time, shape=(nb_samples, output_emb_dim)
:param u_z, u_r, u_h:
mapping matrix for hidden state of time t-1
shape=(output_emb_dim, output_emb_dim)
:return: h_t: output, hidden state of time t, shape=(nb_samples, output_emb_dim)
"""
        # h_mask_tm1 = mask_tm1 * h_tm1
        # GroundHog-like style: the mask is applied to the state after the
        # update, rather than to h_tm1 before it.
        # activation values of the update/reset gates, shape=(n_samples, output_dim)
z = self.inner_activation(xz_t + T.dot(h_tm1, u_z))
r = self.inner_activation(xr_t + T.dot(h_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h))
h_t = z * h_tm1 + (1 - z) * hh_t
        # mix h_t and h_tm1 using the mask: if the current token is padding
        # (mask == 0), drop the update (0 * h_t) and keep the previous state
        # (1 * h_tm1)
        h_t = mask_t * h_t + (1 - mask_t) * h_tm1
return h_t
def _step_gate(self,
xz_t, xr_t, xh_t, mask_t,
h_tm1,
u_z, u_r, u_h):
"""
One step computation of GRU
:returns
h_t: output, hidden state of time t, shape=(n_samples, output_emb_dim)
            z: value of update gate (after activation), shape=(n_samples, output_dim)
            r: value of reset gate (after activation), shape=(n_samples, output_dim)
"""
        # h_mask_tm1 = mask_tm1 * h_tm1
        # GroundHog-like style: the mask is applied to the state after the update.
z = self.inner_activation(xz_t + T.dot(h_tm1, u_z))
r = self.inner_activation(xr_t + T.dot(h_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h))
h_t = z * h_tm1 + (1 - z) * hh_t
h_t = mask_t * h_t + (1 - mask_t) * h_tm1
return h_t, z, r
def __call__(self, X, mask=None, C=None, init_h=None,
return_sequence=False, one_step=False,
return_gates=False):
"""
:param X: input sequence, a list of word vectors, shape=(n_samples, max_sent_len, input_emb_dim)
:param mask: input mask, shows x is a word (!=0) or not(==0), shape=(n_samples, max_sent_len)
:param C: context, for encoder is none
:param init_h: initial hidden state
:param return_sequence: if True, return the encoding at each time, or only return the end state
:param one_step: only go one step computation, or will be done by theano.scan()
:param return_gates: whether return the gate state
:return:
"""
        # the recurrent cell works on 3-D tensors of shape
        # (n_samples, max_sent_len, input_emb_dim); lift 2-D input to 3-D
        if X.ndim == 2:
X = X[:, None, :]
if mask is not None:
mask = mask[:, None]
# mask, shape=(n_samples, max_sent_len)
if mask is None: # sampling or beam-search
mask = T.alloc(1., X.shape[0], 1)
# one step
if one_step:
assert init_h, 'previous state must be provided!'
# reshape the mask to shape=(max_sent_len, n_samples, 1)
padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
X = X.dimshuffle((1, 0, 2)) # X: (max_sent_len, nb_samples, input_emb_dim)
# compute the gate values at each time in advance
# shape of W = (input_emb_dim, output_emb_dim)
x_z = dot(X, self.W_z, self.b_z) # x_z: (max_sent_len, nb_samples, output_emb_dim)
x_r = dot(X, self.W_r, self.b_r) # x_r: (max_sent_len, nb_samples, output_emb_dim)
x_h = dot(X, self.W_h, self.b_h) # x_h: (max_sent_len, nb_samples, output_emb_dim)
"""
GRU with constant context. (no attention here.)
"""
if C is not None:
assert C.ndim == 2
ctx_step = C.dimshuffle('x', 0, 1) # C: (nb_samples, context_dim)
x_z += dot(ctx_step, self.C_z)
x_r += dot(ctx_step, self.C_r)
x_h += dot(ctx_step, self.C_h)
"""
GRU with additional initial/previous state.
"""
if init_h is None:
init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
if not return_gates:
if one_step:
                seq = [x_z, x_r, x_h, padded_mask]  # NOTE: the author flagged a possible hidden bug in this one-step path
outputs_info = [init_h]
non_seq = [self.U_z, self.U_r, self.U_h]
outputs = self._step(*(seq + outputs_info + non_seq))
else:
outputs, _ = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_h,
non_sequences=[self.U_z, self.U_r, self.U_h]
)
# return hidden state of all times, shape=(nb_samples, max_sent_len, input_emb_dim)
if return_sequence:
return outputs.dimshuffle((1, 0, 2))
# hidden state of last time, shape=(nb_samples, output_emb_dim)
return outputs[-1]
else:
if one_step:
                seq = [x_z, x_r, x_h, padded_mask]  # NOTE: the author flagged a possible hidden bug in this one-step path
outputs_info = [init_h]
non_seq = [self.U_z, self.U_r, self.U_h]
outputs, zz, rr = self._step_gate(*(seq + outputs_info + non_seq))
else:
outputx, _ = theano.scan(
self._step_gate,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=[init_h, None, None],
non_sequences=[self.U_z, self.U_r, self.U_h]
)
outputs, zz, rr = outputx
if return_sequence:
return outputs.dimshuffle((1, 0, 2)), zz.dimshuffle((1, 0, 2)), rr.dimshuffle((1, 0, 2))
return outputs[-1], zz[-1], rr[-1]
class JZS3(Recurrent):
"""
Evolved recurrent neural network architectures from the evaluation of thousands
of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015.
This corresponds to the `MUT3` architecture described in the paper.
Takes inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
References:
An Empirical Exploration of Recurrent Network Architectures
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
"""
def __init__(self,
input_dim,
output_dim=128,
context_dim=None,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='sigmoid',
name=None, weights=None):
super(JZS3, self).__init__()
"""
Standard model
"""
self.input_dim = input_dim
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_z = self.init((self.input_dim, self.output_dim))
self.U_z = self.inner_init((self.output_dim, self.output_dim))
self.b_z = shared_zeros(self.output_dim)
self.W_r = self.init((self.input_dim, self.output_dim))
self.U_r = self.inner_init((self.output_dim, self.output_dim))
self.b_r = shared_zeros(self.output_dim)
self.W_h = self.init((self.input_dim, self.output_dim))
self.U_h = self.inner_init((self.output_dim, self.output_dim))
self.b_h = shared_zeros(self.output_dim)
# set names
self.W_z.name, self.U_z.name, self.b_z.name = 'Wz', 'Uz', 'bz'
self.W_r.name, self.U_r.name, self.b_r.name = 'Wr', 'Ur', 'br'
self.W_h.name, self.U_h.name, self.b_h.name = 'Wh', 'Uh', 'bh'
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
]
"""
context inputs.
"""
if context_dim is not None:
self.context_dim = context_dim
self.C_z = self.init((self.context_dim, self.output_dim))
self.C_r = self.init((self.context_dim, self.output_dim))
self.C_h = self.init((self.context_dim, self.output_dim))
self.C_z.name, self.C_r.name, self.C_h.name = 'Cz', 'Cr', 'Ch'
self.params += [self.C_z, self.C_r, self.C_h]
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def _step(self,
xz_t, xr_t, xh_t, mask_t,
h_tm1,
u_z, u_r, u_h):
# h_mask_tm1 = mask_tm1 * h_tm1
z = self.inner_activation(xz_t + T.dot(T.tanh(h_tm1), u_z))
r = self.inner_activation(xr_t + T.dot(h_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h))
h_t = (hh_t * z + h_tm1 * (1 - z)) * mask_t + (1 - mask_t) * h_tm1
return h_t
def __call__(self, X, mask=None, C=None, init_h=None, return_sequence=False, one_step=False):
        # the recurrent cell works on 3-D tensors; lift 2-D input to 3-D
if X.ndim == 2:
X = X[:, None, :]
# mask
if mask is None: # sampling or beam-search
mask = T.alloc(1., X.shape[0], X.shape[1])
# one step
if one_step:
assert init_h, 'previous state must be provided!'
padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
X = X.dimshuffle((1, 0, 2))
x_z = dot(X, self.W_z, self.b_z)
x_r = dot(X, self.W_r, self.b_r)
x_h = dot(X, self.W_h, self.b_h)
"""
        JZS3 with constant context. (no attention here.)
"""
if C is not None:
assert C.ndim == 2
ctx_step = C.dimshuffle('x', 0, 1) # C: (nb_samples, context_dim)
x_z += dot(ctx_step, self.C_z)
x_r += dot(ctx_step, self.C_r)
x_h += dot(ctx_step, self.C_h)
"""
JZS3 with additional initial/previous state.
"""
if init_h is None:
init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
if one_step:
seq = [x_z, x_r, x_h, padded_mask]
outputs_info = [init_h]
non_seq = [self.U_z, self.U_r, self.U_h]
outputs = self._step(*(seq + outputs_info + non_seq))
else:
outputs, updates = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_h,
non_sequences=[self.U_z, self.U_r, self.U_h],
)
if return_sequence:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
class LSTM(Recurrent):
def __init__(self,
input_dim=0,
output_dim=128,
context_dim=None,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one',
activation='tanh', inner_activation='sigmoid',
name=None, weights=None):
super(LSTM, self).__init__()
"""
Standard model
"""
self.input_dim = input_dim
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
# input gate param.
self.W_i = self.init((self.input_dim, self.output_dim))
self.U_i = self.inner_init((self.output_dim, self.output_dim))
self.b_i = shared_zeros(self.output_dim)
# forget gate param.
self.W_f = self.init((self.input_dim, self.output_dim))
self.U_f = self.inner_init((self.output_dim, self.output_dim))
        self.b_f = self.forget_bias_init(self.output_dim)  # forget gate bias is initialised to one.
# output gate param.
self.W_o = self.init((self.input_dim, self.output_dim))
self.U_o = self.inner_init((self.output_dim, self.output_dim))
self.b_o = shared_zeros(self.output_dim)
# memory param.
self.W_c = self.init((self.input_dim, self.output_dim))
self.U_c = self.inner_init((self.output_dim, self.output_dim))
self.b_c = shared_zeros(self.output_dim)
# set names
self.W_i.name, self.U_i.name, self.b_i.name = 'Wi', 'Ui', 'bi'
self.W_f.name, self.U_f.name, self.b_f.name = 'Wf', 'Uf', 'bf'
self.W_o.name, self.U_o.name, self.b_o.name = 'Wo', 'Uo', 'bo'
self.W_c.name, self.U_c.name, self.b_c.name = 'Wc', 'Uc', 'bc'
self.params = [
self.W_i, self.U_i, self.b_i,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o,
self.W_c, self.U_c, self.b_c,
]
"""
context inputs.
"""
if context_dim is not None:
self.context_dim = context_dim
self.C_i = self.init((self.context_dim, self.output_dim))
self.C_f = self.init((self.context_dim, self.output_dim))
self.C_o = self.init((self.context_dim, self.output_dim))
self.C_c = self.init((self.context_dim, self.output_dim))
self.C_i.name, self.C_f.name, self.C_o.name, self.C_c.name = 'Ci', 'Cf', 'Co', 'Cc'
self.params += [self.C_i, self.C_f, self.C_o, self.C_c]
if weights is not None:
self.set_weights(weights)
if name is not None:
self.set_name(name)
def _step(self,
xi_t, xf_t, xo_t, xc_t, mask_t,
h_tm1, c_tm1,
u_i, u_f, u_o, u_c):
# h_mask_tm1 = mask_tm1 * h_tm1
i = self.inner_activation(xi_t + T.dot(h_tm1, u_i)) # input gate
f = self.inner_activation(xf_t + T.dot(h_tm1, u_f)) # forget gate
o = self.inner_activation(xo_t + T.dot(h_tm1, u_o)) # output gate
c = self.activation(xc_t + T.dot(h_tm1, u_c)) # memory updates
# update the memory cell.
c_t = f * c_tm1 + i * c
h_t = o * self.activation(c_t)
# masking
c_t = c_t * mask_t + (1 - mask_t) * c_tm1
h_t = h_t * mask_t + (1 - mask_t) * h_tm1
return h_t, c_t
def input_embed(self, X, C=None):
x_i = dot(X, self.W_i, self.b_i)
x_f = dot(X, self.W_f, self.b_f)
x_o = dot(X, self.W_o, self.b_o)
x_c = dot(X, self.W_c, self.b_c)
"""
        LSTM with constant context. (no attention here.)
"""
if C is not None:
assert C.ndim == 2
ctx_step = C.dimshuffle('x', 0, 1) # C: (nb_samples, context_dim)
x_i += dot(ctx_step, self.C_i)
x_f += dot(ctx_step, self.C_f)
x_o += dot(ctx_step, self.C_o)
x_c += dot(ctx_step, self.C_c)
return x_i, x_f, x_o, x_c
def __call__(self, X, mask=None, C=None, init_h=None, init_c=None, return_sequence=False, one_step=False):
        # the recurrent cell works on 3-D tensors; lift 2-D input to 3-D
if X.ndim == 2:
X = X[:, None, :]
# mask
if mask is None: # sampling or beam-search
mask = T.alloc(1., X.shape[0], X.shape[1])
# one step
if one_step:
assert init_h, 'previous state must be provided!'
padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
X = X.dimshuffle((1, 0, 2))
x_i, x_f, x_o, x_c = self.input_embed(X, C)
"""
LSTM with additional initial/previous state.
"""
if init_h is None:
init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
if init_c is None:
init_c = init_h
if one_step:
seq = [x_i, x_f, x_o, x_c, padded_mask]
outputs_info = [init_h, init_c]
non_seq = [self.U_i, self.U_f, self.U_o, self.U_c]
outputs = self._step(*(seq + outputs_info + non_seq))
else:
outputs, updates = theano.scan(
self._step,
sequences=[x_i, x_f, x_o, x_c, padded_mask],
outputs_info=[init_h, init_c],
non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
)
if return_sequence:
return outputs[0].dimshuffle((1, 0, 2)), outputs[1].dimshuffle((1, 0, 2)) # H, C
return outputs[0][-1], outputs[1][-1]
| mit | 8,013,153,729,195,206,000 | 38.277873 | 124 | 0.517752 | false |
ksmit799/Toontown-Source | toontown/safezone/RegenTreasurePlannerAI.py | 1 | 1653 | from direct.distributed.ClockDelta import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
import random
import TreasurePlannerAI
class RegenTreasurePlannerAI(TreasurePlannerAI.TreasurePlannerAI):
notify = DirectNotifyGlobal.directNotify.newCategory('RegenTreasurePlannerAI')
def __init__(self, zoneId, treasureConstructor, taskName, spawnInterval, maxTreasures, callback = None):
TreasurePlannerAI.TreasurePlannerAI.__init__(self, zoneId, treasureConstructor, callback)
self.taskName = '%s-%s' % (taskName, zoneId)
self.spawnInterval = spawnInterval
self.maxTreasures = maxTreasures
def start(self):
self.preSpawnTreasures()
self.startSpawning()
def stop(self):
self.stopSpawning()
def stopSpawning(self):
taskMgr.remove(self.taskName)
def startSpawning(self):
self.stopSpawning()
taskMgr.doMethodLater(self.spawnInterval, self.upkeepTreasurePopulation, self.taskName)
def upkeepTreasurePopulation(self, task):
if self.numTreasures() < self.maxTreasures:
self.placeRandomTreasure()
taskMgr.doMethodLater(self.spawnInterval, self.upkeepTreasurePopulation, self.taskName)
return Task.done
def placeRandomTreasure(self):
self.notify.debug('Placing a Treasure...')
spawnPointIndex = self.nthEmptyIndex(random.randrange(self.countEmptySpawnPoints()))
self.placeTreasure(spawnPointIndex)
def preSpawnTreasures(self):
for i in range(self.maxTreasures):
self.placeRandomTreasure()
| mit | -6,763,203,890,189,821,000 | 36.568182 | 108 | 0.727163 | false |
jpopelka/fabric8-analytics-common | perf-tests/src/measurements.py | 1 | 2526 | """Module with functions that read data and metadata from the S3 and retrieve durations."""
from s3interface import *
from duration import *
from botocore.exceptions import *
def read_component_analysis_from_core_data(s3, ecosystem, component, version):
"""Read component analysis from the core data and retrieve duration info from it."""
bucket = "bayesian-core-data"
durations = {}
key = s3.component_key(ecosystem, component, version)
data = s3.read_object(bucket, key)
durations["overall"] = Duration.from_data(data)
analyses = data.get("analyses")
# Remove this analysis because it is not performed on component-version level
if "github_details" in analyses:
analyses.remove("github_details")
# analyses.remove("code_metrics")
for analysis in analyses:
key = s3.component_analysis_key(ecosystem, component, version, analysis)
try:
data = s3.read_object(bucket, key)
durations[analysis] = Duration.from_audit(data)
except ClientError:
print("Warning: duration for the following analysis won't be "
"be computed: {a}".format(a=analysis))
return durations
def read_component_analysis_from_core_package(s3, ecosystem, component):
"""Read component analysis from core package data and retrieve duration info from it."""
bucket = "bayesian-core-package-data"
durations = {}
key = s3.component_core_package_data_key(ecosystem, component)
data = s3.read_object(bucket, key)
durations["overall"] = Duration.from_data(data)
# we have to specify analysis manually here
analyses = ["git_stats", "github_details", "keywords_tagging", "libraries_io"]
for analysis in analyses:
key = s3.component_core_package_data_analysis_key(ecosystem, component, analysis)
try:
data = s3.read_object(bucket, key)
durations[analysis] = Duration.from_audit(data)
except ClientError:
print("Warning: duration for the following analysis won't be "
"be computed: {a}".format(a=analysis))
return durations
def read_component_analysis_audit_duration(s3, ecosystem, component, version):
"""Read durations for the core data and core package data as well."""
return {"core-data":
read_component_analysis_from_core_data(s3, ecosystem, component, version),
"core-package-data":
read_component_analysis_from_core_package(s3, ecosystem, component)}
| apache-2.0 | -8,453,762,179,332,420,000 | 36.701493 | 92 | 0.674188 | false |
patrickshuff/artofmemory | artofmemory/pao.py | 1 | 2868 | import random
import textwrap
from configparser import ConfigParser
def explain() -> str:
"""Explain Person Action Object"""
return textwrap.dedent(
"""\
Person Action Object (PAO)
The PAO is a system of encoding where you attribute a specific Person with an
Action that includes an Object. This is a composite object which you can then use
in a variety of ways. The idea is that you develop a collection of PAOs and assign
each of them a number.
Examples:
15: Albert Einstein (person) writing (action) on a blackboard (object).
16: Molly Ringwald (person) blowing candles (action) on a cake (object).
23: Michael Jordan (person) shooting (action) a basketball (object).
Armed with such an inventory you can use it for encoding of other information. Say
you want to memorize a series of numbers and you had a PAO inventory from
00-99. You could then assign the first six digits with a special combination of
your PAO collection.
Example:
162315 => Molly Ringwald shooting a blackboard
By doing this, you're compressing six digits into a single, composite image.
"""
)
def flatten_pao(d):
"""Yield back (num, item) tuples for each PAO broken into items.
The PAO item will be prefixed with either 'p:', 'a:', 'o:' to help denote its part of
the overall PAO.
Args:
d (dict): dictionary-like object that supports .items()
Yields:
(str, str)
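    Example (illustrative):
        >>> list(flatten_pao({'15': 'Einstein, writing, blackboard'}))
        [('15', 'p:Einstein'), ('15', 'a:writing'), ('15', 'o:blackboard')]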
"""
for num, pao in d.items():
person, action, obj = pao.split(",")
yield (num, "p:" + person.strip())
yield (num, "a:" + action.strip())
yield (num, "o:" + obj.strip())
def basic_quiz(config_file: str):
"""Test out your Person Action Object (PAO) knowledge
It supports just testing your PAO + shuffling them up to test combos
"""
config = ConfigParser()
config.read(config_file)
# TODO -- add an option to limit the values to test
# e.g. if I only want to test PAO for 1 through 4
# TODO add support for properly mixing up the PAO and testing
if "pao" not in config.sections():
print("No PAO Config setup. See README")
return
# Randomize the PAO items
pao_pairs = list(flatten_pao(config["pao"]))
random.shuffle(pao_pairs)
correct = 0
total = 0
for number, item in pao_pairs:
try:
guess = input("{}\n=> ".format(item))
except (EOFError, KeyboardInterrupt):
break
if not guess:
continue
if guess == number:
print("CORRECT!")
correct += 1
else:
print("INCORRECT: {}".format(number))
total += 1
if total:
print("\n{:>2}% Correct".format(correct / float(total) * 100))
| mit | 4,104,524,509,316,503,600 | 30.516484 | 90 | 0.613668 | false |
alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/sbml/TestUnitKind.py | 1 | 10589 | #
# @file TestUnitKind.py
# @brief UnitKind enumeration unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestUnitKind.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestUnitKind(unittest.TestCase):
def test_UnitKind_equals(self):
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_AMPERE,libsbml.UNIT_KIND_AMPERE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_INVALID,libsbml.UNIT_KIND_INVALID) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITER,libsbml.UNIT_KIND_LITER) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITRE,libsbml.UNIT_KIND_LITRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METER,libsbml.UNIT_KIND_METER) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METRE,libsbml.UNIT_KIND_METRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITER,libsbml.UNIT_KIND_LITRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITRE,libsbml.UNIT_KIND_LITER) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METER,libsbml.UNIT_KIND_METRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METRE,libsbml.UNIT_KIND_METER) )
self.assertEqual( 0, libsbml.UnitKind_equals(libsbml.UNIT_KIND_AMPERE,libsbml.UNIT_KIND_WEBER) )
pass
def test_UnitKind_forName(self):
self.assert_( libsbml.UnitKind_forName("ampere") == libsbml.UNIT_KIND_AMPERE )
self.assert_( libsbml.UnitKind_forName("becquerel") == libsbml.UNIT_KIND_BECQUEREL )
self.assert_( libsbml.UnitKind_forName("candela") == libsbml.UNIT_KIND_CANDELA )
self.assert_( libsbml.UnitKind_forName("Celsius") == libsbml.UNIT_KIND_CELSIUS )
self.assert_( libsbml.UnitKind_forName("coulomb") == libsbml.UNIT_KIND_COULOMB )
self.assert_( libsbml.UnitKind_forName("dimensionless") == libsbml.UNIT_KIND_DIMENSIONLESS )
self.assert_( libsbml.UnitKind_forName("farad") == libsbml.UNIT_KIND_FARAD )
self.assert_( libsbml.UnitKind_forName("gram") == libsbml.UNIT_KIND_GRAM )
self.assert_( libsbml.UnitKind_forName("gray") == libsbml.UNIT_KIND_GRAY )
self.assert_( libsbml.UnitKind_forName("henry") == libsbml.UNIT_KIND_HENRY )
self.assert_( libsbml.UnitKind_forName("hertz") == libsbml.UNIT_KIND_HERTZ )
self.assert_( libsbml.UnitKind_forName("item") == libsbml.UNIT_KIND_ITEM )
self.assert_( libsbml.UnitKind_forName("joule") == libsbml.UNIT_KIND_JOULE )
self.assert_( libsbml.UnitKind_forName("katal") == libsbml.UNIT_KIND_KATAL )
self.assert_( libsbml.UnitKind_forName("kelvin") == libsbml.UNIT_KIND_KELVIN )
self.assert_( libsbml.UnitKind_forName("kilogram") == libsbml.UNIT_KIND_KILOGRAM )
self.assert_( libsbml.UnitKind_forName("liter") == libsbml.UNIT_KIND_LITER )
self.assert_( libsbml.UnitKind_forName("litre") == libsbml.UNIT_KIND_LITRE )
self.assert_( libsbml.UnitKind_forName("lumen") == libsbml.UNIT_KIND_LUMEN )
self.assert_( libsbml.UnitKind_forName("lux") == libsbml.UNIT_KIND_LUX )
self.assert_( libsbml.UnitKind_forName("meter") == libsbml.UNIT_KIND_METER )
self.assert_( libsbml.UnitKind_forName("metre") == libsbml.UNIT_KIND_METRE )
self.assert_( libsbml.UnitKind_forName("mole") == libsbml.UNIT_KIND_MOLE )
self.assert_( libsbml.UnitKind_forName("newton") == libsbml.UNIT_KIND_NEWTON )
self.assert_( libsbml.UnitKind_forName("ohm") == libsbml.UNIT_KIND_OHM )
self.assert_( libsbml.UnitKind_forName("pascal") == libsbml.UNIT_KIND_PASCAL )
self.assert_( libsbml.UnitKind_forName("radian") == libsbml.UNIT_KIND_RADIAN )
self.assert_( libsbml.UnitKind_forName("second") == libsbml.UNIT_KIND_SECOND )
self.assert_( libsbml.UnitKind_forName("siemens") == libsbml.UNIT_KIND_SIEMENS )
self.assert_( libsbml.UnitKind_forName("sievert") == libsbml.UNIT_KIND_SIEVERT )
self.assert_( libsbml.UnitKind_forName("steradian") == libsbml.UNIT_KIND_STERADIAN )
self.assert_( libsbml.UnitKind_forName("tesla") == libsbml.UNIT_KIND_TESLA )
self.assert_( libsbml.UnitKind_forName("volt") == libsbml.UNIT_KIND_VOLT )
self.assert_( libsbml.UnitKind_forName("watt") == libsbml.UNIT_KIND_WATT )
self.assert_( libsbml.UnitKind_forName("weber") == libsbml.UNIT_KIND_WEBER )
self.assert_( libsbml.UnitKind_forName(None) == libsbml.UNIT_KIND_INVALID )
self.assert_( libsbml.UnitKind_forName("") == libsbml.UNIT_KIND_INVALID )
self.assert_( libsbml.UnitKind_forName("foobar") == libsbml.UNIT_KIND_INVALID )
pass
def test_UnitKind_isValidUnitKindString(self):
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("fun-foam-unit for kids!",1,1) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("litre",2,2) )
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("liter",2,2) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("liter",1,2) )
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("meter",2,3) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("metre",2,1) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("meter",1,2) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("Celsius",2,1) )
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("Celsius",2,2) )
pass
def test_UnitKind_toString(self):
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_AMPERE)
self.assert_(( "ampere" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_BECQUEREL)
self.assert_(( "becquerel" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_CANDELA)
self.assert_(( "candela" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_CELSIUS)
self.assert_(( "Celsius" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_COULOMB)
self.assert_(( "coulomb" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_DIMENSIONLESS)
self.assert_(( "dimensionless" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_FARAD)
self.assert_(( "farad" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_GRAM)
self.assert_(( "gram" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_GRAY)
self.assert_(( "gray" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_HENRY)
self.assert_(( "henry" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_HERTZ)
self.assert_(( "hertz" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_ITEM)
self.assert_(( "item" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_JOULE)
self.assert_(( "joule" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_KATAL)
self.assert_(( "katal" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_KELVIN)
self.assert_(( "kelvin" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_KILOGRAM)
self.assert_(( "kilogram" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LITER)
self.assert_(( "liter" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LITRE)
self.assert_(( "litre" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LUMEN)
self.assert_(( "lumen" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LUX)
self.assert_(( "lux" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_METER)
self.assert_(( "meter" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_METRE)
self.assert_(( "metre" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_MOLE)
self.assert_(( "mole" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_NEWTON)
self.assert_(( "newton" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_OHM)
self.assert_(( "ohm" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_PASCAL)
self.assert_(( "pascal" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_RADIAN)
self.assert_(( "radian" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_SECOND)
self.assert_(( "second" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_SIEMENS)
self.assert_(( "siemens" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_SIEVERT)
self.assert_(( "sievert" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_STERADIAN)
self.assert_(( "steradian" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_TESLA)
self.assert_(( "tesla" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_VOLT)
self.assert_(( "volt" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_WATT)
self.assert_(( "watt" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_WEBER)
self.assert_(( "weber" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_INVALID)
self.assert_(( "(Invalid UnitKind)" == s ))
s = libsbml.UnitKind_toString(-1)
self.assert_(( "(Invalid UnitKind)" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_INVALID + 1)
self.assert_(( "(Invalid UnitKind)" == s ))
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestUnitKind))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| gpl-3.0 | 7,651,008,237,058,202,000 | 52.211055 | 103 | 0.684201 | false |
developerworks/horizon | horizon/utils/validators.py | 1 | 1149 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core import validators
from django.core.exceptions import ValidationError
ipv4_cidr_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)' # 0-255
'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' # 3x .0-255
'/(3[0-2]|[1-2]?\d)$') # /0-32
validate_ipv4_cidr = validators.RegexValidator(ipv4_cidr_re)
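# Illustrative (hypothetical values):
#   validate_ipv4_cidr('192.168.0.0/24')   # passes silently
#   validate_ipv4_cidr('10.0.0.0/33')      # raises ValidationError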
def validate_port_range(port):
    if port not in range(-1, 65536):  # -1..65535 inclusive
raise ValidationError("Not a valid port number")
| apache-2.0 | -6,168,565,720,718,603,000 | 33.818182 | 79 | 0.656223 | false |
ajfriend/cyscs | cyscs/test/test_workspace.py | 1 | 2974 | import cyscs as scs
import pytest
import cyscs.examples as ex
import numpy as np
def test_cache():
data, cone = ex.many_iter_ecp()
work = scs.Workspace(data, cone)
sol = work.solve()
def test_settings():
expected_keys = set(['normalize', 'use_indirect', 'scale', 'verbose',
'eps', 'cg_rate', 'max_iters', 'alpha', 'rho_x'])
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
assert 'warm_start' not in work.settings
assert set(work.settings.keys()) == expected_keys
work.solve()
assert 'warm_start' not in work.settings
assert set(work.settings.keys()) == expected_keys
def test_fixed_settings():
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
expected_fixed = set(['normalize', 'use_indirect', 'scale', 'rho_x'])
assert set(work.fixed.keys()) == expected_fixed
with pytest.raises(Exception):
work.settings['rho_x'] = 3.14159
# should raise an exception because we changed a fixed setting
work.solve()
def test_data_keys():
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
assert 'A' not in work.data
assert set(work.data.keys()) == set(['b','c'])
def test_A():
data, cone, true_x = ex.simple_socp()
work = scs.Workspace(data, cone)
# corrupt the original data (but SCS should have made an internal copy, so this is ok)
data['A'][:] = 3
sol = work.solve(eps=1e-6)
assert np.allclose(sol['x'], true_x)
# now, solving on corrupted data shouldn't work
work = scs.Workspace(data, cone)
sol = work.solve(eps=1e-6)
assert not np.allclose(sol['x'], true_x)
def test_settings_change():
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
assert work.settings['eps'] == 1e-3
work.solve(eps=1e-6)
assert work.settings['eps'] == 1e-6
def test_warm_start():
# if warm-starting, the input warm-start vector should not be modified
data, cone, true_x = ex.simple_socp()
work = scs.Workspace(data, cone)
sol = work.solve(eps=1e-2)
assert np.linalg.norm(sol['x'] - true_x) > 1e-3
sol2 = work.solve(warm_start=sol, eps=1e-9)
assert np.linalg.norm(sol2['x'] - true_x) <= 1e-9
assert np.linalg.norm(sol['x'] - sol2['x']) > 0
assert sol['x'] is not sol2['x']
def test_many_iter_ecp():
    # warm-starting from a solution obtained at a looser tolerance should
    # reduce the number of iterations needed at a tighter tolerance
data, cone = ex.many_iter_ecp()
# intially takes ~920 iters for eps 1e-4
work = scs.Workspace(data, cone, eps=1e-4)
sol = work.solve()
assert sol['info']['iter'] >= 800
# ~640 for eps 1e-3
sol = work.solve(eps=1e-3)
assert 500 <= sol['info']['iter'] <= 700
# use 1e-3 sol as warm start for 1e-4
# extra digit only takes ~280 iters more
sol = work.solve(warm_start = sol, eps=1e-4)
assert sol['info']['iter'] < 300
| mit | 1,000,583,587,743,262,100 | 23.783333 | 90 | 0.61466 | false |
pusateri/vsd | webapp/ui/management/commands/build_files.py | 1 | 1545 | from django.core.management.base import BaseCommand, CommandError
from library.ui.models import Fileinfo, Media
from settings import MEDIA_ROOT
import os
from mutagen.mp4 import MP4
class Command(BaseCommand):
args = ''
help = 'build list of media filenames'
def handle(self, *args, **options):
files = os.listdir(MEDIA_ROOT + '/files')
for f in files:
save = False
loc = f.split('_')
if len(loc[0]) > 5 or len(loc[0]) < 1:
continue
basename, extension = os.path.splitext(f)
            if extension not in ['.m4v', '.mp4', '.mov']:
print extension
continue
try:
finfo = Fileinfo.objects.get(id=loc[0])
if finfo.filename != f:
finfo.filename = f
save = True
except Fileinfo.DoesNotExist:
finfo = Fileinfo(id=loc[0], filename=f)
save = True
try:
video = MP4(MEDIA_ROOT + '/files/' + f)
except:
print "error: %s" % f
assert(0)
secs = round(video.info.length)
try:
media = Media.objects.get(locationSingularString=loc[0])
minutes = round(secs/60.0)
if media.minutes != minutes:
media.minutes = int(minutes)
media.save()
except Media.DoesNotExist:
pass
if finfo.secs != secs:
finfo.secs = secs
save = True
if save:
print 'updating %s (%6.1f): %s' % (loc[0], secs, f)
finfo.save()
| mit | 2,368,409,360,834,580,500 | 28.150943 | 67 | 0.53657 | false |
uber/tchannel-python | tchannel/thrift/rw.py | 1 | 13504 | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, unicode_literals
import sys
import types
from functools import partial
import thriftrw
from tornado import gen
from tornado.util import raise_exc_info
from tchannel.status import OK, FAILED
from tchannel.errors import OneWayNotSupportedError
from tchannel.errors import ValueExpectedError
from tchannel.response import Response, response_from_mixed
from tchannel.serializer.thrift import ThriftRWSerializer
from .module import ThriftRequest
def load(path, service=None, hostport=None, module_name=None):
"""Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
    The returned module contains one top-level type for each struct, enum,
    union, exception, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
        donuts = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file.
"""
# TODO replace with more specific exceptions
# assert service, 'service is required'
# assert path, 'path is required'
# Backwards compatibility for callers passing in service name as first arg.
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport)
class TChannelThriftModule(types.ModuleType):
"""Wraps the ``thriftrw``-generated module.
Wraps service classes with ``Service`` and exposes everything else from
the module as-is.
"""
def __init__(self, service, module, hostport=None):
"""Initialize a TChannelThriftModule.
:param str service:
Name of the service this module represents. This name will be used
for routing over Hyperbahn.
:param module:
Module generated by ``thriftrw`` for a Thrift file.
:param str hostport:
This may be specified if the caller is a client and wants all
requests sent to a specific address.
"""
self.service = service
self.hostport = hostport
self._module = module
services = getattr(self._module, '__services__', None)
if services is None:
# thriftrw <1.0
services = getattr(self._module, 'services')
for service_cls in services:
name = service_cls.service_spec.name
setattr(self, name, Service(service_cls, self))
def __getattr__(self, name):
return getattr(self._module, name)
def __str__(self):
return 'TChannelThriftModule(%s, %s)' % (self.service, self._module)
__repr__ = __str__
class Service(object):
"""Wraps service classes generated by thriftrw.
Exposes all functions of the service.
"""
def __init__(self, cls, module):
self._module = module
self._cls = cls
self._spec = cls.service_spec
self._setup_functions(self._spec)
def _setup_functions(self, spec):
if spec.parent:
# Set up inherited functions first.
self._setup_functions(spec.parent)
for func_spec in spec.functions:
setattr(self, func_spec.name, Function(func_spec, self))
@property
def name(self):
"""Name of the Thrift service this object represents."""
return self._spec.name
def __str__(self):
return 'Service(%s)' % self.name
__repr__ = __str__
class Function(object):
"""Wraps a ServiceFunction generated by thriftrw.
Acts as a callable that will construct ThriftRequests.
"""
__slots__ = (
'spec', 'service', '_func', '_request_cls', '_response_cls'
)
def __init__(self, func_spec, service):
self.spec = func_spec
self.service = service
self._func = func_spec.surface
self._request_cls = self._func.request
self._response_cls = self._func.response
@property
def endpoint(self):
"""Endpoint name for this function."""
return '%s::%s' % (self.service.name, self._func.name)
@property
def oneway(self):
"""Whether this function is oneway."""
return self.spec.oneway
def __call__(self, *args, **kwargs):
if self.oneway:
raise OneWayNotSupportedError(
'TChannel+Thrift does not currently support oneway '
'procedures.'
)
if not (
self.service._module.hostport or
self.service._module.service
):
raise ValueError(
"No 'service' or 'hostport' provided to " +
str(self)
)
module = self.service._module
call_args = self._request_cls(*args, **kwargs)
return ThriftRWRequest(
module=module,
service=module.service,
endpoint=self.endpoint,
result_type=self._response_cls,
call_args=call_args,
hostport=module.hostport,
)
def __str__(self):
return 'Function(%s)' % self.endpoint
__repr__ = __str__
def register(dispatcher, service, handler=None, method=None):
"""
:param dispatcher:
RequestDispatcher against which the new endpoint will be registered.
:param Service service:
Service object representing the service whose endpoint is being
registered.
:param handler:
A function implementing the given Thrift function.
:param method:
If specified, name of the method being registered. Defaults to the
name of the ``handler`` function.
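    Example (illustrative; ``donuts`` is a module returned by ``load``,
    ``submitOrder`` a hypothetical handler function):
    .. code-block:: python
        register(dispatcher, donuts.DonutsService,
                 handler=submitOrder, method='submitOrder')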
"""
def decorator(method, handler):
if not method:
method = handler.__name__
function = getattr(service, method, None)
assert function, (
'Service "%s" does not define method "%s"' % (service.name, method)
)
assert not function.oneway
dispatcher.register(
function.endpoint,
build_handler(function, handler),
ThriftRWSerializer(service._module, function._request_cls),
ThriftRWSerializer(service._module, function._response_cls),
)
return handler
if handler is None:
return partial(decorator, method)
else:
return decorator(method, handler)
def build_handler(function, handler):
# response_cls is a class that represents the response union for this
# function. It accepts one parameter for each exception defined on the
# method and another parameter 'success' for the result of the call. The
# success kwarg is absent if the function doesn't return anything.
response_cls = function._response_cls
response_spec = response_cls.type_spec
@gen.coroutine
def handle(request):
# kwargs for this function's response_cls constructor
response_kwargs = {}
status = OK
try:
response = yield gen.maybe_future(handler(request))
except Exception as e:
response = Response()
for exc_spec in response_spec.exception_specs:
# Each exc_spec is a thriftrw.spec.FieldSpec. The spec
# attribute on that is the TypeSpec for the Exception class
# and the surface on the TypeSpec is the exception class.
exc_cls = exc_spec.spec.surface
if isinstance(e, exc_cls):
status = FAILED
response_kwargs[exc_spec.name] = e
break
else:
raise_exc_info(sys.exc_info())
else:
response = response_from_mixed(response)
if response_spec.return_spec is not None:
assert response.body is not None, (
'Expected a value to be returned for %s, '
                    'but received None - only void procedures can '
'return None.' % function.endpoint
)
response_kwargs['success'] = response.body
response.status = status
response.body = response_cls(**response_kwargs)
raise gen.Return(response)
handle.__name__ = function.spec.name
return handle
class ThriftRWRequest(ThriftRequest):
def __init__(self, module, **kwargs):
kwargs['serializer'] = ThriftRWSerializer(
module, kwargs['result_type']
)
super(ThriftRWRequest, self).__init__(**kwargs)
def read_body(self, body):
response_spec = self.result_type.type_spec
for exc_spec in response_spec.exception_specs:
exc = getattr(body, exc_spec.name)
if exc is not None:
raise exc
# success - non-void
if response_spec.return_spec is not None:
if body.success is None:
raise ValueExpectedError(
'Expected a value to be returned for %s, '
                    'but received None - only void procedures can '
'return None.' % self.endpoint
)
return body.success
# success - void
else:
return None
| mit | 6,518,467,213,706,256,000 | 31.856448 | 79 | 0.623741 | false |
badele/pyRFXtrx | examples/receive.py | 1 | 1277 | # This file is part of pyRFXtrx, a Python library to communicate with
# the RFXtrx family of devices from http://www.rfxcom.com/
# See https://github.com/woudt/pyRFXtrx for the latest version.
#
# Copyright (C) 2012 Edwin Woudt <[email protected]>
#
# pyRFXtrx is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyRFXtrx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pyRFXtrx. See the file COPYING.txt in the distribution.
# If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append("../")
import RFXtrx
import time
def main():
core = RFXtrx.Core('/dev/serial/by-id/usb-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0', debug=True)
while True:
print(core.sensors())
time.sleep(2)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 | 8,812,058,134,330,700,000 | 31.74359 | 96 | 0.718089 | false |
olavvatne/CNN | tools/util.py | 1 | 1829 | __author__ = 'olav'
import sys, os
import theano.tensor as T
import theano
import numpy as np
sys.path.append(os.path.abspath("./"))
from wrapper import create_output_func
from model import ConvModel
def create_threshold_image(image, threshold):
binary_arr = np.ones(image.shape)
low_values_indices = image <= threshold # Where values are low
binary_arr[low_values_indices] = 0 # All low values set to 0
return binary_arr
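# Illustrative (hypothetical input): values at or below the threshold become 0.
#   create_threshold_image(np.array([0.2, 0.5, 0.9]), 0.5) -> array([0., 0., 1.])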
def resize(image, size):
return image.resize( [int(size * s) for s in image.size] )
def create_predictor(dataset, model_config, model_params, batch_size):
x = T.matrix('x')
y = T.imatrix('y')
drop = T.iscalar('drop')
index = T.lscalar()
model = ConvModel(model_config, verbose=True)
model.build(x, drop, batch_size, init_params=model_params)
return create_output_func(dataset, x, y, drop, [index], model.get_output_layer(), batch_size)
def create_simple_predictor(model_config, model_params):
#TODO: Does this single predictor even work?
data = T.matrix('data')
x = T.matrix('x')
drop = T.iscalar('drop')
batch_size = 1
model = ConvModel(model_config, verbose=True)
model.build(x, drop, batch_size, init_params=model_params)
return model.create_predict_function(x, drop, data)
def batch_predict(predictor, dataset, dim, batch_size):
examples = dataset[0].eval().shape[0]
nr_of_batches = int(examples/ batch_size)
result_output = np.empty((examples, dim*dim), dtype=theano.config.floatX)
result_label = np.empty((examples, dim*dim), dtype=theano.config.floatX)
for i in range(nr_of_batches):
output, label = predictor(i)
result_output[i*batch_size: (i+1)*batch_size] = output
result_label[i*batch_size: (i+1)*batch_size] = label
return result_output, result_label
| mit | 6,201,771,261,819,565,000 | 31.660714 | 97 | 0.679606 | false |
like2000/PyCOBRA | PyCOBRA_beam.py | 1 | 2276 | from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.constants import e, m_p
def gaussian_generator(eps_geo, phase_space_tuple=('x', 'xp'), alpha=0, beta=1):
sigma = np.sqrt(eps_geo)
def generate(bunch):
n_macroparticles = bunch.n_macroparticles
x = np.random.normal(scale=sigma, size=n_macroparticles)
xp = np.random.normal(scale=sigma, size=n_macroparticles)
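        # De-normalize: push the unit-emittance samples through the Twiss
        # (Floquet) transform below so the resulting distribution has the
        # requested alpha and beta.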
M = np.array([[np.sqrt(beta), 0],
[-alpha/np.sqrt(beta), 1./np.sqrt(beta)]])
x, xp = M[0,0]*x + M[0,1]*xp, M[1,0]*x + M[1,1]*xp
setattr(bunch, phase_space_tuple[0], x)
setattr(bunch, phase_space_tuple[1], xp)
return generate
class Bunch(object):
def __init__(self, n_macroparticles,
weight=1, charge=e, mass=m_p, gamma=1,
*phase_space_generators):
self.n_macroparticles = n_macroparticles
self.weight = weight
self.charge = charge
self.mass = mass
self.gamma = gamma
[generate(self) for generate in phase_space_generators]
def emittance_normalised(self, x, xp):
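        # Note: this appears to rely on Gaussian moment identities; for
        # zero-mean jointly Gaussian (x, xp), std(x**2)*std(xp**2) - std(x*xp)**2
        # reduces to <x^2><xp^2> - <x*xp>^2, the squared RMS emittance.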
return np.sqrt(self.gamma**2 - 1) * \
np.sqrt( np.std(x**2)*np.std(xp**2) - np.std(x*xp)**2 )
def epsn_x(self):
return self.emittance_normalised(self.x, self.xp)
def epsn_y(self):
return self.emittance_normalised(self.y, self.yp)
def epsn_z(self):
return self.emittance_normalised(self.z, self.dp)
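# Minimal usage sketch (added for illustration; the emittance, particle count
# and gamma below are hypothetical example values, not library defaults).
def _example_bunch():
    gen_x = gaussian_generator(2e-6, ('x', 'xp'), alpha=0.0, beta=100.0)
    gen_y = gaussian_generator(2e-6, ('y', 'yp'), alpha=0.0, beta=100.0)
    bunch = Bunch(100000, 1, e, m_p, 480.0, gen_x, gen_y)
    return bunch.epsn_x(), bunch.epsn_y()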
class Beam(object):
def __init__(self, bunches_list):
self.n_macroparticles = sum([b.n_macroparticles for b in bunches_list])
self.weight = np.concatenate(b.weight for b in bunches_list)
self.charge = np.concatenate(b.charge for b in bunches_list)
self.mass = np.concatenate(b.mass for b in bunches_list)
self.gamma = np.concatenate(b.gamma for b in bunches_list)
self.x = np.concatenate(b.x for b in bunches_list)
self.xp = np.concatenate(b.xp for b in bunches_list)
self.y = np.concatenate(b.y for b in bunches_list)
self.yp = np.concatenate(b.yp for b in bunches_list)
self.z = np.concatenate(b.z for b in bunches_list)
self.dp = np.concatenate(b.dp for b in bunches_list)
| mit | 5,275,692,509,039,961,000 | 30.611111 | 80 | 0.611599 | false |
ut-planteco/ssu-pipeline | pipeline_parse_blast.py | 1 | 3045 | #!/usr/bin/env python
from __future__ import division
import os
import argparse
import console
import sys
"""
Parse XML BLAST output into a tab-delimited file for easier downstream
parsing, with sample ids, hit descriptions, hit identities and hit alignment
lengths. Input is read from STDIN; use this script in a pipe.
"""
parser = argparse.ArgumentParser(description = """
Parse XML BLAST output into a tab-delimited file for easier downstream
parsing, with sample ids, hit descriptions, hit identities and hit alignment
lengths. Input is read from STDIN; use this script in a pipe.
""")
args = parser.parse_args()
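# Output columns (tab-separated): query id, hit id, hit description, e-value,
# identity %, identical positions, alignment length, HSP number, frame,
# query from, query to, hit from, hit to, query length, hit length,
# bit score, alignment coverage %.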
i = 0
for line in sys.stdin:
tmp = line.strip().split("<")
if len(tmp) > 1:
tmp2 = tmp[1].split(">")
tag = tmp2[0]
if len(tmp2) > 1:
value = tmp2[1]
else:
value = None
if tag == "Iteration_query-def":
i += 1
if i % 100 == 0:
console.log("%d hits parsed\r" % (i))
qry = {}
qry['qry-id'] = value
if tag == "Iteration_query-len":
qry['qry-len'] = value
if tag == "Hit_num":
qry['hit'] = {}
qry['hit']['nr'] = value
if tag == "Hit_id":
if value[:2] == "gi":
qry['hit']['id'] = value.split("|")[1]
else:
qry['hit']['id'] = ""
if tag == "Hit_accession" and qry['hit']['id'] == "":
qry['hit']['id'] = value
if tag == "Hit_def":
qry['hit']['def'] = value.split(">")[0]
if tag == "Hit_len":
qry['hit']['len'] = value
if tag == "Hsp_num":
qry['hit']['hsp'] = {}
qry['hit']['hsp']['nr'] = value
if tag == "Hsp_bit-score":
qry['hit']['hsp']['score'] = value
if tag == "Hsp_evalue":
qry['hit']['hsp']['evalue'] = value
if tag == "Hsp_query-from":
qry['hit']['hsp']['qfrom'] = value
if tag == "Hsp_query-to":
qry['hit']['hsp']['qto'] = value
if tag == "Hsp_hit-from":
qry['hit']['hsp']['rfrom'] = value
if tag == "Hsp_hit-to":
qry['hit']['hsp']['rto'] = value
if tag == "Hsp_identity":
qry['hit']['hsp']['identity'] = value
if tag == "Hsp_align-len":
qry['hit']['hsp']['alen'] = value
if tag == "Hsp_hit-frame":
if value == "1":
value = "+/+"
else:
value = "+/-"
qry['hit']['hsp']['frame'] = value
if tag == "Hsp_midline":
# print our result
identity = float(qry['hit']['hsp']['identity']) / float(qry['hit']['hsp']['alen']) * 100
tmp = qry['qry-id'].split("-")
if len(tmp) > 1:
sample = tmp[0]
else:
sample = "NA"
mlen = min(int(qry['qry-len']), int(qry['hit']['len']))
alen = float(qry['hit']['hsp']['alen']) / float(mlen) * 100
if alen > 100:
alen = 100
sys.stdout.write("\t".join([qry['qry-id'], qry['hit']['id'], qry['hit']['def'], qry['hit']['hsp']['evalue'],
"{0:.2f}".format(identity), qry['hit']['hsp']['identity'],
qry['hit']['hsp']['alen'], qry['hit']['hsp']['nr'], qry['hit']['hsp']['frame'],
qry['hit']['hsp']['qfrom'], qry['hit']['hsp']['qto'], qry['hit']['hsp']['rfrom'],
qry['hit']['hsp']['rto'], qry['qry-len'], qry['hit']['len'], qry['hit']['hsp']['score'], "{0:.2f}".format(alen), "\n"]))
console.log("%d hits parsed\n" % (i))
| gpl-3.0 | 9,045,750,180,031,306,000 | 30.071429 | 124 | 0.549754 | false |
gatecat/prjoxide | timing/util/extract_route.py | 1 | 1535 | import lapie
import pickle
import sys
def main():
udb = sys.argv[1]
# Get actual routed path using Tcl
nets = lapie.list_nets(udb)
routing = lapie.get_routing(udb, nets)
# (source, sink) -> pips
arc2pips = {}
# Keep track of fanout - we'll need this later!
wire_fanout = {}
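    # wire_fanout: node -> number of routed arcs passing through it
    # (useful downstream, e.g. for fanout-aware timing analysis).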
for net in sorted(nets):
if net not in routing:
continue
route = routing[net]
tree = {}
# Construct route tree dst->src
for pip in route.pips:
tree[pip.node2] = pip.node1
# Mapping node -> pin
node2pin = {}
for pin in route.pins:
node2pin[pin.node] = (pin.cell, pin.pin)
for rpin in route.pins:
pin = (rpin.cell, rpin.pin)
cursor = rpin.node
if cursor not in tree:
continue
pin_route = []
while True:
wire_fanout[cursor] = wire_fanout.get(cursor, 0) + 1
if cursor not in tree:
if cursor in node2pin:
# Found a complete (src, sink) route
pin_route.reverse()
arc2pips[(node2pin[cursor], pin)] = pin_route
break
prev_wire = tree[cursor]
pin_route.append((prev_wire, cursor))
cursor = prev_wire
with open(sys.argv[2], "wb") as pf:
pickle.dump(dict(arc2pips=arc2pips, wire_fanout=wire_fanout), pf)
if __name__ == '__main__':
main()
| isc | 7,013,076,900,119,699,000 | 29.098039 | 73 | 0.503583 | false |
CMPUT404Team/CMPUT404-project-socialdistribution | cmput404project/service/testFriendApi.py | 1 | 1343 | from rest_framework.test import APIRequestFactory
from django.contrib.auth.models import User
from rest_framework.test import APITestCase, APIClient, force_authenticate
from unittest import skip
from django.urls import reverse
from rest_framework import status
from models.Author import Author
import json
class UserViewSetTests(APITestCase):
def setUp(self):
superuser = User.objects.create_superuser('superuser', '[email protected]', 'test1234')
self.client = APIClient()
#Authenticate as a super user so we can test everything
self.client.force_authenticate(user=superuser)
self.author = Author.create(host='local', displayName='testMonkey', user=superuser)
self.author.save()
self.friend = Author.create(host='local', displayName='testMonkey2', user=superuser)
self.friend.save()
self.author.add_friend(self.friend)
self.friend.add_friend(self.author)
self.detail_url = reverse('friend-detail', kwargs={'uuid1': self.author.id, 'uuid2': self.friend.id})
@skip ("Doesn't pass yet")
def test_get_friend_status(self):
response = self.client.get(self.detail_url)
self.assertEqual(response.status_code, 200)
self.assertIn(str(self.author.id), response.content)
self.assertIn(str(self.friend.id), response.content)
self.assertIn('true', response.content)
| apache-2.0 | 615,720,122,142,193,400 | 43.766667 | 109 | 0.736411 | false |
jailuthra/asr | filegen.py | 1 | 2657 | #!/usr/bin/env python
'''Generate configuration files for decoding via Kaldi.
The input directory (wavdir) should contain 16-bit 8KHz wav files,
with the naming convention <spk_id>_<utt_id>.wav.
For example: 0001_0001.wav, 0002_0001.wav etc.
'''
import sys
import os
from glob import glob
def get_filepaths(directory):
    '''Walk the directory tree rooted at `directory` and return a list of
    full paths to every .wav and .flac file found.
    '''
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
if filepath.endswith('.wav') or filepath.endswith('.flac'):
file_paths.append(filepath) # Add it to the list.
    return file_paths  # All matching audio file paths.
def get_wavscp(wavs):
out = {}
for path in wavs:
_, wav = os.path.split(path)
        wav = os.path.splitext(wav)[0]  # drop the .wav/.flac extension
out[wav] = path
return out
def get_spk2utt(wavscp):
out = {}
for wav in wavscp.keys():
spk, utt = wav.split('_')
if spk in out:
out[spk].append(wav)
else:
out[spk] = [wav]
return out
def get_utt2spk(spk2utt):
out = {}
for spk, utts in spk2utt.iteritems():
for utt in utts:
out[utt] = spk
return out
def write_scp(dirname, filename, data):
f = open(os.path.join(dirname, filename), 'w')
for key, val in iter(sorted(data.iteritems())):
if type(val) == list:
val = ' '.join(sorted(val))
f.write("%s %s\n" % (key, val))
def filegen(wavdir, outdir):
'''Generate wav.scp, spk2utt, utt2spk using wav files.
Args:
wavdir -- Path to directory having the wav files
outdir -- Path to directory where the config files will be written
'''
wavs = get_filepaths(wavdir)
wavscp = get_wavscp(wavs)
# print wavscp
write_scp(outdir, 'wav.scp', wavscp)
spk2utt = get_spk2utt(wavscp)
# print spk2utt
write_scp(outdir, 'spk2utt', spk2utt)
utt2spk = get_utt2spk(spk2utt)
# print utt2spk
write_scp(outdir, 'utt2spk', utt2spk)
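# Illustrative result (hypothetical files): for a wavdir containing
# 0001_0001.wav and 0001_0002.wav, filegen() writes
#   wav.scp : 0001_0001 /wavdir/0001_0001.wav  (one line per utterance)
#   spk2utt : 0001 0001_0001 0001_0002
#   utt2spk : 0001_0001 0001                   (one line per utterance)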
def main():
if (len(sys.argv) < 3):
print "Usage: %s <wavdir> <outdir>" % (sys.argv[0])
exit(1)
wavdir = sys.argv[1]
outdir = sys.argv[2]
filegen(wavdir, outdir)
if __name__ == '__main__':
main()
| mit | -5,146,011,456,511,642,000 | 29.54023 | 74 | 0.610463 | false |
MattJDavidson/python-adventofcode | tests/test_04.py | 1 | 1270 | import pytest
from advent.problem_04 import (acceptable_hash,
first_acceptable_hash,
generate_hash)
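# Context: an Advent of Code 2015 day 4 style problem. generate_hash(key, n)
# is assumed to return md5(key + str(n)).hexdigest(), and a hash is
# "acceptable" when it starts with check_length zeros (default 5).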
def test_acceptable_hash():
assert acceptable_hash('00000') == True
assert acceptable_hash('000001dbbfa') == True
assert acceptable_hash('000006136ef') == True
assert acceptable_hash('000000', check_length=6) == True
assert acceptable_hash('0000001dbbfa', check_length=6) == True
assert acceptable_hash('0000006136ef', check_length=6) == True
assert acceptable_hash('') == False
assert acceptable_hash('00001') == False
assert acceptable_hash('100000') == False
assert acceptable_hash('00000', check_length=6) == False
assert acceptable_hash('000001', check_length=6) == False
def test_generate_hash():
assert generate_hash('abcdef', 609043) == '000001dbbfa3a5c83a2d506429c7b00e'
assert generate_hash('pqrstuv', 1048970) == '000006136ef2ff3b291c85725f17325c'
def test_first_acceptable_hash():
assert first_acceptable_hash('$', ceiling=2) is None
assert first_acceptable_hash('abcdef', floor=609042, ceiling=609044) \
== 609043
assert first_acceptable_hash('pqrstuv', floor=1048969, ceiling=1048971) \
== 1048970
| bsd-2-clause | 2,326,567,479,929,284,600 | 37.484848 | 82 | 0.666929 | false |
mgorny/PyGithub | tests/Connection.py | 1 | 5202 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2019 Adam Baratz <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import itertools
import unittest
from io import StringIO
from unittest.mock import Mock
import httpretty
from parameterized import parameterized
from . import Framework
PARAMETERS = itertools.product(
[
(Framework.ReplayingHttpConnection, "http"),
(Framework.ReplayingHttpsConnection, "https"),
],
[
(
'{"body":"BODY TEXT"}',
"\nGET\napi.github.com\nNone\n/user\n{'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'}\nNone\n200\n[]\n{\"body\":\"BODY TEXT\"}\n\n",
),
(
u'{"body":"BODY\xa0TEXT"}',
u"\nGET\napi.github.com\nNone\n/user\n{'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'}\nNone\n200\n[]\n{\"body\":\"BODY\xa0TEXT\"}\n\n",
),
(
"BODY TEXT",
"\nGET\napi.github.com\nNone\n/user\n{'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'}\nNone\n200\n[]\nBODY TEXT\n\n",
),
(
u"BODY\xa0TEXT",
u"\nGET\napi.github.com\nNone\n/user\n{'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'}\nNone\n200\n[]\nBODY\xa0TEXT\n\n",
),
],
)
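# Each parameterized case pairs a (replaying connection class, protocol) with
# a (response body, expected recording) fixture; the recording string is the
# line sequence that RecordingConnection writes to its file.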
class RecordingMockConnection(Framework.RecordingConnection):
def __init__(self, file, protocol, host, port, realConnection):
self._realConnection = realConnection
super().__init__(file, protocol, host, port)
class Connection(unittest.TestCase):
@parameterized.expand(itertools.chain(*p) for p in PARAMETERS)
def testRecordAndReplay(
self, replaying_connection_class, protocol, response_body, expected_recording
):
file = StringIO()
host = "api.github.com"
verb = "GET"
url = "/user"
headers = {"Authorization": "Basic p4ssw0rd", "User-Agent": "PyGithub/Python"}
response = Mock()
response.status = 200
response.getheaders.return_value = {}
response.read.return_value = response_body
connection = Mock()
connection.getresponse.return_value = response
# write mock response to buffer
recording_connection = RecordingMockConnection(
file, protocol, host, None, lambda *args, **kwds: connection
)
recording_connection.request(verb, url, None, headers)
recording_connection.getresponse()
recording_connection.close()
# validate contents of buffer
file_value_lines = file.getvalue().split("\n")
expected_recording_lines = (protocol + expected_recording).split("\n")
self.assertEqual(file_value_lines[:5], expected_recording_lines[:5])
self.assertEqual(
eval(file_value_lines[5]), eval(expected_recording_lines[5])
) # dict literal, so keys not in guaranteed order
self.assertEqual(file_value_lines[6:], expected_recording_lines[6:])
# required for replay to work as expected
httpretty.enable(allow_net_connect=False)
# rewind buffer and attempt to replay response from it
file.seek(0)
replaying_connection = replaying_connection_class(
self, file, host=host, port=None
)
replaying_connection.request(verb, url, None, headers)
replaying_connection.getresponse()
# not necessarily required for subsequent tests
httpretty.disable()
httpretty.reset()
| lgpl-3.0 | 1,172,037,639,037,431,600 | 43.461538 | 186 | 0.539216 | false |
WZQ1397/automatic-repo | python/FileSystem/BTpanel/btclass/panelSSL.py | 1 | 9341 | #coding: utf-8
#-------------------------------------------------------------------
# BaoTa (BT) Linux Panel
#-------------------------------------------------------------------
# Copyright (c) 2015-2016 BaoTa Soft (http:#bt.cn) All rights reserved.
#-------------------------------------------------------------------
# Author: 黄文良 <[email protected]>
#-------------------------------------------------------------------
#------------------------------
# SSL API
#------------------------------
import public,os,web,sys,binascii,urllib,json,time,datetime
reload(sys)
sys.setdefaultencoding('utf-8')
class panelSSL:
__APIURL = 'https://www.bt.cn/api/Auth';
__UPATH = 'data/userInfo.json';
__userInfo = None;
__PDATA = None;
    #Constructor
def __init__(self):
pdata = {}
data = {}
if os.path.exists(self.__UPATH):
self.__userInfo = json.loads(public.readFile(self.__UPATH));
if self.__userInfo:
pdata['access_key'] = self.__userInfo['access_key'];
data['secret_key'] = self.__userInfo['secret_key'];
else:
pdata['access_key'] = 'test';
data['secret_key'] = '123456';
pdata['data'] = data;
self.__PDATA = pdata;
    #Get token
def GetToken(self,get):
data = {}
data['username'] = get.username;
data['password'] = public.md5(get.password);
pdata = {}
pdata['data'] = self.De_Code(data);
result = json.loads(public.httpPost(self.__APIURL+'/GetToken',pdata));
result['data'] = self.En_Code(result['data']);
if result['data']: public.writeFile(self.__UPATH,json.dumps(result['data']));
del(result['data']);
return result;
    #Delete token
def DelToken(self,get):
os.system("rm -f " + self.__UPATH);
return public.returnMsg(True,"SSL_BTUSER_UN");
    #Get user info
def GetUserInfo(self,get):
result = {}
if self.__userInfo:
userTmp = {}
userTmp['username'] = self.__userInfo['username'][0:3]+'****'+self.__userInfo['username'][-4:];
result['status'] = True;
result['msg'] = public.getMsg('SSL_GET_SUCCESS');
result['data'] = userTmp;
else:
userTmp = {}
userTmp['username'] = public.getMsg('SSL_NOT_BTUSER');
result['status'] = False;
result['msg'] = public.getMsg('SSL_NOT_BTUSER');
result['data'] = userTmp;
return result;
    #Get order list
def GetOrderList(self,get):
if hasattr(get,'siteName'):
path = '/etc/letsencrypt/live/'+ get.siteName + '/partnerOrderId';
if os.path.exists(path):
self.__PDATA['data']['partnerOrderId'] = public.readFile(path);
self.__PDATA['data'] = self.De_Code(self.__PDATA['data']);
result = json.loads(public.httpPost(self.__APIURL + '/GetSSLList',self.__PDATA));
result['data'] = self.En_Code(result['data']);
for i in range(len(result['data'])):
result['data'][i]['endtime'] = self.add_months(result['data'][i]['createTime'],result['data'][i]['validityPeriod'])
return result;
    #Add the given number of months to a date
def add_months(self,dt,months):
import calendar
dt = datetime.datetime.fromtimestamp(dt/1000);
month = dt.month - 1 + months
year = dt.year + month / 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
return (time.mktime(dt.replace(year=year, month=month, day=day).timetuple()) + 86400) * 1000
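        # Illustrative example (assumed input): a createTime of 2016-01-31
        # with a one-month validity clamps to 2016-02-29 (day limited to the
        # shorter month), then one extra day (86400 s) is added before
        # converting back to milliseconds.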
    #Apply for a certificate
def GetDVSSL(self,get):
runPath = self.GetRunPath(get);
if runPath != False and runPath != '/': get.path += runPath;
if not self.CheckDomain(get): return public.returnMsg(False,'SSL_CHECK_DNS_ERR',(get.domain,));
self.__PDATA['data']['domain'] = get.domain;
self.__PDATA['data'] = self.De_Code(self.__PDATA['data']);
result = json.loads(public.httpPost(self.__APIURL + '/GetDVSSL',self.__PDATA));
result['data'] = self.En_Code(result['data']);
        if 'authValue' in result['data']:
public.writeFile(get.path + '/.well-known/pki-validation/fileauth.txt',result['data']['authValue']);
return result;
    #Get the site's run directory
def GetRunPath(self,get):
if hasattr(get,'siteName'):
get.id = public.M('sites').where('name=?',(get.siteName,)).getField('id');
else:
get.id = public.M('sites').where('path=?',(get.path,)).getField('id');
if not get.id: return False;
import panelSite
result = panelSite.panelSite().GetSiteRunPath(get);
return result['runPath'];
    #Check whether the domain resolves to this server
def CheckDomain(self,get):
try:
epass = public.GetRandomString(32);
spath = get.path + '/.well-known/pki-validation';
if not os.path.exists(spath): os.system("mkdir -p '" + spath + "'");
public.writeFile(spath + '/fileauth.txt',epass);
result = public.httpGet('http://' + get.domain + '/.well-known/pki-validation/fileauth.txt');
if result == epass: return True
return False
except:
return False
    #Confirm the domain
def Completed(self,get):
self.__PDATA['data']['partnerOrderId'] = get.partnerOrderId;
self.__PDATA['data'] = self.De_Code(self.__PDATA['data']);
if hasattr(get,'siteName'):
get.path = public.M('sites').where('name=?',(get.siteName,)).getField('path');
runPath = self.GetRunPath(get);
if runPath != False and runPath != '/': get.path += runPath;
sslInfo = json.loads(public.httpPost(self.__APIURL + '/SyncOrder',self.__PDATA));
sslInfo['data'] = self.En_Code(sslInfo['data']);
try:
public.writeFile(get.path + '/.well-known/pki-validation/fileauth.txt',sslInfo['data']['authValue']);
except:
return public.returnMsg(False,'SSL_CHECK_WRITE_ERR');
result = json.loads(public.httpPost(self.__APIURL + '/Completed',self.__PDATA));
result['data'] = self.En_Code(result['data']);
return result;
    #Sync the specified order
def SyncOrder(self,get):
self.__PDATA['data']['partnerOrderId'] = get.partnerOrderId;
self.__PDATA['data'] = self.De_Code(self.__PDATA['data']);
result = json.loads(public.httpPost(self.__APIURL + '/SyncOrder',self.__PDATA));
result['data'] = self.En_Code(result['data']);
return result;
    #Fetch the certificate
def GetSSLInfo(self,get):
self.__PDATA['data']['partnerOrderId'] = get.partnerOrderId;
self.__PDATA['data'] = self.De_Code(self.__PDATA['data']);
result = json.loads(public.httpPost(self.__APIURL + '/GetSSLInfo',self.__PDATA));
result['data'] = self.En_Code(result['data']);
        #Write the certificate into the site config
if hasattr(get,'siteName'):
try:
siteName = get.siteName;
path = '/etc/letsencrypt/live/'+ siteName;
if not os.path.exists(path):
public.ExecShell('mkdir -p ' + path)
csrpath = path+"/fullchain.pem";
keypath = path+"/privkey.pem";
pidpath = path+"/partnerOrderId";
                #Clean up the old certificate chain
public.ExecShell('rm -f ' + keypath)
public.ExecShell('rm -f ' + csrpath)
public.ExecShell('rm -rf ' + path + '-00*')
public.ExecShell('rm -rf /etc/letsencrypt/archive/' + get.siteName)
public.ExecShell('rm -rf /etc/letsencrypt/archive/' + get.siteName + '-00*')
public.ExecShell('rm -f /etc/letsencrypt/renewal/'+ get.siteName + '.conf')
public.ExecShell('rm -f /etc/letsencrypt/renewal/'+ get.siteName + '-00*.conf')
public.ExecShell('rm -f ' + path + '/README');
public.writeFile(keypath,result['data']['privateKey']);
public.writeFile(csrpath,result['data']['cert']+result['data']['certCa']);
public.writeFile(pidpath,get.partnerOrderId);
import panelSite
panelSite.panelSite().SetSSLConf(get);
public.serviceReload();
return public.returnMsg(True,'SET_SUCCESS');
except Exception,ex:
return public.returnMsg(False,'SET_ERROR,' + str(ex));
result['data'] = self.En_Code(result['data']);
return result;
    #Get the product list
def GetSSLProduct(self,get):
self.__PDATA['data'] = self.De_Code(self.__PDATA['data']);
result = json.loads(public.httpPost(self.__APIURL + '/GetSSLProduct',self.__PDATA));
result['data'] = self.En_Code(result['data']);
return result;
    #Encode request data (urlencode + hexlify)
def De_Code(self,data):
pdata = urllib.urlencode(data);
return binascii.hexlify(pdata);
    #Decode response data (unhexlify + unquote + JSON)
def En_Code(self,data):
result = urllib.unquote(binascii.unhexlify(data));
return json.loads(result);
| lgpl-3.0 | -1,451,442,486,167,215,600 | 41.138249 | 129 | 0.531007 | false |
quaddra/dist_job_mgr | dist_job_mgr/test_json_model.py | 1 | 1930 | import unittest
import logging
import model
import json_model
import tempfile
import shutil
import copy
import json
import os.path
from common import *
import json_model
class TestModelSaving(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.model = json_model.ModelAdapter(self.temp_dir)
self.model.create_database()
self.db_file_path = self.model.db_file_path
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_round_trip(self):
m = self.model
m.begin_transaction()
p = m.create_static_pool("pool1")
n1 = m.create_node("joe", 20001, False,
"n1", "n1", pool=p)
n2 = m.create_node("joe", 20001, False,
"n2", "n2", pool=p)
n3 = m.create_node("joe", 20001, False,
"n3", "n3", pool=p)
n4 = m.create_node("joe", 20001, False,
"n4", "n4", pool=None)
j = m.create_job("j1", JobType.ONE_TIME_JOB, "test job", "lockfile", p)
p.allocate_nodes_to_job(j, 2, ["n1", "n2"])
t1 = m.create_task("t1", j, "Test", n1, "test on n1")
t2 = m.create_task("t2", j, "Test", n2, "test on n2")
old_state = copy.deepcopy(m.state)
m.commit_transaction()
m.begin_transaction()
with open(self.db_file_path, "rb") as f:
data = json.load(f)
new_state = json_model.json_to_state(m, data)
new_state.compare(old_state)
class TestModel(unittest.TestCase, model.UnitTestMixin):
def setUp(self):
self.temp_dir = tempfile.mkdtemp() #os.path.expanduser("~/djm")
self.model = json_model.ModelAdapter(self.temp_dir)
self.model.create_database()
def tearDown(self):
shutil.rmtree(self.temp_dir)
if __name__ == '__main__':
logging.basicConfig()
unittest.main()
| apache-2.0 | -5,679,407,207,214,755,000 | 28.242424 | 79 | 0.56943 | false |
jgrynczewski/Assistive-Prototypes | modules/ewriting/eplatform.py | 1 | 14724 | #!/bin/env python2.7
# -*- coding: utf-8 -*-
# This file is part of AP - Assistive Prototypes.
#
# AP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AP. If not, see <http://www.gnu.org/licenses/>.
import wxversion
# wxversion.select('2.8')
import os, sys, psutil, time
import wx
import wx.lib.buttons as bt
from pymouse import PyMouse
from pygame import mixer
import EGaps, EMatch
#=============================================================================
class cwiczenia(wx.Frame):
def __init__(self, parent, id):
self.winWidth, self.winHeight = wx.DisplaySize( )
wx.Frame.__init__( self , parent , id , 'e-platform main menu')
style = self.GetWindowStyle( )
self.SetWindowStyle( style | wx.STAY_ON_TOP )
self.parent = parent
self.Maximize( True )
self.Centre( True )
self.MakeModal( True )
self.initializeParameters( )
self.initializeBitmaps( )
self.createGui( )
self.createBindings( )
self.initializeTimer( )
#-------------------------------------------------------------------------
def initializeParameters(self):
with open( './.pathToAP' ,'r' ) as textFile:
self.pathToAP = textFile.readline( )
sys.path.append( self.pathToAP )
from reader import reader
reader = reader()
reader.readParameters()
parameters = reader.getParameters()
for item in parameters:
try:
setattr(self, item[:item.find('=')], int(item[item.find('=')+1:]))
except ValueError:
setattr(self, item[:item.find('=')], item[item.find('=')+1:])
self.pressFlag = False
self.numberOfRows = 3,
self.numberOfColumns = 1,
self.numberOfIteration = 0
self.maxNumberOfIteration = 2 * self.numberOfRows[0]
self.flaga = 0
if self.control != 'tracker':
self.mouseCursor = PyMouse( )
self.mousePosition = self.winWidth - 8 - self.xBorder, self.winHeight - 8 - self.yBorder
self.mouseCursor.move( *self.mousePosition )
if self.switchSound.lower( ) != 'off' or self.pressSound.lower( ) != 'off':
mixer.init( )
self.switchingSound = mixer.Sound( self.pathToAP + '/sounds/switchSound.ogg' )
self.pressingSound = mixer.Sound( self.pathToAP + '/sounds/pressSound.ogg' )
self.powrotSound = mixer.Sound( self.pathToAP + '/sounds/powrot.ogg' )
self.slowoSound = mixer.Sound( self.pathToAP + '/sounds/slowo.ogg' )
            self.dziuraSound = mixer.Sound( self.pathToAP + '/sounds/dziura.ogg' )
            self.usypiamSound = mixer.Sound( self.pathToAP + '/sounds/usypiam.ogg' ) # assumed asset name; referenced in timerUpdate
self.poczatek = True
self.numberOfPresses = 1
#-------------------------------------------------------------------------
def initializeBitmaps(self):
self.functionButtonPath = [ wx.BitmapFromImage( wx.ImageFromStream( open(self.pathToAP + 'icons/back.png', 'rb' ) ) ) ]
self.functionButtonName = [ 'back' ]
#-------------------------------------------------------------------------
def initializeTimer(self):
id1 = wx.NewId( )
wx.RegisterId( id1 )
self.stoper = wx.Timer( self, id1 )
self.Bind( wx.EVT_TIMER, self.timerUpdate, self.stoper,id1 )
if self.control != 'tracker':
self.stoper.Start( self.timeGap )
#-------------------------------------------------------------------------
def createGui(self):
self.mainSizer = wx.GridBagSizer( self.xBorder, self.yBorder )
nazwy = [ u'DZIURA',u'SŁOWO' ]
kolory = [ 'indian red', 'yellow' ]
b = bt.GenButton( self, -1, nazwy[ 0 ], name = nazwy[ 0 ])
b.SetFont( wx.Font( 75, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
b.SetForegroundColour( kolory[ 0 ] )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
self.mainSizer.Add( b, ( 0, 0 ), wx.DefaultSpan, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border = self.xBorder )
b = bt.GenButton( self, -1, nazwy[ 1 ], name = nazwy[ 1 ])
b.SetFont( wx.Font( 75, wx.FONTFAMILY_ROMAN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False ) )
b.SetBezelWidth( 3 )
b.SetBackgroundColour( self.backgroundColour )
b.SetForegroundColour( kolory[ 1 ] )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
self.mainSizer.Add( b, ( 1, 0 ), wx.DefaultSpan, wx.EXPAND | wx.LEFT | wx.RIGHT, border = self.xBorder )
b = bt.GenBitmapButton( self, -1, bitmap = self.functionButtonPath[ 0 ], name = self.functionButtonName[ 0 ] )
b.SetBackgroundColour( self.backgroundColour )
b.SetBezelWidth( 3 )
b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
self.mainSizer.Add( b, ( 2, 0 ), wx.DefaultSpan, wx.EXPAND | wx.BOTTOM | wx.LEFT | wx.RIGHT, border = self.xBorder)
for number in range( self.numberOfRows[ 0 ] ):
self.mainSizer.AddGrowableRow( number )
for number in range( self.numberOfColumns[ 0 ] ):
self.mainSizer.AddGrowableCol( number )
self.SetSizer( self.mainSizer )
self.SetBackgroundColour( 'black' )
self.Layout( )
self.Refresh( )
self.Center( )
self.MakeModal( True )
self.flaga = 0
#-------------------------------------------------------------------------
def createBindings(self):
self.Bind( wx.EVT_CLOSE , self.OnCloseWindow )
#-------------------------------------------------------------------------
def OnCloseWindow(self, event):
if self.control != 'tracker':
            if True in [ 'debian' in item for item in os.uname( ) ]: #POSITION OF THE DIALOG WINDOW DEPENDS ON THE WINDOW MANAGER, NOT THE DESKTOP ENVIRONMENT. THERE IS NO REASONABLE WAY IN PYTHON TO CHECK WHICH WINDOW MANAGER IS RUNNING, AND SEVERAL WINDOW MANAGERS MAY RUN AT THE SAME TIME. NO CLEAN SOLUTION SHORT OF CREATING OUR OWN SIGNAL (AVR MICROCONTROLLERS).
if os.environ.get('KDE_FULL_SESSION'):
self.mousePosition = self.winWidth/1.7, self.winHeight/1.7
# elif ___: #for gnome-debian
# self.mousePosition = self.winWidth/6.5, self.winHeight/6.
else:
self.mousePosition = self.winWidth/1.8, self.winHeight/1.7
else:
self.mousePosition = self.winWidth/1.9, self.winHeight/1.68
self.mouseCursor.move( *self.mousePosition )
        dial = wx.MessageDialog(self, 'Are you sure you want to exit the program?', 'Exit',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION | wx.STAY_ON_TOP)
ret = dial.ShowModal( )
if ret == wx.ID_YES:
try:
if "smplayer" in [psutil.Process(i).name() for i in psutil.pids( )]:
os.system( 'smplayer -send-action quit' )
except TypeError:
if "smplayer" in [psutil.Process(i).name for i in psutil.pids( )]:
os.system( 'smplayer -send-action quit' )
try:
self.parent.parent.parent.Destroy()
self.parent.parent.Destroy()
self.parent.Destroy()
self.Destroy()
except AttributeError:
try:
self.parent.parent.Destroy()
self.parent.Destroy()
self.Destroy()
except AttributeError:
try:
self.parent.Destroy()
self.Destroy()
except AttributeError:
self.Destroy()
else:
event.Veto()
if self.control != 'tracker':
self.mousePosition = self.winWidth - 8 - self.xBorder, self.winHeight - 8 - self.yBorder
self.mouseCursor.move( *self.mousePosition )
#-------------------------------------------------------------------------
def onExit(self):
if self.parent:
self.parent.MakeModal( True )
self.parent.Show( )
if self.control == 'tracker':
self.parent.stoper.Start( 0.15 * self.parent.timeGap )
else:
self.parent.stoper.Start( self.parent.timeGap )
self.MakeModal( False )
self.Destroy( )
else:
self.MakeModal( False )
self.Destroy( )
#-------------------------------------------------------------------------
def onPress(self, event):
if self.pressSound.lower( ) != 'off':
self.pressingSound.play( )
if self.control == 'tracker':
if self.pressFlag == False:
self.button = event.GetEventObject()
self.button.SetBackgroundColour( self.selectionColour )
self.pressFlag = True
self.label = event.GetEventObject().GetName().encode( 'utf-8' )
self.stoper.Start( 0.15 * self.timeGap )
if self.label == 'DZIURA':
if self.pressSound.lower( ) == 'voice':
self.dziuraSound.play()
self.stoper.Stop( )
EGaps.cwiczenia( self, id = -1 ).Show( True )
self.MakeModal( False )
self.Hide( )
elif self.label == u'SŁOWO':
self.stoper.Stop( )
if self.pressSound.lower( ) == 'voice':
self.slowoSound.play()
EMatch.cwiczenia( self, id = -1 ).Show( True )
self.MakeModal( False )
self.Hide( )
if self.label == 'back':
self.stoper.Stop( )
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
if self.pressSound.lower( ) == 'voice':
self.powrotSound.play()
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
self.stoper.Start( self.timeGap )
self.onExit( )
else:
self.numberOfPresses += 1
self.numberOfIteration = 0
if self.numberOfPresses == 1:
items = self.mainSizer.GetChildren( )
if self.flaga == 'rest':
self.flaga = 0
else:
if self.flaga == 0:
b = items[ 2 ].GetWindow( )
elif self.flaga == 1:
b = items[ 0 ].GetWindow( )
elif self.flaga == 2:
b = items[ 1 ].GetWindow( )
b.SetBackgroundColour( self.selectionColour )
b.SetFocus( )
b.Update( )
if self.flaga == 0 :
self.stoper.Stop( )
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
if self.pressSound.lower( ) == 'voice':
self.powrotSound.play()
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
self.stoper.Start( self.timeGap )
self.onExit( )
if self.flaga == 1 :
self.stoper.Stop( )
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
if self.pressSound.lower( ) == 'voice':
self.dziuraSound.play()
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
self.stoper.Start( self.timeGap )
self.stoper.Stop( )
EGaps.cwiczenia( self, id = -1 ).Show( True )
self.MakeModal( False )
self.Hide( )
if self.flaga == 2 :
self.stoper.Stop( )
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
if self.pressSound.lower( ) == 'voice':
self.slowoSound.play()
time.sleep( ( self.selectionTime + self.timeGap )/(1000.*2) )
self.stoper.Start( self.timeGap )
self.stoper.Stop( )
EMatch.cwiczenia( self, id = -1 ).Show( True )
self.MakeModal( False )
self.Hide( )
else:
event.Skip( )
#-------------------------------------------------------------------------
def timerUpdate(self, event):
if self.control == 'tracker':
if self.button.GetBackgroundColour( ) == self.backgroundColour:
self.button.SetBackgroundColour( self.selectionColour )
else:
self.button.SetBackgroundColour( self.backgroundColour )
self.stoper.Stop( )
self.pressFlag = False
else:
self.mouseCursor.move( *self.mousePosition )
self.numberOfPresses = 0
self.numberOfIteration += 1
if self.flaga == 'rest':
pass
elif self.numberOfIteration > self.maxNumberOfIteration:
for i in range( 3 ):
item = self.mainSizer.GetItem( i )
b = item.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus( )
if self.switchSound == "voice":
self.usypiamSound.play()
self.flaga = 'rest'
else:
for i in range( 3 ):
item = self.mainSizer.GetItem( i )
b = item.GetWindow( )
b.SetBackgroundColour( self.backgroundColour )
b.SetFocus( )
item = self.mainSizer.GetItem( self.flaga )
b = item.GetWindow( )
b.SetBackgroundColour( self.scanningColour )
b.SetFocus( )
logo = b.Name
if self.switchSound.lower() == "voice":
if logo == "DZIURA":
self.dziuraSound.play()
elif logo == u"SŁOWO":
self.slowoSound.play()
elif logo == "back":
self.powrotSound.play()
if self.flaga == 2:
self.flaga = 0
else:
self.flaga += 1
if self.switchSound.lower( ) == 'on':
self.switchingSound.play( )
#=============================================================================
if __name__ == '__main__':
app = wx.App(False)
frame = cwiczenia( parent = None, id = -1 )
frame.Show( )
app.MainLoop( )
| gpl-3.0 | 8,931,400,673,670,100,000 | 35.162162 | 395 | 0.52378 | false |
sxjscience/tvm | tests/python/frontend/pytorch/test_forward.py | 1 | 113518 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""Unit tests for various models and operators"""
from time import time
import os
import sys
from scipy.stats import t as tdistr
import numpy as np
import torch
import torchvision
from torch.nn import Module
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.contrib.nvcc import have_fp16
import tvm.testing
from packaging import version as package_version
sys.setrecursionlimit(10000)
def list_ops(expr):
class OpLister(tvm.relay.ExprVisitor):
def visit_op(self, expr):
if expr not in self.node_set:
self.node_list.append(expr)
return super().visit_op(expr)
def list_nodes(self, expr):
self.node_set = {}
self.node_list = []
self.visit(expr)
return self.node_list
return OpLister().list_nodes(expr)
def assert_shapes_match(tru, est):
if tru.shape != est.shape:
msg = "Output shapes {} and {} don't match"
raise AssertionError(msg.format(tru.shape, est.shape))
def load_torchvision(model_name):
"""Given a model name, returns a Torchvision model in eval mode as well
as an example input."""
with torch.no_grad():
if model_name.startswith("inception"):
height = width = 299
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
else:
height = width = 224
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_shape = [1, 3, height, width]
input_data = torch.randn(input_shape).float()
for channel in range(3):
input_data[:, channel] -= mean[channel]
input_data[:, channel] /= std[channel]
if model_name.startswith("googlenet"):
model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)
else:
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.float().eval()
return model, [input_data]
def load_pretrainedmodels(model_name):
"""Given a model name, returns a pretrainedmodels.pytorch model in eval
mode as well as an example input."""
import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch
model = getattr(pretrainedmodels, model_name)().float().eval()
input_shape = [1, *model.input_size]
input_data = torch.rand(input_shape).float() * 256
for channel in range(3):
input_data[:, channel] -= model.mean[channel]
input_data[:, channel] /= model.std[channel]
return model, [input_data]
def load_model(model_name):
"""Given a model name, returns a model as well as an example input."""
if hasattr(torchvision.models, model_name):
return load_torchvision(model_name)
try:
import pretrainedmodels
if hasattr(pretrainedmodels, model_name):
return load_pretrainedmodels(model_name)
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install pretrainedmodels.pytorch")
raise RuntimeError("Model not supported")
def confidence_interval(mean, stdev, count, alpha=0.01):
"""Returns the lower and upper bounds of the confidence interval of a random
variable. Confidence is 1 - alpha (default confidence is 99%)."""
stdval = tdistr.ppf(1 - alpha / 2, count - 1)
lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)
return lower, upper
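# For intuition (assumed numbers): mean=1.0, stdev=0.1, count=100 with the
# default alpha=0.01 give stdval = t.ppf(0.995, 99) ~ 2.626, so the interval
# is roughly 1.0 +/- 2.626 * 0.1 / 10 = (0.974, 1.026).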
def measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):
"""Compute the latency of the given model"""
latencies = []
count = 0
while True:
if isinstance(model, Module):
input_data = [torch.rand(shape).float() for shape in input_shapes]
if torch.cuda.is_available():
input_data = list(map(lambda x: x.cuda(), input_data))
model = model.cuda()
t_start = time()
with torch.no_grad():
model(*input_data)
t_end = time()
latencies.append(t_end - t_start)
else:
input_data = {}
for i, shape in enumerate(input_shapes):
name = "input" + str(i)
arr = np.random.random(shape).astype("float32")
input_data[name] = tvm.nd.array(arr)
t_start = time()
model.set_input(**input_data)
model.run()
for i, shape in enumerate(output_shapes):
arr = np.zeros(shape).astype("float32")
model.get_output(i, tvm.nd.array(arr))
t_end = time()
count += 1
if count < dryruns:
continue
latencies.append(t_end - t_start)
mean = np.mean(latencies)
stdev = np.std(latencies)
sample_size = len(latencies)
if sample_size > dryruns:
lower, upper = confidence_interval(mean, stdev, sample_size)
est = (upper + lower) / 2
err = (upper - lower) / 2
if err < thresh:
return est
def verify_model(model_name, input_data=[], custom_convert_map={}, rtol=1e-5, atol=1e-5):
"""Assert that the output of a compiled model matches with that of its
baseline."""
if isinstance(model_name, str):
baseline_model, baseline_input = load_model(model_name)
elif isinstance(input_data, list):
baseline_model = model_name
baseline_input = input_data
elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:
baseline_model = model_name
baseline_input = [input_data]
else:
assert False, "Unexpected input format"
if torch.cuda.is_available():
if isinstance(baseline_model, torch.nn.Module):
baseline_model = baseline_model.cuda()
baseline_input = [inp.cuda() for inp in baseline_input]
with torch.no_grad():
baseline_outputs = baseline_model(*baseline_input)
if isinstance(baseline_outputs, tuple):
baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
else:
baseline_outputs = (baseline_outputs.cpu().numpy(),)
trace = torch.jit.trace(baseline_model, baseline_input)
if isinstance(baseline_model, torch.nn.Module):
trace = trace.float().eval()
if torch.cuda.is_available():
trace = trace.cuda()
else:
trace = trace.cpu()
input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
compiled_input = dict(zip(input_names, [inp.cpu().numpy() for inp in baseline_input]))
with tvm.transform.PassContext(opt_level=3):
for target, ctx in tvm.testing.enabled_targets():
relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
relay_model.set_input(**relay_params)
for name, inp in compiled_input.items():
relay_model.set_input(name, inp)
relay_model.run()
for i, baseline_output in enumerate(baseline_outputs):
compiled_output = relay_model.get_output(i).asnumpy()
assert_shapes_match(baseline_output, compiled_output)
tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)
del model_name
del baseline_model
torch.cuda.empty_cache()
# Single operator tests
@tvm.testing.uses_gpu
def test_forward_pixel_shuffle():
torch.set_grad_enabled(False)
input_shape = [1, 144, 16, 16]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.PixelShuffle(2).float().eval(), input_data=input_data)
verify_model(torch.nn.PixelShuffle(3).float().eval(), input_data=input_data)
verify_model(torch.nn.PixelShuffle(4).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_add():
torch.set_grad_enabled(False)
input_shape = [10]
class Add1(Module):
def forward(self, *args):
return args[0] + args[0]
class Add2(Module):
def forward(self, *args):
return args[0] + 1
class Add3(Module):
def forward(self, *args):
ones = torch.ones(input_shape, dtype=torch.float)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] + ones
class Add4(Module):
def forward(self, *args):
ones = torch.ones([], dtype=torch.float)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] + ones
input_data = torch.rand(input_shape).float()
verify_model(Add1().float().eval(), input_data=input_data)
verify_model(Add2().float().eval(), input_data=input_data)
verify_model(Add3().float().eval(), input_data=input_data)
verify_model(Add4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_subtract():
torch.set_grad_enabled(False)
input_shape = [10]
class Subtract1(Module):
def forward(self, *args):
return args[0] - args[0]
class Subtract2(Module):
def forward(self, *args):
return args[0] - 1
class Subtract3(Module):
def forward(self, *args):
ones = torch.ones(input_shape)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] - ones
class Subtract4(Module):
def forward(self, *args):
ones = torch.ones([])
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] - ones
input_data = torch.rand(input_shape).float()
verify_model(Subtract1().float().eval(), input_data=input_data)
verify_model(Subtract2().float().eval(), input_data=input_data)
verify_model(Subtract3().float().eval(), input_data=input_data)
verify_model(Subtract4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_multiply():
torch.set_grad_enabled(False)
input_shape = [10]
class Multiply1(Module):
def forward(self, *args):
return args[0] * args[0]
class Multiply2(Module):
def forward(self, *args):
return args[0] * 1.0
class Multiply3(Module):
def forward(self, *args):
ones = torch.ones(input_shape)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] * ones
class Multiply4(Module):
def forward(self, *args):
ones = torch.ones([])
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] * ones
input_data = torch.rand(input_shape).float()
verify_model(Multiply1().float().eval(), input_data=input_data)
verify_model(Multiply2().float().eval(), input_data=input_data)
verify_model(Multiply3().float().eval(), input_data=input_data)
verify_model(Multiply4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_min_max():
class Max(Module):
def forward(self, inp):
return torch.max(inp)
class Min(Module):
def forward(self, inp):
return torch.min(inp)
class Max2(Module):
def forward(self, inp):
out, _ = torch.max(inp, 1, keepdim=True)
return out
class Min2(Module):
def forward(self, inp):
out, _ = torch.min(inp, 0, keepdim=False)
return out
class Max3(Module):
def forward(self, lhs, rhs):
return torch.max(lhs, rhs)
class Min3(Module):
def forward(self, lhs, rhs):
return torch.min(lhs, rhs)
input_data = [torch.rand((10, 10)), torch.rand((10, 10))]
verify_model(Max(), input_data=input_data[0])
verify_model(Min(), input_data=input_data[0])
verify_model(Max2(), input_data=input_data[0])
verify_model(Min2(), input_data=input_data[0])
verify_model(Max3(), input_data=input_data)
verify_model(Min3(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reciprocal():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
class Reciprocal1(Module):
def forward(self, *args):
return args[0].reciprocal()
input_data = torch.rand(input_shape).float()
verify_model(Reciprocal1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_repeat():
torch.set_grad_enabled(False)
input_shape = [1, 3]
class Repeat1(Module):
def forward(self, *args):
return args[0].repeat(1, 1)
class Repeat2(Module):
def forward(self, *args):
return args[0].repeat(4, 2)
class Repeat3(Module):
def forward(self, *args):
return args[0].repeat(4, 2, 1)
input_data = torch.rand(input_shape).float()
verify_model(Repeat1().float().eval(), input_data=input_data)
verify_model(Repeat2().float().eval(), input_data=input_data)
verify_model(Repeat3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_repeat_interleave():
torch.set_grad_enabled(False)
input_shape = [2, 2, 3]
class RepeatInterleave1(Module):
def forward(self, *args):
return args[0].repeat_interleave(2)
class RepeatInterleave2(Module):
def forward(self, *args):
return args[0].repeat_interleave(3, dim=0)
class RepeatInterleave3(Module):
def forward(self, *args):
return args[0].repeat_interleave(2, dim=1)
class RepeatInterleave4(Module):
def forward(self, *args):
return args[0].repeat_interleave(4, dim=2)
input_data = torch.rand(input_shape).float()
verify_model(RepeatInterleave1().float().eval(), input_data=input_data)
verify_model(RepeatInterleave2().float().eval(), input_data=input_data)
verify_model(RepeatInterleave3().float().eval(), input_data=input_data)
verify_model(RepeatInterleave4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_unsqueeze():
torch.set_grad_enabled(False)
input_shape = [10, 10]
class Unsqueeze1(Module):
def forward(self, *args):
return args[0].unsqueeze(2)
input_data = torch.rand(input_shape).float()
verify_model(Unsqueeze1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_squeeze():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
class Squeeze1(Module):
def forward(self, *args):
return args[0].squeeze()
class Squeeze2(Module):
def forward(self, *args):
return args[0].squeeze(1)
input_data = torch.rand(input_shape).float()
verify_model(Squeeze1().float().eval(), input_data=input_data)
verify_model(Squeeze2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_arange():
torch.set_grad_enabled(False)
class Arange1(Module):
def forward(self, *args):
return torch.arange(5)
class Arange2(Module):
def forward(self, *args):
return torch.arange(2.5)
class Arange3(Module):
def forward(self, *args):
return torch.arange(1, 4)
class Arange4(Module):
def forward(self, *args):
return torch.arange(1, 2.5, 0.5)
class Arange5(Module):
def forward(self, *args):
return torch.arange(1, 2, 1, dtype=torch.int32)
class Arange6(Module):
def forward(self, *args):
return torch.arange(start=1, end=6, step=2)
class Arange7(Module):
def forward(self, *args):
return torch.arange(1, 4, dtype=torch.float32)
class Arange8(Module):
def forward(self, *args):
return torch.arange(1, 2, 1, dtype=torch.int16)
class Arange9(Module):
def forward(self, *args):
end = torch.add(torch.tensor(4), 1)
return torch.arange(end) + torch.ones((5,), dtype=torch.int64)
class Arange10(Module):
def forward(self, *args):
end = torch.add(torch.tensor(4.0), torch.tensor(1.0))
return torch.arange(end) + torch.ones((5,), dtype=torch.float)
class Arange11(Module):
def forward(self, *args):
start = torch.add(torch.tensor(1), 1)
end = torch.add(torch.tensor(4), 1)
step = torch.add(torch.tensor(2), 1)
out = torch.arange(start, end, step)
return out + torch.ones((3,), dtype=torch.int64)
class Arange12(Module):
def forward(self, *args):
start = torch.add(torch.tensor(1), 1)
end = torch.add(torch.tensor(4), 1)
step = torch.add(torch.tensor(2.5), torch.tensor(4.1))
out = torch.arange(start, end, step)
return out + torch.ones((3,), dtype=torch.float)
verify_model(Arange1().float().eval())
verify_model(Arange2().float().eval())
verify_model(Arange3().float().eval())
verify_model(Arange4().float().eval())
verify_model(Arange5().float().eval())
verify_model(Arange6().float().eval())
verify_model(Arange7().float().eval())
verify_model(Arange8().float().eval())
verify_model(Arange9().float().eval())
verify_model(Arange10().float().eval())
verify_model(Arange11().float().eval())
verify_model(Arange12().float().eval())
@tvm.testing.uses_gpu
def test_forward_mesh_grid():
torch.set_grad_enabled(False)
class MeshGrid1(Module):
def forward(self, *args):
x = torch.tensor([1, 2, 3])
y = torch.tensor([4, 5, 6])
grid_x, grid_y = torch.meshgrid([x, y])
return grid_x, grid_y
class MeshGrid2(Module):
def forward(self, *args):
x = torch.tensor([1, 2, 3], dtype=torch.float32)
y = torch.add(torch.tensor(5, dtype=torch.float32), 1)
grid_x, grid_y = torch.meshgrid([x, y])
return grid_x, grid_y
verify_model(MeshGrid1().float().eval())
verify_model(MeshGrid2().float().eval())
@tvm.testing.uses_gpu
def test_forward_abs():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
class Abs1(Module):
def forward(self, *args):
return args[0].abs()
input_data = torch.rand(input_shape).float()
verify_model(Abs1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_concatenate():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Concatenate1(Module):
def forward(self, *args):
return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)
class Concatenate2(Module):
def forward(self, *args):
a = (args[0][:, :, 0] + 2) * 7
b = (args[0][:, :, 1] + 3) * 11
c = (args[0][:, :, 2] + 5) * 13
return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)
input_data = torch.rand(input_shape).float()
verify_model(Concatenate1().float().eval(), input_data=input_data)
verify_model(Concatenate2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_relu():
torch.set_grad_enabled(False)
input_shape = [10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.ReLU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_prelu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_leakyrelu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.LeakyReLU().eval(), input_data=input_data)
verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)
verify_model(torch.nn.LeakyReLU(negative_slope=1.0, inplace=True).eval(), input_data=input_data)
verify_model(
torch.nn.LeakyReLU(negative_slope=1.25, inplace=True).eval(), input_data=input_data
)
@tvm.testing.uses_gpu
def test_forward_elu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.ELU().eval(), input_data=input_data)
verify_model(torch.nn.ELU(alpha=0.3).eval(), input_data=input_data)
verify_model(torch.nn.ELU(alpha=1.0).eval(), input_data=input_data)
verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_celu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.CELU().eval(), input_data=input_data)
verify_model(torch.nn.CELU(alpha=0.3).eval(), input_data=input_data)
verify_model(torch.nn.CELU(alpha=1.0).eval(), input_data=input_data)
verify_model(torch.nn.CELU(alpha=1.3).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_gelu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.GELU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_selu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.SELU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_softplus():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Softplus().eval(), input_data=input_data)
verify_model(torch.nn.Softplus(beta=1.5, threshold=20).eval(), input_data=input_data)
verify_model(torch.nn.Softplus(beta=5, threshold=10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_softsign():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Softsign().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_log_sigmoid():
torch.set_grad_enabled(False)
input_shape = [10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_adaptiveavgpool():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)
verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool2d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(), input_data)
verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(), input_data)
verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2).eval(), input_data)
# A functional variant (default strides = None case)
class MaxPool2D(Module):
def forward(self, *args):
return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])
verify_model(MaxPool2D(), input_data=input_data)
class MaxPool2DWithIndices(Module):
def __init__(self):
super(MaxPool2DWithIndices, self).__init__()
self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)
def forward(self, *args):
output, indices = self.pool(args[0])
return output
verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool1d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(), input_data)
verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(), input_data)
verify_model(torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2).eval(), input_data)
# A functional variant (default strides = None case)
class MaxPool1D(Module):
def forward(self, *args):
return torch.nn.functional.max_pool1d(args[0], kernel_size=10)
verify_model(MaxPool1D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool3d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), input_data)
verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), input_data)
verify_model(torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), input_data)
# A functional variant (default strides = None case)
class MaxPool3D(Module):
def forward(self, *args):
return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])
verify_model(MaxPool3D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_split():
torch.set_grad_enabled(False)
input_shape = [4, 10]
class Split(Module):
def __init__(self, split_size_or_sections, dim):
super(Split, self).__init__()
self.split_size_or_sections = split_size_or_sections
self.dim = dim
def forward(self, *args):
return torch.split(args[0], self.split_size_or_sections, self.dim)
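    # An int split size yields equal chunks (the last may be smaller); a list gives
    # explicit section sizes that must sum to the dimension length.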
input_data = torch.rand(input_shape).float()
verify_model(Split(2, 0).float().eval(), input_data=input_data)
verify_model(Split(3, 1).float().eval(), input_data=input_data)
verify_model(Split(4, 1).float().eval(), input_data=input_data)
verify_model(Split([2, 3, 5], 1).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class AvgPool2D2(Module):
def forward(self, *args):
return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
verify_model(AvgPool2D2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool3d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10, 10]
class AvgPool3D1(Module):
def forward(self, *args):
return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)
verify_model(AvgPool3D1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_hardtanh():
torch.set_grad_enabled(False)
input_shape = [10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_conv():
torch.set_grad_enabled(False)
conv1d_input_shape = [1, 3, 10]
conv2d_input_shape = [1, 3, 10, 10]
class Conv2D1(Module):
def __init__(self):
super(Conv2D1, self).__init__()
self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv2D2(Module):
def __init__(self):
super(Conv2D2, self).__init__()
self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv2D3(Module):
def __init__(self):
super(Conv2D3, self).__init__()
self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv1D1(Module):
def __init__(self):
super(Conv1D1, self).__init__()
self.conv = torch.nn.Conv1d(3, 6, 7)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv1D2(Module):
def __init__(self):
super(Conv1D2, self).__init__()
self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv1D3(Module):
def __init__(self):
super(Conv1D3, self).__init__()
self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
conv2d_input_data = torch.rand(conv2d_input_shape).float()
verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)
verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)
    # depthwise conv with channel multiplier 2
verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)
# group conv
verify_model(
torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),
input_data=torch.randn((1, 8, 16, 16)),
)
conv1d_input_data = torch.rand(conv1d_input_shape).float()
verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)
verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)
verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
def test_forward_conv_transpose():
torch.set_grad_enabled(False)
conv2d_input_shape = [1, 3, 10, 10]
conv2d_input_data = torch.rand(conv2d_input_shape).float()
verify_model(torch.nn.ConvTranspose2d(3, 6, 7, bias=True), input_data=conv2d_input_data)
verify_model(torch.nn.ConvTranspose2d(3, 12, 3, bias=False), input_data=conv2d_input_data)
conv1d_input_shape = [1, 3, 10]
conv1d_input_data = torch.rand(conv1d_input_shape).float()
verify_model(torch.nn.ConvTranspose1d(3, 6, 7, bias=True), input_data=conv1d_input_data)
verify_model(torch.nn.ConvTranspose1d(3, 12, 3, bias=False), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
def test_forward_threshold():
torch.set_grad_enabled(False)
input_shape = [1, 3]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_contiguous():
torch.set_grad_enabled(False)
input_shape = [10]
class Contiguous1(Module):
def forward(self, *args):
return args[0].contiguous()
input_data = torch.rand(input_shape).float()
verify_model(Contiguous1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_batchnorm():
def init_weight(m):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.normal_(m.bias)
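    # Randomizing the affine parameters keeps the check from passing trivially
    # with the default gamma=1, beta=0 initialization.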
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:
init_weight(bn.eval())
verify_model(bn.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_instancenorm():
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for ins_norm, inp in [
(torch.nn.InstanceNorm2d(16), inp_2d),
(torch.nn.InstanceNorm3d(16), inp_3d),
]:
verify_model(ins_norm.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_layernorm():
def init_weight(m):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.normal_(m.bias, 0.02)
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:
init_weight(ln.eval())
verify_model(ln.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_groupnorm():
input_shape = [10, 6, 5, 5]
input_data = torch.rand(input_shape).float()
# Separate 6 channels into 3 groups
verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)
# Put all 6 channels into a single group (equivalent with LayerNorm)
verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)
# Separate 6 channels into 6 groups (equivalent with InstanceNorm)
verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)
input_shape = [1, 10, 4, 7]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)
verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)
verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)
verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reshape():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
new_shape = [2, 1, 10, 10]
class Reshape1(Module):
def forward(self, *args):
return args[0].reshape(new_shape)
class Reshape2(Module):
def forward(self, *args):
return args[0].reshape([-1])
class Reshape3(torch.nn.Module):
def forward(self, x):
x_shape = x.shape
return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))
input_data = torch.rand(input_shape).float()
verify_model(Reshape1(), input_data=input_data)
verify_model(Reshape2(), input_data=input_data)
verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))
@tvm.testing.uses_gpu
def test_flatten():
class Flatten(Module):
def forward(self, x):
return torch.flatten(x)
class BatchFlatten(Module):
def forward(self, x):
return torch.flatten(x, start_dim=1)
inp = torch.rand((5, 2, 2))
verify_model(Flatten(), input_data=inp)
verify_model(BatchFlatten(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_transpose():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Transpose1(Module):
def forward(self, *args):
return args[0].transpose(2, 3)
class Transpose2(Module):
def forward(self, *args):
return args[0].transpose(-2, -1)
class Transpose3(Module):
def forward(self, *args):
return args[0].permute(0, 2, 3, 1)
input_data = torch.rand(input_shape).float()
verify_model(Transpose1().float().eval(), input_data=input_data)
verify_model(Transpose2().float().eval(), input_data=input_data)
verify_model(Transpose3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_size():
torch.set_grad_enabled(False)
input_shape = [1, 3]
class Size1(Module):
def forward(self, *args):
return float(args[0].size(0)) * args[0]
input_data = torch.rand(input_shape).float()
verify_model(Size1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_type_as():
torch.set_grad_enabled(False)
input_shape = [1, 3]
def _create_module(dtype):
class TypeAs(Module):
def forward(self, *args):
expected_type_tensor = torch.zeros(1, 3, dtype=dtype)
return args[0].type_as(expected_type_tensor)
return TypeAs()
input_data = torch.randn(input_shape).float()
verify_model(_create_module(torch.float64), input_data=input_data)
verify_model(_create_module(torch.float32), input_data=input_data)
verify_model(_create_module(torch.int64), input_data=input_data)
verify_model(_create_module(torch.int32), input_data=input_data)
verify_model(_create_module(torch.int16), input_data=input_data)
verify_model(_create_module(torch.int8), input_data=input_data)
if torch.cuda.is_available():
check_fp16 = False
try:
# Only check half precision on supported hardwares.
if have_fp16(tvm.gpu(0).compute_version):
check_fp16 = True
        except Exception:
# If GPU is not enabled in TVM, skip the fp16 test.
pass
# Temporary disable fp16 test
check_fp16 = False
if check_fp16:
verify_model(_create_module(torch.float16), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_view():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class View1(Module):
def forward(self, *args):
return args[0].view((1, 3 * 10 * 10))
class View2(Module):
def forward(self, *args):
return args[0].view(args[0].shape[0], -1)
class View3(Module):
def forward(self, *args):
d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)
return args[0].view(args[0].shape[0], d1)
input_data = torch.rand(input_shape).float()
verify_model(View1().float().eval(), input_data=input_data)
verify_model(View2().float().eval(), input_data=input_data)
verify_model(View3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_select():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Select1(Module):
def forward(self, *args):
return args[0].select(1, 1)
class IndexedSelect(Module):
def __init__(self, inp, dim):
super().__init__()
self.inp = inp
self.dim = dim
if torch.cuda.is_available():
self.inp = self.inp.cuda()
def forward(self, index):
return torch.index_select(self.inp, self.dim, index)
input_data = torch.rand(input_shape).float()
verify_model(Select1().float().eval(), input_data=input_data)
x = torch.randn(3, 4)
indices = torch.tensor([0, 2])
verify_model(IndexedSelect(x, 0).eval(), input_data=indices)
verify_model(IndexedSelect(x, 1).eval(), input_data=indices)
@tvm.testing.uses_gpu
def test_forward_clone():
torch.set_grad_enabled(False)
input_shape = [10]
class Clone1(Module):
def forward(self, *args):
return args[0].clone()
input_data = torch.rand(input_shape).float()
verify_model(Clone1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_gather():
torch.set_grad_enabled(False)
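    # torch.gather picks values along `dim`: e.g. for dim=0, out[i][j] = input[index[i][j]][j].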
class Gather1(Module):
def forward(self, *args):
return torch.gather(args[0], 0, args[1])
class Gather2(Module):
def forward(self, *args):
return torch.gather(args[0], 1, args[1])
class Gather3(Module):
def forward(self, *args):
return torch.gather(args[0], 2, args[1])
input_data = torch.rand((4,)).float()
index = torch.tensor([1])
verify_model(Gather1().float().eval(), input_data=[input_data, index])
input_data = torch.rand((2, 2)).float()
index = torch.tensor([[1, 0], [0, 1]])
verify_model(Gather1().float().eval(), input_data=[input_data, index])
input_data = torch.tensor([[1, 2], [3, 4]])
index = torch.tensor([[0, 0], [1, 0]])
verify_model(Gather2().float().eval(), input_data=[input_data, index])
input_data = torch.rand((2, 2)).float()
index = torch.tensor([[1, 0], [0, 1]])
verify_model(Gather2().float().eval(), input_data=[input_data, index])
input_data = torch.rand((3, 3, 3)).float()
index = torch.tensor(
[
[[1, 0, 0], [1, 0, 1], [0, 1, 1]],
[[1, 1, 1], [1, 2, 1], [1, 0, 1]],
[[1, 2, 1], [1, 2, 1], [1, 2, 1]],
]
)
verify_model(Gather3().float().eval(), input_data=[input_data, index])
@tvm.testing.uses_gpu
def test_forward_logsoftmax():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class LogSoftmax1(Module):
def forward(self, *args):
return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])
input_data = torch.rand(input_shape).float()
verify_model(LogSoftmax1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_norm():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
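    # torch.norm accepts p = +/-inf (max/min of |x|) as well as arbitrary real exponents;
    # dim=None reduces over all elements.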
class Norm1(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("inf"), dim=None, keepdim=False)
class Norm2(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=False)
class Norm3(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=True)
class Norm4(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("inf"), dim=(1, 2), keepdim=False)
class Norm5(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("inf"), dim=(1), keepdim=True)
class Norm6(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)
class Norm7(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(1), dim=None, keepdim=False)
class Norm8(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)
class Norm9(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)
class Norm10(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)
input_data = torch.rand(input_shape).float()
verify_model(Norm1().float().eval(), input_data=input_data)
verify_model(Norm2().float().eval(), input_data=input_data)
verify_model(Norm3().float().eval(), input_data=input_data)
verify_model(Norm4().float().eval(), input_data=input_data)
verify_model(Norm5().float().eval(), input_data=input_data)
verify_model(Norm6().float().eval(), input_data=input_data)
verify_model(Norm7().float().eval(), input_data=input_data)
verify_model(Norm8().float().eval(), input_data=input_data)
verify_model(Norm9().float().eval(), input_data=input_data)
verify_model(Norm10().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_frobenius_norm():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class FroNorm1(Module):
def forward(self, *args):
return torch.norm(args[0])
class FroNorm2(Module):
def forward(self, *args):
return torch.norm(args[0], p="fro", dim=None, keepdim=True)
class FroNorm3(Module):
def forward(self, *args):
return torch.norm(args[0], p="fro", dim=(1), keepdim=True)
class FroNorm4(Module):
def forward(self, *args):
return torch.norm(args[0], dim=None, keepdim=False)
input_data = torch.rand(input_shape).float()
verify_model(FroNorm1().float().eval(), input_data=input_data)
verify_model(FroNorm2().float().eval(), input_data=input_data)
verify_model(FroNorm3().float().eval(), input_data=input_data)
verify_model(FroNorm4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_sigmoid():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_dense():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Dense1(Module):
def __init__(self):
super(Dense1, self).__init__()
self.linear = torch.nn.Linear(10, 7, bias=True)
def forward(self, *args):
return self.linear(args[0][0, 0])
class Dense2(Module):
def __init__(self):
super(Dense2, self).__init__()
self.linear = torch.nn.Linear(10, 7, bias=False)
def forward(self, *args):
return self.linear(args[0][0, 0])
input_data = torch.rand(input_shape).float()
verify_model(Dense1().float().eval(), input_data=input_data)
verify_model(Dense2().float().eval(), input_data=input_data)
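    # Graph-level sanity check: the traced linear layer must not lower to a
    # multiply-based pattern, i.e. no "multiply" op may appear in the Relay module.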
trace = torch.jit.trace(Dense1(), [input_data])
mod, params = relay.frontend.from_pytorch(
trace,
[("input", input_shape)],
)
    assert not any(op.name == "multiply" for op in list_ops(mod["main"]))
@tvm.testing.uses_gpu
def test_forward_dropout():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=input_data[0, 0])
verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=input_data[0])
verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=input_data)
verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=input_data[0, 0])
@tvm.testing.uses_gpu
def test_forward_slice():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Slice1(Module):
def forward(self, *args):
return args[0][:, :, :, :3]
class Slice2(Module):
def forward(self, *args):
return args[0][0, :, :-3, :]
class Slice3(Module):
def forward(self, *args):
x0 = torch.tensor(2) - torch.tensor(1)
x1 = torch.tensor(3) + torch.tensor(1)
return args[0][:, x0:, 1:x1, :]
class SliceWithStride(torch.nn.Module):
def forward(self, x):
return x[..., 0::2] + x[..., 1::2]
class SliceWithStride2(torch.nn.Module):
def forward(self, x):
return x[0::2, 0::2] + x[1::2, 1::2]
input_data = torch.rand(input_shape).float()
verify_model(Slice1(), input_data=input_data)
verify_model(Slice2(), input_data=input_data)
verify_model(Slice3(), input_data=input_data)
verify_model(SliceWithStride(), input_data=torch.randn(1, 4))
verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))
@tvm.testing.uses_gpu
def test_forward_mean():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Mean1(Module):
def forward(self, *args):
return args[0].mean(2)
input_data = torch.rand(input_shape).float()
verify_model(Mean1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_expand():
torch.set_grad_enabled(False)
class Expand1(Module):
def forward(self, *args):
return args[0].expand((3, -1, -1, -1))
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Expand1().float().eval(), input_data=input_data)
class Expand2(Module):
def forward(self, *args):
return args[0].expand((3, 3, 3, 1))
input_shape = [3, 1]
input_data = torch.rand(input_shape).float()
verify_model(Expand2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_pow():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Pow1(Module):
def forward(self, *args):
return args[0] ** 2
input_data = torch.rand(input_shape).float()
verify_model(Pow1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_chunk():
torch.set_grad_enabled(False)
input_shape = [1, 3, 14, 14]
class Chunk1(Module):
def forward(self, *args):
chunks = args[0].chunk(7, 2)
return torch.cat(chunks, 2)
input_data = torch.rand(input_shape).float()
verify_model(Chunk1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_upsample():
class Upsample(Module):
def __init__(self, size=None, scale=None, mode="nearest", align_corners=None):
super().__init__()
self.size = size
self.scale = scale
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
return torch.nn.functional.interpolate(
x,
size=self.size,
scale_factor=self.scale,
mode=self.mode,
align_corners=self.align_corners,
)
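    # Note: interpolate only accepts align_corners for the (bi/tri)linear modes,
    # so it stays None for the "nearest" cases below.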
inp = torch.rand((1, 3, 32, 32))
verify_model(Upsample(size=(64, 64), mode="nearest"), inp)
verify_model(Upsample(scale=2, mode="nearest"), inp)
verify_model(Upsample(size=(50, 50), mode="nearest"), inp)
verify_model(Upsample(size=(64, 64), mode="bilinear", align_corners=True), inp)
verify_model(Upsample(scale=2, mode="bilinear", align_corners=True), inp)
verify_model(Upsample(size=(50, 50), mode="bilinear", align_corners=True), inp)
@tvm.testing.uses_gpu
def test_to():
""" test for aten::to(...) """
class ToCPU(Module):
def forward(self, x):
return x.to("cpu")
class ToFloat(Module):
def forward(self, x):
return x.float()
class ToInt(Module):
def forward(self, x):
return x.int()
class ToLong(Module):
def forward(self, x):
return x.long()
class ToDouble(Module):
def forward(self, x):
return x.double()
class ToFloat16(Module):
def forward(self, x):
return x.to(torch.float16)
verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))
verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))
verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))
verify_model(ToInt().eval(), torch.tensor(0.8))
verify_model(ToLong().eval(), torch.tensor(0.8))
verify_model(ToDouble().eval(), torch.tensor(0.8))
verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))
verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
@tvm.testing.uses_gpu
def test_adaptive_pool3d():
for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
inp = torch.rand(ishape)
verify_model(torch.nn.AdaptiveMaxPool3d((1, 1, 1)).eval(), inp)
verify_model(torch.nn.AdaptiveMaxPool3d((2, 2, 2)).eval(), inp)
verify_model(torch.nn.AdaptiveAvgPool3d((1, 1, 1)).eval(), inp)
verify_model(torch.nn.AdaptiveAvgPool3d((2, 2, 2)).eval(), inp)
verify_model(torch.nn.AdaptiveAvgPool3d((4, 8, 8)).eval(), inp)
verify_model(torch.nn.AdaptiveMaxPool3d((7, 8, 9)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_functional_pad():
torch.set_grad_enabled(False)
pad = (0, 0)
class Pad1(Module):
def forward(self, *args):
return torch.nn.functional.pad(args[0], pad, "constant", 0)
input_data = torch.rand((3, 3, 4, 2))
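    # F.pad consumes (left, right) pairs starting from the last dimension; each
    # additional pair pads one more leading dimension.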
pad = (1, 1)
verify_model(Pad1().float().eval(), input_data=input_data)
pad = (1, 1, 2, 2)
verify_model(Pad1().float().eval(), input_data=input_data)
pad = (0, 1, 2, 1, 3, 3)
verify_model(Pad1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zero_pad2d():
inp = torch.rand((1, 1, 3, 3))
verify_model(torch.nn.ZeroPad2d(2).eval(), inp)
verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad1d():
inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ConstantPad1d(2, 3.5).eval(), inp)
inp = torch.rand((1, 2, 3))
    verify_model(torch.nn.ConstantPad1d((3, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad2d():
inp = torch.rand((1, 2, 2, 2))
verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)
verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad3d():
inp = torch.rand((1, 3, 2, 2, 2))
verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), inp)
verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_reflection_pad1d():
inp = torch.rand((1, 2, 4))
verify_model(torch.nn.ReflectionPad1d(2).eval(), inp)
verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), inp)
inp = torch.rand((2, 4, 5))
verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_reflection_pad2d():
inp = torch.rand((1, 1, 3, 3))
verify_model(torch.nn.ReflectionPad2d(2).eval(), inp)
verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), inp)
inp = torch.rand((2, 4, 5, 6))
verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad1d():
inp = torch.rand((1, 2, 4))
verify_model(torch.nn.ReplicationPad1d(2).eval(), inp)
verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), inp)
inp = torch.rand((2, 4, 5))
verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad2d():
inp = torch.rand((1, 1, 3, 3))
verify_model(torch.nn.ReplicationPad2d(2).eval(), inp)
verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), inp)
inp = torch.rand((2, 4, 5, 6))
verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad3d():
inp = torch.rand((1, 1, 3, 3, 3))
verify_model(torch.nn.ReplicationPad3d(3).eval(), inp)
verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), inp)
inp = torch.rand((7, 5, 4, 5, 6))
verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_upsample3d():
inp = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)
verify_model(torch.nn.Upsample(scale_factor=2, mode="nearest").eval(), inp)
verify_model(torch.nn.Upsample(scale_factor=2, mode="trilinear").eval(), inp)
verify_model(
torch.nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True).eval(), inp
)
def test_forward_nms():
"""dynamic Non-Maximum Suppression"""
torch.set_grad_enabled(False)
    class NonMaxSuppression(Module):
def __init__(self, iou_thres):
super().__init__()
self.iou_threshold = iou_thres
def forward(self, *args):
return torchvision.ops.nms(args[0], args[1], self.iou_threshold)
# Generate random input data
def _gen_rand_inputs(num_boxes):
box_len = 4
boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5
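        # shift the corners so x2 >= x1 and y2 >= y1, i.e. valid (x1, y1, x2, y2) boxes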
boxes[:, 2] += boxes[:, 0]
boxes[:, 3] += boxes[:, 1]
scores = torch.rand(num_boxes, dtype=torch.float)
return boxes, scores
targets = ["llvm"] # dynamic nms does not work on gpu
for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:
in_boxes, in_scores = _gen_rand_inputs(num_boxes)
        verify_trace_model(NonMaxSuppression(iou_thres), [in_boxes, in_scores], targets)
def test_forward_roi_align():
"""ROI align"""
torch.set_grad_enabled(False)
    class ROIAlign(Module):
def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):
super().__init__()
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.output_sizes = output_sizes
def forward(self, *args):
return torchvision.ops.roi_align(
args[0],
args[1],
self.output_sizes,
self.spatial_scale,
self.sampling_ratio,
)
in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))
in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))
in_batch = torch.zeros((35, 1), dtype=torch.float)
in_boxes = torch.cat([in_batch, in_boxes], dim=1)
    verify_model(ROIAlign(7), [in_data, in_boxes])
    verify_model(ROIAlign((10, 10), 0.7, 5), [in_data, in_boxes])
    verify_model(ROIAlign(15, 0.9, 3), [in_data, in_boxes])
@tvm.testing.uses_gpu
def test_conv3d():
for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
inp = torch.rand(ishape)
        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp)
verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)
# downsample
verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)
@tvm.testing.uses_gpu
def test_conv3d_transpose():
for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:
inp = torch.rand(ishape)
verify_model(
torch.nn.ConvTranspose3d(
in_channels=8, out_channels=33, kernel_size=3, stride=2
).eval(),
inp,
        )
verify_model(
torch.nn.ConvTranspose3d(
in_channels=8,
out_channels=20,
kernel_size=(3, 5, 2),
stride=(2, 1, 1),
padding=(0, 4, 2),
).eval(),
inp,
        )
verify_model(
torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp
)
verify_model(
torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),
inp,
)
# Model tests
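# Each test below loads the pretrained torchvision model of the same name and
# compares TVM's output against PyTorch within the given atol/rtol.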
@tvm.testing.uses_gpu
def test_resnet18():
torch.set_grad_enabled(False)
verify_model("resnet18", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_0():
torch.set_grad_enabled(False)
verify_model("squeezenet1_0", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_1():
torch.set_grad_enabled(False)
verify_model("squeezenet1_1", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_densenet121():
torch.set_grad_enabled(False)
verify_model("densenet121", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_inception_v3():
torch.set_grad_enabled(False)
verify_model("inception_v3", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_googlenet():
torch.set_grad_enabled(False)
verify_model("googlenet", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_mnasnet0_5():
torch.set_grad_enabled(False)
verify_model("mnasnet0_5", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_mobilenet_v2():
torch.set_grad_enabled(False)
verify_model("mobilenet_v2", atol=1e-4, rtol=1e-4)
"""
#TODO: Fix VGG and AlexNet issues (probably due to pooling)
@tvm.testing.uses_gpu
def test_alexnet():
torch.set_grad_enabled(False)
verify_model("alexnet")
@tvm.testing.uses_gpu
def test_vgg11():
torch.set_grad_enabled(False)
verify_model("vgg11")
@tvm.testing.uses_gpu
def test_vgg11_bn():
torch.set_grad_enabled(False)
verify_model("vgg11_bn")
"""
@tvm.testing.uses_gpu
def test_custom_conversion_map():
def get_roi_align():
pool_size = 5
n_channels = 2 * (pool_size ** 2)
x = torch.rand(2, n_channels, 10, 10)
rois = torch.tensor(
[
[0, 0, 0, 9, 9], # format is (xyxy)
[0, 0, 5, 4, 9],
[0, 5, 5, 9, 9],
[1, 0, 0, 9, 9],
],
dtype=torch.float,
)
roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)
return roi_align.eval(), [x, rois]
def convert_roi_align():
def _impl(inputs, input_types):
spatial_scale = inputs[2]
pooled_size = (inputs[3], inputs[4])
sampling_ratio = inputs[5]
return relay.op.vision.roi_align(
inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio
)
return _impl
custom_map = {"torchvision::roi_align": convert_roi_align()}
model, inputs = get_roi_align()
verify_model(model, inputs, custom_map)
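    # The custom map generalizes to any "<namespace>::<op>" key mapped to a converter
    # that returns a Relay expression. A minimal sketch, assuming one wanted to
    # override the stock aten::relu conversion (illustrative only):
    #
    #   def convert_relu():
    #       def _impl(inputs, input_types):
    #           return relay.nn.relu(inputs[0])
    #       return _impl
    #
    #   verify_model(model, inputs, {"aten::relu": convert_relu()})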
@tvm.testing.uses_gpu
def test_segmentation_models():
class SegmentationModelWrapper(Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, inp):
out = self.model(inp)
return out["out"]
fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
deeplab = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)
inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]
verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)
verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_3d_models():
input_shape = (1, 3, 4, 56, 56)
resnet3d = torchvision.models.video.r3d_18(pretrained=True).eval()
verify_model(resnet3d, [torch.rand(input_shape)], atol=1e-4, rtol=1e-4)
def _get_default_vm_targets():
return [tgt for (tgt, _) in tvm.testing.enabled_targets()]
def verify_script_model(pt_model, ishapes, targets):
script_module = torch.jit.script(pt_model)
verify_model_vm(script_module, ishapes, targets=targets)
def verify_trace_model(pt_model, idata, targets):
traced_model = torch.jit.trace(pt_model, idata)
ishapes = [data.shape for data in idata]
verify_model_vm(traced_model, ishapes, idata=idata, targets=targets)
def verify_model_vm(input_model, ishapes, idtype=torch.float, idata=None, targets=["llvm"]):
input_names = ["i{}".format(idx) for idx, ish in enumerate(ishapes)]
input_shapes = list(zip(input_names, ishapes))
input_data = idata if idata else [torch.randn(shape, dtype=idtype) for shape in ishapes]
# Compile via VM
mod, params = relay.frontend.from_pytorch(input_model, input_shapes)
for tgt in targets:
print("Running on target", tgt)
ctx = tvm.context(tgt, 0)
executor = relay.create_executor("vm", mod=mod, ctx=ctx, target=tgt)
evaluator = executor.evaluate()
# Inference
for name, inp in zip(input_names, input_data):
params[name] = inp.numpy()
vm_res = evaluator(**params)
# Baseline result
with torch.no_grad():
pt_result = input_model(*input_data)
# Verify the accuracy
if not isinstance(pt_result, torch.Tensor):
tvm_res = vm_res.asnumpy().item()
assert pt_result == tvm_res
else:
tvm.testing.assert_allclose(vm_res.asnumpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)
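# Usage sketch for the VM helpers above (module names and shapes are placeholders):
#   verify_script_model(MyControlFlowModule().eval(), [(10, 20)], _get_default_vm_targets())
#   verify_trace_model(MyModule().eval(), [torch.rand(10, 20)], ["llvm"])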
@tvm.testing.uses_gpu
def test_control_flow():
class SimpleIf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, inp):
if inp.sum() > 0.0:
output = self.weight + inp
else:
output = self.weight - inp
return output
class NestedIf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, inp):
if inp.sum() > 0.0:
if inp.mean() > 0.0:
output = self.weight + inp
else:
output = self.weight - inp
else:
if inp.mean() >= 0.0:
output = self.weight * inp
else:
output = self.weight / inp
return output
class ScalarLoop(torch.nn.Module):
def forward(self, inp):
a = 0
for i in range(inp.size(0)):
b = i * i
b = b + 1
a += b
if a != 0:
a += 1
else:
a += 2
return a
class SimpleLoop(torch.nn.Module):
def forward(self, inp):
a = inp
for i in range(inp.size(0)):
b = a * 2.0
c = a + b
a += c
return a
class LoopWithIf(torch.nn.Module):
def forward(self, inp):
a = inp
for i in range(inp.size(0)):
b = a * 2.0
b = a + b
if b.sum() > 0.0:
a += b
else:
a -= b
return a
class NestedLoop(torch.nn.Module):
def forward(self, inp):
a = inp
for i in range(inp.size(0)):
b = a * float(i)
for j in range(inp.size(1)):
a += b * float(j)
return a
class SimpleScalarWhileLoop(torch.nn.Module):
def forward(self, inp):
a = 1
i = 0
while i <= inp.size(0):
a += i
i += 2
i = 0
# also test constant init cond
while i < 10:
a += i
i += 3
return a
class SimpleWhileLoop(torch.nn.Module):
def forward(self, inp):
a = inp
i = 0
while i < inp.size(0):
a += a * float(i) * 2.0
i += 1
return a
models = [
SimpleIf(10, 20),
NestedIf(10, 20),
ScalarLoop(),
SimpleLoop(),
LoopWithIf(),
SimpleScalarWhileLoop(),
SimpleWhileLoop(),
NestedLoop(),
]
for pt_model in models:
verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_simple_rnn():
# The mixed tracing and scripting example from
# https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing
class DecisionGate(torch.nn.Module):
def forward(self, x):
if x.sum() > 0:
return x
else:
return -x
class Cell(torch.nn.Module):
def __init__(self, dg):
super(Cell, self).__init__()
self.dg = dg
self.linear = torch.nn.Linear(4, 4)
def forward(self, x, h):
new_h = torch.tanh(self.dg(self.linear(x)) + h)
return new_h, new_h
class RNNLoop(torch.nn.Module):
def __init__(self):
super().__init__()
x = torch.rand(10, 4, dtype=torch.float)
h = torch.rand(10, 4, dtype=torch.float)
self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))
def forward(self, xs):
h = torch.zeros(10, 4, dtype=torch.float)
y = torch.zeros(10, 4, dtype=torch.float)
for i in range(xs.size(0)):
y, h = self.cell(xs[i], h)
return y
verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_forward_reduce_sum():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ReduceSum1(Module):
def forward(self, *args):
return args[0].sum(1)
class ReduceSum2(Module):
def forward(self, *args):
return args[0].sum(dim=1, keepdim=False)
class ReduceSum3(Module):
def forward(self, *args):
return args[0].sum(dim=2, keepdim=True)
class ReduceSum4(Module):
def forward(self, *args):
return args[0].sum(dim=(2, 3), keepdim=True)
class ReduceSum5(Module):
def forward(self, *args):
return args[0].sum(dim=(2, 3), keepdim=False)
input_data = torch.rand(input_shape).float()
verify_model(ReduceSum1().float().eval(), input_data=input_data)
verify_model(ReduceSum2().float().eval(), input_data=input_data)
verify_model(ReduceSum3().float().eval(), input_data=input_data)
verify_model(ReduceSum4().float().eval(), input_data=input_data)
verify_model(ReduceSum5().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reduce_prod():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ReduceProd1(Module):
def forward(self, *args):
return args[0].prod(1)
class ReduceProd2(Module):
def forward(self, *args):
return args[0].prod(dim=1, keepdim=False)
class ReduceProd3(Module):
def forward(self, *args):
return args[0].prod(dim=2, keepdim=True)
input_data = torch.rand(input_shape).float()
verify_model(ReduceProd1().float().eval(), input_data=input_data)
verify_model(ReduceProd2().float().eval(), input_data=input_data)
verify_model(ReduceProd3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_argmin():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ArgMin1(Module):
def forward(self, *args):
return args[0].argmin(1)
class ArgMin2(Module):
def forward(self, *args):
return args[0].argmin(dim=1, keepdim=False)
class ArgMin3(Module):
def forward(self, *args):
return args[0].argmin(dim=2, keepdim=True)
input_data = torch.rand(input_shape).float()
verify_model(ArgMin1().float().eval(), input_data=input_data)
verify_model(ArgMin2().float().eval(), input_data=input_data)
verify_model(ArgMin3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_argmax():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ArgMax1(Module):
def forward(self, *args):
return args[0].argmax(1)
class ArgMax2(Module):
def forward(self, *args):
return args[0].argmax(dim=1, keepdim=False)
class ArgMax3(Module):
def forward(self, *args):
return args[0].argmax(dim=2, keepdim=True)
input_data = torch.rand(input_shape).float()
verify_model(ArgMax1().float().eval(), input_data=input_data)
verify_model(ArgMax2().float().eval(), input_data=input_data)
verify_model(ArgMax3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_std():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Std1(Module):
def forward(self, *args):
return args[0].std(1, unbiased=False)
class Std2(Module):
def forward(self, *args):
return args[0].std(dim=1, keepdim=False, unbiased=False)
class Std3(Module):
def forward(self, *args):
return args[0].std(dim=2, keepdim=True, unbiased=False)
class Std4(Module):
def forward(self, *args):
return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)
class Std5(Module):
def forward(self, *args):
return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)
class Std6(Module):
def forward(self, *args):
return args[0].std(unbiased=False)
class Std7(Module):
def forward(self, *args):
return args[0].std(dim=1, keepdim=False, unbiased=True)
class Std8(Module):
def forward(self, *args):
return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)
class Std9(Module):
def forward(self, *args):
return args[0].std(unbiased=True)
input_data = torch.rand(input_shape).float()
verify_model(Std1().float().eval(), input_data=input_data)
verify_model(Std2().float().eval(), input_data=input_data)
verify_model(Std3().float().eval(), input_data=input_data)
verify_model(Std4().float().eval(), input_data=input_data)
verify_model(Std5().float().eval(), input_data=input_data)
verify_model(Std6().float().eval(), input_data=input_data)
verify_model(Std7().float().eval(), input_data=input_data)
verify_model(Std8().float().eval(), input_data=input_data)
verify_model(Std9().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_variance():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Variance1(Module):
def forward(self, *args):
return args[0].var(1, unbiased=False)
class Variance2(Module):
def forward(self, *args):
return args[0].var(dim=1, keepdim=False, unbiased=False)
class Variance3(Module):
def forward(self, *args):
return args[0].var(dim=2, keepdim=True, unbiased=False)
class Variance4(Module):
def forward(self, *args):
return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)
class Variance5(Module):
def forward(self, *args):
return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)
class Variance6(Module):
def forward(self, *args):
return args[0].var(unbiased=False)
class Variance7(Module):
def forward(self, *args):
return args[0].var(dim=1, keepdim=False, unbiased=True)
class Variance8(Module):
def forward(self, *args):
return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)
class Variance9(Module):
def forward(self, *args):
return args[0].var(unbiased=True)
input_data = torch.rand(input_shape).float()
verify_model(Variance1().float().eval(), input_data=input_data)
verify_model(Variance2().float().eval(), input_data=input_data)
verify_model(Variance3().float().eval(), input_data=input_data)
verify_model(Variance4().float().eval(), input_data=input_data)
verify_model(Variance5().float().eval(), input_data=input_data)
verify_model(Variance6().float().eval(), input_data=input_data)
verify_model(Variance7().float().eval(), input_data=input_data)
verify_model(Variance8().float().eval(), input_data=input_data)
verify_model(Variance9().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_rsub():
torch.set_grad_enabled(False)
class Rsub1(Module):
def forward(self, *args):
return torch.rsub(args[0], args[1])
class Rsub2(Module):
def forward(self, *args):
return torch.rsub(args[0], args[1], alpha=0.5)
d1 = torch.rand([1, 3]).float()
d2 = torch.rand([1, 3]).float()
d3 = torch.rand([1, 3]).int()
verify_model(Rsub1().float().eval(), input_data=[d1, d2])
verify_model(Rsub1().float().eval(), input_data=[d1, d3])
verify_model(Rsub2().float().eval(), input_data=[d1, d2])
verify_model(Rsub2().float().eval(), input_data=[d1, d3])
@tvm.testing.uses_gpu
def test_forward_embedding():
torch.set_grad_enabled(False)
input_data = torch.randint(0, 10, [2, 4]).long()
verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)
input_data = torch.randint(0, 4, [2, 3, 4]).long()
verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)
input_data = torch.randint(0, 4, [2, 3, 4]).long()
verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_onehot():
torch.set_grad_enabled(False)
class OneHot1(Module):
def forward(self, *args):
return torch.nn.functional.one_hot(args[0], num_classes=3)
class OneHot2(Module):
def forward(self, *args):
return torch.nn.functional.one_hot(args[0], num_classes=5)
input_data = torch.arange(0, 5) % 3
verify_model(OneHot1().float().eval(), input_data=input_data)
input_data = torch.arange(0, 5) % 4
verify_model(OneHot2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isfinite():
torch.set_grad_enabled(False)
class IsFinite1(Module):
def forward(self, *args):
return torch.isfinite(args[0])
input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsFinite1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isnan():
torch.set_grad_enabled(False)
class IsNan1(Module):
def forward(self, *args):
return torch.isnan(args[0])
input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsNan1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isinf():
torch.set_grad_enabled(False)
class IsInf1(Module):
def forward(self, *args):
return torch.isinf(args[0])
input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsInf1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Clamp1(Module):
def forward(self, *args):
return torch.clamp(args[0], min=-0.5, max=0.5)
class Clamp2(Module):
def forward(self, *args):
return torch.clamp(args[0], min=-0.3)
class Clamp3(Module):
def forward(self, *args):
return torch.clamp(args[0], max=1.0)
input_data = torch.rand(input_shape).float()
verify_model(Clamp1().float().eval(), input_data=input_data)
verify_model(Clamp2().float().eval(), input_data=input_data)
verify_model(Clamp3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp_():
torch.set_grad_enabled(False)
class ClampInPlace(Module):
def __init__(self, min, max):
super(ClampInPlace, self).__init__()
self.min = min
self.max = max
def forward(self, *args):
return torch.clamp_(args[0], self.min, self.max)
for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
input_data = torch.rand(ishape).float()
verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_ones():
torch.set_grad_enabled(False)
class Ones1(Module):
def forward(self, *args):
return torch.ones(2, 3)
verify_model(Ones1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_ones_like():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class OnesLike1(Module):
def forward(self, *args):
return torch.ones_like(args[0])
class OnesLike2(Module):
def forward(self, *args):
return torch.ones_like(args[0], dtype=torch.int8)
class OnesLike3(Module):
def forward(self, *args):
return torch.ones_like(args[0], dtype=torch.float)
input_data = torch.rand(input_shape).float()
verify_model(OnesLike1().float().eval(), input_data=input_data)
verify_model(OnesLike2().float().eval(), input_data=input_data)
verify_model(OnesLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zeros():
torch.set_grad_enabled(False)
class Zeros1(Module):
def forward(self, *args):
return torch.zeros(2, 3)
verify_model(Zeros1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_zeros_like():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ZerosLike1(Module):
def forward(self, *args):
return torch.zeros_like(args[0])
class ZerosLike2(Module):
def forward(self, *args):
return torch.zeros_like(args[0], dtype=torch.int32)
class ZerosLike3(Module):
def forward(self, *args):
return torch.zeros_like(args[0], dtype=torch.float)
input_data = torch.rand(input_shape).float()
verify_model(ZerosLike1().float().eval(), input_data=input_data)
verify_model(ZerosLike2().float().eval(), input_data=input_data)
verify_model(ZerosLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_full():
torch.set_grad_enabled(False)
class Full1(Module):
def forward(self, *args):
return torch.full((2, 3), 3.14)
class Full2(Module):
def forward(self, *args):
return torch.full((1, 2, 3), 1.0, dtype=torch.int32)
verify_model(Full1().float().eval(), input_data=[])
verify_model(Full2().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_full_like():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class FullLike1(Module):
def forward(self, *args):
return torch.full_like(args[0], 3.14)
class FullLike2(Module):
def forward(self, *args):
return torch.full_like(args[0], 22.22, dtype=torch.int32)
class FullLike3(Module):
def forward(self, *args):
return torch.full_like(args[0], 1.4, dtype=torch.float)
input_data = torch.rand(input_shape).float()
verify_model(FullLike1().float().eval(), input_data=input_data)
verify_model(FullLike2().float().eval(), input_data=input_data)
verify_model(FullLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_linspace():
torch.set_grad_enabled(False)
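    # Linspace1 and Linspace7 omit `steps` and rely on the historical default of 100
    # (newer PyTorch releases require steps to be passed explicitly).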
class Linspace1(Module):
def forward(self, *args):
return torch.linspace(5, 10)
class Linspace2(Module):
def forward(self, *args):
return torch.linspace(-10, 10, steps=5)
class Linspace3(Module):
def forward(self, *args):
return torch.linspace(start=-10, end=10, steps=5)
class Linspace4(Module):
def forward(self, *args):
return torch.linspace(start=-10, end=10, steps=1)
class Linspace5(Module):
def forward(self, *args):
return torch.linspace(1, 2, 1, dtype=torch.int32)
class Linspace6(Module):
def forward(self, *args):
return torch.linspace(start=1, end=6, steps=2)
class Linspace7(Module):
def forward(self, *args):
return torch.linspace(1, 4, dtype=torch.float32)
class Linspace8(Module):
def forward(self, *args):
return torch.linspace(1, 2, 1, dtype=torch.int16)
verify_model(Linspace1().float().eval())
verify_model(Linspace2().float().eval())
verify_model(Linspace3().float().eval())
verify_model(Linspace4().float().eval())
verify_model(Linspace5().float().eval())
verify_model(Linspace6().float().eval())
verify_model(Linspace7().float().eval())
verify_model(Linspace8().float().eval())
@tvm.testing.uses_gpu
def test_forward_take():
torch.set_grad_enabled(False)
class Take1(Module):
def forward(self, *args):
indices = torch.tensor([[0, 0], [1, 0]])
if torch.cuda.is_available():
indices = indices.cuda()
return torch.take(args[0], indices)
class Take2(Module):
def forward(self, *args):
return torch.take(args[0], args[1])
input_data = torch.tensor([[1, 2], [3, 4]])
verify_model(Take1().float().eval(), input_data=input_data)
indices = torch.tensor([[0, 0], [1, 0]])
verify_model(Take2().float().eval(), input_data=[input_data, indices])
@tvm.testing.uses_gpu
def test_forward_topk():
torch.set_grad_enabled(False)
class Topk1(Module):
def forward(self, *args):
return torch.topk(args[0], k=3)
class Topk2(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, dim=-2)
class Topk3(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, dim=3)
class Topk4(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, largest=True)
class Topk5(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, largest=False)
class Topk6(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, sorted=True)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Topk1().float().eval(), input_data=input_data)
verify_model(Topk2().float().eval(), input_data=input_data)
verify_model(Topk3().float().eval(), input_data=input_data)
verify_model(Topk4().float().eval(), input_data=input_data)
verify_model(Topk5().float().eval(), input_data=input_data)
verify_model(Topk6().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_logical_not():
torch.set_grad_enabled(False)
class LogicalNot1(Module):
def forward(self, *args):
return torch.logical_not(args[0])
input_data = torch.tensor([True, False])
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_bitwise_not():
torch.set_grad_enabled(False)
class BitwiseNot1(Module):
def forward(self, *args):
return torch.bitwise_not(args[0])
input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([True, False])
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_bitwise_xor():
torch.set_grad_enabled(False)
class BitwiseXor1(Module):
def forward(self, *args):
return torch.bitwise_xor(args[0], args[1])
class BitwiseXor2(Module):
def forward(self, *args):
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
if torch.cuda.is_available():
rhs = rhs.cuda()
return torch.bitwise_xor(args[0], rhs)
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([True, True, False])
rhs = torch.tensor([False, True, False])
verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
verify_model(BitwiseXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_logical_xor():
torch.set_grad_enabled(False)
class LogicalXor1(Module):
def forward(self, *args):
return torch.logical_xor(args[0], args[1])
class LogicalXor2(Module):
def forward(self, *args):
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
if torch.cuda.is_available():
rhs = rhs.cuda()
return torch.logical_xor(args[0], rhs)
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([True, True, False])
rhs = torch.tensor([False, True, False])
verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
verify_model(LogicalXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_unary():
torch.set_grad_enabled(False)
class Sqrt1(Module):
def forward(self, *args):
return torch.sqrt(args[0])
class RSqrt1(Module):
def forward(self, *args):
return torch.rsqrt(args[0])
class Ceil1(Module):
def forward(self, *args):
return torch.ceil(args[0])
class Floor1(Module):
def forward(self, *args):
return torch.floor(args[0])
class Round1(Module):
def forward(self, *args):
return torch.round(args[0])
class Cos1(Module):
def forward(self, *args):
return torch.cos(args[0])
class Sin1(Module):
def forward(self, *args):
return torch.sin(args[0])
class Tan1(Module):
def forward(self, *args):
return torch.tan(args[0])
class Tanh1(Module):
def forward(self, *args):
return torch.tanh(args[0])
class Acos1(Module):
def forward(self, *args):
return torch.acos(args[0])
class Asin1(Module):
def forward(self, *args):
return torch.asin(args[0])
class Atan1(Module):
def forward(self, *args):
return torch.atan(args[0])
class Log1(Module):
def forward(self, *args):
return torch.log(args[0])
class Exp1(Module):
def forward(self, *args):
return torch.exp(args[0])
class Erf1(Module):
def forward(self, *args):
return torch.erf(args[0])
class Trunc1(Module):
def forward(self, *args):
return torch.trunc(args[0])
class Sign1(Module):
def forward(self, *args):
return torch.sign(args[0])
class Neg1(Module):
def forward(self, *args):
return torch.neg(args[0])
class Sinh1(Module):
def forward(self, *args):
return torch.sinh(args[0])
class Cosh1(Module):
def forward(self, *args):
return torch.cosh(args[0])
class Log2_1(Module):
def forward(self, *args):
return torch.log2(args[0])
class Log10_1(Module):
def forward(self, *args):
return torch.log10(args[0])
class Log1p_1(Module):
def forward(self, *args):
return torch.log1p(args[0])
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Sqrt1().float().eval(), input_data=input_data)
verify_model(RSqrt1().float().eval(), input_data=input_data)
verify_model(Ceil1().float().eval(), input_data=input_data)
verify_model(Floor1().float().eval(), input_data=input_data)
verify_model(Round1().float().eval(), input_data=input_data)
verify_model(Cos1().float().eval(), input_data=input_data)
verify_model(Cosh1().float().eval(), input_data=input_data)
verify_model(Sin1().float().eval(), input_data=input_data)
verify_model(Sinh1().float().eval(), input_data=input_data)
verify_model(Tan1().float().eval(), input_data=input_data)
verify_model(Tanh1().float().eval(), input_data=input_data)
verify_model(Acos1().float().eval(), input_data=input_data)
verify_model(Asin1().float().eval(), input_data=input_data)
verify_model(Atan1().float().eval(), input_data=input_data)
verify_model(Log1().float().eval(), input_data=input_data)
verify_model(Log2_1().float().eval(), input_data=input_data)
verify_model(Log10_1().float().eval(), input_data=input_data)
verify_model(Log1p_1().float().eval(), input_data=input_data)
verify_model(Exp1().float().eval(), input_data=input_data)
verify_model(Erf1().float().eval(), input_data=input_data)
verify_model(Trunc1().float().eval(), input_data=input_data)
verify_model(Sign1().float().eval(), input_data=input_data)
verify_model(Neg1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_where():
torch.set_grad_enabled(False)
class Where1(Module):
def forward(self, *args):
y = torch.ones([3, 2])
if torch.cuda.is_available():
y = y.cuda()
return torch.where(args[0] > 0, args[0], y)
class Where2(Module):
def forward(self, *args):
return torch.where(args[0] > 0, args[0], args[1])
class Where3(Module):
def forward(self, *args):
return torch.where(args[0])[0]
x = torch.rand([3, 2]).float()
verify_model(Where1(), input_data=[x])
y = torch.rand([3, 2])
verify_model(Where2(), input_data=[x, y])
# a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)
inp = torch.rand([10])
inp[3:8] = 0
verify_trace_model(Where3(), [inp], ["llvm"])
@tvm.testing.uses_gpu
def test_forward_addcdiv():
torch.set_grad_enabled(False)
class Addcdiv1(Module):
def forward(self, *args):
t1 = torch.ones([3, 1])
t2 = torch.ones([1, 3])
if torch.cuda.is_available():
t1 = t1.cuda()
t2 = t2.cuda()
return torch.addcdiv(args[0], 0.1, t1, t2)
class Addcdiv2(Module):
def forward(self, *args):
return torch.addcdiv(args[0], 0.5, args[1], args[2])
input_data = torch.rand([1, 3]).float()
verify_model(Addcdiv1().float().eval(), input_data=input_data)
t1 = torch.rand([3, 1]).float()
t2 = torch.rand([1, 3]).float()
verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_addcmul():
torch.set_grad_enabled(False)
class Addcmul1(Module):
def forward(self, *args):
t1 = torch.ones([3, 1])
t2 = torch.ones([1, 3])
if torch.cuda.is_available():
t1 = t1.cuda()
t2 = t2.cuda()
return torch.addcmul(args[0], 0.1, t1, t2)
class Addcmul2(Module):
def forward(self, *args):
return torch.addcmul(args[0], 0.5, args[1], args[2])
input_data = torch.rand([1, 3]).float()
verify_model(Addcmul1().float().eval(), input_data=input_data)
t1 = torch.rand([3, 1]).float()
t2 = torch.rand([1, 3]).float()
verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_true_divide():
if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
return
torch.set_grad_enabled(False)
class TrueDivide(Module):
def forward(self, *args):
return torch.true_divide(args[0], args[1])
dividend = torch.rand([5, 3]).float()
# divisor could be either tensor or scalar
divisor_tensor = torch.rand([5, 3]).float() + 0.5
divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
verify_model(
TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
)
verify_model(
TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
)
@tvm.testing.uses_gpu
def test_forward_traced_function():
def fn(t1, t2):
return t1 + t2
tensor1 = torch.randn(3, 4)
tensor2 = torch.randn(3, 4)
verify_model(fn, input_data=[tensor1, tensor2])
@tvm.testing.uses_gpu
def test_forward_dtypes():
def fn(t1, t2):
return 2.5 * t1 + t2
for dt in [torch.int32, torch.int64, torch.double]:
tensor1 = torch.randn(3, 4).to(dtype=dt)
tensor2 = torch.randn(3, 4).to(dtype=dt)
verify_model(fn, input_data=[tensor1, tensor2])
class ModuleWithIntParameters(Module):
def __init__(self, arr):
super().__init__()
self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)
def forward(self, x):
return x.long() + self.param
shape = (10, 10)
param = torch.ones(shape, dtype=torch.long)
inp = torch.ones(shape, dtype=torch.int)
verify_model(ModuleWithIntParameters(param), input_data=inp)
@tvm.testing.uses_gpu
def test_weight_names():
tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])
mod, params = relay.frontend.from_pytorch(tm, [("input", (2, 3))])
assert set(params.keys()) == set(n for n, p in tm.named_parameters())
@tvm.testing.uses_gpu
def test_duplicate_weight_use():
    # This test case doesn't make sense as a neural network;
    # the issue popped up in the shared input/output embeddings of BERT,
    # but this reproduces it more quickly
class Test(Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(5, 3)
def forward(self, x):
x = self.lin(x)
x = x @ self.lin.weight
return x
verify_model(Test(), input_data=[torch.randn(5, 5)])
@tvm.testing.uses_gpu
def test_forward_matmul():
torch.set_grad_enabled(False)
class MatMul1(Module):
def forward(self, *args):
return torch.matmul(args[0], args[1])
# matrix x vector
tensor1 = torch.randn(3, 4)
tensor2 = torch.randn(4)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
# matrix x matrix
tensor1 = torch.randn(10, 4)
tensor2 = torch.randn(4, 10)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
# batched matrix x batched matrix
tensor1 = torch.randn(10, 3, 4)
tensor2 = torch.randn(10, 4, 5)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
# batched matrix x broadcasted matrix
tensor1 = torch.randn(10, 3, 4)
tensor2 = torch.randn(4, 5)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # batched matrix x batched matrix (4-D batch dims)
tensor1 = torch.randn(1, 12, 14, 64)
tensor2 = torch.randn(1, 12, 64, 14)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
def test_forward_index():
torch.set_grad_enabled(False)
input_shape = [3, 4, 5, 6]
class Index0(Module):
def forward(self, x):
return x[[0, 1], [0, 2], :2, 4]
input_data = torch.rand(input_shape).float()
verify_model(Index0().eval(), input_data=input_data)
class Index1(Module):
def forward(self, x):
return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]
input_data = torch.rand(input_shape).float()
verify_model(Index1().eval(), input_data=input_data)
def test_logsumexp():
class Logsumexp(Module):
def __init__(self, dim, keepdim=False):
super().__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, x):
return torch.logsumexp(x, self.dim, self.keepdim)
input_shape = (100, 100)
input_data = torch.rand(input_shape)
verify_model(Logsumexp(0), input_data=input_data)
verify_model(Logsumexp(0, keepdim=True), input_data=input_data)
# Also test on double
verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())
def test_stack():
class Stack(torch.nn.Module):
def __init__(self, axis=0):
super().__init__()
self.axis = axis
def forward(self, x):
return torch.stack((x, x), dim=self.axis)
inp = torch.randn(8, 8, 8)
verify_model(Stack(), input_data=inp)
verify_model(Stack(axis=-1), input_data=inp)
verify_model(Stack(axis=3), input_data=inp)
verify_model(Stack(axis=-4), input_data=inp)
def test_stack_dynamic():
class Stack(torch.nn.Module):
def forward(self, x):
tensor_list = []
for i in range(x.size(0)):
# this is a workaround to avoid generating impure aten::append op
tensor_list += [x[i]]
# relay tensor array only supports stacking on the first axis
return torch.stack(tensor_list, dim=0)
verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())
def test_forward_unbind():
class Unbind(torch.nn.Module):
def __init__(self, axis=0):
super().__init__()
self.axis = axis
def forward(self, x):
return torch.unbind(x, self.axis)
inp = torch.randn(8, 8, 8)
verify_model(Unbind(0), input_data=inp)
verify_model(Unbind(1), input_data=inp)
verify_model(Unbind(2), input_data=inp)
def test_forward_nonzero():
class Nonzero(Module):
def __init__(self, as_tuple=False):
super().__init__()
self.as_tuple = as_tuple
def forward(self, data):
return torch.nonzero(data, as_tuple=self.as_tuple)
inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype("float32"))
verify_trace_model(Nonzero(), [inp], ["llvm"])
def test_forward_scatter():
class Scatter(Module):
def __init__(self, dim=0):
super().__init__()
self.dim = dim
def forward(self, data, index, src):
return torch.scatter(data, dim=self.dim, index=index, src=src)
in_data = torch.zeros(3, 5)
in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
in_src = torch.rand(2, 5)
# TODO: add scatter gpu schedule to enable gpu test.
verify_trace_model(Scatter(), [in_data, in_index, in_src], ["llvm"])
in_data = torch.zeros(2, 4)
in_index = torch.tensor([[2], [3]])
in_src = torch.rand(2, 1)
# TODO: add scatter gpu schedule to enable gpu test.
verify_trace_model(Scatter(1), [in_data, in_index, in_src], ["llvm"])
def test_numel():
class Numel(Module):
def forward(self, data):
return torch.tensor(torch.numel(data))
targets = _get_default_vm_targets()
verify_script_model(Numel(), [(1,)], targets)
verify_script_model(Numel(), [(3, 5)], targets)
verify_script_model(Numel(), [(3, 5, 8)], targets)
def test_forward_pretrained_bert_base_uncased():
######################################################################
    # This is an example of how to run BERT models using TVM
# ---------------------------------------------------
"""
    Refer to the BERT example given in https://pypi.org/project/pytorch-pretrained-bert
    # To get started, the pretrained BERT package needs to be installed as a prerequisite.
.. code-block:: bash
# install bert package
pip install pytorch_pretrained_bert==0.6.2 --user
"""
try:
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
except:
print("Torch pretrained bert package must be installed to run this script.")
return
######################################################################
# Load the tokenizer and tokenize the input
# -----------------------------------------
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Tokenized input
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = tokenizer.tokenize(text)
# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
assert tokenized_text == [
"[CLS]",
"who",
"was",
"jim",
"henson",
"?",
"[SEP]",
"jim",
"[MASK]",
"was",
"a",
"puppet",
"##eer",
"[SEP]",
]
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Define sentence A and B indices associated with the 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
######################################################################
# Load a pretrained PyTorch model bert-base-uncased
# -------------------------------------------------
# Bert Model with a language modeling
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()
######################################################################
# Predict all tokens with pytorch
# -------------------------------
with torch.no_grad():
torch_preds = model(tokens_tensor, segments_tensors)
######################################################################
# Make TorchScripted model via jit trace
# --------------------------------------
scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()
######################################################################
# Import the graph to Relay
# -------------------------
# Convert PyTorch graph to Relay graph. The input name can be arbitrary.
input_1 = "input_ids"
input_2 = "input.2"
shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
######################################################################
# Compile the model with relay
# ----------------------------
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
######################################################################
# Execute on TVM
# --------------
ctx = tvm.context(target, 0)
relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
relay_model.set_input(**relay_params)
relay_model.set_input(input_1, tokens_tensor)
relay_model.set_input(input_2, segments_tensors)
relay_model.run()
compiled_output = relay_model.get_output(0).asnumpy()
######################################################################
# Validate the outputs
# --------------------
# Compare the torch and tvm outputs
tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)
######################################################################
# Process the output
# ------------------
    # Convert the model output to tokens.
# Torch output to token
torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()
torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]
# TVM output to token
tvm_pred_idx = compiled_output[0, masked_index].argmax()
tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]
assert torch_pred_idx == tvm_pred_idx
assert torch_pred_token == tvm_pred_token
# Print the outputs
print("Torch top-1 id: {}, token: {}".format(torch_pred_idx, torch_pred_token))
print("TVM top-1 id: {}, token: {}".format(tvm_pred_idx, tvm_pred_token))
def test_convert_torch_script_with_input_types():
def model_fn(x, y):
x = x.to(dtype=torch.int32)
y = x + y
return y
ishape = (4, 5)
input_x = torch.rand(ishape, dtype=torch.float32)
input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)
inputs = [input_x, input_y]
script_module = torch.jit.trace(model_fn, inputs)
fname = "tmp.pt"
torch.jit.save(script_module, fname)
loaded = torch.jit.load(fname)
os.remove(fname)
verify_model(loaded.eval(), input_data=inputs)
def expected(x_shape, y_shape):
        # use a fixed order of args so the alpha-equality check can pass
x = relay.var("x", shape=x_shape, dtype="float32")
y = relay.var("y", shape=y_shape, dtype="int32")
args = [x, y]
x1 = relay.cast(x, "int32")
y1 = relay.add(x1, y)
mod = tvm.IRModule.from_expr(relay.Function(args, y1))
return mod["main"]
input_infos = [("input0", (ishape, "float")), ("input1", (ishape, "int"))]
mod, params = relay.frontend.from_pytorch(loaded, input_infos)
expected_mod = expected(ishape, ishape)
assert tvm.ir.structural_equal(expected_mod, mod["main"], map_free_vars=True)
if __name__ == "__main__":
# some structural tests
test_forward_traced_function()
test_forward_dtypes()
test_weight_names()
test_duplicate_weight_use()
# Single operator tests
test_forward_pixel_shuffle()
test_forward_add()
test_forward_subtract()
test_forward_multiply()
test_forward_matmul()
test_forward_rsub()
test_forward_onehot()
test_forward_embedding()
test_forward_reshape()
test_forward_reciprocal()
test_forward_repeat()
test_forward_repeat_interleave()
test_forward_squeeze()
test_forward_unsqueeze()
test_forward_concatenate()
test_forward_reduce_sum()
test_forward_reduce_prod()
test_forward_argmin()
test_forward_argmax()
test_forward_norm()
test_forward_frobenius_norm()
test_forward_std()
test_forward_variance()
test_forward_relu()
test_forward_prelu()
test_forward_leakyrelu()
test_forward_elu()
test_forward_celu()
test_forward_gelu()
test_forward_selu()
test_forward_log_sigmoid()
test_forward_adaptiveavgpool()
test_forward_maxpool2d()
test_forward_maxpool1d()
test_forward_maxpool3d()
test_forward_hardtanh()
test_forward_conv()
test_forward_conv_transpose()
test_forward_threshold()
test_forward_contiguous()
test_forward_batchnorm()
test_forward_instancenorm()
test_forward_layernorm()
test_forward_groupnorm()
test_forward_transpose()
test_forward_size()
test_forward_view()
test_forward_select()
test_forward_take()
test_forward_topk()
test_forward_where()
test_forward_addcdiv()
test_forward_addcmul()
test_forward_true_divide()
test_forward_clone()
test_forward_softplus()
test_forward_softsign()
test_forward_logsoftmax()
test_forward_sigmoid()
test_forward_dense()
test_forward_avgpool()
test_forward_avgpool3d()
test_forward_dropout()
test_forward_slice()
test_forward_mean()
test_forward_expand()
test_forward_pow()
test_forward_unary()
test_forward_clamp()
test_forward_clamp_()
test_forward_logical_not()
test_forward_bitwise_not()
test_forward_bitwise_xor()
test_forward_logical_xor()
test_forward_isfinite()
test_forward_isnan()
test_forward_isinf()
test_forward_ones()
test_forward_ones_like()
test_forward_zeros()
test_forward_zeros_like()
test_forward_full()
test_forward_full_like()
test_forward_linspace()
test_forward_arange()
test_forward_mesh_grid()
test_forward_chunk()
test_forward_split()
test_forward_gather()
test_upsample()
test_forward_upsample3d()
test_forward_nms()
test_forward_roi_align()
test_to()
test_flatten()
test_type_as()
test_forward_functional_pad()
test_forward_zero_pad2d()
test_forward_constant_pad1d()
test_forward_constant_pad2d()
test_forward_constant_pad3d()
test_forward_reflection_pad1d()
test_forward_reflection_pad2d()
test_forward_replication_pad1d()
test_forward_replication_pad2d()
test_forward_replication_pad3d()
test_adaptive_pool3d()
test_conv3d()
test_conv3d_transpose()
test_forward_index()
test_min_max()
test_logsumexp()
test_stack()
test_stack_dynamic()
test_forward_unbind()
test_forward_nonzero()
test_forward_scatter()
test_numel()
# Model tests
test_resnet18()
test_squeezenet1_0()
test_squeezenet1_1()
test_densenet121()
# disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug
# See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756
# test_inception_v3()
test_googlenet()
test_mnasnet0_5()
test_mobilenet_v2()
test_custom_conversion_map()
test_segmentaton_models()
test_3d_models()
# Quantization test
from qnn_test import test_quantized_imagenet, test_quantized_modules
test_quantized_modules()
test_quantized_imagenet()
# Test simple conditionals and loop
test_control_flow()
test_simple_rnn()
# More complex recurrent models
from test_lstm import test_custom_lstm
test_custom_lstm()
# Test bert model
test_forward_pretrained_bert_base_uncased()
    # Test converting TorchScript (JIT) models with specific input types
test_convert_torch_script_with_input_types()
| apache-2.0 | 966,044,548,304,771,500 | 31.286121 | 115 | 0.601552 | false |
feranick/SpectralMachine | Archive/SpectraKeras/20180926a/SpectraKeras_MLP.py | 1 | 7841 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
* SpectraKeras - MLP
* 20180926a
* Uses: Keras, TensorFlow
* By: Nicola Ferralis <[email protected]>
***********************************************************
'''
print(__doc__)
import numpy as np
import sys, os.path, time, pydot, graphviz, pickle, h5py
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import tensorflow as tf
#import keras # pure keras
import tensorflow.keras as keras #tf.keras
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
#************************************
''' Parameters '''
#************************************
class dP:
l_rate = 0.001
l_rdecay = 1e-4
HL=[10,20,30]
drop = 0
l2 = 1e-4
epochs = 100
cv_split = 0.01
#batch_size = A.shape[0]
batch_size = 512
plotWeightsFlag = False
#************************************
''' Parameters '''
#************************************
def main():
    start_time = time.perf_counter()  # time.clock() was deprecated and removed in Python 3.8
learnFile = sys.argv[1]
En, A, Cl = readLearnFile(learnFile)
learnFileRoot = os.path.splitext(learnFile)[0]
tb_directory = "keras_MLP"
model_directory = "."
model_name = model_directory+"/keras_model.hd5"
model_le = model_directory+"/keras_le.pkl"
#totA = np.vstack((A, A_test))
#totCl = np.append(Cl, Cl_test)
totA = A
totCl = Cl
numTotClasses = np.unique(totCl).size
le = preprocessing.LabelEncoder()
totCl2 = le.fit_transform(totCl)
Cl2 = le.transform(Cl)
print(" Total number of points per data:",En.size)
print(" Total number of classes:",numTotClasses)
#Cl2_test = le.transform(Cl_test)
print("\n Label Encoder saved in:", model_le,"\n")
    with open(model_le, 'wb') as f:  # overwrite ('wb'): appending ('ab') would leave a stale encoder as the first pickle in the file
f.write(pickle.dumps(le))
totCl2 = keras.utils.to_categorical(totCl2, num_classes=np.unique(totCl).size)
Cl2 = keras.utils.to_categorical(Cl2, num_classes=np.unique(Cl).size+1)
#Cl2_test = keras.utils.to_categorical(Cl2_test, num_classes=np.unique(Cl).size+1)
### Build model
model = keras.models.Sequential()
for i in range(len(dP.HL)):
model.add(keras.layers.Dense(dP.HL[i],
activation = 'relu',
input_dim=A.shape[1],
kernel_regularizer=keras.regularizers.l2(dP.l2)))
model.add(keras.layers.Dropout(dP.drop))
model.add(keras.layers.Dense(np.unique(Cl).size+1, activation = 'softmax'))
#optim = opt.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
optim = keras.optimizers.Adam(lr=dP.l_rate, beta_1=0.9,
beta_2=0.999, epsilon=1e-08,
decay=dP.l_rdecay,
amsgrad=False)
model.compile(loss='categorical_crossentropy',
optimizer=optim,
metrics=['accuracy'])
tbLog = keras.callbacks.TensorBoard(log_dir=tb_directory, histogram_freq=0, batch_size=dP.batch_size,
write_graph=True, write_grads=True, write_images=True,)
tbLogs = [tbLog]
log = model.fit(A, Cl2,
epochs=dP.epochs,
batch_size=dP.batch_size,
callbacks = tbLogs,
verbose=2,
validation_split=dP.cv_split)
accuracy = np.asarray(log.history['acc'])
loss = np.asarray(log.history['loss'])
val_loss = np.asarray(log.history['val_loss'])
val_acc = np.asarray(log.history['val_acc'])
#score = model.evaluate(A_test, Cl2_test, batch_size=A.shape[1])
model.save(model_name)
keras.utils.plot_model(model, to_file=model_directory+'/keras_MLP_model.png', show_shapes=True)
print('\n =============================================')
print(' \033[1mKeras MLP\033[0m - Model Configuration')
print(' =============================================')
print("\n Training set file:",learnFile)
print("\n Data size:", A.shape,"\n")
for conf in model.get_config():
print(conf,"\n")
printParam()
print('\n ==========================================')
print(' \033[1mKeras MLP\033[0m - Training Summary')
print(' ==========================================')
print("\n Accuracy - Average: {0:.2f}%; Max: {1:.2f}%".format(100*np.average(accuracy), 100*np.amax(accuracy)))
print(" Loss - Average: {0:.4f}; Min: {1:.4f}".format(np.average(loss), np.amin(loss)))
print('\n\n ==========================================')
print(' \033[1mKeras MLP\033[0m - Validation Summary')
print(' ==========================================')
print("\n Accuracy - Average: {0:.2f}%; Max: {1:.2f}%".format(100*np.average(val_acc), 100*np.amax(val_acc)))
print(" Loss - Average: {0:.4f}; Min: {1:.4f}\n".format(np.average(val_loss), np.amin(val_loss)))
#print("\n Validation - Loss: {0:.2f}; accuracy: {1:.2f}%".format(score[0], 100*score[1]))
print(' =========================================\n')
if dP.plotWeightsFlag == True:
plotWeights(En, A, model)
    total_time = time.perf_counter() - start_time
print(" Total time: {0:.1f}s or {1:.1f}m or {2:.1f}h".format(total_time,
total_time/60, total_time/3600),"\n")
#************************************
''' Open Learning Data '''
#************************************
def readLearnFile(learnFile):
print(" Opening learning file: "+learnFile+"\n")
try:
if os.path.splitext(learnFile)[1] == ".npy":
M = np.load(learnFile)
elif os.path.splitext(learnFile)[1] == ".h5":
with h5py.File(learnFile, 'r') as hf:
M = hf["M"][:]
else:
with open(learnFile, 'r') as f:
M = np.loadtxt(f, unpack =False)
except:
print("\033[1m" + " Learning file not found \n" + "\033[0m")
return
En = M[0,1:]
A = M[1:,1:]
Cl = M[1:,0]
return En, A, Cl
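# Assumed data layout (inferred from the slicing above, stated here for clarity):
# row 0 holds the energy axis En after one leading placeholder cell; every later
# row is one spectrum, with its class label in column 0 and intensities in 1..N.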
#************************************
''' Print NN Info '''
#************************************
def printParam():
print('\n ================================================')
print(' \033[1mKeras MLP\033[0m - Parameters')
print(' ================================================')
print(' Optimizer:','Adam',
'\n Hidden layers:', dP.HL,
'\n Activation function:','relu',
'\n L2:',dP.l2,
'\n Dropout:', dP.drop,
'\n Learning rate:', dP.l_rate,
'\n Learning decay rate:', dP.l_rdecay)
#if kerasDef.fullBatch == True:
# print(' Full batch size: {0:d} spectra, {1:.3f} Mb'.format(A.shape[0],(1e-6*A.size*A.itemsize)))
#else:
print(' Batch size:', dP.batch_size)
#print(' ================================================\n')
#************************************
''' Plot Weights '''
#************************************
def plotWeights(En, A, model):
import matplotlib.pyplot as plt
plt.figure(tight_layout=True)
plotInd = 511
for layer in model.layers:
try:
w_layer = layer.get_weights()[0]
ax = plt.subplot(plotInd)
newX = np.arange(En[0], En[-1], (En[-1]-En[0])/w_layer.shape[0])
plt.plot(En, np.interp(En, newX, w_layer[:,0]), label=layer.get_config()['name'])
plt.legend(loc='upper right')
plt.setp(ax.get_xticklabels(), visible=False)
plotInd +=1
except:
pass
ax1 = plt.subplot(plotInd)
ax1.plot(En, A[0], label='Sample data')
plt.xlabel('Raman shift [1/cm]')
plt.legend(loc='upper right')
plt.savefig('keras_MLP_weights' + '.png', dpi = 160, format = 'png') # Save plot
#************************************
''' Main initialization routine '''
#************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | 638,412,869,959,671,800 | 34.640909 | 116 | 0.508098 | false |
CivicKnowledge/ambry | ambry/orm/__init__.py | 1 | 13456 | """Object-Rlational Mapping classess, based on Sqlalchemy, for representing the
dataset, partitions, configuration, tables and columns.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
__docformat__ = 'restructuredtext en'
import json
from six import string_types, iteritems
import sqlalchemy
from sqlalchemy import BigInteger
from sqlalchemy import Text
from sqlalchemy.types import TypeDecorator, TEXT, UserDefinedType
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.dialects import postgresql, mysql, sqlite
from sqlalchemy import func
Base = declarative_base()
from sqlalchemy.dialects import registry
registry.register('spatialite', 'ambry.orm.dialects.spatialite', 'SpatialiteDialect')
registry.register('postgis', 'ambry.orm.dialects.postgis', 'PostgisDialect')
# http://stackoverflow.com/a/23175518/1144479
# SQLAlchemy does not map BigInt to Int by default on the sqlite dialect.
# It should, but it doesn't.
BigIntegerType = BigInteger()
BigIntegerType = BigIntegerType.with_variant(postgresql.BIGINT(), 'postgresql')
BigIntegerType = BigIntegerType.with_variant(mysql.BIGINT(), 'mysql')
BigIntegerType = BigIntegerType.with_variant(sqlite.INTEGER(), 'sqlite')
class Geometry(UserDefinedType):
"""Geometry type, to ensure that WKT text is properly inserted into the
database with the GeomFromText() function.
NOTE! This is paired with code in
database.relational.RelationalDatabase.table() to convert NUMERIC
    fields that have the name 'geometry' to GEOMETRY types. SQLAlchemy
    sees Spatialite GEOMETRY types as NUMERIC
"""
DEFAULT_SRS = 4326
def get_col_spec(self):
return "GEOMETRY"
def bind_expression(self, bindvalue):
return func.ST_GeomFromText(bindvalue, self.DEFAULT_SRS, type_=self)
def column_expression(self, col):
return func.ST_AsText(col, type_=self)
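# Illustrative usage sketch (hypothetical table, not part of this module):
#   from sqlalchemy import Table, Column, MetaData
#   features = Table('features', MetaData(), Column('geometry', GeometryType))
# WKT bound to such a column is wrapped in ST_GeomFromText(..., 4326) on insert
# and read back through ST_AsText(...) on select, per the two methods above.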
class SpatialiteGeometry(Geometry):
def get_col_spec(self):
return "BLOB"
GeometryType = Geometry()
GeometryType = GeometryType.with_variant(SpatialiteGeometry(), 'spatialite')
GeometryType = GeometryType.with_variant(Text(), 'sqlite') # Just write the WKT through
GeometryType = GeometryType.with_variant(Text(), 'postgresql')
def table_convert_geometry(metadata, table_name):
"""Get table metadata from the database."""
from sqlalchemy import Table
from ..orm import Geometry
table = Table(table_name, metadata, autoload=True)
for c in table.columns:
        # HACK! SQLAlchemy sees Spatialite GEOMETRY types
        # as NUMERIC
if c.name == 'geometry':
c.type = Geometry # What about variants?
return table
class JSONEncoder(json.JSONEncoder):
"""A JSON encoder that turns unknown objets into a string representation of
the type."""
def default(self, o):
try:
return o.dict
except AttributeError:
return str(type(o))
class JSONEncodedObj(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = TEXT
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value, cls=JSONEncoder)
else:
value = '{}'
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
else:
value = {}
return value
class MutationObj(Mutable):
@classmethod
def coerce(cls, key, value):
if isinstance(value, dict) and not isinstance(value, MutationDict):
return MutationDict.coerce(key, value)
if isinstance(value, list) and not isinstance(value, MutationList):
return MutationList.coerce(key, value)
return value
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
def load(state, *args):
val = state.dict.get(key, None)
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
if isinstance(val, cls):
val._parents[state.obj()] = key
def set(target, value, oldvalue, initiator):
if not isinstance(value, cls):
value = cls.coerce(key, value)
if isinstance(value, cls):
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if isinstance(val, cls):
if 'ext.mutable.values' not in state_dict:
state_dict['ext.mutable.values'] = []
state_dict['ext.mutable.values'].append(val)
def unpickle(state, state_dict):
if 'ext.mutable.values' in state_dict:
for val in state_dict['ext.mutable.values']:
val._parents[state.obj()] = key
sqlalchemy.event.listen(parent_cls,'load',load,raw=True,propagate=True)
sqlalchemy.event.listen(parent_cls,'refresh',load,raw=True,propagate=True)
sqlalchemy.event.listen(attribute,'set',set,raw=True,retval=True,propagate=True)
sqlalchemy.event.listen(parent_cls,'pickle',pickle,raw=True,propagate=True)
sqlalchemy.event.listen(parent_cls,'unpickle',unpickle,raw=True,propagate=True)
class MutationDict(Mutable, dict):
@classmethod
def coerce(cls, key, value): # @ReservedAssignment
"""Convert plain dictionaries to MutationDict."""
if not isinstance(value, MutationDict):
if isinstance(value, dict):
return MutationDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
class MutationList(MutationObj, list):
@classmethod
def coerce(cls, key, value):
"""Convert plain list to MutationList."""
if isinstance(value, string_types):
value = value.strip()
if value[0] == '[': # It's json encoded, probably
try:
value = json.loads(value)
except ValueError:
raise ValueError("Failed to parse JSON: '{}' ".format(value))
else:
value = value.split(',')
if not value:
value = []
self = MutationList((MutationObj.coerce(key, v) for v in value))
self._key = key
return self
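    # coerce examples (illustrative): '[1, 2]' -> MutationList([1, 2]) via the JSON
    # branch; 'a,b' -> MutationList(['a', 'b']) via split; None -> MutationList([]).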
def __setitem__(self, idx, value):
list.__setitem__(self, idx, MutationObj.coerce(self._key, value))
self.changed()
def __setslice__(self, start, stop, values):
        list.__setslice__(self, start, stop, (MutationObj.coerce(self._key, v) for v in values))
self.changed()
def __delitem__(self, idx):
list.__delitem__(self, idx)
self.changed()
def __delslice__(self, start, stop):
list.__delslice__(self, start, stop)
self.changed()
def append(self, value):
list.append(self, MutationObj.coerce(self._key, value))
self.changed()
def insert(self, idx, value):
list.insert(self, idx, MutationObj.coerce(self._key, value))
self.changed()
def extend(self, values):
list.extend(self, (MutationObj.coerce(self._key, v) for v in values))
self.changed()
def pop(self, *args, **kw):
value = list.pop(self, *args, **kw)
self.changed()
return value
def remove(self, value):
list.remove(self, value)
self.changed()
def JSONAlchemy(sqltype):
"""A type to encode/decode JSON on the fly.
sqltype is the string type for the underlying DB column.
You can use it like:
Column(JSONAlchemy(Text(600)))
"""
class _JSONEncodedObj(JSONEncodedObj):
impl = sqltype
return MutationObj.as_mutable(_JSONEncodedObj)
class SavableMixin(object):
def save(self):
self.session.commit()
class DataPropertyMixin(object):
"""A Mixin for appending a value into a list in the data field."""
def _append_string_to_list(self, sub_prop, value):
""""""
        if sub_prop not in self.data:
self.data[sub_prop] = []
        if value and value not in self.data[sub_prop]:
self.data[sub_prop] = self.data[sub_prop] + [value]
class LoadPropertiesMixin(object):
def load_properties(self, args, kwargs):
for p in self.__mapper__.attrs:
if p.key in kwargs:
setattr(self, p.key, kwargs[p.key])
del kwargs[p.key]
if self.data:
self.data.update(kwargs)
# Sould have things derived from this, once there are test cases for it.
class DictableMixin(object):
def set_attributes(self, **kwargs):
for k, v in iteritems(kwargs):
setattr(self, k, v)
@property
def record_dict(self):
return {p.key: getattr(self, p.key) for p in self.__mapper__.attrs}
@property
def dict(self):
d = self.record_dict
# Move the values in the data attribute into the top level.
if 'data' in d and d['data']:
for k in self.data:
assert k not in d # Data items can't overlap attributes
d[k] = self.data[k]
return d
def _clean_flag(in_flag):
if in_flag is None or in_flag == '0':
return False
return bool(in_flag)
# DEPRECATED
# The two remaining uses of this should be replaced with dataset.next_sequence_id
def next_sequence_id(session, sequence_ids, parent_vid, table_class, force_query = False):
"""
    Return the next sequence id for an object, identified by the vid of the parent object and the database prefix
    for the child object. On the first call, this will load the max sequence number
    from the database, but subsequent calls run in process, so this isn't suitable for
    multi-process operation -- all of the tables in a dataset should be created by one process.
The child table must have a sequence_id value.
:param session: Database session or connection ( must have an execute() method )
:param sequence_ids: A dict for caching sequence ids
:param parent_vid: The VID of the parent object, which sets the namespace for the sequence
:param table_class: Table class of the child object, the one getting a number
:return:
"""
from sqlalchemy import text
seq_col = table_class.sequence_id.property.columns[0].name
try:
parent_col = table_class._parent_col
except AttributeError:
parent_col = table_class.d_vid.property.columns[0].name
assert bool(parent_vid)
key = (parent_vid, table_class.__name__)
number = sequence_ids.get(key, None)
if (not number and session) or force_query:
sql = text("SELECT max({seq_col})+1 FROM {table} WHERE {parent_col} = '{vid}'"
.format(table=table_class.__tablename__, parent_col=parent_col,
seq_col=seq_col, vid=parent_vid))
max_id, = session.execute(sql).fetchone()
if not max_id:
max_id = 1
sequence_ids[key] = int(max_id)
elif not session:
# There was no session set. This should only happen when the parent object is new, and therefore,
        # there are no child numbers, so the appropriate starting number is 1. If the object is not new,
# there will be conflicts.
sequence_ids[key] = 1
else:
        # A cached number already exists, so just increment it
sequence_ids[key] += 1
return sequence_ids[key]
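# Minimal usage sketch (vid and cache values are hypothetical, for illustration only):
#   seq_cache = {}  # shared across calls within one process
#   n = next_sequence_id(session, seq_cache, 'd000001001', Table)
#   table = Table(d_vid='d000001001', sequence_id=n, ...)
# Reusing the same cache dict lets later calls count up without re-querying.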
def incver(o, prop_names):
"""Increment the version numbers of a set of properties and return a new object"""
from ambry.identity import ObjectNumber
d = {}
for p in o.__mapper__.attrs:
v = getattr(o, p.key)
if v is None:
d[p.key] = None
elif p.key in prop_names:
d[p.key] = str(ObjectNumber.increment(v))
else:
if not hasattr(v, '__mapper__'): # Only copy values, never objects
d[p.key] = v
return o.__class__(**d)
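# Hedged example: copy a record while bumping selected ObjectNumber columns.
# The property names below are illustrative; pass whichever apply to your model:
#   new_obj = incver(old_obj, ('vid', 'd_vid'))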
from ambry.orm.code import Code
from ambry.orm.column import Column
from ambry.orm.file import File
from ambry.orm.partition import Partition
from ambry.orm.table import Table
from ambry.orm.config import Config
from ambry.orm.dataset import Dataset
from ambry.orm.columnstat import ColumnStat
from ambry.orm.source_table import SourceColumn, SourceTable
from ambry.orm.source import DataSource, TransientDataSource
from ambry.orm.plot import Plot
from ambry.orm.database import Database
from ambry.orm.account import Account
from ambry.orm.process import Process
from ambry.orm.remote import Remote
| bsd-2-clause | -6,827,046,663,386,977,000 | 29.374718 | 113 | 0.638005 | false |
clockspot/master-clock | calibrate-meter.py | 1 | 2413 | #!/usr/bin/env python
#Use this script to find calibration points for your meter (add to settings.py).
#External settings
import settings
#External modules
import time
if settings.piMode:
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(settings.slavePin, GPIO.OUT)
if(settings.meterPin != False):
GPIO.setup(settings.meterPin, GPIO.OUT)
pwm = GPIO.PWM(settings.meterPin, 50)
pwm.start(0)
else:
print('Please set the meter pin in settings.py, if indeed you have a meter hooked up.')
exit()
else:
print('Please enable piMode in settings.py, if this is indeed running on a Pi.')
exit()
dcLast = 0
meterLag = 0.18 #seconds between ballistics steps (unused here; setMeter reads settings.meterLag)
def setMeter(dcNew): #Unlike carillon.py, this one is DC direct, not value converted; nor checks for piMode
#pwm must already have been started
global dcLast #otherwise the fact that we set dcLast inside this function would make python complain
if dcNew > 100: dcNew = 100 #apply range limits
if dcNew < 0: dcNew = 0
#set meter, using ballistics if dcChg is great enough
dcChg = dcNew-dcLast
if(abs(dcChg) > settings.meterChg): #apply ballistics
#easing out equations by Robert Penner - gizma.com/easing
for t in range(1, settings.meterStp+1):
            #quadratic ease-out: nowDC = -dcChg * t * (t-2) + dcLast, with t normalized to (0, 1]
t /= float(settings.meterStp)
nowDC = float(-dcChg) * t * (t-2) + dcLast
pwm.ChangeDutyCycle( nowDC )
if(t<settings.meterStp):
time.sleep(settings.meterLag)
else: #just go to there
pwm.ChangeDutyCycle(dcNew)
dcLast = dcNew
#end def setMeter
try:
print("Use this script to find calibration points for your meter (add to settings.py).")
print("Type Ctrl+C to exit.");
while 1:
userDC = input("Enter duty cycle 0-100: ")
print("Setting meter to "+str(userDC))
setMeter(float(userDC))
except AttributeError: #Easier to ask forgiveness than permission (EAFP) - http://stackoverflow.com/a/610923
print("\r\nAttributeError. Please ensure your settings.py includes all items from settings-sample.py.")
except KeyboardInterrupt:
print("\r\nBye!")
# except:
# print("Error")
finally:
if settings.piMode:
if dcLast > 20: #kill the meter softly
setMeter(0)
pwm.stop()
GPIO.cleanup()
#end try/except/finally | mit | 7,565,959,822,144,746,000 | 34.5 | 108 | 0.662246 | false |
titienmiami/mmc.repository | plugin.video.tvalacarta/tvalacarta/channels/tvn.py | 1 | 4763 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Channel for TVN (Chile)
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse,re
import urllib
from core import logger
from core import config
from core import scrapertools
from core.item import Item
DEBUG = False
CHANNELNAME = "tvn"
def isGeneric():
return True
def mainlist(item):
logger.info("tvalacarta.channels.tvn mainlist")
itemlist = []
itemlist.append( Item(channel=CHANNELNAME, title="Teleseries" , action="programas" , url="http://www.tvn.cl/player/", extra="teleseries", folder=True) )
itemlist.append( Item(channel=CHANNELNAME, title="Entretención" , action="programas" , url="http://www.tvn.cl/player/", extra="entretencion", folder=True) )
itemlist.append( Item(channel=CHANNELNAME, title="Series" , action="programas" , url="http://www.tvn.cl/player/", extra="series", folder=True) )
itemlist.append( Item(channel=CHANNELNAME, title="Docurrealidad" , action="programas" , url="http://www.tvn.cl/player/", extra="docurrealidad", folder=True) )
itemlist.append( Item(channel=CHANNELNAME, title="Cultura" , action="programas" , url="http://www.tvn.cl/player/", extra="cultura", folder=True) )
return itemlist
def programas(item):
logger.info("tvalacarta.channels.tvn programas")
itemlist = []
#http://www.tvn.cl/cultura/menuportadaplayer/?service=blank
    # Extract the series
data = scrapertools.cachePage("http://www.tvn.cl/"+item.extra+"/menuportadaplayer/?service=blank")
logger.info("data="+data.strip())
patron = '<li><a href="([^"]+)">([^<]+)<'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url,scrapedurl)
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item( channel=item.channel , title=title , action="episodios" , url=url , thumbnail=thumbnail , plot=plot , show=title , fanart=thumbnail , folder=True ) )
return itemlist
def episodios(item):
logger.info("tvalacarta.channels.tvn episodios")
itemlist=[]
'''
<article class="ventana3 efecto-hover">
<img src="http://www.tvn.cl/incoming/article566557.ece/ALTERNATES/w300/cumbres_170313.jpg" alt="Lhasa la ciudad prohibida"/>
<a href="/player/play/?id=566567&s=8959">
<div class="mask">
<h5><span></span>Cumbres del Mundo</h5>
<h3>Capítulo 11</h3>
<h2>Lhasa la ciudad prohibida</h2>
</div>
</a>
</article>
'''
    # Extract the episodes
data = scrapertools.cachePage(item.url)
patron = '<article class="ventana3 efecto-hover"[^<]+'
patron += '<img src="([^"]+)"[^<]+'
patron += '<a href="([^"]+)"[^<]+'
patron += '<div class="mask"[^<]+'
patron += '<h5><span></span>([^<]+)</h5[^<]+'
patron += '<h3>([^<]+)</h3[^<]+'
patron += '<h2>([^<]+)</h2>'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for scrapedthumbnail,scrapedurl,scrapedshow,scrapedepisode,scrapedtitle in matches:
title = scrapedepisode.strip()+" - "+scrapedtitle.strip()
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
plot = ""
url = urlparse.urljoin(item.url,scrapedurl)
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item( channel=item.channel , title=title , action="play" , server="tvn" , url=url , thumbnail=thumbnail , plot=plot , show=title , fanart=thumbnail , folder=False ) )
return itemlist
# Automatic channel check: this function must return "True" if everything in the channel is OK.
def test():
    # The channel has a structure
items_mainlist = mainlist(Item())
items_programas = []
# Todas las opciones del menu tienen que tener algo
for item_mainlist in items_mainlist:
exec "itemlist="+item_mainlist.action+"(item_mainlist)"
if len(itemlist)==0:
print "La sección '"+item_mainlist.title+"' no devuelve nada"
return False
items_programas = itemlist
    # Now iterate over the programs until one of them has videos
for item_programa in items_programas:
print "Verificando "+item_programa.title
items_episodios = episodios(item_programa)
if len(items_episodios)>0:
return True
print "No hay videos en ningún programa"
return False
| gpl-2.0 | -1,261,113,166,664,057,600 | 37.967213 | 191 | 0.636517 | false |
edek437/Zastosowanie-informatyki-w-gospodarce-projekt | lotnisko/migrations/0006_auto_20160117_2111.py | 1 | 1331 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lotnisko', '0005_auto_20160117_1041'),
]
operations = [
migrations.RemoveField(
model_name='reservedseat',
name='flight',
),
migrations.RemoveField(
model_name='reservation',
name='hand_luggage_surcharge',
),
migrations.RemoveField(
model_name='reservation',
name='hold_luggage_surcharge',
),
migrations.RemoveField(
model_name='reservation',
name='seat',
),
migrations.AddField(
model_name='reservation',
name='seat_number',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='reservation',
name='seat_type',
field=models.CharField(default='Economic Class', max_length=254, choices=[(b'Economic Class', b'Economic Class'), (b'Business Class', b'Business Class'), (b'First Class', b'First Class')]),
preserve_default=False,
),
migrations.DeleteModel(
name='ReservedSeat',
),
]
| mit | -8,412,982,665,111,262,000 | 28.577778 | 201 | 0.552968 | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/abs-plastic-materials_v3-1-0/lib/mat_properties.py | 1 | 15505 | # Copyright (C) 2019 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# LDR code reference: https://www.ldraw.org/article/547.html
# Color naming reference: https://www.bricklink.com/catalogColors.asp
"""
ABS_Dialectric defaults:
Diffuse Color: (1, 1, 1, 1)
Boost Value: 0.0
Random: 0.02
Rough 1: 0.005
Rough 2: 0.15
Metallic: 0.01
Speckle: 0.0
Fingerprints: 0.25
SSS Color: (1, 1, 1, 1)
SSS Amount: 0.0
ABS_Transparent defaults:
Color: (1, 1, 1, 1)
Boost Value: 0
Random: 0.02
Rough 1: 0.005
Rough 2: 0.15
Rough Mix: 0.0
Reflection: 0.01
Fingerprints: 0.25
Absorption: -1.0
"""
mat_properties = {
'ABS Plastic Black':{
'Diffuse Color':[0.0185, 0.01764, 0.01681, 1.0],
# Other properties (not node inputs)
'LDR Code':0,
},
'ABS Plastic Blue':{
'Diffuse Color':[0.0, 0.12214, 0.46778, 1.0],
# Other properties (not node inputs)
'LDR Code':1,
},
'ABS Plastic Bright Green':{
'Diffuse Color':[0.00605, 0.29614, 0.04667, 1.0],
'SSS Color':[0.0, 1.0, 0.02956, 1.0],
'SSS Amount':0.17,
# Other properties (not node inputs)
'LDR Code':10,
},
'ABS Plastic Bright Light Blue':{
# 'Diffuse Color':[0.05951, 0.32314, 0.60383, 1.0], # OLD
# 'Diffuse Color':[0.337164, 0.545725, 0.921582, 1.0], # Possibly better?
'Diffuse Color':[0.084, 0.225, 0.656, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.084, 0.225, 0.656, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':212,
},
'ABS Plastic Bright Light Orange':{
'Diffuse Color':[0.98225, 0.4452, 0.0, 1.0],
'Boost Value':0.1,
'SSS Color':[1.0, 0.30499, 0.0, 1.0],
'SSS Amount':0.12,
# Other properties (not node inputs)
'LDR Code':191,
},
'ABS Plastic Bright Light Yellow':{
'Diffuse Color':[1.0, 0.83077, 0.20508, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[1.0, 0.83077, 0.20508, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':226,
},
'ABS Plastic Bright Pink':{
'Diffuse Color':[0.92158, 0.40724, 0.7011, 1.0],
'SSS Color':[0.98225, 0.01797, 0.15952, 1.0],
'SSS Amount':0.04,
# Other properties (not node inputs)
'LDR Code':29,
},
'ABS Plastic Coral':{
'Diffuse Color':[0.991102, 0.152926, 0.181164, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.991102, 0.152926, 0.181164, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':353,
},
'ABS Plastic Dark Azure':{
'Diffuse Color':[0.16203, 0.40724, 0.65837, 1.0],
'SSS Color':[0.0003, 0.33245, 1.0, 1.0],
'SSS Amount':0.12,
# Other properties (not node inputs)
'LDR Code':321,
},
'ABS Plastic Dark Blue':{
'Diffuse Color':[0.01161, 0.0382, 0.08866, 1.0],
# Other properties (not node inputs)
'LDR Code':272,
},
'ABS Plastic Dark Bluish Gray':{
'Diffuse Color':[0.07819, 0.0999, 0.09306, 1.0],
# Other properties (not node inputs)
'LDR Code':72, # 8 for classic (but expensive) Dark Bluish Gray
},
'ABS Plastic Dark Brown':{
'Diffuse Color':[0.06848, 0.0331, 0.02519, 1.0],
# Other properties (not node inputs)
'LDR Code':308,
},
'ABS Plastic Dark Green':{
'Diffuse Color':[0.0075, 0.0648, 0.0356, 1.0],
'SSS Color':[0.0075, 0.0648, 0.0356, 1.0],
'SSS Amount':0.03,
# Other properties (not node inputs)
'LDR Code':288,
},
'ABS Plastic Dark Orange':{
'Diffuse Color':[0.278894, 0.078187, 0.011612, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.278894, 0.078187, 0.011612, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':484,
},
'ABS Plastic Dark Pink':{
'Diffuse Color':[0.2462, 0.02217, 0.14703, 1.0],
'SSS Color':[0.87962, 0.0, 0.06848, 1.0],
'SSS Amount':0.04,
# Other properties (not node inputs)
'LDR Code':5,
},
'ABS Plastic Dark Purple':{
'Diffuse Color':[0.09306, 0.05127, 0.25818, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.09306, 0.05127, 0.25818, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':85,
},
'ABS Plastic Dark Red':{
'Diffuse Color':[0.21953, 0.02029, 0.02217, 1.0],
'SSS Color':[1.0, 0.0, 0.0, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':320,
},
'ABS Plastic Dark Tan':{
'Diffuse Color':[0.32778, 0.23074, 0.12744, 1.0],
'SSS Color':[0.40724, 0.10702, 0.01681, 1.0],
'SSS Amount':0.14,
# Other properties (not node inputs)
'LDR Code':28,
},
'ABS Plastic Dark Turquoise':{
'Diffuse Color':[0.0, 0.29177, 0.28315, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.0, 0.29177, 0.28315, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':3,
},
'ABS Plastic Green':{
'Diffuse Color':[0.0, 0.21586, 0.04971, 1.0],
'SSS Color':[0.0, 0.4452, 0.04667, 1.0],
'SSS Amount':0.04,
# Other properties (not node inputs)
'LDR Code':2,
},
'ABS Plastic Lavender':{
'Diffuse Color':[0.48515, 0.39676, 0.67954, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.48515, 0.39676, 0.67954, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':31,
},
'ABS Plastic Light Aqua':{
'Diffuse Color':[0.651406, 0.887923, 0.814847, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.651406, 0.887923, 0.814847, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':323,
},
'ABS Plastic Light Nougat':{
'Diffuse Color':[0.93011, 0.55834, 0.39676, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.93011, 0.55834, 0.39676, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':78,
},
'ABS Plastic Light Bluish Gray':{
'Diffuse Color':[0.3467, 0.37626, 0.38643, 1.0],
'SSS Color':[0.3467, 0.37626, 0.38643, 1.0],
'SSS Amount':0.01,
# Other properties (not node inputs)
'LDR Code':71,
},
'ABS Plastic Lime':{
'Diffuse Color':[0.36625, 0.49102, 0.00304, 1.0],
'SSS Color':[0.43966, 0.95597, 0.0, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':27,
},
'ABS Plastic Magenta':{
'Diffuse Color':[0.39157, 0.0185, 0.14996, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.39157, 0.0185, 0.14996, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':26,
},
'ABS Plastic Medium Azure':{
'Diffuse Color':[0.138432, 0.53948, 0.752943, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.138432, 0.53948, 0.752943, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':322,
},
'ABS Plastic Medium Blue':{
'Diffuse Color':[0.168269, 0.304987, 0.577581, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.168269, 0.304987, 0.577581, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':73,
},
'ABS Plastic Medium Nougat':{
'Diffuse Color':[0.42327, 0.17465, 0.0648, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.42327, 0.17465, 0.0648, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':84,
},
'ABS Plastic Medium Lavender':{
'Diffuse Color':[0.36131, 0.17789, 0.47932, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.36131, 0.17789, 0.47932, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':30,
},
'ABS Plastic Metallic Gold':{
'Diffuse Color':[0.38333, 0.2021, 0.05824, 1.0],
'Rough 1':0.25,
'Rough 2':0.33,
'Metallic':0.85,
'Speckle':0.35,
'Fingerprints':0.03125,
'SSS Color':[1.0, 0.16827, 0.0, 1.0],
'SSS Amount':0.05,
# Other properties (not node inputs)
'LDR Code':82,
},
'ABS Plastic Metallic Silver':{
'Diffuse Color':[0.30963, 0.30963, 0.30963, 1.0],
'Rough 1':0.25,
'Rough 2':0.33,
'Metallic':0.9,
'Speckle':0.35,
'Fingerprints':0.03125,
# Other properties (not node inputs)
'LDR Code':80,
},
'ABS Plastic Nougat':{
'Diffuse Color':[0.491021, 0.215861, 0.1, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.491021, 0.215861, 0.1, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':92,
},
'ABS Plastic Olive Green':{
'Diffuse Color':[0.181164, 0.184475, 0.076185, 1.0],
'SSS Color':[0.181164, 0.184475, 0.076185, 1.0],
'SSS Amount':0.05,
# Other properties (not node inputs)
'LDR Code':330,
},
'ABS Plastic Orange':{
'Diffuse Color':[1.0, 0.20864, 0.00605, 1.0],
'SSS Color':[1.0, 0.02956, 0.0, 1.0],
'SSS Amount':0.14,
# Other properties (not node inputs)
'LDR Code':25,
},
'ABS Plastic Red':{
'Diffuse Color':[0.50289, 0.01161, 0.01521, 1.0],
'SSS Color':[1.0, 0.0, 0.0, 1.0],
'SSS Amount':0.14,
# Other properties (not node inputs)
'LDR Code':4,
},
'ABS Plastic Reddish Brown':{
'Diffuse Color':[0.16513, 0.04817, 0.02416, 1.0],
# Other properties (not node inputs)
'LDR Code':70,
},
'ABS Plastic Sand Blue':{
'Diffuse Color':[0.15593, 0.23455, 0.30054, 1.0],
'SSS Color':[0.15593, 0.23455, 0.30054, 1.0],
'SSS Amount':0.01,
# Other properties (not node inputs)
'LDR Code':379,
},
'ABS Plastic Sand Green':{
'Diffuse Color':[0.16513, 0.29614, 0.20156, 1.0],
'SSS Color':[0.16513, 0.29614, 0.20156, 1.0],
'SSS Amount':0.05,
# Other properties (not node inputs)
'LDR Code':378,
},
'ABS Plastic Tan':{
'Diffuse Color':[0.71569, 0.53948, 0.30054, 1.0],
'SSS Color':[1.0, 0.67244, 0.06125, 1.0],
'SSS Amount':0.14,
# Other properties (not node inputs)
'LDR Code':19,
},
'ABS Plastic Trans-Dark Blue':{
'Color':[0.0, 0.42327, 0.7454, 0.75],
# Other properties (not node inputs)
'LDR Code':33,
},
    'ABS Plastic Trans-Light Orange':{ # BrickLink name for LDR 231 (assumed); distinct from Trans-Orange (LDR 57) below
'Color':[1.0, 0.31399, 0.0, 0.75],
'Boost Value':0.33,
# Other properties (not node inputs)
'LDR Code':231,
},
'ABS Plastic Trans-Black':{
'Color':[0.116, 0.085, 0.0484, 0.75],
'Boost Value':0.33,
# Other properties (not node inputs)
'LDR Code':40,
},
'ABS Plastic Trans-Bright Green':{
'Color':[0.192202, 0.7454, 0.0, 0.75],
'Boost Value':0.33,
# TODO: UPDATE BASED ON IN-PERSON ANALYSIS
# Other properties (not node inputs)
'LDR Code':35,
},
'ABS Plastic Trans-Clear':{
'Color':[1.0, 0.98225, 0.94731, 0.65],
# Other properties (not node inputs)
'LDR Code':47,
},
'ABS Plastic Trans-Dark Pink':{
'Color':[0.7454, 0.024093, 0.302096, 1.0],
# TODO: UPDATE BASED ON IN-PERSON ANALYSIS
# Other properties (not node inputs)
'LDR Code':37,
},
'ABS Plastic Trans-Green':{
'Color':[0.0, 0.53328, 0.08438, 0.75],
# Other properties (not node inputs)
'LDR Code':34,
},
'ABS Plastic Trans-Light Blue':{
'Color':[0.38643, 0.85499, 1.0, 0.75],
# Other properties (not node inputs)
'LDR Code':43,
},
'ABS Plastic Trans-Neon Green':{
'Color':[0.858457, 1.0, 0.0, 0.65],
'Rough 1': 0.001,
'Fluorescence':0.8,
'Fluorescent Color':[0.230947, 1.0, 0.045182, 1.0],
# Other properties (not node inputs)
'LDR Code':42,
},
'ABS Plastic Trans-Neon Orange':{
'Color':[1.0, 0.42, 0.033, 0.65],
'Fluorescence':0.8,
'Fluorescent Color':[1.0, 0.047, 0.0, 1.0],
# Other properties (not node inputs)
'LDR Code':38,
},
'ABS Plastic Trans-Orange':{
'Color':[1.0, 0.47353, 0.12214, 0.75],
# Other properties (not node inputs)
'LDR Code':57,
},
'ABS Plastic Trans-Purple':{
'Color':[0.320953, 0.018755, 0.7454, 1.0],
# TODO: UPDATE BASED ON IN-PERSON ANALYSIS
# Other properties (not node inputs)
'LDR Code':52,
},
'ABS Plastic Trans-Red':{
'Color':[0.95597, 0.0, 0.0, 0.75],
# Other properties (not node inputs)
'LDR Code':36,
},
'ABS Plastic Trans-Yellow':{
'Color':[1.0, 0.89627, 0.01681, 0.75],
# Other properties (not node inputs)
'LDR Code':46,
},
# 'ABS Plastic Trans-Yellowish Clear':{
# 'Color':[0.87962, 0.8388, 0.73046, 0.7],
# 'Rough 1':0.015,
# # Other properties (not node inputs)
# 'LDR Code':47,
# },
'ABS Plastic White':{
'Diffuse Color':[0.94731, 0.89627, 0.81485, 1.0],
'SSS Color':[1.0, 0.67244, 0.06125, 1.0],
'SSS Amount':0.14,
# Other properties (not node inputs)
'LDR Code':15,
},
'ABS Plastic Yellow':{
'Diffuse Color':[0.97345, 0.58408, 0.0, 1.0],
'SSS Color':[1.0, 0.30499, 0.0, 1.0],
'SSS Amount':0.14,
# Other properties (not node inputs)
'LDR Code':14,
},
'ABS Plastic Yellowish Green':{
'Diffuse Color':[0.752942, 0.938686, 0.323143, 1.0],
# TODO: UPDATE SUBSURFACE SCATTERING COLOR
'SSS Color':[0.752942, 0.938686, 0.323143, 1.0],
'SSS Amount':0.1,
# Other properties (not node inputs)
'LDR Code':326,
},
# TODO: define properties for the following materials
'ABS Plastic Pearl Gold':{
'Diffuse Color':[0.396755, 0.212231, 0.026241, 1],
# Other properties (not node inputs)
'LDR Code':297,
},
'ABS Plastic Flat Silver':{
'Diffuse Color':[0.258183, 0.262251, 0.258183, 1],
# Other properties (not node inputs)
'LDR Code':135,
},
'ABS Plastic Pearl Dark Gray':{
'Diffuse Color':[0.048172, 0.043735, 0.039546, 1],
# Other properties (not node inputs)
'LDR Code':87, # previously 148
},
# 'ABS Plastic Copper':{
# 'Diffuse Color':[0.7, 0.7, 0.7, 1],
# # Other properties (not node inputs)
# 'LDR Code':None,
# },
'ABS Plastic Chrome Silver':{
'Diffuse Color':[0.610496, 0.6172076, 0.610496, 1],
# Other properties (not node inputs)
'LDR Code':383,
},
'ABS Plastic Chrome Gold':{
'Diffuse Color':[0.730461, 0.527115, 0.177888, 1],
# Other properties (not node inputs)
'LDR Code':334,
},
}
| gpl-3.0 | 6,631,376,333,744,395,000 | 31.101449 | 79 | 0.573879 | false |
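Each entry in the table above is a set of shader-node input values keyed by socket name, plus an LDraw colour code kept outside the node inputs. A minimal sketch of how such a table could be pushed onto a Blender material, assuming a node group named "ABS Uber Shader" exposes sockets matching the keys (the group name and the helper itself are illustrative, not part of the original add-on):

import bpy

def apply_abs_properties(mat_name, properties):
    """Copy one entry of the materials table onto a node group's inputs."""
    mat = bpy.data.materials.get(mat_name)
    if mat is None or not mat.use_nodes:
        return
    group = mat.node_tree.nodes.get("ABS Uber Shader")  # assumed node name
    if group is None:
        return
    for key, value in properties.items():
        if key == 'LDR Code':
            continue  # bookkeeping only -- not a node input (see comments)
        socket = group.inputs.get(key)
        if socket is not None:
            socket.default_value = value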
classcat/cctf | cctf/helpers/summarizer.py | 1 | 3079 | from __future__ import division, print_function, absolute_import
import tensorflow as tf
from .. import summaries
"""
Summarizer contains some useful functions to help summarize variables,
activations etc... in Tensorboard.
"""
def summarize_all(train_vars, grads, activations,
summary_collection="tflearn_summ"):
summarize_variables(train_vars, summary_collection)
summarize_gradients(grads, summary_collection)
return summarize_activations(activations, summary_collection)
def summarize_variables(train_vars=None, summary_collection="tflearn_summ"):
""" summarize_variables.
Arguments:
train_vars: list of `Variable`. The variable weights to monitor.
summary_collection: A collection to add this summary to and
also used for returning a merged summary over all its elements.
Default: 'tflearn_summ'.
Returns:
`Tensor`. Merge of all summaries in 'summary_collection'
"""
if not train_vars: train_vars = tf.trainable_variables()
summaries.add_trainable_vars_summary(train_vars, "", "", summary_collection)
return tf.merge_summary(tf.get_collection(summary_collection))
def summarize_activations(activations, summary_collection="tflearn_summ"):
""" summarize_activations.
Arguments:
activations: list of `Tensor`. The activations to monitor.
summary_collection: A collection to add this summary to and
also used for returning a merged summary over all its elements.
Default: 'tflearn_summ'.
Returns:
`Tensor`. Merge of all summaries in 'summary_collection'
"""
summaries.add_activations_summary(activations, "", "", summary_collection)
return tf.merge_summary(tf.get_collection(summary_collection))
def summarize_gradients(grads, summary_collection="tflearn_summ"):
""" summarize_activations.
Arguemnts:
grads: list of `Tensor`. The gradients to monitor.
summary_collection: A collection to add this summary to and
also used for returning a merged summary over all its elements.
Default: 'tflearn_summ'.
Returns:
`Tensor`. Merge of all summaries in 'summary_collection'
"""
summaries.add_gradients_summary(grads, "", "", summary_collection)
return tf.merge_summary(tf.get_collection(summary_collection))
def summarize(value, type, name, summary_collection="tflearn_summ"):
""" summarize.
A custom summarization op.
Arguments:
value: `Tensor`. The tensor value to monitor.
type: `str` among 'histogram', 'scalar'. The data monitoring type.
name: `str`. A name for this summary.
summary_collection: A collection to add this summary to and
also used for returning a merged summary over all its elements.
Default: 'tflearn_summ'.
Returns:
`Tensor`. Merge of all summaries in 'summary_collection'.
"""
summaries.get_summary(type, name, value, summary_collection)
return tf.merge_summary(tf.get_collection(summary_collection))
| agpl-3.0 | 654,663,146,128,021,200 | 33.595506 | 80 | 0.688535 | false |
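A usage sketch for these helpers against the old TensorFlow 0.x summary API they call (tf.merge_summary, tf.train.SummaryWriter); the variable, log path and step number are illustrative:

import tensorflow as tf
from cctf.helpers.summarizer import summarize, summarize_variables

W = tf.Variable(tf.zeros([10, 10]), name="W")
loss = tf.reduce_mean(tf.square(W))

summarize(loss, "scalar", "loss")   # scalar summary of the loss
merged = summarize_variables()      # histograms of all trainable variables

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("/tmp/tflearn_logs", sess.graph)
    sess.run(tf.initialize_all_variables())
    writer.add_summary(sess.run(merged), 0)  # write step 0 for TensorBoard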
Mozu/mozu-python-sdk | mozurestsdk/platform/secureappdata.py | 1 | 3372 |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class SecureAppData(object):
def __init__(self, apiContext: ApiContext = None, mozuClient = None):
self.client = mozuClient or default_client();
if (apiContext is not None):
self.client.withApiContext(apiContext);
else:
self.client.withApiContext(ApiContext());
def getDBValue(self,appKeyId, dbEntryQuery, responseFields = None):
""" platform-secureappdata Get GetDBValue description DOCUMENT_HERE
Args:
| appKeyId (string) -
| dbEntryQuery (string) - The database entry string to retrieve.
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| JObject
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/secureappdata/{appKeyId}/{*dbEntryQuery}?responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
url.formatUrl("appKeyId", appKeyId);
url.formatUrl("dbEntryQuery", dbEntryQuery);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).execute();
return self.client.result();
def createDBValue(self,value, appKeyId, dbEntryQuery):
""" platform-secureappdata Post CreateDBValue description DOCUMENT_HERE
Args:
| value(value) - The value string to create.
| appKeyId (string) -
| dbEntryQuery (string) - The database entry string to create.
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/secureappdata/{appKeyId}/{*dbEntryQuery}", "POST", UrlLocation.TenantPod, False);
url.formatUrl("appKeyId", appKeyId);
url.formatUrl("dbEntryQuery", dbEntryQuery);
self.client.withResourceUrl(url).withBody(value).execute();
def updateDBValue(self,value, appKeyId, dbEntryQuery):
""" platform-secureappdata Put UpdateDBValue description DOCUMENT_HERE
Args:
| value(value) - The value string to update.
| appKeyId (string) -
| dbEntryQuery (string) - The database entry string to update.
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/secureappdata/{appKeyId}/{*dbEntryQuery}", "PUT", UrlLocation.TenantPod, False);
url.formatUrl("appKeyId", appKeyId);
url.formatUrl("dbEntryQuery", dbEntryQuery);
self.client.withResourceUrl(url).withBody(value).execute();
def deleteDBValue(self,appKeyId, dbEntryQuery):
""" platform-secureappdata Delete DeleteDBValue description DOCUMENT_HERE
Args:
| appKeyId (string) -
| dbEntryQuery (string) - The database entry string to delete.
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/secureappdata/{appKeyId}/{*dbEntryQuery}", "DELETE", UrlLocation.TenantPod, False);
url.formatUrl("appKeyId", appKeyId);
url.formatUrl("dbEntryQuery", dbEntryQuery);
self.client.withResourceUrl(url).execute();
| apache-2.0 | 6,208,607,910,808,804,000 | 29.64486 | 266 | 0.70433 | false |
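A hypothetical round trip through the four generated methods above; the API context setup, app key and entry path are placeholders, not values from the SDK:

from mozurestsdk.apicontext import ApiContext
from mozurestsdk.platform.secureappdata import SecureAppData

context = ApiContext()  # in a real app, populated with tenant/site credentials
resource = SecureAppData(apiContext=context)

resource.createDBValue({"theme": "dark"}, "my-app-key", "settings/ui")
value = resource.getDBValue("my-app-key", "settings/ui")
resource.updateDBValue({"theme": "light"}, "my-app-key", "settings/ui")
resource.deleteDBValue("my-app-key", "settings/ui")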
open-synergy/opnsynid-l10n-indonesia | l10n_id_taxform_bukti_potong_pph_f113313/models/bukti_potong_pph_f113313_in.py | 1 | 1102 | # -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
class BuktiPotongPPhF113313In(models.Model):
_name = "l10n_id.bukti_potong_pph_f113313_in"
_inherit = "l10n_id.bukti_potong_pph"
_table = "l10n_id_bukti_potong_pph"
_description = "Bukti Potong PPh f.1.1.33.13 In"
@api.model
def _default_type_id(self):
return self.env.ref(
"l10n_id_taxform_bukti_potong_pph_f113313."
"bukti_potong_pph_type_f113313_in").id
type_id = fields.Many2one(
default=lambda self: self._default_type_id(),
)
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
type_id = self.env.ref(
"l10n_id_taxform_bukti_potong_pph_f113313."
"bukti_potong_pph_type_f113313_in")
args.append(("type_id", "=", type_id.id))
return super(BuktiPotongPPhF113313In, self).search(
args=args, offset=offset, limit=limit,
order=order, count=count)
| agpl-3.0 | -7,598,236,411,724,964,000 | 33.4375 | 74 | 0.629764 | false |
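The model above pins a shared bukti potong table to its "in" document type by overriding both the default type_id and search(). A sketch of the mirrored "out" variant implied by the in/out naming convention (the XML id is an assumption):

from openerp import models, fields, api

class BuktiPotongPPhF113313Out(models.Model):
    _name = "l10n_id.bukti_potong_pph_f113313_out"
    _inherit = "l10n_id.bukti_potong_pph"
    _table = "l10n_id_bukti_potong_pph"  # same table; rows split by type_id
    _description = "Bukti Potong PPh f.1.1.33.13 Out"

    @api.model
    def _default_type_id(self):
        # the search() override shown above would be mirrored the same way
        return self.env.ref(
            "l10n_id_taxform_bukti_potong_pph_f113313."
            "bukti_potong_pph_type_f113313_out").id

    type_id = fields.Many2one(
        default=lambda self: self._default_type_id(),
    )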
Ulauncher/Ulauncher | tests/api/server/test_DeferredResultRenderer.py | 1 | 2856 | import mock
import pytest
from ulauncher.api.server.DeferredResultRenderer import DeferredResultRenderer
from ulauncher.api.server.ExtensionController import ExtensionController
from ulauncher.api.server.ExtensionManifest import ExtensionManifest
from ulauncher.api.shared.action.BaseAction import BaseAction
from ulauncher.api.shared.event import BaseEvent, KeywordQueryEvent
from ulauncher.search.Query import Query
class TestDeferredResultRenderer:
@pytest.fixture(autouse=True)
def Timer(self, mocker):
return mocker.patch('ulauncher.api.server.DeferredResultRenderer.Timer')
@pytest.fixture(autouse=True)
def GLib(self, mocker):
return mocker.patch('ulauncher.api.server.DeferredResultRenderer.GLib')
@pytest.fixture
def event(self):
return mock.create_autospec(BaseEvent)
@pytest.fixture
def manifest(self):
return mock.create_autospec(ExtensionManifest)
@pytest.fixture
def controller(self, manifest):
controller = mock.create_autospec(ExtensionController)
controller.get_manifest.return_value = manifest
return controller
@pytest.fixture
def renderer(self):
return DeferredResultRenderer()
def test_handle_event__result__instanceof_BaseAction(self, renderer, event, controller):
result = renderer.handle_event(event, controller)
assert isinstance(result, BaseAction)
def test_handle_event__loading_timer__is_canceled(self, renderer, event, controller):
timer = mock.Mock()
renderer.loading = timer
renderer.handle_event(event, controller)
timer.cancel.assert_called_once_with()
def test_handle_response__action__is_ran(self, renderer, controller):
response = mock.Mock()
response.event = KeywordQueryEvent(Query('test'))
renderer.active_event = response.event
renderer.active_controller = controller
renderer.handle_response(response, controller)
response.action.run.assert_called_once_with()
def test_handle_response__keep_app_open_is_False__hide_is_called(self, renderer, controller, GLib, mocker):
UlauncherWindow = mocker.patch('ulauncher.ui.windows.UlauncherWindow.UlauncherWindow')
response = mock.Mock()
response.event = KeywordQueryEvent(Query('test'))
response.action.keep_app_open.return_value = False
renderer.active_event = response.event
renderer.active_controller = controller
renderer.handle_response(response, controller)
GLib.idle_add.assert_called_with(UlauncherWindow.get_instance.return_value.hide_and_clear_input)
def test_on_query_change__loading__is_canceled(self, renderer):
timer = mock.Mock()
renderer.loading = timer
renderer.on_query_change()
timer.cancel.assert_called_once_with()
| gpl-3.0 | 1,385,661,172,429,175,600 | 38.666667 | 111 | 0.723039 | false |
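For orientation, a condensed version of the round trip these tests exercise, with mocks standing in for a live extension (the query text is arbitrary):

import mock
from ulauncher.api.server.DeferredResultRenderer import DeferredResultRenderer
from ulauncher.api.server.ExtensionController import ExtensionController
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.search.Query import Query

renderer = DeferredResultRenderer()
controller = mock.create_autospec(ExtensionController)

# handle_event returns a BaseAction right away; the real results arrive
# later via handle_response once the extension replies.
action = renderer.handle_event(KeywordQueryEvent(Query('kw test')), controller)

response = mock.Mock()
response.event = KeywordQueryEvent(Query('kw test'))
renderer.active_event = response.event        # mirrors the test setup
renderer.active_controller = controller
renderer.handle_response(response, controller)  # runs response.action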
seung-lab/cloud-volume | cloudvolume/datasource/boss/__init__.py | 1 | 1434 | from .image import BossImageSource
from .metadata import BossMetadata
from ...frontends.precomputed import CloudVolumePrecomputed
from .. import get_cache_path
from ...cacheservice import CacheService
from ...cloudvolume import SharedConfiguration, register_plugin
from ...paths import strict_extract
def create_boss(
cloudpath, mip=0, bounded=True, autocrop=False,
fill_missing=False, cache=False, compress_cache=None,
cdn_cache=True, progress=False, info=None, provenance=None,
compress=None, non_aligned_writes=False, parallel=1,
delete_black_uploads=False, green_threads=False
):
path = strict_extract(cloudpath)
config = SharedConfiguration(
cdn_cache=cdn_cache,
compress=compress,
compress_level=None,
green=green_threads,
mip=mip,
parallel=parallel,
progress=progress,
)
cache = CacheService(
cloudpath=get_cache_path(cache, cloudpath),
enabled=bool(cache),
config=config,
compress=compress_cache,
)
meta = BossMetadata(cloudpath, cache=cache, info=info)
image = BossImageSource(
config, meta, cache,
autocrop=bool(autocrop),
bounded=bool(bounded),
non_aligned_writes=bool(non_aligned_writes),
)
return CloudVolumePrecomputed(
meta, cache, config,
image, mesh=None, skeleton=None,
mip=mip
)
def register():
register_plugin('boss', create_boss) | bsd-3-clause | 8,546,106,730,014,830,000 | 27.7 | 63 | 0.694561 | false |
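A hypothetical usage of the factory above once the plugin is registered; the Boss path is a placeholder and the cutout bounds are arbitrary:

from cloudvolume.datasource.boss import register, create_boss

register()  # adds the 'boss' protocol to CloudVolume's registry
vol = create_boss("boss://collection/experiment/channel", mip=0, progress=True)
cutout = vol[0:64, 0:64, 0:64]  # small read served by BossImageSource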
ma89long/google-python-class | babynames/babynames.py | 1 | 2902 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
"""
f = open(filename, 'r')
htmlText = f.read()
f.close()
nameDict = {}
# year.group(1) is the year in the current file
year = re.search('Popularity in (\d{4})', htmlText)
# (rank, boyname, girlname) in the list of tuples
names = re.findall('<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', htmlText)
for name in names:
(rank, boyname, girlname) = name
if (boyname not in nameDict) or (int(nameDict[boyname]) > int(rank)):
nameDict[boyname] = rank
if (girlname not in nameDict) or (int(nameDict[girlname]) > int(rank)):
nameDict[girlname] = rank
# Sort the names
sortedNames = sorted(nameDict.keys())
# Generate output
nameArray = []
# Save year in the beginning
nameArray.append(year.group(1))
for name in sortedNames:
nameArray.append(name + ' ' + nameDict[name])
return nameArray
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
# For each filename, get the names, then either print the text output
# or write it to a summary file
for filename in args:
names = extract_names(filename)
year = names[0]
text = '\n'.join(names) + '\n'
if summary:
f = open(filename + '.summary', 'w')
f.write(text)
f.close()
else:
print text
if __name__ == '__main__':
main()
| apache-2.0 | 8,827,361,729,426,963,000 | 28.313131 | 79 | 0.661268 | false |
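A quick illustration of the rank/name regex used in extract_names(), run against one line of the sample HTML quoted in the module docstring:

import re

line = '<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>'
match = re.search(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', line)
print(match.groups())  # ('1', 'Michael', 'Jessica')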
gusam/drnau_project | drnau_project/project/models.py | 1 | 1601 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
TRANSMISSION = (('1','Vivo'),('2','Grabado'))
class Project(models.Model):
proj_name = models.CharField(max_length=30, unique=True, verbose_name='Nombre del Proyecto')
proj_date = models.DateField(auto_now_add=True)
proj_user = models.ForeignKey(User, related_name="project_user", verbose_name='Usuario')
proj_description = models.CharField(max_length=150, verbose_name="Descripción", blank=True)
def __str__(self):
return self.proj_name
class ShowTv(models.Model):
st_channel = models.CharField(max_length=30)
st_name = models.CharField(max_length=30, blank=True)
st_live = models.CharField(max_length=1, default='2', choices=TRANSMISSION)
def __str__(self):
return self.st_channel
class Prototype(models.Model):
pro_proj_id = models.ForeignKey(Project, related_name="prototype_project")
pro_version = models.IntegerField()
pro_link_content = models.BooleanField(default=True)
pro_sho_id = models.ForeignKey(ShowTv, related_name="prototype_showtv")
pro_date = models.DateField(auto_now_add=True)
pro_date_update = models.DateField(auto_now=True)
pro_name = models.CharField(max_length=30,verbose_name='Nombre del Prototipo')
pro_description = models.CharField(max_length=150, verbose_name="Descripción", blank=True)
def __str__(self):
return str(self.pro_version)
class Schedule(models.Model):
sch_st_id = models.ForeignKey(ShowTv, related_name="schedule_showtv")
sch_time = models.TimeField()
| mit | 8,242,514,081,713,468,000 | 43.416667 | 96 | 0.714196 | false |
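A hypothetical Django shell session showing how the three models relate; usernames and field values are placeholders:

from django.contrib.auth.models import User
from project.models import Project, ShowTv, Prototype

user = User.objects.create_user("ana")
project = Project.objects.create(proj_name="Noticiero", proj_user=user)
show = ShowTv.objects.create(st_channel="Canal 7", st_live='1')  # '1' = Vivo
Prototype.objects.create(
    pro_proj_id=project, pro_version=1, pro_sho_id=show,
    pro_name="Boceto inicial",
)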
pmajka/3dbar | bin/parsers/whs_0.6.2/data.py | 1 | 5969 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# #
# This file is part of 3d Brain Atlas Reconstructor #
# #
# Copyright (C) 2010-2012 Piotr Majka, Jakub M. Kowalski #
# #
# 3d Brain Atlas Reconstructor is free software: you can redistribute #
# it and/or modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# #
# 3d Brain Atlas Reconstructor is distributed in the hope that it #
# will be useful, but WITHOUT ANY WARRANTY; without even the implied #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with 3d Brain Atlas Reconstructor. If not, see #
# http://www.gnu.org/licenses/. #
# #
###############################################################################
import datetime
CONF_PARSER_COMMENT = 'CAF dataset based on <a href="http://software.incf.org/software/waxholm-space/waxholm-space/mbat-ready-label-volume-v0.6.2/file_download?file_field=file" target="_blank">MBAT (Mouse BIRN Atlasing Toolkit)-ready \
label volume</a> which contains 50 manually segmented brain regions, version 0.6.2. For more information about the Waxholm space check out: \
Waxholm Space, a coordinate-based reference space for the \
mapping and registration of neuroanatomical data in the mouse brain \
(<a href="http://www.ncbi.nlm.nih.gov/pubmed/20600960" target="_blank"> \
G.Johnson, et. al., NeuroImage 53 (2010) 365-372</a>) or <a href="http://atlasing.incf.org/wiki/Main_Page" target="_blank">The INCF Digital Atlasing Program wiki</a>.'
CONF_PARSER_NAME = 'whs_0.6.2'
CONF_CONTACT_COMMENT= 'Piotr Majka, Nencki Institute of Experimental Biology'
CONF_CONTACT_EMAIL = '[email protected]'
CONF_CAF_COMPIL_TIME= datetime.datetime.utcnow().strftime("%F %T")
CONF_CAF_FULL_NAME = 'The Waxholm Space - mouse brain reference space, delineation 0.6.2'
REFERENCE_WIDTH = 512
REFERENCE_HEIGHT = 512
tracedSlideTemplate = """<?xml version="1.0" ?><svg baseProfile="full" height="%d" id="body"
preserveAspectRatio="none" version="1.1" viewBox="0 0 %d %d"
width="%d" xmlns="http://www.w3.org/2000/svg"
xmlns:ev="http://www.w3.org/2001/xml-events"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:bar="http://www.3dbar.org">
<title></title>
<desc></desc>
<defs></defs>
<g id='content'>
</g>
</svg>
""" % (REFERENCE_HEIGHT, REFERENCE_WIDTH, REFERENCE_HEIGHT, REFERENCE_WIDTH)
filenameTemplates = dict(traced='%d_traced_v%d.svg')
renderingProperties = {}
renderingProperties['ReferenceWidth'] = REFERENCE_WIDTH
renderingProperties['ReferenceHeight'] = REFERENCE_HEIGHT
renderingProperties['imageSize'] = (REFERENCE_WIDTH*2, REFERENCE_HEIGHT*2)
potraceProperties = {}
potraceProperties['potrace_accuracy_parameter'] ='0.5'
potraceProperties['potrace_svg_resolution_string']='300x300'
potraceProperties['potrace_width_string'] = '%dpt' % REFERENCE_WIDTH
potraceProperties['potrace_height_string'] = '%dpt' % REFERENCE_HEIGHT
tracerSettings={}
tracerSettings['DumpEachStepSVG'] = False
tracerSettings['DumpEachStepPNG'] = False
tracerSettings['DumpWrongSeed'] = True
tracerSettings['DumpVBrain'] = False
tracerSettings['DumpDirectory'] = '.'
tracerSettings['DetectUnlabelled'] = False
tracerSettings['CacheLevel'] = 5
tracerSettings['MinFiterTimesApplication'] = 3
tracerSettings['GrowDefaultBoundaryColor'] = 200
tracerSettings['RegionAlreadyTraced'] = 100
tracerSettings['UnlabelledTreshold'] = 500
tracerSettings['PoTraceConf'] = potraceProperties
tracerSettings['NewPathIdTemplate'] = 'structure%d_%s_%s'
atlasparserProperties=[
('backgroundColor', (255,255,255)),
('filenameTemplates', filenameTemplates),
('renderingProperties', renderingProperties),
('tracingProperties', tracerSettings),
('slideTemplate', tracedSlideTemplate)]
indexerProperties = dict([
('Genus', 'Mus'),
('Species', 'Mus musculus'),
('Strain', 'C57BL/6'),
('Age', '66-78 days'),
('Sex', 'male'),
('Source', 'http://software.incf.org/software/waxholm-space/waxholm-space/mbat-ready-label-volume-v0.6.2/file_download?file_field=file'),
('Language', 'En'),
('Licencing', '<a rel="license" href="http://creativecommons.org/licenses/by-nc/3.0/deed.pl" target="_blank"><img alt="Licencja Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc/3.0/80x15.png" /></a>'),
('SourceLicencing', ' CC-BY (<a href="http://software.incf.org/software/waxholm-space/home" target="_blank">see details</a>)'),
('SRSCode', 'INCF:0002'),
('ReferenceWidth', str(REFERENCE_WIDTH)),
('ReferenceHeight', str(REFERENCE_HEIGHT)),
('FilenameTemplate', filenameTemplates['traced']),
('CAFSlideOrientation', 'coronal'),
('CAFSlideUnits', 'mm'),
('CAFName', CONF_PARSER_NAME),
('CAFFullName', CONF_CAF_FULL_NAME),
('CAFComment', CONF_PARSER_COMMENT),
('CAFCreator', CONF_CONTACT_COMMENT),
('CAFCreatorEmail', CONF_CONTACT_EMAIL),
('CAFCompilationTime',CONF_CAF_COMPIL_TIME),
('CAFAxesOrientation', 'RSA')])
| gpl-3.0 | -6,340,810,528,607,758,000 | 50.017094 | 235 | 0.614843 | false |
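A quick check of how the '%'-style templates defined above expand; the slide number, version and structure name are placeholders:

print(filenameTemplates['traced'] % (42, 1))                   # '42_traced_v1.svg'
print(potraceProperties['potrace_width_string'])               # '512pt'
print(tracerSettings['NewPathIdTemplate'] % (7, 'Hpc', 'v1'))  # 'structure7_Hpc_v1'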