# -*- coding: utf-8 -*-
""" Sahana Eden Project Model
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ProjectModel",
"S3ProjectDRRModel",
"S3ProjectTaskModel",
"S3ProjectTaskHRMModel",
"S3ProjectTaskIReportModel",
"project_rheader",
"S3ProjectTaskVirtualfields"]
import datetime
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from gluon.sqlhtml import CheckboxesWidget
from ..s3 import *
try:
from lxml import etree, html
except ImportError:
import sys
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
# =============================================================================
class S3ProjectModel(S3Model):
"""
Project Model
Note: This module operates in 2 quite different modes:
- 'drr': suitable for use by multinational organisations tracking
projects at a high level
- non-drr: suitable for use by a smaller organisation tracking tasks
at a detailed level
This class contains the tables common to both uses
There are additional tables in S3ProjectDRRModel and S3ProjectTaskModel
for the other 2 use cases
There are also additional Classes for optional Link Tables
"""
names = ["project_theme",
"project_hazard",
"project_project",
"project_activity_type",
"project_activity",
"project_activity_contact",
"project_project_id",
"project_activity_id",
"project_hfa_opts",
"project_project_represent",
]
def model(self):
T = current.T
db = current.db
request = current.request
s3 = current.response.s3
settings = current.deployment_settings
currency_type = s3.currency_type
person_id = self.pr_person_id
location_id = self.gis_location_id
countries_id = self.gis_countries_id
organisation_id = self.org_organisation_id
sector_id = self.org_sector_id
human_resource_id = self.hrm_human_resource_id
s3_date_format = settings.get_L10n_date_format()
s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)
# Enable DRR extensions?
drr = settings.get_project_drr()
pca = settings.get_project_community_activity()
# Shortcuts
add_component = self.add_component
comments = s3.comments
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
meta_fields = s3.meta_fields
super_link = self.super_link
# ---------------------------------------------------------------------
# Theme
# @ToDo: Move to link table to move to S3ProjectDRRModel
#
tablename = "project_theme"
table = define_table(tablename,
Field("name",
length=128,
notnull=True,
unique=True),
Field("comments"),
format = "%(name)s",
*meta_fields())
# Field configuration?
# CRUD Strings?
# Search Method?
# Resource Configuration?
# Reusable Field
multi_theme_id = S3ReusableField("multi_theme_id",
"list:reference project_theme",
label = T("Themes"),
sortby = "name",
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_theme.id",
"%(name)s",
sort=True,
multiple=True)),
represent = lambda opt, row=None: \
self.multiref_represent(opt, "project_theme"),
default = [],
ondelete = "RESTRICT",
widget = lambda f, v: \
CheckboxesWidgetS3.widget(f, v, cols = 3))
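# Usage sketch: S3ReusableField returns a factory, so the field is
# attached to other tables by calling it, e.g. multi_theme_id() in the
# project_project definition below (keyword arguments override the
# defaults set here).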
# ---------------------------------------------------------------------
# Hazard
# @ToDo: Move to link table to move to S3ProjectDRRModel
#
tablename = "project_hazard"
table = define_table(tablename,
Field("name",
length=128,
notnull=True,
unique=True),
Field("comments"),
format="%(name)s",
*meta_fields())
# Field configuration?
# CRUD Strings?
# Search Method?
# Resource Configuration?
# Reusable Field
multi_hazard_id = S3ReusableField("multi_hazard_id",
"list:reference project_hazard",
sortby = "name",
label = T("Hazards"),
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_hazard.id",
"%(name)s",
sort=True,
multiple=True)),
represent = lambda opt, row=None: \
self.multiref_represent(opt, "project_hazard"),
ondelete = "RESTRICT",
widget = lambda f, v: CheckboxesWidgetS3.widget(f, v, cols = 3))
# ---------------------------------------------------------------------
# Project
#
# HFA
# @todo: localization?
project_hfa_opts = {
1: "HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.",
2: "HFA2: Identify, assess and monitor disaster risks and enhance early warning.",
3: "HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.",
4: "HFA4: Reduce the underlying risk factors.",
5: "HFA5: Strengthen disaster preparedness for effective response at all levels.",
}
tablename = "project_project"
table = define_table(tablename,
super_link("doc_id", "doc_entity"),
# drr uses the separate project_organisation table
organisation_id(
readable=False if drr else True,
writable=False if drr else True,
),
Field("name",
label = T("Name"),
# unique=True is required when using IS_NOT_ONE_OF like here (same
# table, no filter): it enables both automatic indexing (faster)
# and key-based de-duplication (i.e. before field validation)
unique = True,
requires = [IS_NOT_EMPTY(error_message=T("Please fill this!")),
IS_NOT_ONE_OF(db, "project_project.name")]
),
Field("code",
label = T("Code"),
readable=False,
writable=False,
),
Field("description", "text",
label = T("Description")),
# NB There is additional client-side validation for start/end date in the Controller
Field("start_date", "date",
label = T("Start date"),
represent = s3_date_represent,
requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
widget = S3DateWidget()
),
Field("end_date", "date",
label = T("End date"),
represent = s3_date_represent,
requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
widget = S3DateWidget()
),
Field("duration",
readable=False,
writable=False,
label = T("Duration")),
Field("calendar",
readable=False if drr else True,
writable=False if drr else True,
label = T("Calendar"),
requires = IS_NULL_OR(IS_URL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Calendar"),
T("URL to a Google Calendar to display on the project timeline.")))),
currency_type(
readable=False if drr else True,
writable=False if drr else True,
),
Field("budget", "double",
# DRR handles on the Organisations Tab
readable=False if drr else True,
writable=False if drr else True,
label = T("Budget")),
sector_id(
#readable=False,
#writable=False,
widget=lambda f, v: \
CheckboxesWidget.widget(f, v, cols=3)),
countries_id(
readable=drr,
writable=drr
),
multi_hazard_id(
readable=drr,
writable=drr
),
multi_theme_id(
readable=drr,
writable=drr
),
Field("hfa", "list:integer",
label = T("HFA Priorities"),
readable=drr,
writable=drr,
requires = IS_NULL_OR(IS_IN_SET(project_hfa_opts,
multiple = True)),
represent = self.hfa_opts_represent,
widget = CheckboxesWidgetS3.widget),
Field("objectives", "text",
readable = drr,
writable = drr,
label = T("Objectives")),
human_resource_id(label=T("Contact Person")),
comments(comment=DIV(_class="tooltip",
_title="%s|%s" % (T("Comments"),
T("Outcomes, Impact, Challenges")))),
format="%(name)s",
*meta_fields())
# Field configuration?
# CRUD Strings
ADD_PROJECT = T("Add Project")
crud_strings[tablename] = Storage(
title_create = ADD_PROJECT,
title_display = T("Project Details"),
title_list = T("List Projects"),
title_update = T("Edit Project"),
title_search = T("Search Projects"),
title_upload = T("Import Project List"),
subtitle_create = T("Add New Project"),
subtitle_list = T("Projects"),
subtitle_upload = T("Upload Project List"),
label_list_button = T("List Projects"),
label_create_button = ADD_PROJECT,
label_delete_button = T("Delete Project"),
msg_record_created = T("Project added"),
msg_record_modified = T("Project updated"),
msg_record_deleted = T("Project deleted"),
msg_list_empty = T("No Projects currently registered"))
# Search Method
if settings.get_ui_cluster():
sector = T("Cluster")
else:
sector = T("Sector")
if drr:
project_search = S3Search(
advanced = (
S3SearchSimpleWidget(
name = "project_search_text_advanced",
label = T("Description"),
comment = T("Search for a Project by description."),
field = [ "name",
"description",
]
),
S3SearchOptionsWidget(
name = "project_search_sector",
label = sector,
field = ["sector_id"],
cols = 4
),
S3SearchOptionsWidget(
name = "project_search_hazard",
label = T("Hazard"),
field = ["multi_hazard_id"],
cols = 4
),
S3SearchOptionsWidget(
name = "project_search_theme",
label = T("Theme"),
field = ["multi_theme_id"],
cols = 4
),
S3SearchOptionsWidget(
name = "project_search_hfa",
label = T("HFA"),
field = ["hfa"],
cols = 4
),
)
)
else:
project_search = S3Search(
advanced = (
S3SearchSimpleWidget(
name = "project_search_text_advanced",
label = T("Description"),
comment = T("Search for a Project by description."),
field = [ "name",
"description",
]
),
S3SearchOptionsWidget(
name = "project_search_sector",
label = sector,
field = ["sector_id"],
cols = 4
),
)
)
# Resource Configuration
if drr:
next = "organisation"
else:
next = "activity"
if drr:
table.virtualfields.append(S3ProjectVirtualfields())
list_fields=["id",
"name",
(T("Lead Organisation"), "organisation"),
"sector_id",
"start_date",
"end_date",
"countries_id",
"multi_hazard_id",
"multi_theme_id",
]
else:
list_fields=["id",
"name",
"organisation_id",
"start_date",
"end_date",
]
configure(tablename,
super_entity="doc_entity",
deduplicate=self.project_project_deduplicate,
onvalidation=self.project_project_onvalidation,
onaccept=self.project_project_onaccept,
create_next=URL(c="project", f="project",
args=["[id]", next]),
search_method=project_search,
list_fields=list_fields)
# Reusable Field
project_id = S3ReusableField("project_id", db.project_project,
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "project_project.id",
"%(name)s")),
represent = self.project_represent,
comment = s3_popup_comment(c="project",
f="project",
title=ADD_PROJECT,
tooltip=T("If you don't see the project in the list, you can add a new one by clicking link 'Add Project'.")),
label = T("Project"),
ondelete = "CASCADE")
# ---------------------------------------------------------------------
# Custom Methods
self.set_method(tablename,
method="timeline",
action=self.project_timeline)
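# The method is then served via the REST controller, e.g.
# (illustrative URL, assuming default routing):
#   /eden/project/project/<id>/timeline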
# Components
# Organisations
add_component("project_organisation", project_project="project_id")
# Sites
add_component("project_site", project_project="project_id")
# Activities
add_component("project_activity", project_project="project_id")
# Milestones
add_component("project_milestone", project_project="project_id")
# Beneficiaries
add_component("project_beneficiary", project_project="project_id")
# Tasks
add_component("project_task",
project_project=Storage(
link="project_task_project",
joinby="project_id",
key="task_id",
actuate="replace",
autocomplete="name",
autodelete=False))
# ---------------------------------------------------------------------
# Activity Type
#
tablename = "project_activity_type"
table = define_table(tablename,
Field("name", length=128,
notnull=True, unique=True),
format="%(name)s",
*meta_fields())
# Field configuration?
# CRUD Strings
ADD_ACTIVITY_TYPE = T("Add Activity Type")
LIST_ACTIVITY_TYPES = T("List of Activity Types")
crud_strings[tablename] = Storage(
title_create = ADD_ACTIVITY_TYPE,
title_display = T("Activity Type"),
title_list = LIST_ACTIVITY_TYPES,
title_update = T("Edit Activity Type"),
title_search = T("Search for Activity Type"),
subtitle_create = T("Add New Activity Type"),
subtitle_list = T("All Activity Types"),
label_list_button = LIST_ACTIVITY_TYPES,
label_create_button = ADD_ACTIVITY_TYPE,
msg_record_created = T("Activity Type Added"),
msg_record_modified = T("Activity Type Updated"),
msg_record_deleted = T("Activity Type Deleted"),
msg_list_empty = T("No Activity Types Found")
)
# Search Method?
# Resource Configuration?
# Reusable Fields
activity_type_id = S3ReusableField("activity_type_id",
db.project_activity_type,
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_activity_type.id",
"%(name)s",
sort=True)),
represent = lambda id, row=None: \
s3_get_db_field_value(tablename = "project_activity_type",
fieldname = "name",
look_up_value = id),
label = T("Activity Type"),
comment = s3_popup_comment(c="project",
f="activity_type",
title=ADD_ACTIVITY_TYPE,
tooltip=T("If you don't see the type in the list, you can add a new one by clicking link 'Add Activity Type'.")),
ondelete = "RESTRICT")
multi_activity_type_id = S3ReusableField("multi_activity_type_id",
"list:reference project_activity_type",
sortby = "name",
label = T("Types of Activities"),
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_activity_type.id",
"%(name)s",
sort=True,
multiple=True)),
represent = lambda opt, row=None: \
self.multiref_represent(opt,
"project_activity_type"),
#comment = skill_help,
default = [],
widget = lambda f, v: \
CheckboxesWidgetS3.widget(f, v, cols=3),
ondelete = "RESTRICT")
# ---------------------------------------------------------------------
# Project Activity
#
tablename = "project_activity"
table = define_table(tablename,
super_link("doc_id", "doc_entity"),
project_id(),
Field("name",
label = T("Short Description"),
requires=IS_NOT_EMPTY()),
location_id(
readable = drr,
writable = drr,
widget = S3LocationSelectorWidget(hide_address=True)),
multi_activity_type_id(),
Field("time_estimated", "double",
readable=False if drr else True,
writable=False if drr else True,
label = "%s (%s)" % (T("Time Estimate"),
T("hours"))),
Field("time_actual", "double",
readable=False if drr else True,
# Gets populated from constituent Tasks
writable=False,
label = "%s (%s)" % (T("Time Taken"),
T("hours"))),
comments(),
format="%(name)s",
*meta_fields())
# Field configuration
if pca:
table.name.label = T("Name") # for list_fields
table.name.readable = False
table.name.writable = False
table.name.requires = None
# CRUD Strings
if pca:
ACTIVITY = T("Community")
ACTIVITY_TOOLTIP = T("If you don't see the community in the list, you can add a new one by clicking link 'Add Community'.")
ADD_ACTIVITY = T("Add Community")
LIST_ACTIVITIES = T("List Communities")
crud_strings[tablename] = Storage(
title_create = ADD_ACTIVITY,
title_display = T("Community Details"),
title_list = LIST_ACTIVITIES,
title_update = T("Edit Community Details"),
title_search = T("Search Community"),
title_upload = T("Import Community Data"),
title_report = T("Who is doing What Where"),
subtitle_create = T("Add New Community"),
subtitle_list = T("Communities"),
subtitle_report = T("Communities"),
label_list_button = LIST_ACTIVITIES,
label_create_button = ADD_ACTIVITY,
msg_record_created = T("Community Added"),
msg_record_modified = T("Community Updated"),
msg_record_deleted = T("Community Deleted"),
msg_list_empty = T("No Communities Found")
)
else:
ACTIVITY = T("Activity")
ACTIVITY_TOOLTIP = T("If you don't see the activity in the list, you can add a new one by clicking link 'Add Activity'.")
ADD_ACTIVITY = T("Add Activity")
LIST_ACTIVITIES = T("List Activities")
crud_strings[tablename] = Storage(
title_create = ADD_ACTIVITY,
title_display = T("Activity Details"),
title_list = LIST_ACTIVITIES,
title_update = T("Edit Activity"),
title_search = T("Search Activities"),
title_upload = T("Import Activity Data"),
title_report = T("Who is doing What Where") if drr else T("Activity Report"),
subtitle_create = T("Add New Activity"),
subtitle_list = T("Activities"),
subtitle_report = T("Activities"),
label_list_button = LIST_ACTIVITIES,
label_create_button = ADD_ACTIVITY,
msg_record_created = T("Activity Added"),
msg_record_modified = T("Activity Updated"),
msg_record_deleted = T("Activity Deleted"),
msg_list_empty = T("No Activities Found")
)
# Virtual Fields
if drr:
table.virtualfields.append(S3ProjectActivityVirtualfields())
# Search Method
if pca:
project_activity_search = S3Search(field="location_id$name")
else:
project_activity_search = S3Search(field="name")
# Resource Configuration
report_fields = []
append = report_fields.append
if drr:
append((T("Organization"), "organisation"))
append((T("Project"), "project_id"))
if not pca:
append((T("Activity"), "name"))
append((T("Activity Type"), "multi_activity_type_id"))
if drr:
append((T("Sector"), "project_id$sector_id"))
append((T("Theme"), "project_id$multi_theme_id"))
append((T("Hazard"), "project_id$multi_hazard_id"))
append((T("HFA"), "project_id$hfa"))
lh = current.gis.get_location_hierarchy()
lh = [(lh[opt], opt) for opt in lh]
report_fields.extend(lh)
append("location_id")
list_fields = ["name",
"project_id",
"multi_activity_type_id",
"comments"
]
else:
append((T("Time Estimated"), "time_estimated"))
append((T("Time Actual"), "time_actual"))
list_fields = ["name",
"project_id",
"location_id",
"multi_activity_type_id",
"comments"
]
if drr:
next = "beneficiary"
else:
next = "task"
configure(tablename,
super_entity="doc_entity",
create_next=URL(c="project", f="activity",
args=["[id]", next]),
search_method=project_activity_search,
onvalidation=self.project_activity_onvalidation,
deduplicate=self.project_activity_deduplicate,
report_rows=report_fields,
report_cols=report_fields,
report_fact=report_fields,
list_fields = list_fields,
)
# Reusable Field
activity_id = S3ReusableField("activity_id", db.project_activity,
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_activity.id",
"%(name)s",
sort=True)),
represent = lambda id, row=None: \
s3_get_db_field_value(tablename = "project_activity",
fieldname = "name",
look_up_value = id),
label = ACTIVITY,
comment = s3_popup_comment(c="project",
f="activity",
title=ADD_ACTIVITY,
tooltip=ACTIVITY_TOOLTIP),
ondelete = "CASCADE")
# Components
# Contact Persons
add_component("pr_person",
project_activity=Storage(
name="contact",
link="project_activity_contact",
joinby="activity_id",
key="person_id",
actuate="hide",
autodelete=False))
# Beneficiaries
add_component("project_beneficiary",
project_activity="activity_id")
# Tasks
add_component("project_task",
project_activity=Storage(
link="project_task_activity",
joinby="activity_id",
key="task_id",
actuate="replace",
autocomplete="name",
autodelete=False))
# ---------------------------------------------------------------------
# Project Activity Contact Person
#
# @ToDo: This is a Community Contact, not an Activity Contact, so
# should be renamed when we add proper Communities
#
tablename = "project_activity_contact"
table = define_table(tablename,
activity_id(),
person_id(widget=S3AddPersonWidget(controller="pr"),
requires=IS_ADD_PERSON_WIDGET(),
comment=None),
*meta_fields())
table.virtualfields.append(S3ProjectActivityContactVirtualFields())
# CRUD Strings
ADD_CONTACT = T("Add Contact")
LIST_CONTACTS = T("List Contacts")
if pca:
LIST_OF_CONTACTS = T("Community Contacts")
else:
LIST_OF_CONTACTS = T("Contacts")
crud_strings[tablename] = Storage(
title_create = ADD_CONTACT,
title_display = T("Contact Details"),
title_list = LIST_CONTACTS,
title_update = T("Edit Contact Details"),
title_search = T("Search Contacts"),
subtitle_create = T("Add New Contact"),
subtitle_list = LIST_OF_CONTACTS,
label_list_button = LIST_CONTACTS,
label_create_button = ADD_CONTACT,
msg_record_created = T("Contact Added"),
msg_record_modified = T("Contact Updated"),
msg_record_deleted = T("Contact Deleted"),
msg_list_empty = T("No Contacts Found"))
activity_contact_search = S3Search(
advanced=(S3SearchSimpleWidget(
name = "activity_contact_search_simple",
label = T("Name"),
comment = T("You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
field = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name"
]
),
S3SearchLocationHierarchyWidget(
name="activity_contact_search_L1",
field="person_id$L1",
cols = 3,
),
S3SearchLocationHierarchyWidget(
name="activity_contact_search_L2",
field="person_id$L2",
cols = 3,
),
))
# Resource configuration
hierarchy = current.gis.get_location_hierarchy()
configure(tablename,
search_method=activity_contact_search,
list_fields=["activity_id",
(T("Project"), "activity_id$project_id"),
"person_id",
(hierarchy["L0"], "person_id$L0"),
(hierarchy["L1"], "person_id$L1"),
(hierarchy["L2"], "person_id$L2"),
(hierarchy["L3"], "person_id$L3"),
(T("Email"), "email"),
(T("Mobile Phone"), "sms")])
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
#
return dict(
project_project_id = project_id,
project_activity_id = activity_id,
project_hfa_opts = project_hfa_opts,
project_project_represent = self.project_represent,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable=False,
writable=False)
return Storage(
project_project_id = lambda: dummy("project_id"),
project_activity_id = lambda: dummy("activity_id"),
project_project_represent = lambda v, r: current.messages.NONE
)
# -------------------------------------------------------------------------
@staticmethod
def multiref_represent(opts, tablename, represent_string = "%(name)s"):
"""
Represent a list of references
@param opts: the current value or list of values
@param tablename: the referenced table
@param represent_string: format string to represent the records
"""
DEFAULT = ""
db = current.db
s3db = current.s3db
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
table = s3db.table(tablename, None)
if table is None:
return DEFAULT
if not isinstance(opts, (list, tuple)):
opts = [opts]
rows = db(table.id.belongs(opts)).select()
rstr = Storage([(str(row.id), row) for row in rows])
keys = rstr.keys()
represent = lambda o: str(o) in keys and \
represent_string % rstr[str(o)] or UNKNOWN_OPT
vals = [represent(o) for o in opts]
if len(opts) > 1:
vals = ", ".join(vals)
else:
vals = len(vals) and vals[0] or DEFAULT
return vals
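# Worked example (hypothetical records): given project_theme rows
# (id=1, name="Health") and (id=2, name="Shelter"),
# multiref_represent([1, 2], "project_theme") returns "Health, Shelter";
# a single unknown id falls back to UNKNOWN_OPT.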
# ---------------------------------------------------------------------
@staticmethod
def project_represent(id, row=None, show_link=True):
""" FK representation """
db = current.db
NONE = current.messages.NONE
if id:
val = (id and [db.project_project[id].name] or [NONE])[0]
if not show_link:
return val
return A(val, _href = URL(c="project",
f="project",
args=[id]))
else:
return NONE
# ---------------------------------------------------------------------
@staticmethod
def project_project_onvalidation(form):
""" Form validation """
if not form.vars.code and "name" in form.vars:
# Populate code from name
form.vars.code = form.vars.name
return
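# e.g. creating a project named "Flood Relief" (hypothetical) with the
# code field left blank stores code="Flood Relief" as well.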
# ---------------------------------------------------------------------
@staticmethod
def project_project_onaccept(form):
""" Set the project to be owned by the customer """
db = current.db
s3db = current.s3db
if "organisation_id" in form.vars:
# Set Project to be owned by this Customer
organisation_id = form.vars.organisation_id
otable = s3db.org_organisation
query = (otable.id == organisation_id)
role = db(query).select(otable.owned_by_organisation,
limitby=(0, 1)).first()
if role:
table = s3db.project_project
query = (table.id == form.vars.id)
db(query).update(owned_by_organisation=role.owned_by_organisation)
return
# ---------------------------------------------------------------------
@staticmethod
def project_project_deduplicate(item):
""" Import item de-duplication """
if item.id:
return
if item.tablename == "project_project" and \
"name" in item.data:
# Match project by name (all-lowercase)
table = item.table
name = item.data.name
try:
query = (table.name.lower() == name.lower())
except AttributeError, exception:
s3_debug("project_deduplicate", exception.message)
else:
duplicate = current.db(query).select(table.id,
table.name,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.data.name = duplicate.name
item.method = item.METHOD.UPDATE
return
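# e.g. importing a project named "flood relief" (hypothetical) when
# "Flood Relief" already exists resolves to an UPDATE of the existing
# record rather than a duplicate INSERT.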
# -------------------------------------------------------------------------
@staticmethod
def project_timeline(r, **attr):
"""
Display the project on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
Currently this just displays a Google Calendar
@ToDo: Add Milestones
@ToDo: Filters for different 'layers'
@ToDo: export milestones/tasks as .ics
"""
if r.representation == "html" and r.name == "project":
T = current.T
request = current.request
response = current.response
session = current.session
s3 = response.s3
calendar = r.record.calendar
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % request.application)
# Pass vars to our JS code
s3.js_global.append("S3.timeline.calendar = '%s';" % calendar)
# Add our control script
if session.s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % request.application)
# Create the DIV
item = DIV(_id="s3timeline", _style="height: 400px; border: 1px solid #aaa; font-family: Trebuchet MS, sans-serif; font-size: 85%;")
output = dict(item=item)
output["title"] = T("Project Calendar")
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
response.view = "timeline.html"
return output
else:
raise HTTP(501, BADMETHOD)
# ---------------------------------------------------------------------
@staticmethod
def hfa_opts_represent(opt, row=None):
""" Option representation """
s3 = current.response.s3
NONE = current.messages.NONE
project_hfa_opts = s3.project_hfa_opts
opts = opt
if isinstance(opt, int):
opts = [opt]
elif not isinstance(opt, (list, tuple)):
return NONE
vals = [project_hfa_opts.get(o, NONE) for o in opts]
return ", ".join(vals)
# ---------------------------------------------------------------------
@staticmethod
def project_activity_onvalidation(form):
""" """
pca = current.deployment_settings.get_project_community_activity()
if pca:
location_id = form.vars.location_id
if location_id:
db = current.db
s3db = current.s3db
table = s3db.gis_location
query = (table.id == form.vars.location_id)
row = db(query).select(table.parent,
limitby=(0, 1)).first()
if row and row.parent:
query = (table.id == row.parent)
parent = db(query).select(table.name,
limitby=(0, 1)).first()
if parent:
form.vars.name = parent.name
return
# ---------------------------------------------------------------------
@staticmethod
def project_activity_deduplicate(item):
""" Import item de-duplication """
db = current.db
settings = current.deployment_settings
pca = settings.get_project_community_activity()
if item.id:
return
if item.tablename != "project_activity":
return
table = item.table
duplicate = None
if pca:
if "project_id" in item.data and \
"location_id" in item.data:
# Match activity by project_id and location_id
project_id = item.data.project_id
location_id = item.data.location_id
query = (table.project_id == project_id) & \
(table.location_id == location_id)
duplicate = db(query).select(table.id,
limitby=(0, 1)).first()
else:
if "project_id" in item.data and "name" in item.data:
# Match activity by project_id and name
project_id = item.data.project_id
name = item.data.name
query = (table.project_id == project_id) & \
(table.name == name)
duplicate = db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# =============================================================================
class S3ProjectDRRModel(S3Model):
"""
Project DRR Model
This class contains the tables suitable for use by multinational
organisations tracking projects at a high level
"""
names = ["project_organisation",
#"project_site",
"project_beneficiary_type",
"project_beneficiary",
"project_organisation_roles",
"project_organisation_lead_role"
]
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
currency_type = s3.currency_type
#location_id = self.gis_location_id
organisation_id = self.org_organisation_id
project_id = self.project_project_id
activity_id = self.project_activity_id
#multi_activity_type_id = self.project_multi_activity_type_id
pca = current.deployment_settings.get_project_community_activity()
messages = current.messages
NONE = messages.NONE
# ---------------------------------------------------------------------
# Project Organisation
#
project_organisation_roles = {
1: T("Lead Implementer"), # T("Host National Society")
2: T("Partner"), # T("Partner National Society")
3: T("Donor"),
4: T("Customer"), # T("Beneficiary")?
}
project_organisation_lead_role = 1
organisation_help = T("Add all organizations which are involved in different roles in this project")
tablename = "project_organisation"
table = self.define_table(tablename,
project_id(),
organisation_id(comment = DIV(A(T("Add Organization"),
_class="colorbox",
_href=URL(c="org", f="organisation",
args="create",
vars=dict(format="popup")),
_target="top",
_title=T("Add Organization")),
DIV(_class="tooltip",
_title="%s|%s" % (T("Organization"),
organisation_help))
)
),
Field("role", "integer",
requires = IS_NULL_OR(IS_IN_SET(project_organisation_roles)),
represent = lambda opt, row=None: \
project_organisation_roles.get(opt, NONE)),
Field("amount", "double",
requires = IS_FLOAT_AMOUNT(),
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
widget = IS_FLOAT_AMOUNT.widget,
label = T("Funds Contributed by this Organization")),
currency_type(),
s3.comments(),
*s3.meta_fields())
# Field configuration?
# CRUD Strings
ADD_PROJECT_ORG = T("Add Organization to Project")
LIST_PROJECT_ORG = T("List Project Organizations")
s3.crud_strings[tablename] = Storage(
title_create = ADD_PROJECT_ORG,
title_display = T("Project Organization Details"),
title_list = LIST_PROJECT_ORG,
title_update = T("Edit Project Organization"),
title_search = T("Search Project Organizations"),
title_upload = T("Import Project Organizations"),
title_report = T("Funding Report"),
subtitle_create = T("Add Organization to Project"),
subtitle_list = T("Project Organizations"),
subtitle_report = T("Funding"),
label_list_button = LIST_PROJECT_ORG,
label_create_button = ADD_PROJECT_ORG,
label_delete_button = T("Remove Organization from Project"),
msg_record_created = T("Organization added to Project"),
msg_record_modified = T("Project Organization updated"),
msg_record_deleted = T("Organization removed from Project"),
msg_list_empty = T("No Organizations for this Project"))
# Search Method?
# Resource Configuration
self.configure(tablename,
deduplicate=self.project_organisation_deduplicate,
onvalidation=self.project_organisation_onvalidation)
# Reusable Field
# Components
# ---------------------------------------------------------------------
# Project Site
# @ToDo: Deprecated?
#
# tablename = "project_site"
# table = self.define_table(tablename,
# self.super_link("site_id", "org_site"),
# project_id(),
# Field("name", notnull=True,
# length=64, # Mayon Compatibility
# label = T("Name")),
# location_id(),
# multi_activity_type_id(),
# *(s3.address_fields() + s3.meta_fields()))
# Field configuration
# CRUD Strings
# Search Method
# Resource Configuration
# Reusable Field
# CRUD strings
# ADD_PROJECT_SITE = T("Add Project Site")
# LIST_PROJECT_SITE = T("List Project Sites")
# s3.crud_strings[tablename] = Storage(
# title_create = ADD_PROJECT_SITE,
# title_display = T("Project Site Details"),
# title_list = LIST_PROJECT_SITE,
# title_update = T("Edit Project Site"),
# title_search = T("Search Project Sites"),
# title_upload = T("Import Project Sites"),
# subtitle_create = T("Add New Project Site"),
# subtitle_list = T("Sites"),
# label_list_button = LIST_PROJECT_SITE,
# label_create_button = ADD_PROJECT_SITE,
# label_delete_button = T("Delete Project Site"),
# msg_record_created = T("Project Site added"),
# msg_record_modified = T("Project Site updated"),
# msg_record_deleted = T("Project Site deleted"),
# msg_list_empty = T("No Project Sites currently registered"))
# project_site_id = S3ReusableField("project_site_id", db.project_site,
# #sortby="default/indexname",
# requires = IS_NULL_OR(IS_ONE_OF(db, "project_site.id", "%(name)s")),
# represent = lambda id, row=None: \
# (id and [db(db.project_site.id == id).select(db.project_site.name,
# limitby=(0, 1)).first().name] or [NONE])[0],
# label = T("Project Site"),
# comment = s3_popup_comment(c="project",
# f="site",
# title=ADD_PROJECT_SITE,
# tooltip=T("If you don't see the site in the list, you can add a new one by clicking link 'Add Project Site'.")),,
# ondelete = "CASCADE")
# self.configure(tablename,
# super_entity="org_site",
# onvalidation=s3.address_onvalidation)
# ---------------------------------------------------------------------
# Project Beneficiary Type
#
tablename = "project_beneficiary_type"
table = self.define_table(tablename,
Field("name",
length=128,
unique=True,
requires = IS_NOT_IN_DB(db,
"project_beneficiary_type.name")),
*s3.meta_fields())
# Field configuration?
# CRUD Strings
ADD_BNF_TYPE = T("Add Beneficiary Type")
LIST_BNF_TYPE = T("List Beneficiary Types")
s3.crud_strings[tablename] = Storage(
title_create = ADD_BNF_TYPE,
title_display = T("Beneficiary Type"),
title_list = LIST_BNF_TYPE,
title_update = T("Edit Beneficiary Type"),
title_search = T("Search Beneficiary Types"),
subtitle_create = T("Add New Beneficiary Type"),
subtitle_list = T("Beneficiary Types"),
label_list_button = LIST_BNF_TYPE,
label_create_button = ADD_BNF_TYPE,
msg_record_created = T("Beneficiary Type Added"),
msg_record_modified = T("Beneficiary Type Updated"),
msg_record_deleted = T("Beneficiary Type Deleted"),
msg_list_empty = T("No Beneficiary Types Found")
)
# Search Method?
# Resource Configuration?
# Reusable Field
beneficiary_type_id = S3ReusableField("beneficiary_type_id", db.project_beneficiary_type,
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_beneficiary_type.id",
self.beneficiary_type_represent)),
represent = self.beneficiary_type_represent,
label = T("Beneficiary Type"),
comment = s3_popup_comment(c="project",
f="beneficiary_type",
title=ADD_BNF_TYPE,
tooltip=T("Please record Beneficiary according to the reporting needs of your project")),
ondelete = "CASCADE")
# ---------------------------------------------------------------------
# Project Beneficiary
#
tablename = "project_beneficiary"
table = self.define_table(tablename,
# populated automatically
project_id(readable=False,
writable=False),
activity_id(comment=None),
beneficiary_type_id(empty=False),
Field("number", "integer",
label = T("Quantity"),
requires = IS_INT_IN_RANGE(0, 99999999)),
s3.comments(),
*s3.meta_fields())
# Field configuration
if pca:
table.activity_id.label = T("Community")
# CRUD Strings
ADD_BNF = T("Add Beneficiaries")
LIST_BNF = T("List Beneficiaries")
s3.crud_strings[tablename] = Storage(
title_create = ADD_BNF,
title_display = T("Beneficiaries Details"),
title_list = LIST_BNF,
title_update = T("Edit Beneficiaries"),
title_search = T("Search Beneficiaries"),
title_report = T("Beneficiary Report"),
subtitle_create = T("Add New Beneficiaries"),
subtitle_list = T("Beneficiaries"),
label_list_button = LIST_BNF,
label_create_button = ADD_BNF,
msg_record_created = T("Beneficiaries Added"),
msg_record_modified = T("Beneficiaries Updated"),
msg_record_deleted = T("Beneficiaries Deleted"),
msg_list_empty = T("No Beneficiaries Found")
)
table.virtualfields.append(S3ProjectBeneficiaryVirtualfields())
# Search Method?
# Resource Configuration
report_fields=[
"activity_id",
(T("Beneficiary Type"), "beneficiary_type_id"),
"project_id",
"project_id$multi_hazard_id",
"project_id$multi_theme_id",
"activity_id$multi_activity_type_id"
]
lh = current.gis.get_location_hierarchy()
lh = [(lh[opt], opt) for opt in lh]
report_fields.extend(lh)
self.configure(tablename,
onaccept=self.project_beneficiary_onaccept,
deduplicate=self.project_beneficiary_deduplicate,
report_filter=[
S3SearchOptionsWidget(
field=["project_id"],
name="project",
label=T("Project")
),
S3SearchOptionsWidget(
field=["beneficiary_type_id"],
name="beneficiary_type_id",
label=T("Beneficiary Type")
),
# Can't search by VirtualFields currently
# S3SearchLocationHierarchyWidget(
# name="beneficiary_search_L1",
# field="activity_id$L1",
# cols = 3,
# ),
],
report_rows=report_fields,
report_cols=report_fields,
report_fact=["number"],
report_method=["sum"])
# Reusable Field
beneficiary_id = S3ReusableField("beneficiary_id", db.project_beneficiary,
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db,
"project_beneficiary.id",
"%(type)s",
sort=True)),
represent = lambda id, row=None: \
s3_get_db_field_value(tablename = "project_beneficiary",
fieldname = "type",
look_up_value = id),
label = T("Beneficiaries"),
comment = s3_popup_comment(c="project",
f="beneficiary",
title=ADD_BNF,
tooltip=T("If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiary'.")),
ondelete = "SET NULL")
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
#
return dict(
project_organisation_roles = project_organisation_roles,
project_organisation_lead_role = project_organisation_lead_role,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable=False,
writable=False)
return Storage(
)
# ---------------------------------------------------------------------
@staticmethod
def project_organisation_onvalidation(form, lead_role=None):
""" Form validation """
db = current.db
s3db = current.s3db
s3 = current.response.s3
otable = s3db.project_organisation
if lead_role is None:
lead_role = s3.project_organisation_lead_role
project_id = form.vars.project_id
organisation_id = form.vars.organisation_id
if str(form.vars.role) == str(lead_role) and project_id:
query = (otable.deleted != True) & \
(otable.project_id == project_id) & \
(otable.role == lead_role) & \
(otable.organisation_id != organisation_id)
row = db(query).select(otable.id, limitby=(0, 1)).first()
if row:
form.errors.role = T("Lead Implementer for this project is already set, please choose another role.")
return
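# e.g. if one organisation is already recorded with role=1 (Lead
# Implementer) for a project, validating a different organisation with
# role=1 for the same project sets form.errors.role and blocks the
# submit.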
# ---------------------------------------------------------------------
@staticmethod
def project_organisation_deduplicate(item):
""" Import item de-duplication """
db = current.db
if item.id:
return
if item.tablename == "project_organisation" and \
"project_id" in item.data and \
"organisation_id" in item.data:
# Match project by org_id and project_id
table = item.table
project_id = item.data.project_id
organisation_id = item.data.organisation_id
query = (table.project_id == project_id) & \
(table.organisation_id == organisation_id)
duplicate = db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# ---------------------------------------------------------------------
@staticmethod
def beneficiary_type_represent(type_id, row=None):
""" FK representation """
db = current.db
s3db = current.s3db
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
if isinstance(type_id, Row):
if "name" in type_id:
return type_id.name
elif "id" in type_id:
type_id = type_id.id
else:
return UNKNOWN_OPT
bnf_type = s3db.project_beneficiary_type
query = bnf_type.id == type_id
row = db(query).select(bnf_type.name, limitby=(0, 1)).first()
if row:
return row.name
else:
return UNKNOWN_OPT
# ---------------------------------------------------------------------
@staticmethod
def project_beneficiary_onaccept(form):
""" Record creation post-processing """
db = current.db
s3db = current.s3db
btable = s3db.project_beneficiary
atable = s3db.project_activity
record_id = form.vars.id
query = (btable.id == record_id) & \
(atable.id == btable.activity_id)
activity = db(query).select(atable.project_id,
limitby=(0, 1)).first()
if activity:
db(btable.id == record_id).update(project_id=activity.project_id)
return
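# e.g. creating a beneficiary record against an activity belonging to
# project id=42 (hypothetical) back-fills project_id=42 on the
# beneficiary row.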
# ---------------------------------------------------------------------
@staticmethod
def project_beneficiary_deduplicate(item):
""" Import item de-duplication """
db = current.db
if item.id:
return
if item.tablename == "project_beneficiary" and \
"beneficiary_type_id" in item.data and \
"activity_id" in item.data:
# Match beneficiary by type and activity_id
table = item.table
beneficiary_type_id = item.data.beneficiary_type_id
activity_id = item.data.activity_id
query = (table.beneficiary_type_id == beneficiary_type_id) & \
(table.activity_id == activity_id)
duplicate = db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# =============================================================================
class S3ProjectTaskModel(S3Model):
"""
Project Task Model
This class holds the tables used for a smaller Organisation to manage
their Tasks in detail.
"""
names = ["project_milestone",
"project_task",
"project_time",
"project_comment",
"project_task_project",
"project_task_activity",
"project_task_id",
]
def model(self):
db = current.db
T = current.T
auth = current.auth
request = current.request
s3 = current.response.s3
settings = current.deployment_settings
person_id = self.pr_person_id
location_id = self.gis_location_id
site_id = self.org_site_id
project_id = self.project_project_id
activity_id = self.project_activity_id
s3_date_format = settings.get_L10n_date_format()
s3_utc_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)
messages = current.messages
NONE = messages.NONE
UNKNOWN_OPT = messages.UNKNOWN_OPT
# Shortcuts
add_component = self.add_component
comments = s3.comments
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
meta_fields = s3.meta_fields
# ---------------------------------------------------------------------
# Project Milestone
#
tablename = "project_milestone"
table = define_table(tablename,
# Stage Report
super_link("doc_id", "doc_entity"),
project_id(),
Field("name",
label = T("Short Description"),
requires=IS_NOT_EMPTY()),
Field("date", "date",
label = T("Date"),
represent = s3_date_represent,
requires = IS_NULL_OR(IS_DATE(format = s3_date_format))),
comments(),
format="%(name)s",
*meta_fields())
# CRUD Strings
ADD_MILESTONE = T("Add Milestone")
crud_strings[tablename] = Storage(
title_create = ADD_MILESTONE,
title_display = T("Milestone Details"),
title_list = T("List Milestones"),
title_update = T("Edit Milestone"),
title_search = T("Search Milestones"),
title_upload = T("Import Milestone Data"),
subtitle_create = T("Add New Milestone"),
subtitle_list = T("Milestones"),
subtitle_report = T("Milestones"),
label_list_button = T("List Milestones"),
label_create_button = ADD_MILESTONE,
msg_record_created = T("Milestone Added"),
msg_record_modified = T("Milestone Updated"),
msg_record_deleted = T("Milestone Deleted"),
msg_list_empty = T("No Milestones Found")
)
# Reusable Field
milestone_id = S3ReusableField("milestone_id", db.project_milestone,
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "project_milestone.id",
"%(name)s")),
represent = self.milestone_represent,
comment = s3_popup_comment(c="project",
f="milestone",
title=ADD_MILESTONE,
tooltip=T("A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.")),
label = T("Milestone"),
ondelete = "RESTRICT")
# ---------------------------------------------------------------------
# Tasks
#
# Tasks can be linked to Activities or directly to Projects
# - they can also be used by the Event/Scenario modules
#
# @ToDo: Recurring tasks
#
# These Statuses can be customised, although doing so limits the ability to do synchronization
# - best bet is simply to comment statuses that you don't wish to use
#
project_task_status_opts = {
1: T("Draft"),
2: T("New"),
3: T("Assigned"),
4: T("Feedback"),
5: T("Blocked"),
6: T("On Hold"),
7: T("Cancelled"),
8: T("Duplicate"),
9: T("Ready"),
10: T("Verified"),
11: T("Reopened"),
12: T("Completed"),
#99: T("unspecified")
}
project_task_active_statuses = [2, 3, 4, 11]
project_task_priority_opts = {
1:T("Urgent"),
2:T("High"),
3:T("Normal"),
4:T("Low")
}
#staff = auth.s3_has_role("STAFF")
staff = True
milestones = settings.get_project_milestones()
tablename = "project_task"
table = define_table(tablename,
super_link("doc_id", "doc_entity"),
Field("template", "boolean",
default=False,
readable=False,
writable=False),
Field("name",
label = T("Short Description"),
length=100,
notnull=True,
requires = IS_LENGTH(maxsize=100, minsize=1)),
Field("description", "text",
label = T("Detailed Description/URL"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Detailed Description/URL"),
T("Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.")))),
site_id,
location_id(label=T("Deployment Location"),
readable=False,
writable=False
),
Field("source",
label = T("Source")),
Field("priority", "integer",
requires = IS_IN_SET(project_task_priority_opts,
zero=None),
default = 3,
label = T("Priority"),
represent = lambda opt, row=None: \
project_task_priority_opts.get(opt,
UNKNOWN_OPT)),
# Could be a Person, Team or Organisation
super_link("pe_id", "pr_pentity",
readable = staff,
writable = staff,
label = T("Assigned to"),
filterby = "instance_type",
filter_opts = ["pr_person", "pr_group", "org_organisation"],
represent = lambda id, row=None: \
project_assignee_represent(id),
# @ToDo: Widget
#widget = S3PentityWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Assigned to"),
# T("Enter some characters to bring up a list of possible matches")))
),
Field("date_due", "datetime",
label = T("Date Due"),
readable = staff,
writable = staff,
requires = [IS_EMPTY_OR(
IS_UTC_DATETIME_IN_RANGE(
minimum=request.utcnow - datetime.timedelta(days=1),
error_message="%s %%(min)s!" %
T("Enter a valid future date")))],
widget = S3DateTimeWidget(past=0,
future=8760), # Hours, so 1 year
represent = s3_date_represent),
milestone_id(
readable = milestones and staff,
writable = milestones and staff,
),
Field("time_estimated", "double",
readable = staff,
writable = staff,
represent = lambda v: v or "",
label = "%s (%s)" % (T("Time Estimate"),
T("hours"))),
Field("time_actual", "double",
readable = staff,
# This comes from the Time component
writable=False,
label = "%s (%s)" % (T("Time Taken"),
T("hours"))),
Field("status", "integer",
requires = IS_IN_SET(project_task_status_opts,
zero=None),
default = 2,
readable = staff,
writable = staff,
label = T("Status"),
represent = lambda opt, row=None: \
project_task_status_opts.get(opt,
UNKNOWN_OPT)),
*meta_fields())
# Field configurations
# Uncomment these if you want a Site associated with Tasks
#table.site_id.readable = table.site_id.writable = True
#table.site_id.label = T("Check-in at Facility") # T("Managing Office")
table.created_on.represent = s3_date_represent
# CRUD Strings
ADD_TASK = T("Add Task")
LIST_TASKS = T("List Tasks")
crud_strings[tablename] = Storage(
title_create = ADD_TASK,
title_display = T("Task Details"),
title_list = LIST_TASKS,
title_update = T("Edit Task"),
title_search = T("Search Tasks"),
title_upload = T("Import Tasks"),
subtitle_create = T("Add New Task"),
subtitle_list = T("Tasks"),
label_list_button = LIST_TASKS,
label_create_button = ADD_TASK,
msg_record_created = T("Task added"),
msg_record_modified = T("Task updated"),
msg_record_deleted = T("Task deleted"),
msg_list_empty = T("No tasks currently registered"))
# Virtual Fields
# Do just for the common report
table.virtualfields.append(S3ProjectTaskVirtualfields())
# Search Method
task_search = S3Search(
advanced = (
# Virtual fields not supported by Search Widgets yet
#S3SearchOptionsWidget(
#name = "task_search_project",
#label = T("Project"),
#field = ["project"],
#cols = 3
#),
# This syntax is not supported by Search Widgets yet
#S3SearchOptionsWidget(
# name = "task_search_project",
# label = T("Project"),
# field = ["task.task_id:project_task:project_id$name"],
# cols = 3
#),
# Virtual fields not supported by Search Widgets yet
#S3SearchOptionsWidget(
#name = "task_search_activity",
#label = T("Activity"),
#field = ["activity"],
#cols = 3
#),
S3SearchOptionsWidget(
name = "task_search_priority",
label = T("Priority"),
field = ["priority"],
cols = 4
),
S3SearchSimpleWidget(
name = "task_search_text_advanced",
label = T("Description"),
comment = T("Search for a Task by description."),
field = [ "name",
"description",
]
),
S3SearchOptionsWidget(
name = "task_search_created_by",
label = T("Created By"),
field = ["created_by"],
cols = 4
),
S3SearchOptionsWidget(
name = "task_search_assignee",
label = T("Assigned To"),
field = ["pe_id"],
cols = 4
),
S3SearchMinMaxWidget(
name="task_search_date_created",
method="range",
label=T("Date Created"),
field=["created_on"]
),
S3SearchMinMaxWidget(
name="task_search_date_due",
method="range",
label=T("Date Due"),
field=["date_due"]
),
S3SearchOptionsWidget(
name = "task_search_status",
label = T("Status"),
field = ["status"],
cols = 4
),
)
)
list_fields=["id",
"priority",
(T("ID"), "task_id"),
"name",
"pe_id",
"date_due",
"time_estimated",
"created_on",
"status",
#"site_id"
]
if settings.get_project_milestones():
list_fields.insert(5, "milestone_id")
# Resource Configuration
configure(tablename,
super_entity="doc_entity",
copyable=True,
orderby="project_task.priority",
onvalidation=self.task_onvalidation,
create_next=URL(f="task", args=["[id]"]),
create_onaccept=self.task_create_onaccept,
update_onaccept=self.task_update_onaccept,
search_method=task_search,
list_fields=list_fields,
extra="description")
# Reusable field
task_id = S3ReusableField("task_id", db.project_task,
label = T("Task"),
sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "project_task.id", "%(name)s")),
represent = lambda id, row=None: \
(id and [db.project_task[id].name] or [NONE])[0],
comment = s3_popup_comment(c="project",
f="task",
title=ADD_TASK,
tooltip=T("A task is a piece of work that an individual or team can do in 1-2 days.")),
ondelete = "CASCADE")
# ---------------------------------------------------------------------
# Custom Methods
self.set_method("project_task",
method="dispatch",
action=self.task_dispatch)
# Components
# Projects (for imports)
add_component("project_project",
project_task=Storage(
link="project_task_project",
joinby="task_id",
key="project_id",
actuate="embed",
autocomplete="name",
autodelete=False))
# Activities (for imports)
add_component("project_activity",
project_task=Storage(
link="project_task_activity",
joinby="task_id",
key="activity_id",
actuate="embed",
autocomplete="name",
autodelete=False))
# Job roles
add_component("hrm_job_role",
project_task=Storage(
link="project_task_job_role",
joinby="task_id",
key="job_role_id",
actuate="embed",
autocomplete="name",
autodelete=False))
# Human Resources (assigned)
add_component("hrm_human_resource",
project_task=Storage(
link="project_task_human_resource",
joinby="task_id",
key="human_resource_id",
actuate="embed",
autocomplete="name",
autodelete=False))
# Requests
add_component("req_req",
project_task=Storage(
link="project_task_req",
joinby="task_id",
key="req_id",
actuate="embed",
autocomplete="request_number",
autodelete=False))
# Time
add_component("project_time", project_task="task_id")
# Comments (for imports)
add_component("project_comment", project_task="task_id")
# ---------------------------------------------------------------------
# Link Tasks <-> Projects
#
tablename = "project_task_project"
table = define_table(tablename,
task_id(),
project_id(),
*meta_fields())
# Field configuration
# CRUD Strings
# Search Method
# Resource Configuration
# Reusable Field
# ---------------------------------------------------------------------
# Link task <-> activity
#
# Tasks <> Activities
tablename = "project_task_activity"
table = define_table(tablename,
task_id(),
activity_id(),
*meta_fields())
# Field configuration
# CRUD Strings
# Search Method
# Resource Configuration
# Reusable Field
# ---------------------------------------------------------------------
# Project comment
#
# @ToDo: Attachments?
#
# Parent field allows us to:
# * easily filter for top-level threads
# * easily filter for next level of threading
# * hook a new reply into the correct location in the hierarchy
#
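# e.g. (illustrative query) the top-level threads for a task:
#   db((db.project_comment.task_id == task_id) &
#      (db.project_comment.parent == None)).select()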
tablename = "project_comment"
table = define_table(tablename,
Field("parent", "reference project_comment",
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"project_comment.id")),
readable=False),
#project_id(),
#activity_id(),
task_id(),
Field("body", "text",
notnull=True,
label = T("Comment")),
*meta_fields())
# Field configuration?
# CRUD Strings?
# Search Method?
# Resource Configuration
configure(tablename,
list_fields=["id",
"task_id",
"created_by",
"modified_on"
])
# Reusable Field?
# ---------------------------------------------------------------------
# Project Time
# - used to Log hours spent on a Task
#
tablename = "project_time"
table = define_table(tablename,
task_id(),
person_id(default=auth.s3_logged_in_person()),
Field("date", "datetime",
label = T("Date"),
requires = IS_EMPTY_OR(IS_UTC_DATETIME()),
represent = s3_utc_represent,
widget = S3DateTimeWidget(past=8760, # Hours, so 1 year
future=0),
default = request.utcnow),
Field("hours", "double",
label = "%s (%s)" % (T("Time"),
T("hours"))),
comments(),
format="%(comments)s",
*meta_fields())
# CRUD Strings
ADD_TIME = T("Log Time Spent")
crud_strings[tablename] = Storage(
title_create = ADD_TIME,
title_display = T("Logged Time Details"),
title_list = T("List Logged Time"),
title_update = T("Edit Logged Time"),
title_search = T("Search Logged Time"),
title_upload = T("Import Logged Time data"),
title_report = T("Last Week's Work"),
subtitle_create = T("Log New Time"),
subtitle_list = T("Logged Time"),
subtitle_report = T("Logged Time"),
label_list_button = T("List Logged Time"),
label_create_button = ADD_TIME,
msg_record_created = T("Time Logged"),
msg_record_modified = T("Time Log Updated"),
msg_record_deleted = T("Time Log Deleted"),
msg_list_empty = T("No Time Logged")
)
if "rows" in request.get_vars and request.get_vars.rows == "project":
s3.crud_strings[tablename].title_report = T("Project Time Report")
# Virtual Fields
table.virtualfields.append(S3ProjectTimeVirtualfields())
configure(tablename,
onaccept=self.time_onaccept,
list_fields=["id",
(T("Project"), "project"),
"task_id",
"person_id",
"date",
"hours",
"comments",
])
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
#
return dict(
project_task_id = task_id,
project_task_active_statuses = project_task_active_statuses,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable=False,
writable=False)
return Storage(
project_task_id = lambda: dummy("task_id")
)
# ---------------------------------------------------------------------
@staticmethod
def milestone_represent(id, row=None):
""" FK representation """
db = current.db
NONE = current.messages.NONE
if id:
val = (id and [db.project_milestone[id].name] or [NONE])[0]
return val
else:
return NONE
# -------------------------------------------------------------------------
@staticmethod
def task_onvalidation(form):
""" Task form validation """
T = current.T
vars = form.vars
if str(vars.status) == "3" and not vars.pe_id:
form.errors.pe_id = \
T("Status 'assigned' requires the %(fieldname)s to not be blank") % \
dict(fieldname=current.db.project_task.pe_id.label)
elif vars.pe_id and str(vars.status) == "2":
# Set the Status to 'Assigned' if left at default 'New'
vars.status = 3
return
# -------------------------------------------------------------------------
@staticmethod
def task_update_onaccept(form):
"""
* Process the additional fields: Project/Activity
* Log changes as comments
* If the task is assigned to someone then notify them
"""
db = current.db
s3db = current.s3db
s3mgr = current.manager
vars = form.vars
id = vars.id
record = form.record
table = s3db.project_task
changed = {}
for var in vars:
vvar = vars[var]
rvar = record[var]
if vvar != rvar:
if table[var].type == "integer":
vvar = int(vvar)
if vvar == rvar:
continue
represent = table[var].represent
if not represent:
represent = lambda o: o
if rvar:
changed[var] = "%s changed from %s to %s" % \
(table[var].label, represent(rvar), represent(vvar))
else:
changed[var] = "%s changed to %s" % \
(table[var].label, represent(vvar))
if changed:
table = s3db.project_comment
text = s3_user_represent(current.auth.user.id)
for var in changed:
text = "%s\n%s" % (text, changed[var])
table.insert(task_id=id,
body=text)
vars = current.request.post_vars
if "project_id" in vars:
ptable = s3db.project_project
ltable = s3db.project_task_project
filter = (ltable.task_id == id)
if vars.project_id:
# Create the link to the Project
#master = s3mgr.define_resource("project", "task", id=id)
#record = db(ptable.id == vars.project_id).select(ptable.id,
# limitby=(0, 1)).first()
#link = s3mgr.define_resource("project", "task_project")
#link_id = link.update_link(master, record)
query = (ltable.task_id == id) & \
(ltable.project_id == vars.project_id)
record = db(query).select(ltable.id, limitby=(0, 1)).first()
if record:
link_id = record.id
else:
link_id = ltable.insert(task_id = id,
project_id = vars.project_id)
filter = filter & (ltable.id != link_id)
# Remove any other links
links = s3mgr.define_resource("project", "task_project",
filter=filter)
ondelete = s3mgr.model.get_config("project_task_project",
"ondelete")
links.delete(ondelete=ondelete)
if "activity_id" in vars:
atable = s3db.project_activity
ltable = s3db.project_task_activity
filter = (ltable.task_id == id)
if vars.activity_id:
# Create the link to the Activity
#master = s3mgr.define_resource("project", "task", id=id)
#record = db(atable.id == vars.activity_id).select(atable.id,
# limitby=(0, 1)).first()
#link = s3mgr.define_resource("project", "task_activity")
#link_id = link.update_link(master, record)
query = (ltable.task_id == id) & \
(ltable.activity_id == vars.activity_id)
record = db(query).select(ltable.id, limitby=(0, 1)).first()
if record:
link_id = record.id
else:
link_id = ltable.insert(task_id = id,
activity_id = vars.activity_id)
filter = filter & (ltable.id != link_id)
# Remove any other links
links = s3mgr.define_resource("project", "task_activity",
filter=filter)
ondelete = s3mgr.model.get_config("project_task_activity",
"ondelete")
links.delete(ondelete=ondelete)
# Notify Assignee
task_notify(form)
return
# -------------------------------------------------------------------------
@staticmethod
def task_create_onaccept(form):
"""
When a Task is created:
* Process the additional fields: Project/Activity
* create associated Link Tables
* ensure that it is owned by the Project Customer
* notify assignee
"""
db = current.db
s3db = current.s3db
session = current.session
vars = form.vars
id = vars.id
_vars = current.request.post_vars
if session.s3.event:
# Create a link between this Task & the active Event
etable = s3db.event_task
etable.insert(event_id=session.s3.event,
task_id=id)
vars = current.request.post_vars
table = s3db.project_task
if "project_id" in vars:
# Create Link to Project
ltable = s3db.project_task_project
if vars.project_id:
link_id = ltable.insert(task_id = id,
project_id = _vars.project_id)
if "activity_id" in vars:
# Create Link to Activity
ltable = s3db.project_task_activity
if vars.activity_id:
link_id = ltable.insert(task_id = id,
activity_id = _vars.activity_id)
# Find the associated Project
ptable = db.project_project
ltable = db.project_task_project
query = (ltable.task_id == id) & \
(ltable.project_id == ptable.id)
project = db(query).select(ptable.organisation_id,
limitby=(0, 1)).first()
if project:
# Set Task to be owned by this Customer
organisation_id = project.organisation_id
otable = s3db.org_organisation
query = (otable.id == organisation_id)
role = db(query).select(otable.owned_by_organisation,
limitby=(0, 1)).first()
if role:
table = s3db.project_task
query = (table.id == vars.id)
db(query).update(owned_by_organisation=role.owned_by_organisation)
# Make sure the task is also linked to the project
# when created under an activity
if id:
lta = s3db.project_task_activity
ltp = s3db.project_task_project
ta = s3db.project_activity
query = (ltp.task_id == id)
row = db(query).select(ltp.project_id,
limitby=(0, 1)).first()
if not row:
query = (lta.task_id == id) & \
(lta.activity_id == ta.id)
row = db(query).select(ta.project_id,
limitby=(0, 1)).first()
if row and row.project_id:
ltp.insert(task_id=id,
project_id=row.project_id)
# Notify Assignee
task_notify(form)
return
# -------------------------------------------------------------------------
@staticmethod
def task_dispatch(r, **attr):
"""
Send a Task Dispatch notice from a Task
- if a location is supplied, this will be formatted as an OpenGeoSMS
"""
T = current.T
msg = current.msg
response = current.response
if r.representation == "html" and \
r.name == "task" and r.id and not r.component:
record = r.record
text = "%s: %s" % (record.name,
record.description)
# Encode the message as an OpenGeoSMS
message = msg.prepare_opengeosms(record.location_id,
code="ST",
map="google",
text=text)
# URL to redirect to after message sent
url = URL(c="project",
f="task",
args=r.id)
# Create the form
if record.pe_id:
opts = dict(recipient=record.pe_id)
else:
opts = dict(recipient_type="pr_person")
output = msg.compose(type="SMS",
message = message,
url = url,
**opts)
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Send Task Notification")
response.view = "msg/compose.html"
return output
else:
raise HTTP(501, BADMETHOD)
# -------------------------------------------------------------------------
@staticmethod
def time_onaccept(form):
""" When Time is logged, update the Task & Activity """
db = current.db
titable = db.project_time
ttable = db.project_task
atable = db.project_activity
tatable = db.project_task_activity
# Find the Task
task_id = form.vars.task_id
if not task_id:
# Component Form
query = (titable.id == form.vars.id)
record = db(query).select(titable.task_id,
limitby=(0, 1)).first()
if record:
task_id = record.task_id
# Total the Hours Logged
query = (titable.deleted == False) & \
(titable.task_id == task_id)
rows = db(query).select(titable.hours)
hours = 0
for row in rows:
hours += row.hours
# Update the Task
query = (ttable.id == task_id)
db(query).update(time_actual=hours)
# Find the Activity
query = (tatable.deleted == False) & \
(tatable.task_id == task_id)
activity = db(query).select(tatable.activity_id,
limitby=(0, 1)).first()
if activity:
activity_id = activity.activity_id
# Find all Tasks in this Activity
query = (ttable.deleted == False) & \
(tatable.deleted == False) & \
(tatable.task_id == ttable.id) & \
(tatable.activity_id == activity_id)
tasks = db(query).select(ttable.time_actual)
# Total the Hours Logged
hours = 0
for task in tasks:
hours += task.time_actual
# Update the Activity
query = (atable.id == activity_id)
db(query).update(time_actual=hours)
return
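    # -----------------------------------------------------------------------------
    # Illustrative sketch (not part of the original model): the aggregation done
    # by time_onaccept above reduces to summing the hours of the (already
    # filtered) rows of a task. The helper below reproduces that reduction over
    # plain dicts so it can be exercised without a database; the row shape used
    # here is an assumption made purely for illustration.
def _total_logged_hours(rows):
    """ Sum the 'hours' values of the given rows, treating None as 0. """
    total = 0
    for row in rows:
        total += row.get("hours") or 0
    return total

# Usage sketch: _total_logged_hours([{"hours": 1.5}, {"hours": 2.0}]) -> 3.5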
# =============================================================================
class S3ProjectTaskHRMModel(S3Model):
"""
Project Task HRM Model
This class holds the tables used to link Tasks to Human Resources
- either individuals or Job Roles
"""
names = ["project_task_job_role",
"project_task_human_resource",
]
def model(self):
s3 = current.response.s3
task_id = self.project_task_id
human_resource_id = self.hrm_human_resource_id
job_role_id = self.hrm_job_role_id
# Shortcuts
define_table = self.define_table
# ---------------------------------------------------------------------
# Link Tasks <> Human Resources
tablename = "project_task_human_resource"
table = define_table(tablename,
task_id(),
human_resource_id(),
*s3.meta_fields())
# ---------------------------------------------------------------------
# Link Tasks <> Job Roles
tablename = "project_task_job_role"
table = define_table(tablename,
task_id(),
job_role_id(),
*s3.meta_fields())
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
#
return dict(
)
# =============================================================================
class S3ProjectTaskIReportModel(S3Model):
"""
Project Task IReport Model
This class holds the table used to link Tasks with Incident Reports.
"""
names = ["project_task_ireport",
]
def model(self):
s3 = current.response.s3
task_id = self.project_task_id
ireport_id = self.irs_ireport_id
# ---------------------------------------------------------------------
# Link Tasks <-> Incident Reports
#
tablename = "project_task_ireport"
table = self.define_table(tablename,
task_id(),
ireport_id(),
*s3.meta_fields())
self.configure(tablename,
onaccept=self.task_ireport_onaccept)
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
#
return dict(
)
# -------------------------------------------------------------------------
@staticmethod
def task_ireport_onaccept(form):
"""
When a Task is linked to an IReport, then populate the location_id
"""
vars = form.vars
ireport_id = vars.ireport_id
task_id = vars.task_id
db = current.db
s3db = current.s3db
# Check if we already have a Location for the Task
table = s3db.project_task
query = (table.id == task_id)
record = db(query).select(table.location_id,
limitby=(0, 1)).first()
if not record or record.location_id:
return
# Find the Incident Location
itable = s3db.irs_ireport
query = (itable.id == ireport_id)
record = db(query).select(itable.location_id,
limitby=(0, 1)).first()
if not record or not record.location_id:
return
location_id = record.location_id
# Update the Task
query = (table.id == task_id)
db(query).update(location_id=location_id)
return
# -----------------------------------------------------------------------------
def project_assignee_represent(id):
""" Represent the Person a Task is assigned-to or list views """
db = current.db
s3db = current.s3db
cache = s3db.cache
output = current.messages.NONE
if not id:
return output
if isinstance(id, Row):
instance_type = id.instance_type
id = id.pe_id
else:
etable = s3db.pr_pentity
query = (etable.id == id)
record = db(query).select(etable.instance_type,
cache=cache,
limitby=(0, 1)).first()
if not record:
return output
instance_type = record.instance_type
table = s3db[instance_type]
query = (table.pe_id == id)
if instance_type == "pr_person":
record = db(query).select(table.first_name,
table.middle_name,
table.last_name,
table.initials,
cache=cache,
limitby=(0, 1)).first()
if record:
output = record.initials or s3_fullname(record)
elif instance_type in ("pr_group", "org_organisation"):
# Team or Organisation
record = db(query).select(table.name,
cache=cache,
limitby=(0, 1)).first()
if record:
output = record.name
else:
        # Should not happen if correctly filtered; return the default
pass
return output
# =============================================================================
def project_rheader(r, tabs=[]):
""" Project Resource Headers - used in Project & Budget modules """
if r.representation != "html":
# RHeaders only used in interactive views
return None
record = r.record
if record is None:
# List or Create form: rheader makes no sense here
return None
table = r.table
resourcename = r.name
T = current.T
auth = current.auth
settings = current.deployment_settings
drr = settings.get_project_drr()
pca = settings.get_project_community_activity()
milestones = settings.get_project_milestones()
if resourcename == "project":
# Tabs
tabs = [(T("Basic Details"), None)]
append = tabs.append
if drr:
append((T("Organizations"), "organisation"))
ADMIN = current.session.s3.system_roles.ADMIN
admin = auth.s3_has_role(ADMIN)
#staff = auth.s3_has_role("STAFF")
staff = True
if staff or drr:
append((T("Communities") if pca else T("Activities"), "activity"))
if staff and milestones:
append((T("Milestones"), "milestone"))
if not drr:
append((T("Tasks"), "task"))
if drr:
append((T("Documents"), "document"))
elif staff:
append((T("Attachments"), "document"))
if record.calendar:
append((T("Calendar"), "timeline"))
rheader_tabs = s3_rheader_tabs(r, tabs)
row3 = ""
if drr:
row2 = TR(
TH("%s: " % table.countries_id.label),
table.countries_id.represent(record.countries_id),
)
else:
row2 = TR(
TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id)
)
if record.end_date:
row3 = TR(
TH("%s: " % table.end_date.label),
table.end_date.represent(record.end_date)
)
rheader = DIV(TABLE(
TR(
TH("%s: " % table.name.label),
record.name
),
row2,
row3,
), rheader_tabs)
elif resourcename == "activity":
# @ToDo: integrate tabs?
rheader_tabs = s3_rheader_tabs(r, tabs)
tbl = TABLE()
if record.project_id is not None:
tbl.append(
TR(
TH("%s: " % table.project_id.label),
table.project_id.represent(record.project_id))
)
if pca:
tbl.append(
TR(
TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id)
)
)
else:
tbl.append(
TR(
TH("%s: " % table.name.label),
record.name
)
)
rheader = DIV(tbl, rheader_tabs)
elif resourcename == "task":
db = current.db
s3db = current.s3db
# Tabs
tabs = [(T("Details"), None)]
append = tabs.append
staff = auth.s3_has_role("STAFF")
if staff:
append((T("Time"), "time")),
#append((T("Comments"), "discuss"))
append((T("Attachments"), "document"))
if settings.has_module("msg"):
append((T("Notify"), "dispatch"))
#(T("Roles"), "job_role"),
#(T("Assignments"), "human_resource"),
#(T("Requests"), "req")
rheader_tabs = s3_rheader_tabs(r, tabs)
# RHeader
ptable = s3db.project_project
ltable = s3db.project_task_project
query = (ltable.deleted == False) & \
(ltable.task_id == r.id) & \
(ltable.project_id == ptable.id)
project = db(query).select(ptable.id,
limitby=(0, 1)).first()
if project:
project = TR(
TH("%s: " % T("Project")),
s3db.project_project_represent(project.id)
)
else:
project = ""
atable = s3db.project_activity
ltable = s3db.project_task_activity
query = (ltable.deleted == False) & \
(ltable.task_id == r.id) & \
(ltable.activity_id == atable.id)
activity = db(query).select(atable.name,
limitby=(0, 1)).first()
if activity:
activity = TR(
TH("%s: " % T("Activity")),
activity.name
)
else:
activity = ""
if record.description:
description = TR(
TH("%s: " % table.description.label),
record.description
)
else:
description = ""
if record.site_id:
facility = TR(
TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
)
else:
facility = ""
if record.location_id:
location = TR(
TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id),
)
else:
location = ""
if record.pe_id:
assignee = TR(
TH("%s: " % table.pe_id.label),
s3db.pr_pentity_represent(record.pe_id,
show_label=False),
)
else:
assignee = ""
if record.time_estimated:
time_estimated = TR(
TH("%s: " % table.time_estimated.label),
record.time_estimated
)
else:
time_estimated = ""
if record.time_actual:
time_actual = TR(
TH("%s: " % table.time_actual.label),
record.time_actual
)
else:
time_actual = ""
# Comments
# if r.method == "discuss":
# comments = ""
# else:
# ctable = s3db.project_comment
# query = (ctable.deleted == False) & \
# (ctable.task_id == r.id)
# comments = db(query).select(ctable.body).last()
# if comments:
# try:
# markup = etree.XML(comments.body)
# text = markup.xpath(".//text()")
# if text:
# text = " ".join(text)
# else:
# text = ""
# except etree.XMLSyntaxError:
# t = html.fromstring(comments.body)
# text = t.text_content()
# comments = TR(
# TH("%s: " % T("Latest Comment")),
# A(text,
# _href=URL(args=[r.id, "discuss"]))
# )
# else:
# comments = ""
rheader = DIV(TABLE(
project,
activity,
TR(
TH("%s: " % table.name.label),
record.name,
),
description,
facility,
location,
assignee,
time_estimated,
time_actual,
#comments,
), rheader_tabs)
return rheader
# -------------------------------------------------------------------------
def task_notify(form):
"""
If the task is assigned to someone then notify them
"""
vars = form.vars
try:
pe_id = int(vars.pe_id)
    except (TypeError, ValueError):
return
if form.record is None or (pe_id != form.record.pe_id):
# Assignee has changed
settings = current.deployment_settings
if settings.has_module("msg"):
# Notify assignee
subject = "%s: Task assigned to you" % settings.get_system_name_short()
url = "%s%s" % (settings.get_base_public_url(),
URL(c="project", f="task", args=vars.id))
message = "You have been assigned a Task:\n\n%s\n\n%s\n\n%s" % \
(url,
vars.name,
vars.description or "")
current.msg.send_by_pe_id(pe_id, subject, message)
return
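# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the notification
# body assembled inside task_notify above always has the same shape, which
# the pure helper below reproduces so the format can be checked without the
# msg module. The helper name is invented for this example.
def _assignment_message(url, name, description):
    """ Build the notification body used by task_notify. """
    return "You have been assigned a Task:\n\n%s\n\n%s\n\n%s" % \
           (url, name, description or "")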
# =============================================================================
class S3ProjectVirtualfields:
""" Virtual fields for the project_project table """
def organisation(self):
""" Name of the lead organisation of the project """
db = current.db
s3db = current.s3db
s3 = current.response.s3
otable = s3db.org_organisation
ltable = s3db.project_organisation
LEAD_ROLE = s3.project_organisation_lead_role
query = (ltable.deleted != True) & \
(ltable.project_id == self.project_project.id) & \
(ltable.role == LEAD_ROLE) & \
(ltable.organisation_id == otable.id)
org = db(query).select(otable.name,
limitby=(0, 1)).first()
if org:
return org.name
else:
return None
# =============================================================================
class S3ProjectActivityVirtualfields:
""" Virtual fields for the project_activity table """
extra_fields = ["project_id", "location_id"]
def organisation(self):
""" Name of the lead organisation of the project """
db = current.db
s3db = current.s3db
s3 = current.response.s3
otable = s3db.org_organisation
ltable = s3db.project_organisation
LEAD_ROLE = s3.project_organisation_lead_role
query = (ltable.deleted != True) & \
(ltable.project_id == self.project_activity.project_id) & \
(ltable.role == LEAD_ROLE) & \
(ltable.organisation_id == otable.id)
org = db(query).select(otable.name,
limitby=(0, 1)).first()
if org:
return org.name
else:
return None
def L0(self):
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
self.project_activity.location_id,
ids=False,
names=True)
if "L0" in parents:
return parents["L0"]
else:
return None
def L1(self):
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
self.project_activity.location_id,
ids=False,
names=True)
if "L1" in parents:
return parents["L1"]
else:
return None
def L2(self):
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
self.project_activity.location_id,
ids=False,
names=True)
if "L2" in parents:
return parents["L2"]
else:
return None
def L3(self):
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
self.project_activity.location_id,
ids=False,
names=True)
if "L3" in parents:
return parents["L3"]
else:
return None
# =============================================================================
class S3ProjectBeneficiaryVirtualfields:
""" Virtual fields for the project_beneficiary table """
extra_fields = ["activity_id"]
def L0(self):
db = current.db
s3db = current.s3db
s3 = current.response.s3
atable = s3db.project_activity
query = (atable.id == self.project_beneficiary.activity_id)
activity = db(query).select(atable.location_id,
limitby=(0, 1)).first()
if activity:
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
activity.location_id,
ids=False,
names=True)
if "L0" in parents:
return parents["L0"]
else:
return current.messages.NONE
else:
return current.messages.NONE
def L1(self):
db = current.db
s3db = current.s3db
s3 = current.response.s3
atable = s3db.project_activity
query = (atable.id == self.project_beneficiary.activity_id)
activity = db(query).select(atable.location_id,
limitby=(0, 1)).first()
if activity:
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
activity.location_id,
ids=False,
names=True)
if "L1" in parents:
return parents["L1"]
else:
return current.messages.NONE
else:
return current.messages.NONE
def L2(self):
db = current.db
s3db = current.s3db
s3 = current.response.s3
atable = s3db.project_activity
query = (atable.id == self.project_beneficiary.activity_id)
activity = db(query).select(atable.location_id,
limitby=(0, 1)).first()
if activity:
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
activity.location_id,
ids=False,
names=True)
if "L2" in parents:
return parents["L2"]
else:
return current.messages.NONE
else:
return current.messages.NONE
def L3(self):
db = current.db
s3db = current.s3db
s3 = current.response.s3
atable = s3db.project_activity
query = (atable.id == self.project_beneficiary.activity_id)
activity = db(query).select(atable.location_id,
limitby=(0, 1)).first()
if activity:
parents = Storage()
parents = current.gis.get_parent_per_level(parents,
activity.location_id,
ids=False,
names=True)
if "L3" in parents:
return parents["L3"]
else:
return current.messages.NONE
else:
return current.messages.NONE
# =============================================================================
class S3ProjectActivityContactVirtualFields:
""" Virtual fields for the project_activity_contact table """
extra_fields = ["person_id"]
def email(self):
db = current.db
s3db = current.s3db
ptable = s3db.pr_person
ctable = s3db.pr_contact
person_id = self.project_activity_contact.person_id
query = (ctable.deleted != True) & \
(ptable.id == person_id) & \
(ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "EMAIL")
items = db(query).select(ctable.value)
return ", ".join([item.value for item in items])
def sms(self):
db = current.db
s3db = current.s3db
ptable = s3db.pr_person
ctable = s3db.pr_contact
person_id = self.project_activity_contact.person_id
query = (ctable.deleted != True) & \
(ptable.id == person_id) & \
(ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "SMS")
items = db(query).select(ctable.value)
return ", ".join([item.value for item in items])
# =============================================================================
class S3ProjectTaskVirtualfields:
""" Virtual fields for the project_task table """
extra_fields = ["id",
"project_task_project:project_id$name",
"project_task_activity:activity_id$name"]
def project(self):
"""
Project associated with this task
"""
try:
return self.project_project.name
except AttributeError:
return None
def activity(self):
"""
Activity associated with this task
"""
try:
return self.project_activity.name
except AttributeError:
return None
def task_id(self):
try:
return self.project_task.id
except AttributeError:
return None
# =============================================================================
class S3ProjectTimeVirtualfields:
""" Virtual fields for the project_time table """
extra_fields = ["task_id", "person_id", "date"]
def project(self):
"""
Project associated with this time entry
- used by the 'Project Time' report
"""
db = current.db
s3db = current.s3db
ptable = s3db.project_project
ltable = s3db.project_task_project
query = (ltable.deleted != True) & \
(ltable.task_id == self.project_time.task_id) & \
(ltable.project_id == ptable.id)
project = db(query).select(ptable.name,
limitby=(0, 1)).first()
if project:
return project.name
else:
return None
def day(self):
"""
            Day of the last week this time entry relates to
- used by the 'Last Week's Work' report
"""
T = current.T
now = current.request.utcnow
thisdate = self.project_time.date
if not thisdate:
return "-"
week = datetime.timedelta(days=7)
if thisdate < (now - week):
# Ignore data older than the last week
# - should already be filtered in controller anyway
return "-"
return thisdate.date().strftime("%d %B")
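# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original model): a pure version of
# day() above that takes 'now' as a parameter, so the seven-day cut-off can
# be unit-tested. Entries older than a week collapse to "-", anything newer
# renders as e.g. "08 March". The function name is invented for this example.
def _day_label(thisdate, now):
    """ Format a time entry's date for the 'Last Week's Work' report. """
    if not thisdate or thisdate < (now - datetime.timedelta(days=7)):
        return "-"
    return thisdate.date().strftime("%d %B")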
# END =========================================================================
# =============================================================================
# File: vcii/test/test_sheet.py  (repo: jangler/vcii, license: MIT)
# =============================================================================
import random
import unittest
from vcii.sheet import *
class TestSheet(unittest.TestCase):
def test_key_to_indices(self):
with self.assertRaises(ValueError):
indices_from_label('1A')
self.assertEqual(indices_from_label('A1'), (0, 0))
self.assertEqual(indices_from_label('zz99'), (26**2 + 25, 98))
def test_append(self):
sheet = Sheet()
sheet.cursor = [(lambda: random.randint(0, 10))()] * 2
sheet.append('rain')
self.assertEqual(sheet.active_cell.content, 'rain')
self.assertTrue(sheet.modified)
sheet.modified = False
sheet.append('bow')
self.assertEqual(sheet.active_cell.content, 'rainbow')
self.assertTrue(sheet.modified)
def test_expand(self):
sheet = Sheet()
self.assertEqual(sheet.size, (0, 0))
max_size = 0, 0
for i in range(10):
coords = [(lambda: random.randint(0, 10))()] * 2
sheet.expand(*coords)
max_size = tuple(max(max_size[j], coords[j] + 1) for j in range(2))
self.assertEqual(sheet.size, max_size)
self.assertTrue(sheet.modified)
def test_setitem(self):
sheet = Sheet()
sheet['C2'] = 'testing'
self.assertEqual(sheet.cells[2][1].content, 'testing')
self.assertEqual(sheet.size, (3, 2))
self.assertTrue(sheet.modified)
def test_move_cursor(self):
sheet = Sheet()
sheet.move_cursor(0, 0)
self.assertEqual(sheet.cursor, [0, 0])
sheet.move_cursor(-1, 1)
self.assertEqual(sheet.cursor, [0, 1])
sheet.move_cursor(1, -2)
self.assertEqual(sheet.cursor, [1, 0])
self.assertFalse(sheet.modified)
sheet.expand(0, 0)
sheet.modified = False
sheet.move_cursor(-1, 0)
self.assertEqual(sheet.cursor, [0, 0])
self.assertFalse(sheet.modified)
def test_column_width(self):
sheet = Sheet()
sheet.expand(1, 0)
self.assertEqual(sheet.column_width(1), DEFAULT_COLUMN_WIDTH)
self.assertEqual(sheet.column_width(2), DEFAULT_COLUMN_WIDTH)
sheet.column_widths[1] = 5
self.assertEqual(sheet.column_width(0), DEFAULT_COLUMN_WIDTH)
self.assertEqual(sheet.column_width(1), 5)
def test_resize_column(self):
sheet = Sheet()
sheet.resize_column(0, 0)
self.assertEqual(sheet.size, (1, 1))
self.assertEqual(sheet.column_widths, [2])
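# -----------------------------------------------------------------------------
# Illustrative sketch (the real implementation lives in vcii.sheet): a
# label -> indices conversion consistent with the assertions in
# test_key_to_indices above. Column letters are bijective base 26 and row
# numbers are 1-based; the function name is invented for this example.
def _indices_from_label_sketch(label):
    import re
    match = re.match(r'^([A-Za-z]+)([0-9]+)$', label)
    if not match:
        raise ValueError('malformed cell label: %r' % label)
    letters, digits = match.groups()
    column = 0
    for char in letters.lower():
        column = column * 26 + (ord(char) - ord('a') + 1)
    return column - 1, int(digits) - 1

# _indices_from_label_sketch('A1')   == (0, 0)
# _indices_from_label_sketch('zz99') == (26**2 + 25, 98)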
# =============================================================================
# File: ml/stream/sequences.py  (repo: thorwhalen/ut, license: MIT)
# =============================================================================
from sklearn.base import BaseEstimator
from collections import Counter
import pandas as pd
from numpy import sum, nan, isnan
from ut.util.uiter import window
class NextElementPredictor(BaseEstimator):
def predict(self, seqs):
preds = self.predict_proba(seqs)
return [max(pred, key=lambda key: pred[key]) for pred in preds]
def predict_proba(self, seqs):
return list(map(self._predict_proba_conditioned_on_recent_subseq, seqs))
def _predict_proba_conditioned_on_recent_subseq(self, recent_subseq):
raise NotImplementedError("Need to implement this method")
class MarkovNextElementPred(NextElementPredictor):
_list_of_attributes_to_display = ['markov_window', 'empty_element', 'keep_stats_in_memory']
def __init__(self, markov_window=2, empty_element=-1, keep_stats_in_memory=True):
self.markov_window = markov_window
self.keep_stats_in_memory = keep_stats_in_memory
self.empty_element = empty_element
self._empty_element_padding = [empty_element] * (self.markov_window - 1)
@property
def total_tuple_count(self):
"""
:return: Number of observed window tuples (sum of values in self.snip_tuples_counter_)
"""
if self.total_tuple_count_ is not None:
return self.total_tuple_count_
else:
total_tuple_count_ = sum(self.snip_tuples_counter_.values())
if self.keep_stats_in_memory:
self.total_tuple_count_ = total_tuple_count_
return total_tuple_count_
@property
def pair_prob(self):
"""
:return: Series of probabilities (unsmoothed count ratios) indexed by snip pairs
"""
if self.pair_prob_ is not None:
return self.pair_prob_
else:
pair_prob_ = pd.Series(self.snip_tuples_counter_) / self.total_tuple_count
if self.keep_stats_in_memory:
                self.pair_prob_ = pair_prob_  # cache under the attribute the property checks
return pair_prob_
@property
def element_prob(self):
"""
:return: Series of snips probabilities (unsmoothed count ratios)
"""
if self.element_prob_ is not None:
return self.element_prob_
else:
element_prob_ = (self.pair_prob * self.total_tuple_count)
element_prob_ = element_prob_.groupby(level=0).sum()
element_prob_ = element_prob_.drop(labels=self.empty_element)
# element_prob_ = element_prob_.iloc[
# element_prob_.index.get_level_values(level=0) != self.empty_element]
element_prob_ /= element_prob_.sum()
if self.keep_stats_in_memory:
self.element_prob_ = element_prob_
return element_prob_
@property
def conditional_prob(self):
"""
:return: Series of probabilities of last element (level) conditional on previous ones (including empty elements)
"""
if self.conditional_prob_ is not None:
return self.conditional_prob_
else:
conditional_prob_ = self._drop_empty_elements_of_sr(self.pair_prob, levels=[self.markov_window - 1])
conditional_levels = list(range(self.markov_window - 1))
conditional_prob_ = conditional_prob_.div(
conditional_prob_.groupby(level=conditional_levels).sum(), level=0) # TODO: Only works for two levels
if self.keep_stats_in_memory:
self.conditional_prob_ = conditional_prob_
return conditional_prob_
@property
def initial_element_prob(self):
"""
:return: Series of snips probabilities (unsmoothed count ratios)
"""
if self.initial_element_prob_ is not None:
return self.initial_element_prob_
else:
initial_element_prob_ = self.pair_prob.xs(self.empty_element, level=0, drop_level=True)
initial_element_prob_ /= initial_element_prob_.sum()
if self.keep_stats_in_memory:
self.initial_element_prob_ = initial_element_prob_
return initial_element_prob_
def fit(self, snips_list):
# reset anything previously learned
self._initialize_params()
return self.partial_fit(snips_list)
def partial_fit(self, snips_list):
        if 'snip_tuples_counter_' not in self.__dict__:
self._initialize_params()
for snips in snips_list:
self._partial_fit_of_a_single_snips(snips)
return self
def _initialize_params(self):
"""
Initializes model params (the snip_tuples_counter_, etc.)
:return: None
"""
self.snip_tuples_counter_ = Counter()
self._reset_properties()
def _reset_properties(self):
"""
Resets some properties that depend on snip_tuples_counter_ to be computed (is used when the later changes)
These will be recomputed when requested.
:return: None
"""
self.total_tuple_count_ = None
self.pair_prob_ = None
self.element_prob_ = None
self.initial_element_prob_ = None
self.conditional_prob_ = None
def _partial_fit_of_a_single_snips(self, snips):
self._reset_properties()
self.snip_tuples_counter_.update(window(self._empty_element_padding + list(snips) + self._empty_element_padding,
n=self.markov_window))
def _drop_empty_elements_of_sr(self, sr, levels=None, renormalize=False):
if levels is None:
levels = list(range(self.markov_window))
for level in levels:
sr = sr.drop(labels=self.empty_element, level=level)
if renormalize:
sr /= sr.sum()
return sr
def _predict_proba_conditioned_on_recent_subseq(self, recent_subseq):
pass
def __repr__(self):
d = {attr: getattr(self, attr) for attr in self._list_of_attributes_to_display if attr in self.__dict__}
d['total_tuple_count'] = self.total_tuple_count
return self.__class__.__name__ + '\n' + str(d)
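# -----------------------------------------------------------------------------
# Minimal usage sketch (assumes the module's own dependencies -- sklearn,
# pandas, numpy and ut.util.uiter -- are importable). Each sequence is padded
# on both sides with the empty element before window tuples are counted, so
# the two sequences below contribute 4 + 3 = 7 tuples. The derived
# distributions (pair_prob, initial_element_prob, conditional_prob) are then
# available as properties.
if __name__ == "__main__":
    model = MarkovNextElementPred(markov_window=2)
    model.fit([["a", "b", "a"], ["a", "c"]])
    print(model.total_tuple_count)  # 7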
# =============================================================================
# File: src/commands/help.py  (repo: kivhift/qmk, license: MIT)
# =============================================================================
#
# Copyright (c) 2009-2012 Joshua Hughes <[email protected]>
#
import atexit
import os
import tempfile
import urllib
import webbrowser
import qmk
class HelpCommand(qmk.Command):
'''
View help for all available commands. A new tab will be opened in the
default web browser that contains the help for all of the commands that are
registered.
'''
def __init__(self):
self._name = 'help'
self._help = self.__doc__
h, self.__filename = tempfile.mkstemp(suffix = '.html',
prefix = 'qmkhelp')
os.close(h)
atexit.register(os.remove, self.__filename)
def action(self, arg):
# For now, ignore help requests for specific commands.
# if arg is not None: pass
f = file(self.__filename, 'wb')
f.write('<html><head><title>QMK Help</title></head><body>')
f.write('<h1>QMK Command Help</h1>')
cm = qmk.CommandManager()
f.write('<table border="1"><tr><th>Name</th><th>Help</th></tr>')
for name in cm.commandNames():
cmd = cm.command(name)
ht = cmd.help
f.write('<tr><td><pre>%s</pre></td><td><pre>%s</pre></td></tr>' % (
name, ht.encode('ascii', 'xmlcharrefreplace')))
f.write('</table></body></html>\n')
f.close()
webbrowser.open_new_tab('file:%s' % urllib.pathname2url(
f.name))
def commands(): return [ HelpCommand() ]
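# Illustrative sketch (not part of qmk): a second command following the same
# pattern as HelpCommand above -- set _name and _help in __init__ and
# implement action(). "EchoCommand" and its behaviour are invented for this
# example; registering it would just mean returning it from commands().
import sys
class EchoCommand(qmk.Command):
    '''
    Echo the given argument to standard output.
    '''
    def __init__(self):
        self._name = 'echo'
        self._help = self.__doc__
    def action(self, arg):
        if arg is not None:
            sys.stdout.write('%s\n' % arg)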
# =============================================================================
# File: Packs/WindowsForensics/Scripts/RegistryParse/RegistryParse_test.py
# (repo: demisto/content, license: MIT)
# =============================================================================
import json
import RegistryParse as reg_parse
def util_load_json(path):
with open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_get_sub_keys():
key = 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList'
folder_output_key = 'Sid'
mock_reg = util_load_json('./test_data/mock_reg_users.json')
expected = util_load_json('./test_data/mock_reg_users_result.json')
actual = reg_parse.get_sub_keys(mock_reg, key, folder_output_key)
for actual_items in actual:
for actual_item in actual_items:
assert actual_item in expected[0] or actual_item in expected[1]
def test_parse_reg_values():
expected = 'C:\\Windows\\ServiceProfiles\\LocalService'
hex_value = 'hex(2):43,00,3a,00,5c,00,57,00,69,00,6e,00,64,00,6f,00,77,\
00,73,00,5c,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,50,00,72,00,6f,00,\
66,00,69,00,6c,00,65,00,73,00,5c,00,4c,00,6f,00,63,00,61,00,6c,00,53,00,65,\
00,72,00,76,00,69,00,63,00,65,00,00,00'
actual = reg_parse.parse_reg_value(hex_value)
assert actual == expected
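# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): what the hex(2) decode
# exercised above amounts to. REG_EXPAND_SZ values are exported as comma-
# separated hex bytes of UTF-16LE text ending in a NUL; the helper name is
# invented for this example, the real logic lives in RegistryParse.
def _decode_hex2_value(value):
    payload = value.split(':', 1)[1]
    raw = bytearray(int(byte, 16) for byte in payload.split(','))
    return bytes(raw).decode('utf-16-le').rstrip('\x00')

# _decode_hex2_value('hex(2):43,00,3a,00,00,00') == 'C:'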
# =============================================================================
# File: kdelibs.py  (repo: KDE/twine2, license: MIT)
# =============================================================================
# -*- coding: utf-8 -*-
# Copyright 2009-2010 Simon Edwards <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import re
import toolkit
import kbindinggenerator.qtkdemacros
import os.path
import kbindinggenerator.sipsymboldata
outputBaseDirectory = "/home/sbe/devel/git/kde/kdebindings/pykde4"
cmakelistBaseDirectory = "/home/sbe/devel/git/kde/kdelibs"
cmakelistPimlibsBaseDirectory = "/home/sbe/devel/git/kde/kdepimlibs"
cmakelistPhononBaseDirectory = "/home/sbe/devel/git/phonon"
kdelibsBuildDirectory = "/home/sbe/devel/git_build/kde/kdelibs"
kdepimlibsBuildDirectory = "/home/sbe/devel/git_build/kde/kdepimlibs"
cmakelistGitBaseDirectory = "/home/sbe/devel/git"
polkitqtBaseDirectory = "/home/sbe/devel/git/polkit-qt"
sipImportDir = "/home/sbe/devel/kdesvninstall/share/sip/PyQt4"
###########################################################################
kdecore = toolkit.ModuleGenerator(
module="PyKDE4.kdecore",
outputDirectory=os.path.join(outputBaseDirectory, "sip/kdecore"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kdecore"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kdecore/Mainpage.dox"),
# .h file extraction
cmakelists=os.path.join(cmakelistBaseDirectory,"kdecore/CMakeLists.txt"),
ignoreHeaders="""conversion_check.h kallocator.h kdebug.h kcodecs.h kgenericfactory.h ksortablelist.h ktrader.h ktypelist.h kmulticastsocket.h kmulticastsocketdevice.h kdecore_export.h kde_file.h ksocks.h kde_file.h ksharedptr.h klauncher_iface.h k3bufferedsocket.h k3clientsocketbase.h k3datagramsocket.h k3httpproxysocketdevice.h k3iobuffer.h k3processcontroller.h k3process.h k3procio.h k3resolver.h k3reverseresolver.h k3serversocket.h k3socketaddress.h k3socketbase.h k3socketdevice.h k3socks.h k3sockssocketdevice.h k3streamsocket.h qtest_kde.h kdefakes.h kdeversion.h kauth.h ktypelistutils.h ktypetraits.h karchive.h kar.h ktar.h kzip.h kshareddatacache.h kmountpoint.h kdirwatch.h karchive_export.h""".split(" "),
noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDECORE_EXPORT","KDE_EXPORT","KIO_EXPORT","KDE_DEPRECATED", "KDECORE_EXPORT_DEPRECATED", "KARCHIVE_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtNetwork/QtNetworkmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDECORE_EXPORT","KDE_EXPORT","KIO_EXPORT","KDECORE_EXPORT_DEPRECATED","KARCHIVE_EXPORT"],
ignoreBases=[],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="*",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="pParent",
annotations="TransferThis")
]
)
###########################################################################
kdeui = toolkit.ModuleGenerator(
module="PyKDE4.kdeui",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kdeui"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kdeui"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kdeui/Mainpage.dox"),
# .h file extraction
cmakelists=[
os.path.join(cmakelistBaseDirectory,"kdeui/CMakeLists.txt")
#os.path.join(cmakelistBaseDirectory,"kdeui/dialogs/CMakeLists.txt"),
#os.path.join(cmakelistBaseDirectory,"kdeui/util/CMakeLists.txt"),
#os.path.join(cmakelistBaseDirectory,"kdeui/widgets/CMakeLists.txt")
],
ignoreHeaders="""kxerrorhandler.h k3iconview.h k3iconviewsearchline.h k3listview.h k3listviewlineedit.h k3listviewsearchline.h netwm_p.h k3mimesourcefactory.h kdeui_export.h fixx11h.h kglobalshortcutinfo_p.h kkeyserver_mac.h kkeyserver_win.h kimagecache.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDEUI_EXPORT","KDE_EXPORT","KDE_DEPRECATED","KDEUI_EXPORT_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","QtSvg/QtSvgmod.sip","kdecore/kdecoremod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDEUI_EXPORT","KDE_EXPORT","KDEUI_EXPORT_DEPRECATED"],
ignoreBases=["Q3GridView"],
noCTSCC=["KWindowSystem","NETRootInfo","NETWinInfo"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer"),
toolkit.PySlotRule(className="KDialogButtonBox",arg1Name="receiver",arg2Name="slot"),
toolkit.PySlotRule(namespaceName="KStandardAction",arg1Name="recvr",arg2Name="slot")
]
)
###########################################################################
kio = toolkit.ModuleGenerator(
module="PyKDE4.kio",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kio"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kio"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kio/Mainpage.dox"),
# .h file extraction
cmakelists=[
os.path.join(cmakelistBaseDirectory,"kio/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"kfile/CMakeLists.txt")
],
headers=[os.path.join(cmakelistBaseDirectory,"kdecore/io/karchive.h"),
os.path.join(cmakelistBaseDirectory,"kdecore/io/kar.h"),
os.path.join(cmakelistBaseDirectory,"kdecore/io/ktar.h"),
os.path.join(cmakelistBaseDirectory,"kdecore/io/kzip.h")],
ignoreHeaders="""http_slave_defaults.h ioslave_defaults.h kmimetyperesolver.h k3mimetyperesolver.h kfiledetailview.h kfileiconview.h kfiletreeview.h kfiletreeviewitem.h ksslpemcallback.h kpropsdialog.h kio_export.h kdirnotify.h k3filedetailview.h k3fileiconview.h k3filetreeview.h k3filetreeviewitem.h k3mimetyperesolver.h kfiletreebranch.h kfile_export.h kurlbar.h kdebug.h kdebugdbusiface_p.h kdirwatch_p.h klimitediodevice_p.h kprocess_p.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1,"Q_OS_UNIX": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDECORE_EXPORT","KDECORE_EXPORT_DEPRECATED","KIO_EXPORT",
"KFILE_EXPORT","KIO_EXPORT_DEPRECATED","KDE_NO_EXPORT","KDE_EXPORT","KDE_DEPRECATED",
"KDEUI_EXPORT_DEPRECATED","KIO_CONNECTION_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","kdecore/kdecoremod.sip","kdeui/kdeuimod.sip","solid/solidmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDECORE_EXPORT","KDECORE_EXPORT_DEPRECATED","KIO_EXPORT","KFILE_EXPORT","KDE_EXPORT","KDEUI_EXPORT_DEPRECATED",
"KIO_CONNECTION_EXPORT","KIO_EXPORT_DEPRECATED"],
#ignoreBases=["Q3GridView"],
noCTSCC=["KonqBookmarkContextMenu","KImportedBookmarkMenu","KBookmark","KBookmarkGroup"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
kutils = toolkit.ModuleGenerator(
module="PyKDE4.kutils",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kutils"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kutils"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kutils/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"kutils/CMakeLists.txt")],
ignoreHeaders="""kcmodulecontainer.h kutils_export.h kcmutils_export.h kemoticons_export.h kidletime_export.h kprintutils_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KUTILS_EXPORT","KDE_EXPORT","KDE_DEPRECATED","KCMUTILS_EXPORT","KEMOTICONS_EXPORT","KIDLETIME_EXPORT","KPRINTUTILS_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","kdecore/kdecoremod.sip","kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KUTILS_EXPORT","KDE_EXPORT","KCMUTILS_EXPORT","KEMOTICONS_EXPORT","KIDLETIME_EXPORT","KPRINTUTILS_EXPORT"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
solid = toolkit.ModuleGenerator(
module="PyKDE4.solid",
outputDirectory=os.path.join(outputBaseDirectory,"sip/solid"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/solid"),
mainDocs=os.path.join(cmakelistBaseDirectory,"solid/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"solid/solid/CMakeLists.txt")],
ignoreHeaders="""solid_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["SOLID_EXPORT","KDE_EXPORT","KDE_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","kdecore/kdecoremod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["SOLID_EXPORT","KDE_EXPORT"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
kparts = toolkit.ModuleGenerator(
module="PyKDE4.kparts",
outputDirectory=os.path.join(outputBaseDirectory,"sip/kparts"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/kparts"),
mainDocs=os.path.join(cmakelistBaseDirectory,"kparts/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"kparts/CMakeLists.txt")],
ignoreHeaders="""componentfactory.h genericfactory.h kparts_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KPARTS_EXPORT","KDE_EXPORT","KDE_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","kdecore/kdecoremod.sip","kdeui/kdeuimod.sip","kio/kiomod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KPARTS_EXPORT","KDE_EXPORT"],
noCTSCC=["GenericFactoryBase"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
plasma = toolkit.ModuleGenerator(
module="PyKDE4.plasma",
outputDirectory=os.path.join(outputBaseDirectory,"sip/plasma"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/plasma"),
mainDocs=os.path.join(cmakelistBaseDirectory,"plasma/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"plasma/CMakeLists.txt")],
ignoreHeaders="""plasma_export.h credentials.h """.split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1, "QT_VERSION": 0x040600},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["PLASMA_EXPORT","PLASMA_EXPORT_DEPRECATED","KDE_EXPORT",
"KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"QtNetwork/QtNetworkmod.sip",
"QtSvg/QtSvgmod.sip",
"QtWebKit/QtWebKitmod.sip",
"QtXml/QtXmlmod.sip",
"QtDeclarative/QtDeclarativemod.sip",
"QtScript/QtScriptmod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["PLASMA_EXPORT","PLASMA_EXPORT_DEPRECATED","KDE_EXPORT"],
#noCTSCC=["GenericFactoryBase"],
ignoreBases=["QSharedData","KShared","QList<KUrl>"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*","QGraphicsWidget*"],
parameterNameMatch=["parent","pParent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*","QGraphicsWidget*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
khtml = toolkit.ModuleGenerator(
module="PyKDE4.khtml",
outputDirectory=os.path.join(outputBaseDirectory,"sip/khtml"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/khtml"),
mainDocs=os.path.join(cmakelistBaseDirectory,"khtml/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"khtml/CMakeLists.txt"),
#os.path.join(cmakelistBaseDirectory,"khtml/dom/CMakeLists.txt")
],
ignoreHeaders="""khtmldefaults.h dom_core.h dom_html.h khtml_events.h khtml_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KHTML_EXPORT","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"QtXml/QtXmlmod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip",
"kio/kiomod.sip",
"kutils/kutilsmod.sip",
"kparts/kpartsmod.sip",],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KHTML_EXPORT","KDE_EXPORT"],
noCTSCC=["CSSRule","CSSCharsetRule","CSSFontFaceRule","CSSImportRule","CSSMediaRule","CSSPageRule",
"CSSStyleRule","CSSUnknownRule","CSSStyleSheet","CSSPrimitiveValue","CSSValueList","CSSNamespaceRule"],
ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
def KNewStuffMapper(mod,headerName):
print("KNewStuffMapper: "+headerName)
filename = os.path.basename(headerName)
if filename.endswith(".h"):
sipName = filename[:-2]+".sip"
if "knewstuff3" in headerName:
return "knewstuff3_"+sipName
else:
return sipName
return filename
def KNewStuffCppHeaderMapper(mod,filename):
if "knewstuff3" in filename:
return "knewstuff3/" + os.path.basename(filename)
else:
return os.path.basename(filename)
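# Usage sketch for the two mappers above (the module argument is unused by
# both; the header path is a hypothetical example):
#   KNewStuffMapper(None, ".../knewstuff3/downloaddialog.h")
#       -> "knewstuff3_downloaddialog.sip"
#   KNewStuffCppHeaderMapper(None, ".../knewstuff3/downloaddialog.h")
#       -> "knewstuff3/downloaddialog.h"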
knewstuff = toolkit.ModuleGenerator(
module="PyKDE4.knewstuff",
outputDirectory=os.path.join(outputBaseDirectory,"sip/knewstuff"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/knewstuff"),
mainDocs=os.path.join(cmakelistBaseDirectory,"knewstuff/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"knewstuff/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"knewstuff/knewstuff2/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"knewstuff/knewstuff3/CMakeLists.txt")],
ignoreHeaders="""knewstuff_export.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KNEWSTUFF_EXPORT","KNEWSTUFF_EXPORT_DEPRECATED","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"QtXml/QtXmlmod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KNEWSTUFF_EXPORT","KNEWSTUFF_EXPORT_DEPRECATED","KDE_EXPORT"],
#noCTSCC=[],
#ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
],
filenameMappingFunction=KNewStuffMapper,
cppHeaderMappingFunction=KNewStuffCppHeaderMapper
)
###########################################################################
dnssd = toolkit.ModuleGenerator(
module="PyKDE4.dnssd",
outputDirectory=os.path.join(outputBaseDirectory,"sip/dnssd"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/dnssd"),
mainDocs=os.path.join(cmakelistBaseDirectory,"dnssd/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"dnssd/CMakeLists.txt")],
ignoreHeaders="""dnssd_export.h settings.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["KDNSSD_EXPORT","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"QtGui/QtGuimod.sip",
"kdecore/kdecoremod.sip",
"kdeui/kdeuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["KDNSSD_EXPORT","KDE_EXPORT"],
#noCTSCC=[],
#ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
nepomuk = toolkit.ModuleGenerator(
module="PyKDE4.nepomuk",
outputDirectory=os.path.join(outputBaseDirectory,"sip/nepomuk"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/nepomuk"),
mainDocs=os.path.join(cmakelistBaseDirectory,"nepomuk/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistBaseDirectory,"nepomuk/CMakeLists.txt"),
os.path.join(cmakelistBaseDirectory,"nepomuk/query/CMakeLists.txt")],
headers = [os.path.join(kdelibsBuildDirectory,"nepomuk",x)
for x in "ncal.h nco.h ndo.h nfo.h nie.h nmm.h nuao.h pimo.h tmo.h".split(" ")],
ignoreHeaders="""nepomuk_export.h ontologyloader.h desktopontologyloader.h fileontologyloader.h ontologymanager.h nepomukontologyloader.h nepomukquery_export.h kmetadatatagwidget.h ncal.h nco.h ndo.h nexif.h nfo.h nie.h nmm.h nmo.h nuao.h pimo.h tmo.h""".split(" "),
#noUpdateSip=["typedefs.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["NEPOMUK_EXPORT","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE","NEPOMUKQUERY_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=[
"QtCore/QtCoremod.sip",
"kdecore/kdecoremod.sip",
"soprano/sopranomod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["NEPOMUK_EXPORT","KDE_EXPORT","NEPOMUKQUERY_EXPORT"],
noCTSCC=["Term","GroupTerm","AndTerm","OrTerm","LiteralTerm","ResourceTerm","SimpleTerm","ComparisonTerm","ResourceTypeTerm","NegationTerm","OptionalTerm","FileQuery"],
#ignoreBases=["khtml::KHTMLWidget"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
soprano = toolkit.ModuleGenerator(
module="PyKDE4.soprano",
outputDirectory=os.path.join(outputBaseDirectory,"sip/soprano"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/soprano"),
mainDocs=os.path.join(cmakelistGitBaseDirectory,"soprano/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistGitBaseDirectory,"soprano/CMakeLists.txt"),
os.path.join(cmakelistGitBaseDirectory,"soprano/soprano/CMakeLists.txt"),
os.path.join(cmakelistGitBaseDirectory,"soprano/server/CMakeLists.txt"),
#os.path.join(cmakelistGitBaseDirectory,"soprano/server/sparql/CMakeLists.txt"),
os.path.join(cmakelistGitBaseDirectory,"soprano/server/dbus/CMakeLists.txt")],
ignoreHeaders="""soprano_export.h sopranomacros.h soprano.h vocabulary.h iterator.h version.h iteratorbackend.h""".split(" "),
#noUpdateSip=["iterator.sip"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1, "USING_SOPRANO_NRLMODEL_UNSTABLE_API":1, "QT_VERSION": 0x040700},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["SOPRANO_EXPORT","SOPRANO_CLIENT_EXPORT","SOPRANO_SERVER_EXPORT",
"USING_SOPRANO_NRLMODEL_UNSTABLE_API","KDE_EXPORT","KDE_DEPRECATED","Q_INVOKABLE",
"SOPRANO_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtNetwork/QtNetworkmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["SOPRANO_EXPORT","SOPRANO_CLIENT_EXPORT","SOPRANO_SERVER_EXPORT","KDE_EXPORT"],
#noCTSCC=[],
ignoreBases=["IteratorBackend<BindingSet>","Iterator<Node>","Iterator<BindingSet>","Iterator<Statement>"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
akonadi = toolkit.ModuleGenerator(
module="PyKDE4.akonadi",
outputDirectory=os.path.join(outputBaseDirectory,"sip/akonadi"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/akonadi"),
mainDocs=os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/CMakeLists.txt"),
os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/kmime/CMakeLists.txt"),
os.path.join(cmakelistPimlibsBaseDirectory,"akonadi/kabc/CMakeLists.txt")],
ignoreHeaders="""akonadi_export.h akonadi-kmime_export.h akonadi-kabc_export.h itempayloadinternals_p.h collectionpathresolver_p.h qtest_akonadi.h exception.h contactparts.h cachepolicypage.h resourcebasesettings.h dbusconnectionpool.h """.split(" "),
#addressee.h kabc_export.h
headers=[os.path.join(kdepimlibsBuildDirectory,"akonadi/resourcebasesettings.h")],
# headers=[
# os.path.join(kdepimlibsBuildDirectory, "addressee.h")],
#resourcebase.h agentbase.h
#noUpdateSip=["iterator.sip"],
ignoreBases=["QDBusContext"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros( \
[(re.compile(r'Latin1\( "ISO-8859-1" \)'),r'Latin1'),
(re.compile(r'kmime_mk_trivial_ctor\(\s*(\w+)\s*\)'),r'public: explicit \1( Content *parent = 0 ); \1( Content *parent, const QByteArray &s ); \1( Content *parent, const QString &s, const QByteArray &charset ); ~\1();'),
(re.compile(r'kmime_mk_dptr_ctor\(\s*(\w+)\s*\)'), r'protected: explicit \1( \1::Private *d, KMime::Content *parent = 0 );'),
(re.compile(r'kmime_mk_trivial_ctor_with_name\(\s*(\w+)\s*\)'),r'public: explicit \1( Content *parent = 0 ); \1( Content *parent, const QByteArray &s ); \1( Content *parent, const QString &s, const QByteArray &charset ); ~\1();const char *type() const; static const char *staticType();'),
]),
#[(re.compile(r'AKONADI_COLLECTION_PROPERTIES_PAGE_FACTORY\s*\(\s*(\S+)\s*,\s*(\w+)\s*\)'),r'']),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(["AKONADI_DECLARE_PRIVATE"]),
bareMacros=qtkdemacros.QtBareMacros(["AKONADI_EXPORT","AKONADI_EXPORT_DEPRECATED","KDE_EXPORT",
"KDE_DEPRECATED","Q_INVOKABLE","KABC_EXPORT","KABC_EXPORT_DEPRECATED","AKONADI_KABC_EXPORT","AKONADI_KMIME_EXPORT","AKONADI_KMIME_EXPORT_DEPRECATED","KMIME_EXPORT","KMIME_EXPORT_DEPRECATED"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","kdeui/kdeuimod.sip","kdecore/kdecoremod.sip","kio/kiomod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["AKONADI_EXPORT","AKONADI_KABC_EXPORT","AKONADI_KMIME_EXPORT","KDE_EXPORT","AKONADI_EXPORT_DEPRECATED","AKONADI_KMIME_EXPORT_DEPRECATED","KABC_EXPORT","KABC_EXPORT_DEPRECATED","KMIME_EXPORT","KMIME_EXPORT_DEPRECATED"],
noCTSCC=["Collection","Entity","Item"],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
polkitqt = toolkit.ModuleGenerator(
module="PyKDE4.polkitqt",
outputDirectory=os.path.join(outputBaseDirectory,"sip/polkitqt"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/polkitqt"),
mainDocs=os.path.join(polkitqtBaseDirectory,"Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(polkitqtBaseDirectory,"CMakeLists.txt")],
ignoreHeaders="""export.h polkitqtversion.h""".split(" "),
#resourcebase.h agentbase.h
#noUpdateSip=["iterator.sip"],
#ignoreBases=["QDBusContext"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["POLKIT_QT_EXPORT","POLKITQT1_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["POLKIT_QT_EXPORT","KDE_EXPORT"],
#noCTSCC=[],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
phonon = toolkit.ModuleGenerator(
module="PyKDE4.phonon",
outputDirectory=os.path.join(outputBaseDirectory,"sip/phonon"),
docsOutputDirectory=os.path.join(outputBaseDirectory, "docs/html/phonon"),
mainDocs=os.path.join(cmakelistPhononBaseDirectory,"Mainpage.dox"),
# .h file extraction
cmakelists=[os.path.join(cmakelistPhononBaseDirectory,"phonon/CMakeLists.txt")],
ignoreHeaders="""phonondefs.h phonon_export.h export.h kaudiodevicelist_export.h phononnamespace.h addoninterface.h volumefaderinterface.h backendinterface.h effectinterface.h mediaobjectinterface.h platformplugin.h audiodataoutputinterface.h audiooutputinterface.h""".split(" "),
noUpdateSip=["phononnamespace.sip"],
ignoreBases=["QSharedData"],
#ignoreBases=["AbstractAudioOutput", "Phonon::AbstractAudioOutput", "QSharedData", "AbstractVideoOutput",
# "Phonon::AbstractVideoOutput"],
# Cpp parsing
preprocessSubstitutionMacros=qtkdemacros.QtPreprocessSubstitutionMacros(),
preprocessorValues={"Q_WS_X11": 1, "QT_VERSION": "0x040400", "_MSC_VER": 0},
macros=qtkdemacros.QtMacros(),
bareMacros=qtkdemacros.QtBareMacros(["PHONON_EXPORT","PHONONEXPERIMENTAL_EXPORT", "PHONON_DEPRECATED",
"PHONON_EXPORT_DEPRECATED", "KAUDIODEVICELIST_EXPORT"]),
# Sip generation
sipImportDirs=[sipImportDir,os.path.join(outputBaseDirectory,"sip")],
sipImports=["QtCore/QtCoremod.sip","QtGui/QtGuimod.sip","QtXml/QtXmlmod.sip","solid/solidmod.sip"],
copyrightNotice=qtkdemacros.copyrightNotice(),
exportMacros=["PHONON_EXPORT", "KDE_EXPORT", "PHONONEXPERIMENTAL_EXPORT", "KAUDIODEVICELIST_EXPORT", "PHONON_DEPRECATED", "PHONON_EXPORT_DEPRECATED"],
#noCTSCC=[],
annotationRules=[
toolkit.AnnotationRule(
methodTypeMatch="ctor",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch=["parent"],
annotations="TransferThis"),
toolkit.AnnotationRule(
methodTypeMatch="function",
parameterTypeMatch=["QWidget*","QObject*"],
parameterNameMatch="parent",
annotations="Transfer")
]
)
###########################################################################
def updateSIP():
kdecore.run()
plasma.run()
kdeui.run()
kio.run()
kutils.run()
solid.run()
kparts.run()
khtml.run()
knewstuff.run()
dnssd.run()
nepomuk.run()
soprano.run()
akonadi.run()
polkitqt.run()
phonon.run()
def updateDocs():
classNames = []
nsNames = []
def UpdateClassNamespaceList(moduleName,sipScopes):
nsNames.append( (moduleName,'global', 'global') )
def ExtractClassNamespace(scope):
for item in scope:
if isinstance(item,sipsymboldata.SymbolData.SipClass):
classNames.append( (moduleName, item.fqPythonName(), item.fqPythonName()) )
ExtractClassNamespace(item)
elif isinstance(item,sipsymboldata.SymbolData.Namespace):
nsTuple = (moduleName,item.fqPythonName(),item.fqPythonName())
if nsTuple not in nsNames:
nsNames.append( nsTuple )
ExtractClassNamespace(item)
for scope in sipScopes:
ExtractClassNamespace(scope)
UpdateClassNamespaceList('kdecore',kdecore.docs())
UpdateClassNamespaceList('plasma',plasma.docs())
UpdateClassNamespaceList('kdeui',kdeui.docs())
UpdateClassNamespaceList('kio',kio.docs())
UpdateClassNamespaceList('kutils',kutils.docs())
UpdateClassNamespaceList('solid',solid.docs())
UpdateClassNamespaceList('kparts',kparts.docs())
UpdateClassNamespaceList('khtml',khtml.docs())
UpdateClassNamespaceList('knewstuff',knewstuff.docs())
UpdateClassNamespaceList('dnssd',dnssd.docs())
UpdateClassNamespaceList('nepomuk',nepomuk.docs())
UpdateClassNamespaceList('soprano',soprano.docs())
UpdateClassNamespaceList('akonadi',akonadi.docs())
UpdateClassNamespaceList('polkitqt',polkitqt.docs())
UpdateClassNamespaceList('phonon',phonon.docs())
print("Writing all classes index:")
toolkit.ModuleGenerator.WriteAllClasses(os.path.join(outputBaseDirectory,"docs/html"),nsNames,classNames)
print("Done")
def main():
updateSIP()
updateDocs()
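# Usage sketch (assumes the directory variables configured above are valid on
# this machine): generators can also be run selectively instead of via main(),
# e.g.
#     phonon.run()     # regenerate only the PyKDE4.phonon sip bindings
#     phonon.docs()    # parse its sip scopes for the class/namespace index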
if __name__=="__main__":
main()
| lgpl-3.0 | -8,934,794,075,207,159,000 | 43.505325 | 738 | 0.658468 | false |
josiah-wolf-oberholtzer/supriya | tests/nonrealtime/test_nonrealtime_Session_zero_duration.py | 1 | 2475 | import pytest
import supriya.assets.synthdefs
import supriya.nonrealtime
import supriya.synthdefs
import supriya.ugens
def test_manual_with_gate():
session = supriya.nonrealtime.Session(0, 2)
with session.at(0):
group = session.add_group(duration=4)
for i in range(4):
with session.at(i):
group.add_synth(duration=0)
d_recv_commands = pytest.helpers.build_d_recv_commands(
[supriya.assets.synthdefs.default]
)
assert session.to_lists(duration=5) == [
[
0.0,
[
*d_recv_commands,
["/g_new", 1000, 0, 0],
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1001, 0, 1000],
["/n_set", 1001, "gate", 0],
],
],
[
1.0,
[
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1002, 0, 1000],
["/n_set", 1002, "gate", 0],
],
],
[
2.0,
[
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1003, 0, 1000],
["/n_set", 1003, "gate", 0],
],
],
[
3.0,
[
["/s_new", "da0982184cc8fa54cf9d288a0fe1f6ca", 1004, 0, 1000],
["/n_set", 1004, "gate", 0],
],
],
[4.0, [["/n_free", 1000]]],
[5.0, [[0]]],
]
def test_manual_without_gate():
with supriya.synthdefs.SynthDefBuilder() as builder:
source = supriya.ugens.DC.ar(1)
supriya.ugens.Out.ar(bus=0, source=source)
source_synthdef = builder.build()
session = supriya.nonrealtime.Session(0, 1)
with session.at(0):
group = session.add_group(duration=4)
for i in range(4):
with session.at(i):
group.add_synth(duration=0, synthdef=source_synthdef)
assert session.to_lists(duration=10) == [
[
0.0,
[
["/d_recv", bytearray(source_synthdef.compile())],
["/g_new", 1000, 0, 0],
["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1001, 0, 1000],
],
],
[1.0, [["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1002, 0, 1000]]],
[2.0, [["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1003, 0, 1000]]],
[3.0, [["/s_new", "7839f99c38c2ac4326388a013cdd643c", 1004, 0, 1000]]],
[4.0, [["/n_free", 1000]]],
[10.0, [[0]]],
]
| mit | 6,077,712,018,084,925,000 | 29.9375 | 79 | 0.486465 | false |
pombredanne/hitch | hitch/commandline.py | 1 | 10374 | """High level command line interface to hitch."""
from subprocess import call, PIPE, STDOUT, CalledProcessError, Popen
from hitch.click import command, group, argument, option
from os import path, makedirs, listdir, kill, remove
from sys import stderr, exit, modules, argv
from functools import partial
from hitch import hitchdir
import shutil
import signal
import copy
def check_output(command, stdout=PIPE, stderr=PIPE):
"""Re-implemented subprocess.check_output since it is not available < python 2.7."""
return Popen(command, stdout=stdout, stderr=stderr).communicate()[0]
@group()
def cli():
pass
@command()
@option(
'-p', '--python', default=None,
help="""Create hitch virtualenv using specific python version"""
""" (e.g. /usr/bin/python3). Defaults to using python3 on the system path."""
)
@option(
'-v', '--virtualenv', default=None,
help="""Create hitch virtualenv using specific virtualenv"""
""" (e.g. /usr/bin/virtualenv). Defaults to using virtualenv on the system path."""
)
def init(python, virtualenv):
"""Initialize hitch in this directory."""
if virtualenv is None:
if call(["which", "virtualenv"], stdout=PIPE, stderr=PIPE):
stderr.write("You must have virtualenv installed to use hitch.\n")
stderr.flush()
exit(1)
virtualenv = check_output(["which", "virtualenv"]).decode('utf8').replace("\n", "")
else:
if path.exists(virtualenv):
if python is None:
python = path.join(path.dirname(virtualenv), "python")
else:
stderr.write("{} not found.\n".format(virtualenv))
if python is None:
if call(["which", "python3"], stdout=PIPE, stderr=PIPE):
stderr.write(
"To use Hitch, you must have python 3 installed on your system "
"and available. If your python3 is not on the system path with "
"the name python3, specify its exact location using --python.\n"
)
stderr.flush()
exit(1)
python3 = check_output(["which", "python3"]).decode('utf8').replace("\n", "")
else:
if path.exists(python):
python3 = python
else:
stderr.write("{} not found.\n".format(python))
exit(1)
str_version = check_output([python3, "-V"], stderr=STDOUT).decode('utf8').replace('\n', '')
tuple_version = tuple([int(v) for v in str_version.replace('Python ', '').split('.')])
if tuple_version < (3, 3):
stderr.write(
"The hitch environment must have python >=3.3 installed to be built.\n Your "
"app can run with earlier versions of python, but the testing environment can't.\n"
)
exit(1)
if hitchdir.hitch_exists():
stderr.write("Hitch has already been initialized in this directory or a directory above it.\n")
stderr.write("If you wish to re-initialize hitch in this directory, run 'hitch clean' in the")
stderr.write("directory containing the .hitch directory and run hitch init here again.\n")
stderr.flush()
exit(1)
makedirs(".hitch")
pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip"))
call([virtualenv, ".hitch/virtualenv", "--no-site-packages", "--distribute", "-p", python3])
call([pip, "install", "-U", "pip"])
if path.exists("hitchreqs.txt"):
call([pip, "install", "-r", "hitchreqs.txt"])
else:
call([pip, "install", "hitchtest"])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
def update_requirements():
"""Check hitchreqs.txt match what's installed via pip freeze. If not, update."""
pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip")
hitchreqs_filename = path.join(hitchdir.get_hitch_directory_or_fail(), "..", "hitchreqs.txt")
pip_freeze = check_output([pip, "freeze"]).decode('utf8').split('\n')
hitchreqs_handle = ""
with open(hitchreqs_filename, "r") as hitchreqs_handle:
hitchreqs = hitchreqs_handle.read().split('\n')
if not sorted(pip_freeze) == sorted(hitchreqs):
call([pip, "install", "-r", "hitchreqs.txt"])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
def get_pip():
"""Get the file path to the hitch pip."""
return path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip")
@command(context_settings={'help_option_names':[],'ignore_unknown_options':True}, help="dd")
@argument('arguments', nargs=-1)
def runpackage(arguments):
# Generic method to run any installed app in the virtualenv whose name starts with hitch*
update_requirements()
binfile = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "hitch{}".format(argv[1]))
command = [binfile, ] + argv[2:]
# When receiving an exit signal, just forward it to process child.
def forward_signal_to_child(pid, signum, frame):
kill(pid, signum)
process = Popen(command)
signal.signal(signal.SIGINT, partial(forward_signal_to_child, process.pid))
signal.signal(signal.SIGTERM, partial(forward_signal_to_child, process.pid))
signal.signal(signal.SIGHUP, partial(forward_signal_to_child, process.pid))
signal.signal(signal.SIGQUIT, partial(forward_signal_to_child, process.pid))
return_code = process.wait()
exit(return_code)
@command()
@argument('package', required=True)
def uninstall(package):
"""Uninstall hitch package."""
pip = get_pip()
call([pip, "uninstall", package] )
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
@command()
@argument('package', required=True)
def install(package):
"""Install hitch package."""
pip = get_pip()
call([pip, "install", package, "-U", ])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
@command()
def upgrade():
"""Upgrade all installed hitch packages."""
pip = get_pip()
package_list = [
p for p in check_output([pip, "freeze"]).decode('utf8').split('\n')
if p != "" and "==" in p
]
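    # Drop the '==version' pins so pip installs the latest release of each package.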
version_fixed_package_list = [p.split("==")[0] for p in package_list]
for package in version_fixed_package_list:
call([pip, "install", package, "-U", ])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
@command()
def freeze():
"""List installed hitch packages."""
pip = path.join(hitchdir.get_hitch_directory_or_fail(), "virtualenv", "bin", "pip")
call([pip, "freeze", ])
@command()
def clean():
"""Remove the hitch directory entirely."""
if hitchdir.hitch_exists():
hitch_directory = hitchdir.get_hitch_directory_or_fail()
shutil.rmtree(hitch_directory)
else:
stderr.write("No hitch directory found. Doing nothing.\n")
stderr.flush()
@command()
@option(
'-p', '--packages', default=None, help=(
"Specify precise packages to remove - "
"e.g. postgresql, postgresql-9.3.9, python, python2.6.8"
)
)
def cleanpkg(packages):
"""Remove installed packages from the .hitchpkg directory."""
hitchpkg = path.join(path.expanduser("~"), ".hitchpkg")
if path.exists(hitchpkg):
if packages is None:
shutil.rmtree(hitchpkg)
else:
            for file_or_dir in listdir(hitchpkg):
                if file_or_dir.startswith(packages):
                    full_path = path.join(hitchpkg, file_or_dir)
                    if path.isdir(full_path):
                        shutil.rmtree(full_path)
                    else:
                        remove(full_path)
def run():
"""Run hitch bootstrap CLI"""
def stop_everything(sig, frame):
"""Exit hitch."""
exit(1)
signal.signal(signal.SIGINT, stop_everything)
signal.signal(signal.SIGTERM, stop_everything)
signal.signal(signal.SIGHUP, stop_everything)
signal.signal(signal.SIGQUIT, stop_everything)
if hitchdir.hitch_exists():
if not path.exists(path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin")):
stderr.write("Hitch was initialized in this directory (or one above it), but something.\n")
stderr.write("was corrupted. Try running 'hitch clean' and then run 'hitch init' again.")
stderr.flush()
exit(1)
# Get packages from bin folder that are hitch related
python_bin = path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin", "python")
packages = [
package.replace("hitch", "") for package in listdir(
path.join(hitchdir.get_hitch_directory(), "virtualenv", "bin")
)
if package.startswith("hitch") and package != "hitch"
]
# Add packages that start with hitch* to the list of commands available
for package in packages:
cmd = copy.deepcopy(runpackage)
cmd.name = package
try:
description = check_output([
python_bin, '-c',
'import sys;sys.stdout.write(__import__("hitch{}").commandline.cli.help)'.format(
package
)
]).decode('utf8')
except CalledProcessError:
description = ""
cmd.help = description
cmd.short_help = description
cli.add_command(cmd)
cli.add_command(install)
cli.add_command(uninstall)
cli.add_command(upgrade)
cli.add_command(clean)
cli.add_command(freeze)
cli.add_command(init)
cli.help = "Hitch test runner for:\n\n {0}.".format(hitchdir.get_hitch_directory())
else:
cli.add_command(init)
cli.add_command(clean)
cli.help = "Hitch bootstrapper - '.hitch' directory not detected here."
cli()
if __name__ == '__main__':
run()
| agpl-3.0 | -3,730,368,201,072,467,000 | 36.182796 | 103 | 0.613264 | false |
locaweb/simplenet | src/simplenet/common/event.py | 1 | 4587 | # Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Thiago Morello, Locaweb.
# @author: Willian Molinari, Locaweb.
# @author: Juliano Martinez, Locaweb.
import socket
from kombu import BrokerConnection, Exchange, Queue
from simplenet.common.config import config, get_logger
logger = get_logger()
class EventManager(object):
def __init__(self):
self.url = config.get("event", "broker")
def raise_fanout_event(self, exchange, event_type, params, **kwargs):
logger.debug("Raising event %s with params: %s" % (event_type, params))
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"dhcp:fanout:%s" % exchange,
type="fanout",
durable=True)
if 'route' in kwargs:
routing_key = kwargs['route']
else:
queue = Queue(
event_type,
exchange=media_exchange,
routing_key=event_type
)
if params['action'] == 'new' or params['action'] == 'rebuild_queues':
queue(conn.channel()).declare()
return
elif params['action'] == 'remove':
try:
queue(conn.channel()).unbind()
except AttributeError:
queue(conn.channel()).unbind_from(exchange=media_exchange, routing_key=event_type)
return
else:
routing_key = event_type
with conn.Producer(exchange=media_exchange, serializer="json",
routing_key=routing_key) as producer:
logger.debug("Publishing %s" % params)
producer.publish(params)
def raise_event(self, event_type, params, **kwargs):
logger.debug("Raising event %s with params: %s" % (event_type, params))
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"simplenet",
type="direct",
durable=True)
if 'route' in kwargs:
routing_key = kwargs['route']
else:
queue = Queue(
event_type,
exchange=media_exchange,
routing_key=event_type
)
queue(conn.channel()).declare()
routing_key = event_type
with conn.Producer(exchange=media_exchange, serializer="json",
routing_key=routing_key) as producer:
logger.debug("Publishing %s" % params)
producer.publish(params)
def listen_event(self, queue_name, callback):
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"simplenet",
type="direct",
durable=True
)
queue = Queue(
queue_name,
exchange=media_exchange,
routing_key=queue_name
)
logger.info("Listening for data...")
with conn.Consumer([queue], callbacks=[callback]) as consumer:
while True:
conn.drain_events()
def bind_queue(self, queue_name, routing_key):
with BrokerConnection(self.url) as conn:
conn.ensure_connection()
media_exchange = Exchange(
"simplenet",
type="direct",
durable=True
)
queue = Queue(
queue_name,
exchange=media_exchange,
routing_key=routing_key
)
queue(conn.channel()).declare()
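# Minimal usage sketch (queue name and payload are hypothetical):
#     manager = EventManager()
#     manager.raise_event("dhcp-update", {"action": "new", "id": 42})
#     manager.listen_event("dhcp-update",
#                          lambda body, message: message.ack())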
| mit | 479,172,603,894,329,150 | 33.75 | 106 | 0.51755 | false |
certik/sfepy | sfepy/base/conf.py | 1 | 10020 | import re
from base import Struct, IndexedStruct, dict_to_struct, pause, output, copy,\
import_file, assert_, get_default
from reader import Reader
_required = ['filename_mesh', 'field_[0-9]+|fields',
'ebc_[0-9]+|ebcs', 'fe', 'equations',
'region_[0-9]+|regions', 'variable_[0-9]+|variables',
'material_[0-9]+|materials', 'integral_[0-9]+|integrals',
'solver_[0-9]+|solvers']
_other = ['epbc_[0-9]+|epbcs', 'lcbc_[0-9]+|lcbcs', 'nbc_[0-9]+|nbcs',
'ic_[0-9]+|ics', 'options']
##
# c: 19.02.2008, r: 19.02.2008
def get_standard_keywords():
return copy( _required ), copy( _other )
##
# c: 10.04.2008, r: 10.04.2008
def tuple_to_conf( name, vals, order ):
conf = Struct( name = name )
for ii, key in enumerate( order ):
setattr( conf, key, vals[ii] )
return conf
##
# Short syntax: key is suffixed with '__<number>' to prevent collisions with
# long syntax keys -> both cases can be used in a single input.
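# For example {'u' : ('unknown field', 'displacement', 1)} (the field name is
# illustrative) becomes Struct 'variable_u__0' with kind='unknown field',
# field='displacement' and order=1.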
def transform_variables( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['kind', 'field'] )
if len( conf ) >= 3:
kind = c2.kind.split()[0]
if kind == 'unknown':
c2.order = conf[2]
elif kind == 'test':
c2.dual = conf[2]
elif kind == 'parameter':
c2.like = conf[2]
if len( conf ) == 4:
c2.history = conf[3]
d2['variable_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['variable_'+c2.name] = c2
return d2
##
# c: 10.04.2008, r: 06.05.2008
def transform_ebcs( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['region', 'dofs'] )
d2['ebc_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['ebc_'+c2.name] = c2
return d2
def transform_ics( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['region', 'dofs'] )
d2['ic_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['ic_'+c2.name] = c2
return d2
##
# c: 02.05.2008, r: 06.05.2008
def transform_regions( adict ):
d2 = {}
for ii, (key, conf) in enumerate( adict.iteritems() ):
if isinstance( conf, tuple ):
c2 = tuple_to_conf( key, conf, ['select', 'flags'] )
for flag, val in c2.flags.iteritems():
setattr( c2, flag, val )
delattr( c2, 'flags' )
d2['region_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1( conf )
d2['region_'+c2.name] = c2
return d2
##
# c: 20.06.2007, r: 18.02.2008
def transform_to_struct_1( adict ):
return dict_to_struct( adict, flag = (1,) )
def transform_to_i_struct_1( adict ):
return dict_to_struct( adict, flag = (1,), constructor = IndexedStruct )
def transform_to_struct_01( adict ):
return dict_to_struct( adict, flag = (0,1) )
def transform_to_struct_10( adict ):
return dict_to_struct( adict, flag = (1,0) )
transforms = {
'options' : transform_to_i_struct_1,
'solvers' : transform_to_struct_01,
'integrals' : transform_to_struct_01,
'opt' : transform_to_struct_1,
'fe' : transform_to_struct_1,
'regions' : transform_regions,
'shape_opt' : transform_to_struct_10,
'fields' : transform_to_struct_01,
'variables' : transform_variables,
'ebcs' : transform_ebcs,
'epbcs' : transform_to_struct_01,
'nbcs' : transform_to_struct_01,
'lcbcs' : transform_to_struct_01,
'ics' : transform_ics,
}
##
# 27.10.2005, c
class ProblemConf( Struct ):
"""
Problem configuration, corresponding to an input (problem description
file). It validates the input using lists of required and other keywords
that have to/can appear in the input. Default keyword lists can be obtained
by sfepy.base.conf.get_standard_keywords().
ProblemConf instance is used to construct a ProblemDefinition instance via
ProblemDefinition.from_conf( conf ).
"""
##
# c: 25.07.2006, r: 10.07.2008
def from_file( filename, required = None, other = None ):
"""
Loads the problem definition from a file.
The filename can either contain plain definitions, or it can contain
the define() function, in which case it will be called to return the
input definitions.
The job of the define() function is to return a dictionary of
parameters. How the dictionary is constructed is not our business, but
the usual way is to simply have a function define() along these lines
in the input file:
def define():
options = {
'save_eig_vectors' : None,
'eigen_solver' : 'eigen1',
}
region_2 = {
'name' : 'Surface',
'select' : 'nodes of surface',
}
...
return locals()
"""
funmod = import_file( filename )
obj = ProblemConf()
if "define" in funmod.__dict__:
define_dict = funmod.__dict__["define"]()
else:
define_dict = funmod.__dict__
obj.__dict__.update( define_dict )
obj.setup( define_dict, funmod, filename, required, other )
return obj
from_file = staticmethod( from_file )
def from_module( module, required = None, other = None ):
obj = ProblemConf()
obj.__dict__.update( module.__dict__ )
obj.setup( funmod = module, required = required, other = other )
return obj
from_module = staticmethod( from_module )
def from_dict( dict_, funmod, required = None, other = None ):
obj = ProblemConf()
obj.__dict__.update( dict_ )
obj.setup( funmod = funmod, required = required, other = other )
return obj
from_dict = staticmethod( from_dict )
def setup( self, define_dict = None, funmod = None, filename = None,
required = None, other = None ):
define_dict = get_default( define_dict, self.__dict__ )
self._filename = filename
other_missing = self.validate( required = required, other = other )
for name in other_missing:
setattr( self, name, None )
self.transform_input_trivial()
self._raw = {}
for key, val in define_dict.iteritems():
if isinstance( val, dict ):
self._raw[key] = copy( val )
self.transform_input()
self.funmod = funmod
##
# 27.10.2005, c
# 19.09.2006
# 05.06.2007
def _validate_helper( self, items, but_nots ):
keys = self.__dict__.keys()
left_over = keys[:]
if but_nots is not None:
for item in but_nots:
match = re.compile( '^' + item + '$' ).match
for key in keys:
if match( key ):
left_over.remove( key )
missing = []
if items is not None:
for item in items:
found = False
match = re.compile( '^' + item + '$' ).match
for key in keys:
if match( key ):
found = True
left_over.remove( key )
if not found:
missing.append( item )
return left_over, missing
##
# c: 27.10.2005, r: 11.07.2008
def validate( self, required = None, other = None ):
required_left_over, required_missing \
= self._validate_helper( required, other )
other_left_over, other_missing \
= self._validate_helper( other, required )
assert_( required_left_over == other_left_over )
err = False
if required_missing:
err = True
output( 'error: required missing:', required_missing )
if other_left_over:
output( 'left over:', other_left_over )
if err:
raise ValueError
return other_missing
##
# c: 31.10.2005, r: 10.07.2008
def transform_input_trivial( self ):
"""Trivial input transformations."""
##
# Unordered inputs.
tr_list = ['([a-zA-Z0-9]+)_[0-9]+']
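        # e.g. keys like 'region_1' or 'material_2' are gathered below into
        # the plural dicts self.regions / self.materials.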
# Keywords not in 'required', but needed even empty (e.g. for run_tests).
for key in transforms.keys():
if not self.__dict__.has_key( key ):
self.__dict__[key] = {}
keys = self.__dict__.keys()
for item in tr_list:
match = re.compile( item ).match
for key in keys:
obj = match( key )
if obj:
new = obj.group( 1 ) + 's'
result = {key : self.__dict__[key]}
try:
self.__dict__[new].update( result )
except:
self.__dict__[new] = result
del self.__dict__[key]
def transform_input( self ):
keys = self.__dict__.keys()
for key, transform in transforms.iteritems():
if not key in keys: continue
self.__dict__[key] = transform( self.__dict__[key] )
def get_raw( self, key = None ):
if key is None:
return self._raw
else:
return self._raw[key]
def edit( self, key, newval ):
self.__dict__[key] = transforms[key]( newval )
| bsd-3-clause | 5,469,896,234,037,747,000 | 31.960526 | 81 | 0.518762 | false |
asposeforcloud/Aspose_Cloud_SDK_For_Python | asposecloud/email/__init__.py | 1 | 7149 | __author__ = 'assadmahmood'
import requests
import json
from asposecloud import Product
from asposecloud import AsposeApp
from asposecloud.common import Utils
# ========================================================================
# DOCUMENT CLASS
# ========================================================================
class Document:
def __init__(self, filename):
self.filename = filename
if not filename:
raise ValueError("filename not specified")
self.base_uri = Product.product_uri + 'email/' + self.filename
def get_property(self, property_name, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param property_name:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
str_uri = self.base_uri + '/properties/' + property_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.get(signed_uri, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
})
response.raise_for_status()
response = response.json()
except requests.HTTPError as e:
print e
print response.content
exit(1)
return response['EmailProperty']['Value']
def set_property(self, property_name, property_value, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param property_name:
:param property_value:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
str_uri = self.base_uri + '/properties/' + property_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
json_data = json.dumps({'Value': property_value})
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.put(signed_uri, json_data, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
})
response.raise_for_status()
response = response.json()
except requests.HTTPError as e:
print e
print response.content
exit(1)
return response['EmailProperty']['Value']
def get_attachment(self, attachment_name, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param attachment_name:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
if not attachment_name:
raise ValueError("attachment_name not specified")
str_uri = self.base_uri + '/attachments/' + attachment_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.get(signed_uri, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
}, stream=True)
response.raise_for_status()
except requests.HTTPError as e:
print e
print response.content
exit(1)
validate_output = Utils.validate_result(response)
if not validate_output:
output_path = AsposeApp.output_path + attachment_name
Utils.save_file(response, output_path)
return output_path
else:
return validate_output
def add_attachment(self, attachment_name, remote_folder='', storage_type='Aspose', storage_name=None):
"""
:param attachment_name:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
str_uri = self.base_uri + '/attachments/' + attachment_name
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.post(signed_uri, None, headers={
'content-type': 'application/json', 'accept': 'application/json'
})
response.raise_for_status()
response = response.json()
except requests.HTTPError as e:
print e
print response.content
exit(1)
return response
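# Usage sketch (assumes 'message.msg' is already uploaded to Aspose storage;
# the property names are illustrative):
#     doc = Document("message.msg")
#     sender = doc.get_property("Sender")
#     doc.set_property("Subject", "Updated subject")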
# ========================================================================
# CONVERTER CLASS
# ========================================================================
class Converter:
def __init__(self, filename):
self.filename = filename
if not filename:
raise ValueError("filename not specified")
self.base_uri = Product.product_uri + 'email/' + self.filename
def convert(self, save_format, stream_out=False, output_filename=None,
remote_folder='', storage_type='Aspose', storage_name=None):
"""
convert an email message document to a different format
:param save_format:
:param output_filename:
:param remote_folder: storage path to operate
:param storage_type: type of storage e.g Aspose, S3
:param storage_name: name of storage e.g. MyAmazonS3
:return:
"""
if not save_format:
raise ValueError("save_format not specified")
str_uri = self.base_uri + '?format=' + save_format
str_uri = Utils.append_storage(str_uri, remote_folder, storage_type, storage_name)
signed_uri = Utils.sign(str_uri)
response = None
try:
response = requests.get(signed_uri, headers={
'content-type': 'application/json', 'accept': 'application/json', 'x-aspose-client' : 'PYTHONSDK/v1.0'
}, stream=True)
response.raise_for_status()
except requests.HTTPError as e:
print e
print response.content
exit(1)
validate_output = Utils.validate_result(response)
if not validate_output:
if not stream_out:
if output_filename is None:
output_filename = self.filename
output_path = AsposeApp.output_path + Utils.get_filename(output_filename) + '.' + save_format
Utils.save_file(response, output_path)
return output_path
else:
return response.content
else:
return validate_output
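# Usage sketch (the target format is illustrative):
#     converter = Converter("message.msg")
#     saved_path = converter.convert("mht")                   # saves locally
#     raw_bytes = converter.convert("mht", stream_out=True)   # returns content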
| mit | -1,981,531,942,469,692,000 | 34.567164 | 118 | 0.566093 | false |
swprojects/wxPieTool | pyimager.py | 1 | 8720 | """
wxPieTool - wxPython Image Embedding Tool
Copyright 2016 Simon Wu <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import base64
import binascii
import multiprocessing
import os
import tempfile
import wx
import importlib.util
from wx.lib.embeddedimage import PyEmbeddedImage
#------------------------------------------------------------------------------
PROCESS_COUNT = multiprocessing.cpu_count() - 1
def WritePyImageFile(output_file, pyfiledata):
""" writes a new pyImages file from pyfiledata """
py_images_file = open(output_file, 'w') # delete any existing file.
"""
Write the relevant header portion and the import statements
Writes Python statements to the output pyImages file.
"""
py_images_file.write('#' + '-'*69 + '\n\n')
line = '# This file was generated by %s\n\n' %("wxImage Embedding Tool")
py_images_file.write(line)
py_images_file.write('import wx\n')
py_images_file.write('from wx.lib.embeddedimage import PyEmbeddedImage\n')
py_images_file.write('\n')
py_images_file.write('image_index = {}\n')
py_images_file.write('image_catalog = {}\n')
"""
Writes the Python code to the output pyImages file that both define an image
and to be able to generate raw data, wx.Image, wx.Bitmap and wx.Icon objects
when its pyImmages file is imported by any Python application.
"""
for index in sorted(pyfiledata.keys()):
values = pyfiledata[index]
name = values["name"]
data = values["data"]
py_images_file.write('#' + '-'*69 + '\n\n')
py_images_file.write('image_catalog["%s"] = PyEmbeddedImage(\n%s\n' % (name, data))
py_images_file.write(' )\n\n')
        # When the PyImages file is imported, the following dictionary maps
        # each numeric index to its image name in image_catalog.
py_images_file.write('image_index[%s] = "%s"\n' % (str(index), name))
py_images_file.write('\n')
"""
Writes the Get functions at the end of the file
"""
py_images_file.write('#' + '-'*69 + '\n\n')
# get data function
    py_images_file.write('def GetData(name):\n')
    py_images_file.write('    ')
    py_images_file.write('return image_catalog[name].GetData()\n')
    py_images_file.write('\n')
    # scale image function
    py_images_file.write('def ScaleImage(name, width, height):\n')
    py_images_file.write('    ')
    py_images_file.write('image = image_catalog[name].GetImage()\n')
    py_images_file.write('    ')
    py_images_file.write('image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)\n')
    py_images_file.write('    ')
    py_images_file.write('return image\n')
    py_images_file.write('\n')
    for func_name in ["Image","Bitmap","Icon"]:
        py_images_file.write('def Get%s(name, width=-1, height=-1):\n' % func_name)
        py_images_file.write('    ')
        py_images_file.write('if (width,height) == (-1,-1):\n')
        py_images_file.write('        ')
        py_images_file.write('return image_catalog[name].Get%s()\n' % func_name)
        py_images_file.write('    ')
        py_images_file.write('else:\n')
        py_images_file.write('        ')
        py_images_file.write('image = ScaleImage(name, width, height)\n')
        py_images_file.write('        ')
        py_images_file.write('image = wx.%s(image)\n' % func_name)
        py_images_file.write('        ')
        py_images_file.write('return image\n')
py_images_file.write('\n')
py_images_file.close()
#end WritePyImageFile def
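# Usage sketch for a module produced by WritePyImageFile (module and image
# names are hypothetical):
#     import my_images
#     bmp = my_images.GetBitmap("logo")           # full-size wx.Bitmap
#     icon = my_images.GetIcon("logo", 16, 16)    # scaled on the fly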
#------------------------------------------------------------------------------
def B64EncodeBinaryData(image_data):
"""
    B64 encodes a binary byte string. Returns a single newline-joined string
    of quoted b64 lines suitable for embedding in a Python file.
"""
# Encode the PNG file's lossless-compressed binary image data into a single, big b64 string.
encoded_data = binascii.b2a_base64(image_data)[:-1]
# encoded_data= image_data.encode("base64")
# encoded_data=image_data
# Chop the b64 character-encoded encoded_data into manageable
# line lengths for writing to a file.
data_list = [] # b64 linesOfEncPngImgData list.
while encoded_data:
        line_of_data = encoded_data[:57] # take 57 encoded chars per output line
encoded_data = encoded_data[57:] # The remainder of data to be encoded.
# extract the string from b"<str>"
line_of_data = line_of_data.decode("utf8")
line_of_data = ' "%s"' %(line_of_data)
data_list.append(line_of_data)
image_data_list = '\n'.join(data_list)
return image_data_list
#end B64EncodeBinaryData def
#------------------------------------------------------------------------------
def BitmapToPngFile(bitmap, tmp_file) :
"""
Save a wx.Bitmap to a PNG file. The contents of this file is intended
to be b64 encoded in order to finally save it to the output pyImages file.
"""
if bitmap.SaveFile(tmp_file, wx.BITMAP_TYPE_PNG): # SaveFile() success
return True
elif wx.Image(bitmap).SaveFile(tmp_file, wx.BITMAP_TYPE_PNG):
# wx.Bitmap.SaveFile() has failed.
# Try a different save method.
return True
else:
return None
#end BitmapToPngFile def
def CreatePngFileData(path) :
"""
    Return the PNG data of an image file, which can then be passed to B64EncodeBinaryData.
"""
if not os.path.exists(path):
return None #"File no longer exists. Cancel import"
try:
bitmap = wx.Bitmap(path, wx.BITMAP_TYPE_ANY)
except:
return None #"File no longer exists. Cancel import"
# Is image file bad?
if not bitmap.IsOk():
return None #"File no longer exists. Cancel import"
# Read the original image file and write it to a new PNG file.
tmp_file = tempfile.TemporaryFile()
tmp_file = tmp_file.name # get the path of temporary file
# print(dir(tmp_file))
bmp_to_png = BitmapToPngFile(bitmap, tmp_file)
if not bmp_to_png:
print("cannot write to temporary file")
# Encode the PNG file's lossless-compressed binary image data into a single, big b64 string.
png_file = open(tmp_file, 'rb')
image_data = png_file.read()
# b64 = image_data.encode ('base64')
png_file.close()
os.remove(tmp_file)
# print("creating temporary file",tmp_file, image_data )
from wx.lib.embeddedimage import PyEmbeddedImage
return image_data
#end CreatePngFileData def
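# Usage sketch chaining the two helpers above (file name is hypothetical):
#     png_data = CreatePngFileData("icon.png")     # raw PNG bytes, or None
#     if png_data is not None:
#         encoded = B64EncodeBinaryData(png_data)  # quoted, line-wrapped b64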
def GetPyImageData(pyimage):
"""
Import the embedded_image_file and add its images to image_dict{}.
The file's existance is expected to have been verified.
"""
file_name, file_ext = os.path.splitext(os.path.basename(pyimage))
print(file_name, file_ext)
# import using the full path of the filename
# """http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path"""
try:
spec = importlib.util.spec_from_file_location("file_name", pyimage)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
except:
print("Failed to load file. Is it a python file?")
return
# check if the python file is actually a PyImages file.
try:
image_index = foo.image_index # should have been defined
# image_catalog = foo.image_catalog
print(image_index.items())
data = {}
for index, image_name in image_index.items():
data[index] = {"name":image_name,
"data":foo.GetData(image_name),
"bitmap":foo.GetBitmap(image_name)}
except NameError:
print("Failed to load file. Is it a valid PyEmbeddedImage File?" )
return
return data
#end GetPyImageData def | gpl-2.0 | 3,023,426,488,338,498,600 | 34.741803 | 96 | 0.607798 | false |
pli3/e2-openwbif | plugin/controllers/views/web/powerstate.py | 1 | 5042 | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.286449
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/powerstate.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class powerstate(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(powerstate, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_30375654 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2powerstate>
\t<e2instandby>
''')
if VFFSL(SL,"instandby",True) : # generated from line 5, col 3
_v = "true"
if _v is not None: write(_filter(_v))
else:
_v = "false"
if _v is not None: write(_filter(_v))
write(u'''\t</e2instandby>
</e2powerstate>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_30375654
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_powerstate= 'respond'
## END CLASS DEFINITION
if not hasattr(powerstate, '_initCheetahAttributes'):
templateAPIClass = getattr(powerstate, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(powerstate)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=powerstate()).run()
| gpl-2.0 | 7,429,331,706,846,930,000 | 32.838926 | 192 | 0.626934 | false |
erigones/esdc-ce | api/mon/backends/abstract/server.py | 1 | 1705 | from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
# noinspection PyProtectedMember
from vms.models.base import _DummyModel, _UserTasksModel
from vms.models import Dc
class AbstractMonitoringServer(_DummyModel, _UserTasksModel):
"""
Abstract model for representing a monitoring server in a DC.
"""
_pk_key = 'mon_server_id'
uri = NotImplemented
name = NotImplemented
address = NotImplemented
connection_id = NotImplemented
# noinspection PyPep8Naming
class Meta:
# Required for api.exceptions.ObjectNotFound
verbose_name_raw = _('Monitoring Server')
# noinspection PyUnusedLocal
def __init__(self, dc):
self.dc = dc
super(AbstractMonitoringServer, self).__init__()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
@property
def id(self):
return self.dc.id
@property
def owner(self): # Required by _UserTasksModel
return self.dc.owner
@property
def pk(self): # Required by task_log
return str(self.id)
@property
def log_name(self): # Required by task_log
return Truncator(self.uri).chars(32)
@property
def log_alias(self): # Required by task_log
return self.name
@classmethod
def get_content_type(cls): # Required by task_log
return None
@classmethod
def get_object_type(cls, content_type=None): # Required by task_log
return 'monitoringserver'
@classmethod
def get_object_by_pk(cls, pk):
dc = Dc.objects.get_by_id(pk)
return cls(dc)
MonitoringServerClass = AbstractMonitoringServer
| apache-2.0 | 4,198,058,031,038,698,500 | 24.833333 | 72 | 0.653959 | false |
umlfri/umlfri2 | umlfri2/qtgui/canvas/scrolledcanvaswidget.py | 1 | 1410 | from PyQt5.QtCore import QPoint
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QWheelEvent
from PyQt5.QtWidgets import QScrollArea
from .canvaswidget import CanvasWidget
class ScrolledCanvasWidget(QScrollArea):
def __init__(self, main_window, drawing_area):
super().__init__()
self.__canvas = CanvasWidget(main_window, drawing_area)
self.setWidget(self.__canvas)
self.setWidgetResizable(True)
def wheelEvent(self, event):
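        # Shift+wheel scrolls horizontally: swap the x/y components of the
        # pixel and angle deltas and re-dispatch the event so the scroll
        # area moves along the other axis.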
if event.modifiers() == Qt.ShiftModifier:
pixelDelta = event.pixelDelta()
angleDelta = event.angleDelta()
if angleDelta.x() == 0 and angleDelta.y() != 0:
delta = angleDelta.y()
orientation = Qt.Horizontal
else:
delta = angleDelta.x()
orientation = Qt.Vertical
super().wheelEvent(QWheelEvent(event.pos(), event.globalPos(),
QPoint(pixelDelta.y(), pixelDelta.x()),
QPoint(angleDelta.y(), angleDelta.x()),
delta, orientation,
event.buttons(), Qt.NoModifier))
else:
super().wheelEvent(event)
@property
def diagram(self):
return self.__canvas.diagram
| gpl-3.0 | 4,295,992,312,250,899,000 | 34.25 | 82 | 0.52766 | false |
carbureted/shavar-prod-lists | scripts/json_verify.py | 1 | 6963 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import json
import re
from types import DictType, ListType, UnicodeType
from urlparse import urlparse
parser = argparse.ArgumentParser(description='Verify json files for shavar.')
parser.add_argument("-f", "--file", help="filename to verify")
bad_uris = []
dupe_hosts = {
"properties": [],
"resources": []
}
block_host_uris = []
entity_host_uris = []
errors = []
file_contents = []
file_name = ""
result = 0
def run(file):
global file_name
file_name = file
try:
verify(file)
except:
errors.append("\tError: Problem handling file")
finish()
def verify(file):
try:
with open(file) as f:
raw_data = f.readlines()
# save contents of file, including line numbers
for x in range(0, len(raw_data)):
line_number = x+1
file_contents.append([raw_data[x], line_number])
# attempt to parse file as json
json_obj = json.loads("".join(raw_data))
try:
# determine which schema this file uses
if ("categories" in json_obj):
# google_mapping.json
# disconnect_blacklist.json
find_uris(json_obj["categories"])
else:
# disconnect_entitylist.json
find_uris_in_entities(json_obj)
except:
errors.append("\tError: Can't parse file")
except ValueError as e:
# invalid json formatting
errors.append("\tError: %s" % e)
return
except IOError as e:
# non-existent file
errors.append("\tError: Can't open file: %s" % e)
return
"""
categories_json is expected to match this format:
"categories": {
"Disconnect": [
{
"Facebook": {
"http://www.facebook.com/": [
"facebook.com",
...
]
}
},
{
"Google": {
"http://www.google.com/": [
"2mdn.net",
...
]
}
},
...
],
"Advertising": [
{
"[x+1]": {
"http://www.xplusone.com/": [
"ru4.com",
...
]
}
},
]
...
}
"""
def find_uris(categories_json):
assert type(categories_json) is DictType
for category, category_json in categories_json.iteritems():
assert type(category) is UnicodeType
assert type(category_json) is ListType
for entity in category_json:
assert type(entity) is DictType
for entity_name, entity_json in entity.iteritems():
assert type(entity_name) is UnicodeType
assert type(entity_json) is DictType
# pop dnt out of the dict, so we can iteritems() over the rest
try:
dnt_value = entity_json.pop('dnt', '')
assert dnt_value in ["w3c", "eff", ""]
except AssertionError:
errors.append("%s has bad DNT value: %s" % (entity_name,
dnt_value))
for domain, uris in entity_json.iteritems():
assert type(domain) is UnicodeType
assert type(uris) is ListType
for uri in uris:
check_uri(uri)
block_host_uris.append(uri)
def find_uris_in_entities(entitylist_json):
checked_uris = {
"properties": [],
"resources": []
}
    assert type(entitylist_json) is DictType
    assert len(entitylist_json.items()) > 0
for entity, types in entitylist_json.iteritems():
assert type(entity) is UnicodeType
assert type(types) is DictType
for host_type, uris in types.iteritems():
assert host_type in ["properties", "resources"]
assert type(uris) is ListType
for uri in uris:
if uri in checked_uris[host_type]:
dupe_hosts[host_type].append(uri)
check_uri(uri)
entity_host_uris.append(uri)
checked_uris[host_type].append(uri)
def check_uri(uri):
# Valid URI:
# no scheme, port, fragment, path or query string
# no disallowed characters
# no leading/trailing garbage
try:
uri.decode('ascii')
except UnicodeEncodeError:
bad_uris.append(uri)
parsed_uri = urlparse(uri)
try:
assert parsed_uri.scheme == ''
# domains of urls without schemes are parsed into 'path' so check path
# for port
assert ':' not in parsed_uri.path
assert parsed_uri.netloc == ''
assert parsed_uri.params == ''
assert parsed_uri.query == ''
assert parsed_uri.fragment == ''
assert len(parsed_uri.path) < 128
except AssertionError:
bad_uris.append(uri)
return
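# Examples of the rules above (hosts are hypothetical):
#   'tracker.example'          -> accepted: bare host, nothing else
#   'http://tracker.example/'  -> rejected: scheme present
#   'tracker.example:8080'     -> rejected: port present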
def find_line_number(uri):
line = 0
try:
for x in range(0, len(file_contents)):
temp = file_contents[x][0].decode("utf-8", "ignore")
if re.search(uri, temp):
line = file_contents[x][1]
file_contents.pop(x)
break
except ValueError as e:
print e
line = -1
return str(line)
def make_errors_from_bad_uris():
for bad_uri in bad_uris:
errors.append("\tError: Bad URI: %s\t: in line %s" %
(bad_uri, find_line_number(bad_uri)))
for host_type, hosts in dupe_hosts.iteritems():
for host in hosts:
errors.append("\tDupe: Dupe host: %s\t in line %s" %
(host, find_line_number(host)))
def finish():
make_errors_from_bad_uris()
    if not errors:
print "\n" + file_name + " : valid"
else:
global result
result = 1
print "\n" + file_name + " : invalid"
for error in errors:
print error
reset()
def reset():
global bad_uris
bad_uris = []
global dupe_hosts
dupe_hosts = {
"properties": [],
"resources": []
}
global errors
errors = []
global file_contents
file_contents = []
global file_name
file_name = ""
def start(filename=None):
    if filename:
run(filename)
else:
for f in glob.glob("*.json"):
run(f)
args = parser.parse_args()
start(args.file)
print "\n block_host_uris: %s " % len(block_host_uris)
print "\n entity_host_uris: %s " % len(entity_host_uris)
assert "itisatracker.com" in block_host_uris
exit(result)
| gpl-3.0 | -808,360,885,678,404,500 | 27.892116 | 78 | 0.507827 | false |
kamalx/edx-platform | lms/djangoapps/discussion_api/tests/test_api.py | 1 | 90651 | """
Tests for Discussion API internal interface
"""
from datetime import datetime, timedelta
import itertools
from urlparse import parse_qs, urlparse, urlunparse
from urllib import urlencode
import ddt
import httpretty
import mock
from pytz import UTC
from django.core.exceptions import ValidationError
from django.http import Http404
from django.test.client import RequestFactory
from rest_framework.exceptions import PermissionDenied
from opaque_keys.edx.locator import CourseLocator
from courseware.tests.factories import BetaTesterFactory, StaffFactory
from discussion_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread_list,
update_comment,
update_thread,
)
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
)
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role,
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the module.
"""
    course.tabs = [tab for tab in course.tabs if tab.type != 'discussion']
modulestore().update_item(course, user_id)
@ddt.ddt
class GetCourseTest(UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.course = CourseFactory.create(org="x", course="y", run="z")
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(Http404):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
get_course(self.request, self.course.id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": unicode(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
def test_blackout(self):
# A variety of formats is accepted
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
class GetCourseTopicsTest(UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_module(self, topic_id, category, subcategory, **kwargs):
"""Build a discussion module in self.course"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
query_list = [("course_id", unicode(self.course.id))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return urlunparse(("", "", path, "", urlencode(query_list), ""))
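    # e.g. (hypothetical topic ids) get_thread_list_url(["a", "b"]) returns
    # "http://testserver/api/discussion/v1/threads/?course_id=<course>&topic_id=a&topic_id=b"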
def get_course_topics(self):
"""
        Get course topics for self.course as self.request.user, generating
        absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(Http404):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_module("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
modulestore().update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "A", "1")
self.make_discussion_module("courseware-2", "A", "2")
self.make_discussion_module("courseware-3", "B", "1")
self.make_discussion_module("courseware-4", "B", "2")
self.make_discussion_module("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
modulestore().update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "First", "A", sort_key="D")
self.make_discussion_module("courseware-2", "First", "B", sort_key="B")
self.make_discussion_module("courseware-3", "First", "C", sort_key="E")
self.make_discussion_module("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_module("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_module("courseware-6", "Second", "C")
self.make_discussion_module("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
self.make_discussion_module("courseware-1", "First", "Everybody")
self.make_discussion_module(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_module(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_module("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_module(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
@ddt.ddt
class GetThreadListTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Test for get_thread_list"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(Http404):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
self.get_thread_list([])
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_thread_list([])
def test_empty(self):
self.assertEqual(
self.get_thread_list([]),
{
"results": [],
"next": None,
"previous": None,
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["1"],
"recursive": ["False"],
"commentable_ids": ["topic_x,topic_meow"]
})
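        # the topic_id_list is passed to the comments service as a single
        # comma-joined "commentable_ids" parameter, as asserted above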
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["6"],
"per_page": ["14"],
"recursive": ["False"],
})
def test_thread_content(self):
source_threads = [
{
"type": "thread",
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"commentable_id": "topic_x",
"group_id": None,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"thread_type": "discussion",
"title": "Test Title",
"body": "Test body",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
},
{
"type": "thread",
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"pinned": False,
"closed": True,
"abuse_flaggers": [],
"votes": {"up_count": 9},
"comments_count": 18,
"unread_comments_count": 0,
},
]
expected_threads = [
{
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"topic_id": "topic_x",
"group_id": None,
"group_name": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"comment_count": 5,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["following", "voted"],
},
{
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"pinned": False,
"closed": True,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 9,
"comment_count": 18,
"unread_comment_count": 0,
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["following", "voted"],
},
]
self.assertEqual(
self.get_thread_list(source_threads),
{
"results": expected_threads,
"next": None,
"previous": None,
"text_search_rewrite": None,
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
actual_has_group = "group_id" in httpretty.last_request().querystring
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3),
{
"results": [],
"next": "http://testserver/test_path?page=2",
"previous": None,
"text_search_rewrite": None,
}
)
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3),
{
"results": [],
"next": "http://testserver/test_path?page=3",
"previous": "http://testserver/test_path?page=1",
"text_search_rewrite": None,
}
)
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3),
{
"results": [],
"next": None,
"previous": "http://testserver/test_path?page=2",
"text_search_rewrite": None,
}
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(Http404):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
self.register_get_threads_search_response([], text_search_rewrite)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
),
{
"results": [],
"next": None,
"previous": None,
"text_search_rewrite": text_search_rewrite,
}
)
self.assert_last_query_params({
"course_id": [unicode(self.course.id)],
"sort_key": ["date"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["10"],
"recursive": ["False"],
"text": ["test search string"],
})
@ddt.ddt
class GetCommentListTest(CommentsServiceMockMixin, ModuleStoreTestCase):
"""Test for get_comment_list"""
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", unicode(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(Http404):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
self.get_comment_list(self.make_minimal_cs_thread())
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": unicode(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread),
{"results": [], "next": None, "previous": None}
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False),
{"results": [], "next": None, "previous": None}
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True),
{"results": [], "next": None, "previous": None}
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment()],
"resp_total": 71
}),
page=6,
page_size=14
)
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"recursive": ["True"],
"user_id": [str(self.user.id)],
"mark_as_read": ["True"],
"resp_skip": ["70"],
"resp_limit": ["14"],
}
)
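        # resp_skip = (page - 1) * page_size = (6 - 1) * 14 = 70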
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"children": [],
}
]
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"children": [],
"editable_fields": ["voted"],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"children": [],
"editable_fields": ["voted"],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
)["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment"})],
"non_endorsed_responses": [make_minimal_cs_comment({"id": "non_endorsed_comment"})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True)
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False)
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"}
})
]
})
actual_comments = self.get_comment_list(thread)["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
        response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment()],
response_total_field: 5,
})
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5)
self.assertIsNone(actual["next"])
self.assertIsNone(actual["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2)
self.assertEqual(actual["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2)
self.assertEqual(actual["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2)
self.assertIsNone(actual["next"])
self.assertEqual(actual["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(Http404):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [
make_minimal_cs_comment({"id": "comment_{}".format(i)}) for i in range(10)
]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size)
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(Http404):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
class CreateThreadTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for create_thread"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
self.register_post_thread_response({
"id": "test_id",
"username": self.user.username,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
})
actual = create_thread(self.request, self.minimal_data)
expected = {
"id": "test_id",
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["following", "raw_body", "title", "topic_id", "type", "voted"],
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
def test_following(self):
self.register_post_thread_response({"id": "test_id"})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
def test_voted(self):
self.register_post_thread_response({"id": "test_id"})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "non/existent/course"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@ddt.ddt
class CreateCommentTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for create_comment"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["raw_body", "voted"]
}
self.assertEqual(actual, expected)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment"}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, self.minimal_data)
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["Invalid value."]})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"thread_id": ["Invalid value."]}
)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@ddt.ddt
class UpdateThreadTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for update_thread"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
expected = {
"id": "test_thread",
"course_id": unicode(self.course.id),
"topic_id": "original_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"type": "discussion",
"title": "Original Title",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 0,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["following", "raw_body", "title", "topic_id", "type", "voted"],
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
self.register_thread()
with self.assertRaises(Http404):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_voted(self, old_voted, new_voted):
"""
Test attempts to edit the "voted" field.
old_voted indicates whether the thread should be upvoted at the start of
the test. new_voted indicates the value for the "voted" field in the
update. If old_voted and new_voted are the same, no update should be
made. Otherwise, a vote should be PUT or DELETEd according to the
new_voted value.
"""
if old_voted:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_voted}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_voted)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if old_voted == new_voted:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_voted else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_voted else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_voted:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field is required."]}
)
@ddt.ddt
class UpdateCommentTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for update_comment"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["raw_body", "voted"]
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
self.register_comment()
with self.assertRaises(Http404):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_voted(self, old_voted, new_voted):
"""
Test attempts to edit the "voted" field.
old_voted indicates whether the comment should be upvoted at the start of
the test. new_voted indicates the value for the "voted" field in the
update. If old_voted and new_voted are the same, no update should be
made. Otherwise, a vote should be PUT or DELETEd according to the
new_voted value.
"""
if old_voted:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
self.register_comment_votes_response("test_comment")
self.register_comment()
data = {"voted": new_voted}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["voted"], new_voted)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if old_voted == new_voted:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_voted else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_voted else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_voted:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
@ddt.ddt
class DeleteThreadTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for delete_thread"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(Http404):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
self.register_thread()
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
        Tests group access for deleting a thread.
        All privileged roles are able to delete a thread. A student can delete
        a thread only if the student is the author and the thread is either not
        in a cohort or in the student's own cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
@ddt.ddt
class DeleteCommentTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
"""Tests for delete_comment"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.course = CourseFactory.create()
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(Http404):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
self.register_comment_and_thread()
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(Http404):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
        Tests group access for deleting a comment.
        All privileged roles are able to delete a comment. A student can delete
        a comment only if the student is the author and the comment's thread is
        either not in a cohort or in the student's own cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except Http404:
self.assertTrue(expected_error)
| agpl-3.0 | -6,897,488,860,505,443,000 | 38.259853 | 118 | 0.553916 | false |
att-comdev/deckhand | deckhand/common/validation_message.py | 1 | 2620 | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Indicates document sanity-check validation failure pre- or post-rendering.
DOCUMENT_SANITY_CHECK_FAILURE = 'D001'
# Indicates document post-rendering validation failure.
DOCUMENT_POST_RENDERING_FAILURE = 'D002'
class ValidationMessage(object):
"""ValidationMessage per UCP convention:
https://github.com/att-comdev/ucp-integration/blob/master/docs/source/api-conventions.rst#output-structure # noqa
Construction of ``ValidationMessage`` message:
:param string message: Validation failure message.
:param boolean error: True or False, if this is an error message.
:param string name: Identifying name of the validation.
:param string level: The severity of validation result, as "Error",
"Warning", or "Info"
:param string schema: The schema of the document being validated.
:param string doc_name: The name of the document being validated.
:param string diagnostic: Information about what lead to the message,
or details for resolution.
"""
def __init__(self,
message='Document validation error.',
error=True,
name='Deckhand validation error',
level='Error',
doc_schema='',
doc_name='',
doc_layer='',
diagnostic=''):
level = 'Error' if error else 'Info'
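        # NOTE (editorial): the line above recomputes 'level' from 'error' and
        # thereby overrides the caller-supplied ``level`` argument; this mirrors
        # the original behaviour and is deliberately left unchanged.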
self._output = {
'message': message,
'error': error,
'name': name,
'documents': [],
'level': level,
'kind': self.__class__.__name__
}
self._output['documents'].append(
dict(schema=doc_schema, name=doc_name, layer=doc_layer))
if diagnostic:
self._output.update(diagnostic=diagnostic)
def format_message(self):
"""Return ``ValidationMessage`` message.
:returns: The ``ValidationMessage`` for the Validation API response.
:rtype: dict
"""
return self._output
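if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): build a message for
    # a hypothetical document and render it as for a Validation API response.
    demo = ValidationMessage(
        message="Sample sanity-check failure.",
        name=DOCUMENT_SANITY_CHECK_FAILURE,
        doc_schema="example/Doc/v1",
        doc_name="example-doc",
        doc_layer="site",
        diagnostic="Field 'foo' failed schema validation.")
    print(demo.format_message())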
| apache-2.0 | 4,281,113,961,153,711,000 | 38.104478 | 118 | 0.646183 | false |
gwct/grampa | lib/spec_tree.py | 1 | 5143 | import sys, os, reconcore as RC, recontree as RT, global_vars as globs
#############################################################################
def readSpecTree(spec_tree_input, starttime):
if os.path.isfile(spec_tree_input):
spec_tree = open(spec_tree_input, "r").read().replace("\n", "").replace("\r","");
else:
spec_tree = spec_tree_input;
# If the input string is a filename, read the file. Otherwise, just try it as a newick string.
hybrid_spec = "";
spec_tree = RT.remBranchLength(spec_tree);
tips = spec_tree.replace("(","").replace(")","").replace(";","").split(",");
# Remove the branch lengths from the tree string and get the tip labels.
if any(tip.isdigit() for tip in tips):
RC.errorOut(6, "Tip labels cannot be purely numbers. Please add another character.");
if globs.spec_type == 's' and any(tips.count(tip) > 1 for tip in tips):
RC.errorOut(7, "You have entered a tree type (-t) of 's' but there are labels in your tree that appear more than once!");
if globs.spec_type == 'm' and any(tips.count(tip) not in [1,2] for tip in tips):
RC.errorOut(8, "You have entered a tree type (-t) of 'm', species in your tree should appear exactly once or twice.");
# Some error checking based on the tip labels in the tree.
if globs.spec_type == 'm':
hybrid_spec = list(set([tip for tip in tips if tips.count(tip) != 1]));
for h in hybrid_spec:
spec_tree = spec_tree.replace(h, h+"*", 1);
# If the user entered a MUL-tree, some internal re-labeling must be done to those labels that appear twice.
try:
sinfo, st = RT.treeParse(spec_tree);
# Parsing of the species tree.
except:
RC.errorOut(9, "Error reading species tree!");
# Reading the species tree file.
if globs.label_opt:
if globs.v != -1:
print();
print("# The input species tree with internal nodes labeled:");
print(st + "\n");
RC.endProg(starttime);
# The output if --labeltree is set.
return sinfo, st;
#############################################################################
def hInParse(sinfo, st, h1_input, h2_input):
if globs.spec_type == 's':
hybrid_clades, hybrid_nodes = getHClades(h1_input, sinfo, "h1");
copy_clades, copy_nodes = getHClades(h2_input, sinfo, "h2");
# If the input tree is singly-labeled, use the input info from -h1 and -h2 to get the hybrid clades and nodes.
elif globs.spec_type == 'm':
mul_copy_clade = [n for n in sinfo if sinfo[n][2] == 'tip' and '*' in n];
mul_hybrid_clade = [n.replace("*","") for n in mul_copy_clade];
mul_hybrid_node, mul_hybrid_mono = RT.LCA(mul_hybrid_clade, sinfo);
mul_copy_node, mul_copy_mono = RT.LCA(mul_copy_clade, sinfo);
if not mul_hybrid_mono or not mul_copy_mono:
RC.errorOut(13, "All hybrid clades specified in your MUL-tree must be monophyletic! Hybrid clade identified as: " + ",".join(mul_copy_clade));
hybrid_clades, hybrid_nodes, copy_clades, copy_nodes = [mul_hybrid_clade], [mul_hybrid_node], [mul_copy_clade], [mul_copy_node];
# If the input tree is a MUL-tree, we have to determine what the hybrid clades and nodes are.
return hybrid_clades, hybrid_nodes, copy_clades, copy_nodes;
# Parses the input h nodes.
#############################################################################
def getHClades(h_list, sinfo, h_type):
# This function takes a list of lists of -h1 or -h2 inputs and determines if they are clades or node labels. It then retrieves
# the complete lists of hybrid clades and nodes.
if h_list:
if " " in h_list:
h_clades = h_list.split(" ");
h_clades = list(map(set, [tmp_h.split(",") for tmp_h in h_clades]));
else:
h_clades = list(map(set, [h_list.split(",")]));
# Split up the input info. If there is a space, multiple nodes/clades have been specified.
if not all(h in sinfo for hybrid_list in h_clades for h in hybrid_list if not h.isdigit()):
RC.errorOut(10, "Not all -" + h_type + " species are present in your species tree!");
if not all("<" + h + ">" in sinfo for hybrid_list in h_clades for h in hybrid_list if h.isdigit()):
RC.errorOut(11, "Not all -" + h_type + " nodes are present in your species tree!");
# Some error checking to make sure everything the user input is actually in the tree.
h_nodes = [];
for hybrid_clade in h_clades:
hybrid_clade = list(hybrid_clade);
if hybrid_clade[0].isdigit():
h_node = "<" + hybrid_clade[0] + ">";
# If the input was an internal node, add it to the node list here.
else:
h_node, h_mono = RT.LCA(hybrid_clade, sinfo);
if not h_mono:
RC.errorOut(12, "All hybrid clades specified h1 and h2 must be monophyletic!");
# If the input was a clade, retrieve the ancestral node and check if it is monophyletic here.
if h_node not in h_nodes:
h_nodes.append(h_node);
# Add the hybrid node to the nodes list.
# If the user input anything as -h1 or -h2 this parses it.
else:
h_nodes = list(sinfo.keys());
h_clades = [RT.getClade(node, sinfo) for node in h_nodes];
# If the user did not specify -h1 or -h2, this adds all possible nodes to the list.
return h_clades, h_nodes;
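# Example (hedged): with h_list = "x,y z", getHClades returns the two clades
# [{'x', 'y'}, {'z'}] together with their LCA node labels, while a falsy h_list
# (no -h1/-h2 input from the user) returns every node of sinfo as a candidate.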
#############################################################################
| gpl-3.0 | -548,104,545,586,755,300 | 43.336207 | 145 | 0.634649 | false |
beeftornado/sentry | src/sentry/roles/manager.py | 1 | 2011 | from __future__ import absolute_import
import six
from collections import OrderedDict
class Role(object):
def __init__(self, priority, id, name, desc="", scopes=(), is_global=False):
assert len(id) <= 32, "Role id must be no more than 32 characters"
self.priority = priority
self.id = id
self.name = name
self.desc = desc
self.scopes = frozenset(scopes)
self.is_global = bool(is_global)
def __str__(self):
return self.name.encode("utf-8")
def __unicode__(self):
return six.text_type(self.name)
def __repr__(self):
return u"<Role: {}>".format(self.id)
def has_scope(self, scope):
return scope in self.scopes
class RoleManager(object):
def __init__(self, config, default=None):
role_list = []
self._roles = OrderedDict()
for idx, role in enumerate(config):
role = Role(idx, **role)
role_list.append(role)
self._roles[role.id] = role
self._choices = tuple((r.id, r.name) for r in role_list)
if default:
self._default = self._roles[default]
else:
self._default = role_list[0]
self._top_dog = role_list[-1]
def __iter__(self):
return six.itervalues(self._roles)
def can_manage(self, role, other):
return self.get(role).priority >= self.get(other).priority
def get(self, id):
return self._roles[id]
def get_all(self):
return list(self._roles.values())
def get_choices(self):
return self._choices
def get_default(self):
return self._default
def get_top_dog(self):
return self._top_dog
def with_scope(self, scope):
for role in self.get_all():
if role.has_scope(scope):
yield role
def with_any_scope(self, scopes):
for role in self.get_all():
if any(role.has_scope(scope) for scope in scopes):
yield role
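if __name__ == "__main__":
    # Hedged usage sketch (not from the original source); the config entries and
    # scope names below are hypothetical.
    demo_config = (
        {"id": "member", "name": "Member", "scopes": ("event:read",)},
        {"id": "admin", "name": "Admin", "scopes": ("event:read", "event:admin"), "is_global": True},
    )
    manager = RoleManager(demo_config, default="member")
    assert manager.can_manage("admin", "member")
    print [role.id for role in manager.with_scope("event:admin")]  # -> ['admin']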
| bsd-3-clause | 2,672,312,002,791,817,700 | 24.455696 | 80 | 0.567379 | false |
akrherz/iem | htdocs/DCP/ahpsxml2wxc.py | 1 | 2188 | """Convert the AHPS XML into WXC format"""
import datetime
from paste.request import parse_formvars
from twisted.words.xish import domish, xpath
import requests
def do(nwsli):
"""work"""
res = ""
xml = requests.get(
(
"https://water.weather.gov/ahps2/"
"hydrograph_to_xml.php?gage=%s&output=xml"
)
% (nwsli,)
).content
elementStream = domish.elementStream()
roots = []
results = []
elementStream.DocumentStartEvent = roots.append
elementStream.ElementEvent = lambda elem: roots[0].addChild(elem)
elementStream.DocumentEndEvent = lambda: results.append(roots[0])
res += """IEM %s AHPS2WXC host=0 TimeStamp=%s
5
15 Station
6 UTCDate
4 UTCTime
7 Stage
7 CFS\n""" % (
nwsli,
datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"),
)
elementStream.parse(xml)
elem = results[0]
nodes = xpath.queryForNodes("/site/forecast/datum", elem)
if nodes is None:
return res
i = 0
maxval = {"val": 0, "time": None}
for node in nodes:
utc = datetime.datetime.strptime(
str(node.valid)[:15], "%Y-%m-%dT%H:%M"
)
res += ("%12s%03i %6s %4s %7s %7s\n") % (
nwsli,
i,
utc.strftime("%b %-d"),
utc.strftime("%H%M"),
node.primary,
node.secondary,
)
if float(str(node.primary)) > maxval["val"]:
maxval["val"] = float(str(node.primary))
maxval["time"] = utc
maxval["cfs"] = float(str(node.secondary))
i += 1
if maxval["time"] is not None:
utc = maxval["time"]
res += ("%12sMAX %6s %4s %7s %7s\n") % (
nwsli,
utc.strftime("%b %-d"),
utc.strftime("%H%M"),
maxval["val"],
maxval["cfs"],
)
return res
def application(environ, start_response):
"""Do Fun Things"""
fields = parse_formvars(environ)
nwsli = fields.get("nwsli", "MROI4")[:5]
start_response("200 OK", [("Content-type", "text/plain")])
return [do(nwsli).encode("ascii")]
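# Hedged usage note (not from the original source): mounted as a WSGI app, a
# request such as /DCP/ahpsxml2wxc.py?nwsli=MROI4 returns plain text in the WXC
# layout whose header and columns are built in do() above.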
| mit | -4,720,805,943,521,046,000 | 27.051282 | 69 | 0.528793 | false |
soaplib/soaplib | setup.py | 1 | 2916 | #!/usr/bin/env python
from unittest import TestLoader
from pkg_resources import resource_exists
from pkg_resources import resource_listdir
from setuptools import setup, find_packages
VERSION = '2.0.0'
LONG_DESC = """\
This is a simple, easily extendible soap library that provides several useful
tools for creating and publishing soap web services in python. This package
features on-demand wsdl generation for the published services, a
wsgi-compliant web application, support for complex class structures, binary
attachments, and a simple framework for creating additional serialization
mechanisms.
This project uses lxml as its XML API, providing full namespace support.
"""
SHORT_DESC="A transport and architecture agnostic soap (de)serialization " \
"library that focuses on making small, rpc-like messaging work."
class NoInteropLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Load unit test (skip 'interop' package).
Hacked from the version in 'setuptools.command.test.ScanningLoader'.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self,module))
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file == 'interop':
# These tests require installing a bunch of extra
# code: see 'src/soaplib/test/README'.
continue
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(
module.__name__, file + '/__init__.py'
):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
return self.suiteClass(tests)
setup(
name='soaplib',
packages=find_packages('src'),
package_dir={'':'src'},
version=VERSION,
description=SHORT_DESC,
long_description=LONG_DESC,
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
'Natural Language :: English',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords=('soap', 'wsdl', 'wsgi'),
author='Soaplib Contributors',
author_email='[email protected]',
maintainer = 'Burak Arslan',
maintainer_email = '[email protected]',
url='http://soaplib.github.com/soaplib/2_0/',
license='LGPL',
zip_safe=False,
install_requires=[
'setuptools',
'pytz',
'lxml>=2.2.1',
],
test_suite='soaplib.core.test',
test_loader='__main__:NoInteropLoader',
namespace_packages=["soaplib"]
)
| lgpl-2.1 | -7,305,585,574,414,316,000 | 32.136364 | 77 | 0.60631 | false |
tictail/claw | claw/utils.py | 1 | 2061 | # -*- coding: utf-8 -*-
import logging
from random import shuffle
from claw.constants import RE_DELIMITER
log = logging.getLogger(__name__)
def safe_format(format_string, *args, **kwargs):
"""
Helper: formats string with any combination of bytestrings/unicode
strings without raising exceptions
"""
try:
if not args and not kwargs:
return format_string
else:
return format_string.format(*args, **kwargs)
# catch encoding errors and transform everything into utf-8 string
# before logging:
except (UnicodeEncodeError, UnicodeDecodeError):
format_string = to_utf8(format_string)
args = [to_utf8(p) for p in args]
kwargs = {k: to_utf8(v) for k, v in kwargs.iteritems()}
return format_string.format(*args, **kwargs)
# ignore other errors
except:
return u''
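# Hedged usage sketch (not from the original source):
#   safe_format(u"Hello {}", u"world")  ->  u'Hello world'
# Mixed bytestring/unicode arguments that trip UnicodeEncodeError/DecodeError
# are re-encoded to UTF-8 and formatted again instead of raising.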
def to_unicode(str_or_unicode, precise=False):
"""
Safely returns a unicode version of a given string
>>> utils.to_unicode('привет')
u'привет'
>>> utils.to_unicode(u'привет')
u'привет'
If `precise` flag is True, tries to guess the correct encoding first.
"""
encoding = detect_encoding(str_or_unicode) if precise else 'utf-8'
if isinstance(str_or_unicode, str):
return unicode(str_or_unicode, encoding, 'replace')
return str_or_unicode
def to_utf8(str_or_unicode):
"""
Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi'
"""
if isinstance(str_or_unicode, unicode):
return str_or_unicode.encode("utf-8", "ignore")
return str(str_or_unicode)
def random_token(length=7):
vals = ("a b c d e f g h i j k l m n o p q r s t u v w x y z "
"0 1 2 3 4 5 6 7 8 9").split(' ')
shuffle(vals)
return ''.join(vals[:length])
def get_delimiter(msg_body):
delimiter = RE_DELIMITER.search(msg_body)
if delimiter:
delimiter = delimiter.group()
else:
delimiter = '\n'
return delimiter
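if __name__ == "__main__":
    # Minimal, hedged demo of this module's helpers (not part of the original
    # file); assumes RE_DELIMITER matches a '\r?\n' style line break.
    print safe_format(u"token: {}", random_token(5))
    print repr(get_delimiter("line one\r\nline two"))  # expected: '\r\n'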
| apache-2.0 | -4,425,286,584,638,680,600 | 25.802632 | 73 | 0.621993 | false |
QTB-HHU/ModelHeatShock | HSM_VaryParamsRMSvsData.py | 1 | 30309 |
from copy import deepcopy
import math
#from HSM_ParametersClass import *
from HSM_SimulateClass import *
def GenerateRandomParametersSets(NumberOfRandomSets, FactorOf, DefaultParamSetRATES):
"""GENERATE SETS OF RANDOM PARAMETERS FROM A FLAT DISTRIBUTION CENTERED AROUND FIDUCIAL VALUES AND WITHIN A FACTOR OF FactorOf"""
DictionaryOfMaxs = {}
DictionaryOfMins = {}
for key in DefaultParamSetRATES:
#DictionaryOfMaxs[key] = deepcopy(DefaultParamSetRATES[key]*FactorOf)
DictionaryOfMaxs[key] = deepcopy(DefaultParamSetRATES[key] - DefaultParamSetRATES[key]*FactorOf)
#DictionaryOfMins[key] = deepcopy(DefaultParamSetRATES[key]/FactorOf)
DictionaryOfMins[key] = deepcopy(DefaultParamSetRATES[key] + DefaultParamSetRATES[key]*FactorOf)
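    # NOTE (editorial): as written above, "Maxs" holds the lower bound
    # (default*(1 - FactorOf)) and "Mins" the upper bound; since the random draw
    # below interpolates between the two, the sampled interval is still correct,
    # only the names are swapped.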
import random
ListOfManyDictionaryOfRandomParameters = []
for i in range(NumberOfRandomSets):
DictionaryOfRandomParameters = {}
for key in DefaultParamSetRATES:
RandomNumber = random.random()
NewValue = deepcopy(DictionaryOfMins[key] + RandomNumber*(DictionaryOfMaxs[key]-DictionaryOfMins[key]))
DictionaryOfRandomParameters[key] = NewValue
ListOfManyDictionaryOfRandomParameters.append(deepcopy((DictionaryOfRandomParameters)))
return ListOfManyDictionaryOfRandomParameters
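# Example (hedged sketch): draw 100 candidate rate sets within +/-50% of the
# fiducial values, e.g.
#   candidates = GenerateRandomParametersSets(100, 0.5, DefaultParamSetRATES)
# where DefaultParamSetRATES is the dictionary of fiducial rate constants.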
def GenerateParametersSetsChangingOneParameter(NumberOfValuesForEachParameterk, FactorOfII, DefaultParamSetRATES):
"""GENERATE SETS OF PARAMETERS By Changing Only 1 PARAMETER AT A TIME"""
ListOfManyDictionariesOfParametersVarying1by1 = []
for key in DefaultParamSetRATES:
DictionaryOfTestParameters = deepcopy(DefaultParamSetRATES)
for j in range(NumberOfValuesForEachParameterk+1):
NewValue = deepcopy(DefaultParamSetRATES[key] + FactorOfII * DefaultParamSetRATES[key] * ( 2*j - NumberOfValuesForEachParameterk)/NumberOfValuesForEachParameterk)
DictionaryOfTestParameters[key] = deepcopy(NewValue)
ListOfManyDictionariesOfParametersVarying1by1.append(deepcopy(DictionaryOfTestParameters))
return ListOfManyDictionariesOfParametersVarying1by1
def ExtractDataControlFeedExpHSFandHSP90aFromFiles(DataFileNameHSFcontrol, DataFileNameHSP90Acontrol):
ListToBeFilledWithResults = []
# CONTROL HSF
ColumnNumber = 2 # Only the time and the row data are used
ListOfDataArraysHSF = []
FromDataFileToArrays(DataFileNameHSFcontrol, ColumnNumber, ListOfDataArraysHSF) # Read data file, put in list of arrays
ListToBeFilledWithResults.append(ListOfDataArraysHSF[1])
# Times
ListToBeFilledWithResults.append(ListOfDataArraysHSF[0])
# CONTROL HSP90a
ColumnNumber = 2 # Only the time and the row data are used
ListOfDataArraysHSP90a = []
FromDataFileToArrays(DataFileNameHSP90Acontrol, ColumnNumber, ListOfDataArraysHSP90a) # Read data file, put in list of arrays
ListToBeFilledWithResults.append(ListOfDataArraysHSP90a[1])
return ListToBeFilledWithResults
def ComputePartOfRMSSimulationVsData(ListOfDataTimes, timesetDataRMS, SimulationExperimentDataRMS, ListOfDataHSF, ListOfDataHSP90a):
"""Compute the Sum over all datapoints of ( Xth - Xdata )^2, for 1 feeding experiment, taking into caccount HSF + HSP """
##### 2-C1.1: EXTRACT SIMULATION VALUES AT PROPER TIME POINTS FOR COMPARISON WITH DATA
ListOfmRNAHSFsimulation = []
ListOfmRNAHSPsimulation = []
ListOfTimesForDatapoints = ListOfDataTimes #= [0., 15., 30., 45., 60., 120.]
for val in ListOfTimesForDatapoints:
        j = int(( val * 60. + vorl ) / timesetDataRMS.CurrentParams["delta_t"]) # ( seconds/seconds = dimensionless index )
ListOfmRNAHSFsimulation.append(SimulationExperimentDataRMS.RF[j])
ListOfmRNAHSPsimulation.append(SimulationExperimentDataRMS.RHP[j])
ArrayOfmRNAHSFsimulation = np.array([val for sublist in ListOfmRNAHSFsimulation for val in sublist])
ArrayOfmRNAHSPsimulation = np.array([val for sublist in ListOfmRNAHSPsimulation for val in sublist])
##### 2-C1.2: COMPUTE pieces of LS TH VS DATA - STAUROSPORINE
# print("We now wants to compare these...")
# print(ListOfDataHSF_stau)
# print(ListOfDataHSP90a_stau)
# print(ArrayOfmRNAHSFsimulation)
# print(ArrayOfmRNAHSPsimulation)
k = 0
SumOverDataPointsHSF = 0.
for val in ListOfDataHSF:
DeviationHSF = ArrayOfmRNAHSFsimulation[k]/max(ArrayOfmRNAHSFsimulation) - ListOfDataHSF[k]/max(ListOfDataHSF)
SumOverDataPointsHSF = SumOverDataPointsHSF + pow(DeviationHSF, 2)
k = k + 1
l = 0
SumOverDataPointsHSP90a = 0.
for val in ListOfDataHSP90a:
DeviationHSP90a = ArrayOfmRNAHSPsimulation[l]/max(ArrayOfmRNAHSPsimulation) - ListOfDataHSP90a[l]/max(ListOfDataHSP90a)
SumOverDataPointsHSP90a = SumOverDataPointsHSP90a + pow(DeviationHSP90a, 2)
l = l + 1
SumOverDatapoints = SumOverDataPointsHSF + SumOverDataPointsHSP90a
return SumOverDatapoints
def PlotSimulationVsDataFeeding(SimulationFeedingControlsDataRMStimes, ListForPlottingHSF, ListForPlottingHSP, timesetDataRMS, ListOfDataHSF, ListOfDataHSP90a, ListOfDataTimes, FigureName, FigureExtension, FolderContainingDataVsSimuCalibration):
"""Plot mRNAs for HSF and HSP90a feeding experiments data VS simulations to see if it makes sense"""
fig = figure()
############ Simulations
ax1 = plt.subplot(121)
SubPlot(ax1, SimulationFeedingControlsDataRMStimes, ListForPlottingHSF, 'Time (min)', 0.,
(timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., " ", 0, 0, "upper right", "A",
Legendfontsize="small", Legendfancybox=True)
ax2 = plt.subplot(122)
SubPlot(ax2, SimulationFeedingControlsDataRMStimes, ListForPlottingHSP, 'Time (min)', 0.,
(timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., " ", 0, 0, "upper right", "B",
Legendfontsize="small", Legendfancybox=True)
############ and Data Points
ListOfDataHSFNORM = []
k = 0
for val in ListOfDataHSF:
ListOfDataHSFNORM.append(ListOfDataHSF[k]/max(ListOfDataHSF))
k = k + 1
ListOfDataHSP90aNORM = []
k = 0
for val in ListOfDataHSP90a:
ListOfDataHSP90aNORM.append(ListOfDataHSP90a[k]/max(ListOfDataHSP90a))
k = k + 1
DataLegend = [r"Data" + str(FigureName)]
DataSubPlot(ax1, ListOfDataTimes, [ListOfDataTimes, np.asarray(ListOfDataHSFNORM)], 'Time (min)', 0.,
                (timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., r"mRNA$_{HSF}$ (Normalized to Max)", 0., 1., "upper right", DataLegend, "",
Legendfontsize="small", Legendfancybox=True, Black = "Yes")
DataLegend = [r"Data" + str(FigureName)]
DataSubPlot(ax2, ListOfDataTimes, [ListOfDataTimes, np.asarray(ListOfDataHSP90aNORM)], 'Time (min)', 0.,
                (timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., r"mRNA$_{HSP}$ (Normalized to Max)", 0., 1., "upper right", DataLegend, "",
Legendfontsize="small", Legendfancybox=True, Black = "Yes")
PlotAndSave(fig, FolderContainingDataVsSimuCalibration + "FittingToData" + str(FigureName) + FigureExtension, "PS", 1, 0)
##################################### Best-fit simulation vs. feeding-experiment data (paper figure) ##########################################
def PlotSimulationVsDataFeedingModelVSFittedData(SimulationFeedingControlsDataRMStimes, ListForPlottingHSF, ListForPlottingHSP, timesetDataRMS, ListOfDataHSF, ListOfDataHSP90a, ListOfDataTimes, FigureName, FigureExtension, FolderContainingDataVsSimuCalibration):
"""Plot mRNAs for HSF and HSP90a feeding experiments data VS simulation best fit for paper"""
##### for key in AllDataControlsFeeding[5]:#ListOfFeedingKeys:
fig = figure()
############ Simulations
ax1 = plt.subplot(121)
SubPlot(ax1, SimulationFeedingControlsDataRMStimes, ListForPlottingHSF, 'Time (min)', 0.,
(timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., " ", 0, 0, "upper right", "A",
Legendfontsize="small", Legendfancybox=True, Black = "Yes")
ax2 = plt.subplot(122)
SubPlot(ax2, SimulationFeedingControlsDataRMStimes, ListForPlottingHSP, 'Time (min)', 0.,
(timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., " ", 0, 0, "upper right", "B",
Legendfontsize="small", Legendfancybox=True, Black = "Yes")
############ and Data Points
ListOfFeedingKeys= ["stau", "radi", "ChloCyc", "canav", "Gelda", "CaChel"]
DictionaryForLegend = {"stau": "Staurosporine",
"radi": "Radicicol",
"ChloCyc": "Chlor. / Cyclo.",
"canav": "Canavanine",
"Gelda": "Geldanamicil",
"CaChel": "Calcium Chelator"}
i = 0
for key in ListOfFeedingKeys:
ListOfDataHSFNORM = []
k = 0
for val in ListOfDataHSF[key]:
ListOfDataHSFNORM.append(ListOfDataHSF[key][k]/max(ListOfDataHSF[key]))
k = k + 1
ListOfDataHSP90aNORM = []
k = 0
for val in ListOfDataHSP90a[key]:
ListOfDataHSP90aNORM.append(ListOfDataHSP90a[key][k]/max(ListOfDataHSP90a[key]))
k = k + 1
DataLegend = [r"Data Control " + DictionaryForLegend[key] + " Exp."]
DataSubPlotMOD(ax1, ListOfDataTimes[key], [ListOfDataTimes[key], np.asarray(ListOfDataHSFNORM)], 'Time (min)', 0.,
(timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., r"Concentration of mRNA$_{HSF}$ (normalized to max)", 0., 1., "upper right", DataLegend, "",
Legendfontsize="small", Legendfancybox=True, ColorNumber = i)
DataLegend = [r"Data Control " + DictionaryForLegend[key] + " Exp."]
DataSubPlotMOD(ax2, ListOfDataTimes[key], [ListOfDataTimes[key], np.asarray(ListOfDataHSP90aNORM)], 'Time (min)', 0.,
(timesetDataRMS.CurrentParams["t_stop"] - vorl) / 60., r"Concentration of mRNA$_{HSP}$ (normalized to max)", 0., 1., "upper right", DataLegend, "",
Legendfontsize="small", Legendfancybox=True, ColorNumber = i)
i = i+1
PlotAndSave(fig, FolderContainingDataVsSimuCalibration + "FittingToDataPAPERversion" + str(FigureName) + FigureExtension, "PS", 1, 0)
def ExtractDataControlsFeedingExperimentsFromFilesIntoListOfDictionaries():
""" EXTRACT EXPERIMENTAL DATA (FEEDING EXPERIMENTS) FROM FILES ALEXANDER SKUPIN """
ListOfFeedingKeys= ["stau", "radi", "ChloCyc", "canav", "Gelda", "CaChel"]
DictionaryOfHSFcontrolFiles, DictionaryOfHSP90acontrolFiles = {}, {}
for key in ListOfFeedingKeys:
DictionaryOfHSFcontrolFiles.update({key : "DataFilesOriginals/" + "hsfcontrol_" + str(key) + ".csv"})
DictionaryOfHSP90acontrolFiles.update({key : "DataFilesOriginals/" + "hsp90acontrol_" + str(key) + ".csv"})
DictionaryOfListsOfDataHSF, DictionaryOfListsOfDataTimes, DictionaryOfListsOfDataHSP90a = {}, {}, {}
for key in ListOfFeedingKeys:
ListOfListsOfExtractedData = ExtractDataControlFeedExpHSFandHSP90aFromFiles(DictionaryOfHSFcontrolFiles[key], DictionaryOfHSP90acontrolFiles[key])
DictionaryOfListsOfDataHSF.update({key : ListOfListsOfExtractedData[0]})
DictionaryOfListsOfDataTimes.update({key : ListOfListsOfExtractedData[1]})
DictionaryOfListsOfDataHSP90a.update({key : ListOfListsOfExtractedData[2]})
# Create Temperature settings and Time settings reproducing the experimental setup of controls for feeding experiments
TsetSchmol2013dataRMS = ParametersSet({"Ttype": 1, "Tin": 25., "Tup": 40., "tau": 5., "ta": 0. * 60. + vorl})
timeset240minsDataRMS = ParametersSet({"t_start": 0., "t_stop": 240. * 60. + vorl, "delta_t": 5.0})
AllDataControlsFeeding = (DictionaryOfListsOfDataTimes, DictionaryOfListsOfDataHSF, DictionaryOfListsOfDataHSP90a, TsetSchmol2013dataRMS, timeset240minsDataRMS, ListOfFeedingKeys)
return AllDataControlsFeeding
def ComputeRMSfeedingForGivenParameterSet(ParamSetRates, ParamSetForREACTIONS, ParamSetInitCond, OutputHSFandHSPtoPlot, AllDataControlsFeeding):
""" Function to compute RMS w.r.t. data of the controls of the feeding experiments from Schmollinger et al. 2013"""
    # OutputHSFandHSPtoPlot = "Yes" or "No": "Yes" additionally returns the time courses needed for plotting.
############ 1] ############ NEED TO GENERATE A MODEL FOR EVERY PARAMS SET
    # Temperature and Time parameter sets reproducing the experiments
TsetSchmol2013dataRMS = AllDataControlsFeeding[3]
timeset240minsDataRMS = AllDataControlsFeeding[4]
# Set default parameter values
TestParamSetIC = ParametersSet(ParamSetInitCond)
TestParamSetRATES = ParametersSet(ParamSetRates)
ParamSetForREACTIONS["piRFconst"] = ParamSetRates["ketaRF"]*ParamSetRates["ketaF"]/ParamSetRates["kFpi0"]*0.17/8.
ParamSetForREACTIONS["piRHPconst"] = ParamSetRates["ketaRHP"]*ParamSetRates["ketaHP"]/ParamSetRates["kpiHP"]*17.5
ParamSetForREACTIONSobject = ParametersSet(ParamSetForREACTIONS)
# Create an object of the class "Heat shock models" with these parameters
TestHSM = HeatShockModel(TestParamSetIC, TestParamSetRATES, ParamSetForREACTIONSobject)
############ 2] ############ NEXT, FOR EACH MODEL COMPUTE pieces necessary to compute the RMS W.R.T. DATA
##### 2-A: SIMULATE CONTROLS OF ALL FEEDING EXPERIMENTS (one simulation only for all the datasets of feeding experiments!!!)
SimulationFeedingControlsDataRMS = Simulate(TestHSM, timeset240minsDataRMS, TsetSchmol2013dataRMS, "xyzUSELESSxyz") # "testFeedingControls" + str(i) + FigureExtension
SimulationFeedingControlsDataRMS.TimeRun(AvoidPlots="Yes")
##### 2-B : Extract from input the dictionaries containing the data
DictionaryOfListsOfDataTimes = AllDataControlsFeeding[0]
DictionaryOfListsOfDataHSF = AllDataControlsFeeding[1]
DictionaryOfListsOfDataHSP90a = AllDataControlsFeeding[2]
ListOfFeedingKeys = AllDataControlsFeeding[5]
##### 2-C: Compute Part Of RMS Simulation Vs Data FOR EACH DIFFERENT FEEDING EXPERIMENT
SumOverDatapointsFeeding = {}
for key in ListOfFeedingKeys:
SumOverDatapointsFeeding.update( {key : ComputePartOfRMSSimulationVsData(DictionaryOfListsOfDataTimes[key], timeset240minsDataRMS, SimulationFeedingControlsDataRMS, DictionaryOfListsOfDataHSF[key], DictionaryOfListsOfDataHSP90a[key])} )
############ 3] ############ Put together the pieces for each different dataset into 1 single RMS value!!!
NumberOfDataPoints, SumOverDatapoints = 0., 0.
for key in ListOfFeedingKeys:
NumberOfDataPoints = NumberOfDataPoints + len(DictionaryOfListsOfDataHSF[key]) + len(DictionaryOfListsOfDataHSP90a[key])
SumOverDatapoints = SumOverDatapoints + SumOverDatapointsFeeding[key]
RootMeanSquareDeviation = math.sqrt( SumOverDatapoints / NumberOfDataPoints )
print("\n" + str(RootMeanSquareDeviation) + "\n")
if OutputHSFandHSPtoPlot == "No":
output = (RootMeanSquareDeviation)
elif OutputHSFandHSPtoPlot == "Yes":
output = (RootMeanSquareDeviation, SimulationFeedingControlsDataRMS.RF, SimulationFeedingControlsDataRMS.RHP, SimulationFeedingControlsDataRMS.t)
else:
print("\nError in RMS feeding function!!!\n")
return output
def Convert_ORIGINAL_to_RESCALED_ParameterSet(ORIGINAL_ParameterSetDictionary, RescalingFactorsDictionary):
RESCALED_ParameterSetDictionary = {}
for key in RescalingFactorsDictionary:
RESCALED_ParameterSetDictionary.update({ key : deepcopy(ORIGINAL_ParameterSetDictionary[key])/deepcopy(RescalingFactorsDictionary[key]) })
return deepcopy(RESCALED_ParameterSetDictionary)
def Convert_RESCALED_to_ORIGINAL_ParameterSet(RESCALED_ParameterSetDictionary, RescalingFactorsDictionary):
ORIGINAL_ParameterSetDictionary = {}
for key in RescalingFactorsDictionary:
ORIGINAL_ParameterSetDictionary.update({ key : deepcopy(RESCALED_ParameterSetDictionary[key]) * deepcopy(RescalingFactorsDictionary[key]) })
return deepcopy(ORIGINAL_ParameterSetDictionary)
def ComputeRMSfeedingForGivenParameterSet_RESCALED_PARAMETERS(ParamSetRates_RESCALED, ParamSetForREACTIONS, ParamSetInitCond, OutputHSFandHSPtoPlot, AllDataControlsFeeding, RescalingFactorsDictionary):
""" Function that does exactly what ComputeRMSfeedingForGivenParameterSet does, but parameters are rescaled to their fiducial value."""
# This is a PRECONDITIONING, it serves to have a function which is easier to treat numerically with optimization algorithms as the gradient search
ParamSetRates_ORIGINAL = deepcopy(Convert_RESCALED_to_ORIGINAL_ParameterSet(ParamSetRates_RESCALED, RescalingFactorsDictionary))
Output = ComputeRMSfeedingForGivenParameterSet(ParamSetRates_ORIGINAL, ParamSetForREACTIONS, ParamSetInitCond, OutputHSFandHSPtoPlot, AllDataControlsFeeding)
return Output
def FindMinimumOfFunctionUsingGoldenRatioBisectionMethod(FunctionToMinimize, LowerBound, UpperBound, Tolerance):
GoldenRatio = 2./(math.sqrt(5.) + 1)
### Use the golden ratio to set the initial test points
x1 = UpperBound - GoldenRatio*(UpperBound - LowerBound)
x2 = LowerBound + GoldenRatio*(UpperBound - LowerBound)
### Evaluate the function at the test points
f1 = FunctionToMinimize(x1)
f2 = FunctionToMinimize(x2)
i = 0
while ( (abs(UpperBound - LowerBound) > Tolerance) and i <= 15):
i = i + 1
if f2 > f1:
# then the minimum is to the left of x2
# let x2 be the new upper bound
# let x1 be the new upper test point
### Set the new upper bound
UpperBound = deepcopy(x2)
### Set the new upper test point
### Use the special result of the golden ratio
x2 = deepcopy(x1)
f2 = deepcopy(f1)
### Set the new lower test point
x1 = UpperBound - GoldenRatio*(UpperBound - LowerBound)
f1 = FunctionToMinimize(x1)
elif f2 < f1:
# the minimum is to the right of x1
# let x1 be the new lower bound
# let x2 be the new lower test point
### Set the new lower bound
LowerBound = deepcopy(x1)
### Set the new lower test point
x1 = deepcopy(x2)
f1 = deepcopy(f2)
### Set the new upper test point
x2 = LowerBound + GoldenRatio*(UpperBound - LowerBound)
f2 = FunctionToMinimize(x2)
else:
print("Error in Golden Rule minimization algorithm!")
print(str(i) + " " + str(x1) + " " + str(x2) + " " + str(f1) + " " + str(f2))
### Use the mid-point of the final interval as the estimate of the optimzer
EstimatedMinimizer = (LowerBound + UpperBound)/2.
return EstimatedMinimizer
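# Example (hedged sketch): minimizing f(x) = (x - 2)**2 on [0, 5] via
#   FindMinimumOfFunctionUsingGoldenRatioBisectionMethod(lambda x: (x - 2.)**2, 0., 5., 1e-3)
# should return a value close to 2.0 (subject to the 15-iteration cap above).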
def NormalizeRnaCurvesFromSimulationsToMaxForPlot(RMSvalue, mRNA_HSF_simulation, mRNA_HSP_simulation, ListForPlottingHSF, ListForPlottingHSP, CurveNameInLegend, timesetDataRMS):
IndexOfT0seconds = int(vorl/timesetDataRMS.CurrentParams["delta_t"]) # ( seconds/seconds = adimensional )
RangeOfIndexesForPositiveTimes = range(IndexOfT0seconds,len(mRNA_HSF_simulation),1)
Max_HSF_FeedingControls_simulation = max(np.max(mRNA_HSF_simulation[kkk]) for kkk in RangeOfIndexesForPositiveTimes)
Max_HSP_FeedingControls_simulation = max(np.max(mRNA_HSP_simulation[kkk]) for kkk in RangeOfIndexesForPositiveTimes)
Y_HSF_FeedingControls_simulationNORM = (np.asarray(mRNA_HSF_simulation) / Max_HSF_FeedingControls_simulation) # * 100. for %
Y_HSP_FeedingControls_simulationNORM = (np.asarray(mRNA_HSP_simulation) / Max_HSP_FeedingControls_simulation) # * 100. for %
ListForPlottingHSF.append([CurveNameInLegend, Y_HSF_FeedingControls_simulationNORM])
ListForPlottingHSP.append([CurveNameInLegend, Y_HSP_FeedingControls_simulationNORM])
##############################################################################################
##################################### RMS for DOUBLE HS ######################################
##############################################################################################
def ExtractDataDoubleHSFromFiles(DataFileName, ListOfDoubleHSKeys):
ColumnNumber = 6
ListOfDataArray = []
FromDataFileToArrays(DataFileName, ColumnNumber, ListOfDataArray) # Read data file, put in list of arrays
TimePointsDoubleHS = np.array(ListOfDataArray[0])
DictionaryOfResults2HS = {}
index = 1
for key in ListOfDoubleHSKeys:
DictionaryOfResults2HS.update( {key : ListOfDataArray[index]} )
index = index + 1
ListToBeFilledWithResults = []
ListToBeFilledWithResults.append(TimePointsDoubleHS)
ListToBeFilledWithResults.append(DictionaryOfResults2HS)
return ListToBeFilledWithResults
#############################
def ExtractDataControlsDoubleHSExperimentFromFilesIntoListOfDictionaries():
""" EXTRACT EXPERIMENTAL DATA (DOUBLE HEAT SHOCK EXPERIMENTS) FROM FILES """
ListOfDoubleHSKeys= ["singleHS", "2hdoubleHS", "3hdoubleHS", "4hdoubleHS", "5hdoubleHS"]
PathOfFileContainingAll2HSData = "DataFiles/DataShroda2000ARSFig7b.dat"
ListToBeFilledWithResults = ExtractDataDoubleHSFromFiles(PathOfFileContainingAll2HSData, ListOfDoubleHSKeys)
#ArrayTimeDataPointsDoubleHS = ListToBeFilledWithResults[0] # (in seconds)
DictionaryOfArraysOfData2HS = ListToBeFilledWithResults[1]
DictionaryTimeDataPointsDoubleHS = {
"singleHS" : np.array([0.00, 30.0, 60.0, 90.0, 120.0, 150.0, 180.0, 210.0, 240.0, 270.0, 300.0, 330.0, 360.0]),
"2hdoubleHS" : np.array([0.00, 30.0, 60.0, 90.0, 120.0, 150.0, 180.0, 210.0, 240.0, 270.0]),
"3hdoubleHS" : np.array([0.00, 30.0, 60.0, 90.0, 120.0, 150.0, 180.0, 210.0, 240.0, 270.0, 300.0, 330.0]),
"4hdoubleHS" : np.array([0.00, 30.0, 60.0, 90.0, 120.0, 150.0, 180.0, 210.0, 240.0, 270.0, 300.0, 330.0, 360.0, 390.0]),
"5hdoubleHS" : np.array([0.00, 30.0, 60.0, 90.0, 120.0, 150.0, 180.0, 210.0, 240.0, 270.0, 300.0, 330.0, 360.0, 390.0, 420.0]),
}
# Create Temperature settings and Time settings reproducing the starting experimental setup (they will be modified when solving the ODE for different 2HSs)
HSduration = 30. # (min)
TsetDoubleHSdataRMS = ParametersSet({"Ttype": 2, "Tin": 23., "Tup": 40., "tau": 5., "ta": 0. * 60. + vorl, "tb": HSduration * 60. + vorl})
timesetDoubleHSDataRMS = ParametersSet({"t_start": 0., "t_stop": (2. * HSduration + 5 * 60. + 60.) * 60 + vorl, "delta_t": 5.0})
USELESS = "This should not appear. If you see it anywere, it means something is wrong. It is needed to keep the number of elements of AllDataControlsDoubleHS"
AllDataControlsDoubleHS = (DictionaryTimeDataPointsDoubleHS, DictionaryOfArraysOfData2HS, USELESS, TsetDoubleHSdataRMS, timesetDoubleHSDataRMS, ListOfDoubleHSKeys)
return AllDataControlsDoubleHS
#############################
def ComputePartOfRMSSimulationVsDataDoubleHS(ListOfDataTimes, timesetDoubleHSDataRMS, TimePointsForAllSimulations, ARSconcentrationForOneSetup, ListOfDataARSactivity, AbsoluteMaxData, AbsoluteMaxSimulation):
""" Compute the Sum over all datapoints of ( Xth - Xdata )^2, for 1 feeding experiment, taking into caccount HSF + HSP """
# OutputOfDoubleHSsimulation = [self.t, [ [Legend_singleHS, Yval_singleHS=[]], ..., [Legend_2HS5h,Yval_2HS5h=[]] ] ]
##### 2-C1.1: EXTRACT SIMULATION VALUES AT PROPER TIME POINTS FOR COMPARISON WITH DATA
ListOfARSsimulation = []
ListOfTimesForDatapoints = deepcopy(ListOfDataTimes) #= [0., 30., 60., 90., 120., etc.]
for val in ListOfTimesForDatapoints:
        j = int(( val * 60. + vorl ) / timesetDoubleHSDataRMS.CurrentParams["delta_t"]) # ( seconds/seconds = dimensionless index )
ListOfARSsimulation.append(ARSconcentrationForOneSetup[j])
ArrayOfARSsimulation = np.array([val for sublist in ListOfARSsimulation for val in sublist])
#print("We now want to compare these...")
#print(ListOfDataARSactivity)
#print()
#print(ArrayOfARSsimulation)
#print()
##### 2-C1.2: COMPUTE pieces of LS TH VS DATA - STAUROSPORINE
k = 0
SumOverDataPointsARS = 0.
for val in ArrayOfARSsimulation:
DeviationHSF = ArrayOfARSsimulation[k]/AbsoluteMaxSimulation - ListOfDataARSactivity[k]/AbsoluteMaxData
SumOverDataPointsARS = SumOverDataPointsARS + pow(DeviationHSF, 2)
#print(str(k) + " " + str(SumOverDataPointsARS))
k = k + 1
return SumOverDataPointsARS
def ComputeRMSdoubleHSforGivenParameterSet(ParamSetRates, ParamSetForREACTIONS, ParamSetInitCond, OutputARSactivityToPlot, AllDataControlsDoubleHS):
    """ Function to compute RMS w.r.t. the double heat shock ARS activity data from Schroda et al. 2000"""
    # OutputARSactivityToPlot = "Yes" or "No" (the "Yes" branch is currently commented out below).
############ 1] ############ NEED TO GENERATE A MODEL FOR EVERY PARAMS SET
    # Temperature and Time parameter sets reproducing the experiments
TsetDoubleHSdataRMS = AllDataControlsDoubleHS[3]
timesetDoubleHSDataRMS = AllDataControlsDoubleHS[4]
# Set default parameter values
TestParamSetIC = ParametersSet(ParamSetInitCond)
TestParamSetRATES = ParametersSet(ParamSetRates)
ParamSetForREACTIONS["piRFconst"] = ParamSetRates["ketaRF"]*ParamSetRates["ketaF"]/ParamSetRates["kFpi0"]*0.17/8.
ParamSetForREACTIONS["piRHPconst"] = ParamSetRates["ketaRHP"]*ParamSetRates["ketaHP"]/ParamSetRates["kpiHP"]*17.5
ParamSetForREACTIONSobject = ParametersSet(ParamSetForREACTIONS)
# Create an object of the class "Heat shock models" with these parameters
TestHSM = HeatShockModel(TestParamSetIC, TestParamSetRATES, ParamSetForREACTIONSobject)
############ 2] ############ NEXT, COMPUTE pieces necessary to compute the RMS W.R.T. DATA
##### 2-A: SIMULATE the single HS + the 4 2HS
SimulationARSdoubleHSdataRMS = Simulate(TestHSM, timesetDoubleHSDataRMS, TsetDoubleHSdataRMS, "Useless")
EmptyListToExtractOutput = []
SimulationARSdoubleHSdataRMS.TimeRunPlusARSdoubleHS(EmptyListToExtractOutput, AvoidPlots="Yes")
##### 2-B : Extract from input the dictionaries containing the data
DictionaryOfListsOfDataTimes = AllDataControlsDoubleHS[0]
DictionaryOfListsOfDataARSactivity = AllDataControlsDoubleHS[1]
ListOfDoubleHSKeys = AllDataControlsDoubleHS[5]
OutputOfDoubleHSsimulation = EmptyListToExtractOutput[0]
#print("I am the one " + str(OutputOfDoubleHSsimulation))
TimePointsForAllSimulations = OutputOfDoubleHSsimulation[0]
ARSconcentrationForEachHSsetupDictionary = {
"singleHS" : OutputOfDoubleHSsimulation[1][0][1],
"2hdoubleHS" : OutputOfDoubleHSsimulation[1][1][1],
"3hdoubleHS" : OutputOfDoubleHSsimulation[1][2][1],
"4hdoubleHS" : OutputOfDoubleHSsimulation[1][3][1],
"5hdoubleHS" : OutputOfDoubleHSsimulation[1][4][1]
}
EmptyARSdataMaximaList = []
for key in ListOfDoubleHSKeys:
massimo = deepcopy(max(DictionaryOfListsOfDataARSactivity[key]))
EmptyARSdataMaximaList.append(massimo)
AbsoluteMaxData = max(EmptyARSdataMaximaList)
EmptyARSSimulationMaximaList = []
for key in ListOfDoubleHSKeys:
massimo = deepcopy(max(ARSconcentrationForEachHSsetupDictionary[key]))
EmptyARSSimulationMaximaList.append(massimo)
AbsoluteMaxSimulationList = max(EmptyARSSimulationMaximaList)
AbsoluteMaxSimulation = AbsoluteMaxSimulationList[0]
#print()
#print("MAX")
#print()
#print(AbsoluteMaxData)
#print()
#print(AbsoluteMaxSimulation)
#print()
##### 2-C: Compute Part Of RMS Simulation Vs Data FOR EACH DIFFERENT FEEDING EXPERIMENT
SumOverDatapointsDoubleHS = {}
for key in ListOfDoubleHSKeys:
#print("I am into this loop!!!")
SumOverDatapointsDoubleHS.update( {key : ComputePartOfRMSSimulationVsDataDoubleHS(DictionaryOfListsOfDataTimes[key], timesetDoubleHSDataRMS, TimePointsForAllSimulations, ARSconcentrationForEachHSsetupDictionary[key], DictionaryOfListsOfDataARSactivity[key], AbsoluteMaxData, AbsoluteMaxSimulation)} )
############ 3] ############ Put together the pieces for each different dataset into 1 single RMS value!!!
NumberOfDataPoints, SumOverDatapoints = 0., 0.
for key in ListOfDoubleHSKeys:
NumberOfDataPoints = NumberOfDataPoints + len(DictionaryOfListsOfDataTimes[key])
SumOverDatapoints = SumOverDatapoints + SumOverDatapointsDoubleHS[key]
#print(key)
#print(NumberOfDataPoints)
#print(SumOverDatapoints)
RootMeanSquareDeviationDoubleHS = math.sqrt( SumOverDatapoints / NumberOfDataPoints )
print("\n" + str(RootMeanSquareDeviationDoubleHS) + "\n")
#if OutputHSFandHSPtoPlot == "No":
output = (RootMeanSquareDeviationDoubleHS)
#elif OutputHSFandHSPtoPlot == "Yes":
# output = (RootMeanSquareDeviation, SimulationFeedingControlsDataRMS.RF, SimulationFeedingControlsDataRMS.RHP, SimulationFeedingControlsDataRMS.t)
#else:
# print("\nError in RMS feeding function!!!\n")
return output
def ComputeRMStotalForGivenParameterSet_RESCALED_PARAMETERS(ParamSetRates_RESCALED, ParamSetForREACTIONS, ParamSetInitCond, AllDataControlsFeeding, RescalingFactorsDictionary, AllDataControlsDoubleHS):
""" Function that does exactly what ComputeRMSfeedingForGivenParameterSet does, but parameters are rescaled to their fiducial value."""
# This is a PRECONDITIONING, it serves to have a function which is easier to treat numerically with optimization algorithms as the gradient search
ParamSetRates_ORIGINAL = deepcopy(Convert_RESCALED_to_ORIGINAL_ParameterSet(ParamSetRates_RESCALED, RescalingFactorsDictionary))
RMS_Feeding=ComputeRMSfeedingForGivenParameterSet(ParamSetRates_ORIGINAL,ParamSetForREACTIONS,ParamSetInitCond,"No",AllDataControlsFeeding)
RMS_DoubleHS=ComputeRMSdoubleHSforGivenParameterSet(ParamSetRates_ORIGINAL,ParamSetForREACTIONS,ParamSetInitCond,"No",AllDataControlsDoubleHS)
Output = deepcopy(RMS_Feeding + RMS_DoubleHS)
return Output
| gpl-3.0 | -8,061,149,291,241,139,000 | 47.4944 | 308 | 0.711241 | false |
layus/INGInious | backend/tests/TestRemoteDocker.py | 1 | 6508 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015 Université Catholique de Louvain.
#
# This file is part of INGInious.
#
# INGInious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INGInious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with INGInious. If not, see <http://www.gnu.org/licenses/>.
# This test is made to be run on a very constrained architecture (boot2docker)
# It would be very difficult to make it run everywhere.
# If it doesn't work as-is on your arch, you can simply disable the TEST_DOCKER_JOB_MANAGER
# flag and trust the code, or you can modify the config in the test to make it run.
import os
from nose.plugins.skip import SkipTest
import docker
from backend.job_managers.remote_docker import RemoteDockerJobManager
TEST_DOCKER_JOB_MANAGER = os.environ.get("TEST_DOCKER_JOB_MANAGER", None)
class TestDockerJobManager(object):
def setUp(self):
if TEST_DOCKER_JOB_MANAGER is None:
raise SkipTest("Testing the Docker Job Manager is disabled.")
elif TEST_DOCKER_JOB_MANAGER == "boot2docker":
self.docker_connection = docker.Client(base_url="tcp://192.168.59.103:2375")
elif TEST_DOCKER_JOB_MANAGER == "travis":
self.docker_connection = docker.Client(base_url="tcp://localhost:2375")
else:
raise Exception("Unknown method for testing the Docker Job Manager!")
# Force the removal of all containers/images linked to this test
try:
self.docker_connection.remove_container("inginious-agent", force=True)
except:
pass
try:
self.docker_connection.remove_image("ingi/inginious-agent", force=True)
except:
pass
self.setUpDocker()
self.job_manager = None
self.setUpJobManager()
def setUpDocker(self):
pass
def setUpJobManager(self):
pass
def start_manager(self):
if TEST_DOCKER_JOB_MANAGER == "boot2docker":
self.job_manager = RemoteDockerJobManager([{
"remote_host": "192.168.59.103",
"remote_docker_port": 2375,
"remote_agent_port": 63456
}], {"default": "ingi/inginious-c-default"}, is_testing=True)
elif TEST_DOCKER_JOB_MANAGER == "travis":
self.job_manager = RemoteDockerJobManager([{
"remote_host": "localhost",
"remote_docker_port": 2375,
"remote_agent_port": 63456
}], {"default": "ingi/inginious-c-default"}, is_testing=True)
self.job_manager.start()
def build_fake_agent(self, dockerfile="FakeAgentDockerfile"):
dockerfile_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "utils/"))
print [line for line in self.docker_connection.build(dockerfile_dir, dockerfile=dockerfile, rm=True, tag="ingi/inginious-agent")]
def start_fake_agent(self):
response = self.docker_connection.create_container(
"ingi/inginious-agent",
detach=True,
name="inginious-agent"
)
container_id = response["Id"]
# Start the container
self.docker_connection.start(container_id)
def tearDown(self):
        # clean up Docker a bit...
if self.job_manager is not None:
self.job_manager.close()
try:
self.docker_connection.remove_container("inginious-agent", force=True)
except:
pass
try:
self.docker_connection.remove_image("ingi/inginious-agent", force=True)
except:
pass
class TestDockerJobManagerNoUpdateNeeded(TestDockerJobManager):
def setUpDocker(self):
self.build_fake_agent("FakeAgentDockerfile")
def test_agent_no_update_needed(self):
assert RemoteDockerJobManager.is_agent_image_update_needed(self.docker_connection) is False
class TestDockerJobManagerUpdateNeeded(TestDockerJobManager):
def setUpDocker(self):
self.build_fake_agent("FakeAgentWrongDockerfile")
def test_agent_update_needed(self):
assert RemoteDockerJobManager.is_agent_image_update_needed(self.docker_connection) is True
class TestDockerJobManagerNoImage(TestDockerJobManager):
def setUpDocker(self):
pass
def test_agent_no_image(self):
assert RemoteDockerJobManager.is_agent_image_update_needed(self.docker_connection) is True
class TestDockerJobManagerAgentAlreadyStarted(TestDockerJobManager):
def setUpDocker(self):
self.build_fake_agent("FakeAgentDockerfile")
self.start_fake_agent()
def test_agent_already_started(self):
assert RemoteDockerJobManager.is_agent_valid_and_started(self.docker_connection) is True
class TestDockerJobManagerAgentAlreadyStartedButDead(TestDockerJobManager):
def setUpDocker(self):
self.build_fake_agent("FakeAgentDockerfile")
self.start_fake_agent()
self.docker_connection.kill("inginious-agent")
def test_agent_already_started_but_dead(self):
assert RemoteDockerJobManager.is_agent_valid_and_started(self.docker_connection) is False
class TestDockerJobManagerInvalidAgentAlreadyStarted(TestDockerJobManager):
def setUpDocker(self):
self.build_fake_agent("FakeAgentWrongDockerfile")
self.start_fake_agent()
def test_invalid_agent_already_started(self):
assert RemoteDockerJobManager.is_agent_valid_and_started(self.docker_connection) is False
class TestDockerJobManagerNoAgentStarted(TestDockerJobManager):
def setUpDocker(self):
pass
def test_invalid_agent_already_started(self):
assert RemoteDockerJobManager.is_agent_valid_and_started(self.docker_connection) is False
class TestDockerJobManagerRun(TestDockerJobManager):
def setUpDocker(self):
self.build_fake_agent("FakeAgentDockerfile")
def setUpJobManager(self):
self.start_manager()
def test_docker_job_manager_run(self):
assert len(self.job_manager._agents_info) == 1
| agpl-3.0 | 1,654,411,361,262,695,700 | 34.752747 | 137 | 0.685877 | false |
AutorestCI/azure-sdk-for-python | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/prediction/prediction_endpoint.py | 1 | 14280 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from . import models
class PredictionEndpointConfiguration(Configuration):
"""Configuration for PredictionEndpoint
Note that all parameters used to create this instance are saved as instance
attributes.
:param api_key:
:type api_key: str
:param str base_url: Service URL
"""
def __init__(
self, api_key, base_url=None):
if api_key is None:
raise ValueError("Parameter 'api_key' must not be None.")
if not base_url:
base_url = 'https://southcentralus.api.cognitive.microsoft.com/customvision/v1.1/Prediction'
super(PredictionEndpointConfiguration, self).__init__(base_url)
self.add_user_agent('azure-cognitiveservices-vision-customvision/{}'.format(VERSION))
self.api_key = api_key
class PredictionEndpoint(object):
"""PredictionEndpoint
:ivar config: Configuration for client.
:vartype config: PredictionEndpointConfiguration
:param api_key:
:type api_key: str
:param str base_url: Service URL
"""
def __init__(
self, api_key, base_url=None):
self.config = PredictionEndpointConfiguration(api_key, base_url)
self._client = ServiceClient(None, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '1.1'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
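    # Minimal usage sketch (illustrative only; the key, project id and result
    # attribute names are assumptions based on the models in this package):
    #
    #   endpoint = PredictionEndpoint('<prediction-key>')
    #   result = endpoint.predict_image_url('<project-id>', url='http://example.com/cat.jpg')
    #   for prediction in result.predictions:
    #       print(prediction.tag, prediction.probability)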
def predict_image_url(
self, project_id, iteration_id=None, application=None, url=None, custom_headers=None, raw=False, **operation_config):
"""Predict an image url and saves the result.
:param project_id: The project id
:type project_id: str
:param iteration_id: Optional. Specifies the id of a particular
iteration to evaluate against.
The default iteration for the project will be used when not specified
:type iteration_id: str
:param application: Optional. Specifies the name of application using
the endpoint
:type application: str
:param url:
:type url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePredictionResultModel or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePredictionResultModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = '/{projectId}/url'
path_format_arguments = {
'projectId': self._serialize.url("project_id", project_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if iteration_id is not None:
query_parameters['iterationId'] = self._serialize.query("iteration_id", iteration_id, 'str')
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['Prediction-Key'] = self._serialize.header("self.config.api_key", self.config.api_key, 'str')
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePredictionResultModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def predict_image(
self, project_id, image_data, iteration_id=None, application=None, custom_headers=None, raw=False, **operation_config):
"""Predict an image and saves the result.
:param project_id: The project id
:type project_id: str
:param image_data:
:type image_data: Generator
:param iteration_id: Optional. Specifies the id of a particular
iteration to evaluate against.
The default iteration for the project will be used when not specified
:type iteration_id: str
:param application: Optional. Specifies the name of application using
the endpoint
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePredictionResultModel or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePredictionResultModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/{projectId}/image'
path_format_arguments = {
'projectId': self._serialize.url("project_id", project_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if iteration_id is not None:
query_parameters['iterationId'] = self._serialize.query("iteration_id", iteration_id, 'str')
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'multipart/form-data'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['Prediction-Key'] = self._serialize.header("self.config.api_key", self.config.api_key, 'str')
# Construct form data
form_data_content = {
'imageData': image_data,
}
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send_formdata(
request, header_parameters, form_data_content, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePredictionResultModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def predict_image_url_with_no_store(
self, project_id, iteration_id=None, application=None, url=None, custom_headers=None, raw=False, **operation_config):
"""Predict an image url without saving the result.
:param project_id: The project id
:type project_id: str
:param iteration_id: Optional. Specifies the id of a particular
iteration to evaluate against.
The default iteration for the project will be used when not specified
:type iteration_id: str
:param application: Optional. Specifies the name of application using
the endpoint
:type application: str
:param url:
:type url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePredictionResultModel or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePredictionResultModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = '/{projectId}/url/nostore'
path_format_arguments = {
'projectId': self._serialize.url("project_id", project_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if iteration_id is not None:
query_parameters['iterationId'] = self._serialize.query("iteration_id", iteration_id, 'str')
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['Prediction-Key'] = self._serialize.header("self.config.api_key", self.config.api_key, 'str')
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePredictionResultModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def predict_image_with_no_store(
self, project_id, image_data, iteration_id=None, application=None, custom_headers=None, raw=False, **operation_config):
"""Predict an image without saving the result.
:param project_id: The project id
:type project_id: str
:param image_data:
:type image_data: Generator
:param iteration_id: Optional. Specifies the id of a particular
iteration to evaluate against.
The default iteration for the project will be used when not specified
:type iteration_id: str
:param application: Optional. Specifies the name of application using
the endpoint
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePredictionResultModel or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePredictionResultModel
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/{projectId}/image/nostore'
path_format_arguments = {
'projectId': self._serialize.url("project_id", project_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if iteration_id is not None:
query_parameters['iterationId'] = self._serialize.query("iteration_id", iteration_id, 'str')
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'multipart/form-data'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['Prediction-Key'] = self._serialize.header("self.config.api_key", self.config.api_key, 'str')
# Construct form data
form_data_content = {
'imageData': image_data,
}
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send_formdata(
request, header_parameters, form_data_content, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePredictionResultModel', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| mit | -8,334,644,218,442,024,000 | 39.338983 | 131 | 0.646289 | false |
Strubbl/pynder | pynder.py | 1 | 7110 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of Pynder.
Pynder is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pynder is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pynder. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
from operator import itemgetter
import os
import re
import time
import bot
import config
import feedparser
import utils
import version
__author__ = "Strubbl"
__version__ = version.version
__credits__ = ["Strubbl"]
class Pynder(bot.Bot):
"""
class that extends the Bot class in order to add commands to the bot
"""
def __init__(self, name, jid, password, resource=None):
"""
init method to set up a Bot and an empty dict to keep the slapped things
"""
super(Pynder, self).__init__(name, jid, password, resource)
self.slapped = {}
def cmd_echo(self, args, tojid, typ="chat"):
"""
echo command to simply echo something as text
"""
self.send(self.room_name, args)
def cmd_help(self, args, tojid, typ="chat"):
"""
help command
        returns all available commands of this bot class
"""
helptext = "available commands: " + str(self.commands)
self.send(tojid, helptext, typ)
def cmd_uptime(self, args, tojid, typ="chat"):
"""
        calculates the uptime, formats it as a human-readable string, and then echoes it
"""
uptime = datetime.datetime.today()-self.birthday
# python >= 2.7: http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds
#uptimestr = utils.format_seconds(uptime.total_seconds())
# for compatibility with python < 2.7
uptimestr = utils.format_seconds((uptime.microseconds + (uptime.seconds + uptime.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self.send(tojid, uptimestr, typ)
def cmd_slap(self, args, tojid, typ="chat"):
"""
slaps anything you want and increases the counter counting how many times it was slapped
"""
key = args.lower()
if key in self.slapped:
self.slapped[key] += 1
else:
self.slapped[key] = 1
self.send(tojid, "/me slaps " + args, typ)
# self.send(tojid, "/me slaps " + args + ". (" + str(self.slapped[key]) + " totally slapped)", typ)
def cmd_totallyslapped(self, args, tojid, typ="chat"):
"""
echo what and how often it was slapped sorted by number of slaps
"""
#sort the dict by value
pairs = sorted(self.slapped.iteritems(), key=itemgetter(1), reverse=True)
i = 1
message = ""
for key, value in pairs:
if i > 1:
message += "\n"
message += str(i) + "# " + key + ": " + str(value)
i += 1
self.send(tojid, message, typ)
def cmd_totalusers(self, args, tojid, typ="chat"):
"""
Minecraft: echo how many users are ingame
"""
netstatout = os.popen("/bin/netstat -tn").read()
searchstring = config.mcport + ".*" + config.established
connlist = re.findall(searchstring,netstatout)
usercount = str(len(connlist))
message= "user online: " + usercount
self.send(tojid,message,typ)
def cmd_listusers(self, args, tojid, typ="chat"):
"""
Minecraft: echo list of users
"""
os.popen("screen -S mc -X stuff 'list^M'")
time.sleep(0.1)
logcommand = "/usr/bin/tail -n 1 "+ config.watch_file
logoutput = os.popen(logcommand).read()
pos = re.search(r'(]:) (.*$)',logoutput)
message = pos.group(2)
self.send(tojid,message,typ)
def cronjob(self):
self.check_rss()
def check_rss(self):
rss_cache_dir = config.cache_dir + os.sep + "rss"
newest_item_written = False
if config.rss_feeds:
self.rootlog.debug("rss feeds found:" + str(config.rss_feeds))
for name, feed in config.rss_feeds.items():
last_cache_item = utils.read_file(rss_cache_dir + os.sep + name)
f = feedparser.parse(feed)
self.rootlog.debug(str(f["channel"]["title"] + " feed fetched"))
if last_cache_item != None:
self.rootlog.debug("last_cache_item not None: " + last_cache_item)
for i in f["items"]:
if str(last_cache_item.strip()) == str(i["date"].strip()):
self.rootlog.debug("item found, aborting")
break
else:
if newest_item_written == False:
utils.write_file(rss_cache_dir + os.sep + name, i["date"].strip())
newest_item_written = True
# write date of this feed into file (not elegant)
text2chat = "".join(["[", name, "] ", i["title"], " ", i["link"]])
self.rootlog.debug(text2chat)
self.send(self.room_name, text2chat)
else:
self.rootlog.debug("last_cache_item is None")
utils.write_file(rss_cache_dir + os.sep + name, f["items"][0]["date"])
def parse_minecraft_log(self, line):
# ^\[[\d:]+\] \[.*\]: <(\S+)> (.*)$
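        # matches chat lines such as (hypothetical sample):
        #   [12:34:56] [Server thread/INFO]: <Steve> hello world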
p = re.compile('^\[[\d:]+\] \[.*\]: <(\S+)> (.*)$')
m = p.match(line)
if m != None:
who = m.group(1)
msg = m.group(2)
self.send(self.room_name, who + ": " + msg)
def parse_new_file_content(self, line):
self.parse_minecraft_log(line)
def write_to_minecraft_chat(self, message):
self.rootlog.debug("write_to_minecraft_chat: orig message: " + message)
message = message.replace(config.room + "@" + config.conference_server + "/", "")
message = re.escape(message)
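        # re.escape escapes every non-alphanumeric character; the loop below
        # selectively un-escapes punctuation considered safe for the say command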
for i in ".,:-()[]$€?! äüöÄÖÜß":
message = message.replace("\\" + i, i)
message = message.replace("\n", " ")
self.rootlog.debug("write_to_minecraft_chat: message: " + message)
minecraft_say = config.minecraft_say.replace('%message%', message)
self.rootlog.debug("write_to_minecraft_chat: command: " + minecraft_say)
os.system(minecraft_say)
def handle_message(self, nick, text):
message = nick + ": " + text
self.write_to_minecraft_chat(message)
### main program
if __name__ == "__main__":
p = Pynder(config.name, config.jid, config.password, config.resource)
p.go_online()
| gpl-3.0 | -5,437,938,528,855,794,000 | 35.984375 | 128 | 0.558513 | false |
melodous/designate | designate/schema/resolvers.py | 1 | 1348 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from designate.openstack.common import log as logging
from designate import utils
LOG = logging.getLogger(__name__)
class LocalResolver(jsonschema.RefResolver):
def __init__(self, base_uri, referrer):
super(LocalResolver, self).__init__(base_uri, referrer, (), True)
self.api_version = None
@classmethod
def from_schema(cls, api_version, schema, *args, **kwargs):
resolver = cls(schema.get("id", ""), schema, *args, **kwargs)
resolver.api_version = api_version
return resolver
def resolve_remote(self, uri):
LOG.debug('Loading remote schema: %s' % uri)
return utils.load_schema(self.api_version, uri)
| apache-2.0 | -8,684,722,769,681,558,000 | 33.564103 | 75 | 0.707715 | false |
pjh/vm-analyze | analyze/oldscripts/gather_proc.py | 1 | 3609 | #! /usr/bin/env python3.3
# Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, [email protected]
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
from vm_regex import *
from pjh_utils import *
import vm_common as vm
import errno
import os
import re
import stat
import sys
proc_files_we_care_about = ("cmdline", "maps", "smaps", "comm", "status")
'''
output_subdir should have just been created, and should be empty.
'''
def copy_proc_files(pid_dir, output_subdir):
tag = "copy_proc_files"
# pid_dir is a /proc/[pid] directory, and output_subdir is a corresponding
# [pid] subdirectory in the output directory. Scan through the list of
# files that we care about and copy the contents of each one to the output
# directory. Because /proc files are not normal file system files, we
# don't use a copy command, but instead open every file for reading and
# then write every line to the output file.
for fname in proc_files_we_care_about:
proc_fname = "{0}/{1}".format(pid_dir, fname)
out_fname = "{0}/{1}".format(output_subdir, fname)
print_debug(tag, ("copying '{0}' to '{1}'".format(
proc_fname, out_fname)))
vm.copy_proc_file_old(proc_fname, out_fname)
def gather_proc_files(output_dir):
tag = "gather_proc_files"
proc_root = "/proc"
# Scan through all of the files under /proc, and for every process
# subdirectory (names with just a PID), copy the files that we care
# about to a corresponding directory in the output directory.
if not os.path.exists(proc_root):
print_error_exit(tag, ("proc_root directory '{0}' does not "
"exist!").format(proc_root))
dir_contents = os.listdir(proc_root)
for item in dir_contents:
match = valid_pid_dir.match(item)
if match:
pid = match.group(1)
pid_dir = "{0}/{1}".format(proc_root, pid)
if os.path.isdir(pid_dir):
output_subdir = "{0}/{1}".format(output_dir, pid)
os.mkdir(output_subdir)
copy_proc_files(pid_dir, output_subdir)
return
def create_output_dir(output_dir):
tag = "create_output_dir"
if os.path.exists(output_dir):
print_error_exit(tag, "Output directory '{0}' already exists".format(
output_dir))
else:
os.mkdir(output_dir)
print(("Output will be created in directory '{0}'").format(output_dir))
return
def check_requirements(output_dir):
tag = "check_requirements"
# Check for super-user permissions: try to open a /proc file that should
# not be readable by normal users.
kernel_fname = "/proc/kcore"
try:
f = open(kernel_fname, 'r')
f.close()
except IOError as e:
#if (e == errno.EACCES):
print_error_exit(tag, "must be run as root")
if os.path.exists(output_dir):
print_error_exit(tag, ("output directory '{0}' already exists").format(
output_dir))
return
def usage():
print("usage: {0} <output-dir> <user>[:<group>]".format(sys.argv[0]))
print(" <output-dir> will be created, its owner will be set to <user>, ")
print(" and its group will optionally be set to <group>.")
print(" This script must be run with root privilege (in order to read "
"/proc)!")
sys.exit(1)
def parse_args(argv):
tag = "parse_args"
if len(argv) != 3:
usage()
print_debug(tag, 'argv: {0}'.format(argv))
output_dir = argv[1]
usrgrp = argv[2]
return (output_dir, usrgrp)
# Main:
if __name__ == "__main__":
tag = "main"
print_debug(tag, "entered")
(output_dir, usrgrp) = parse_args(sys.argv)
check_requirements(output_dir)
create_output_dir(output_dir)
gather_proc_files(output_dir)
set_owner_group(output_dir, usrgrp)
sys.exit(0)
else:
print("Must run stand-alone")
usage()
sys.exit(1)
| bsd-3-clause | -7,003,235,764,294,394,000 | 27.195313 | 75 | 0.688556 | false |
andreasvc/disco-dop | web/browse.py | 1 | 12449 | """Web interface to browse a corpus with various visualizations."""
# stdlib
import os
import re
import sys
import glob
import math
import logging
from collections import OrderedDict
from functools import wraps
import matplotlib
matplotlib.use('AGG')
import matplotlib.cm as cm
import pandas
# Flask & co
from flask import Flask, Response
from flask import request, render_template
# disco-dop
from discodop import treebank, treebanktransforms
from discodop.tree import DrawTree
DEBUG = False # when True: enable debugging interface, disable multiprocessing
PASSWD = None # optionally, dict with user=>pass strings
HEADRULES = '../alpino.headrules'
logging.basicConfig(
format='%(asctime)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG)
APP = Flask(__name__)
log = APP.logger
STANDALONE = __name__ == '__main__'
CORPUS_DIR = "corpus/"
COLORS = dict(enumerate('''
Black Red Green Orange Blue Turquoise SlateGray Peru Teal Aqua
Aquamarine BlanchedAlmond Brown Burlywood CadetBlue Chartreuse
Chocolate Coral Crimson Cyan Firebrick ForestGreen Fuchsia Gainsboro
Gold Goldenrod Gray GreenYellow HotPink IndianRed Indigo Khaki Lime
YellowGreen Magenta Maroon Yellow MidnightBlue Moccasin NavyBlue Olive
OliveDrab Orchid PapayaWhip Pink Plum PowderBlue Purple RebeccaPurple
RoyalBlue SaddleBrown Salmon SandyBrown SeaGreen Sienna Silver SkyBlue
SlateBlue Tan Thistle Tomato Violet Wheat'''.split()))
WORDLIST = pandas.read_table('sonar-word.freqsort.lower.gz',
encoding='utf8', index_col=0, header=None, names=['word', 'count'],
nrows=20000).index
def getdeplen(item):
"""Compute dependency length."""
tree = item.tree.copy(True)
deps = treebank.dependencies(tree)
a, b = treebank.deplen(deps)
return ([abs(x - y) > 7 for x, _, y in deps], a / b if b else 0)
# cannot highlight due to removing punct
# return (None, a / b if b else 0)
def getmodifiers(item):
"""Count and highlight REL/PP-modifiers."""
nodes = list(item.tree.subtrees(lambda n: n.label in ('REL', 'PP')
and treebanktransforms.function(n) == 'mod'))
return toboolvec(len(item.sent), {a for x in nodes
for a in x.leaves()}), len(nodes)
def toboolvec(length, indices):
"""Convert a list of indices into a list of booleans."""
return [n in indices for n in range(length)]
# Functions that accept item object with item.tree and item.sent members;
# return tuple (wordhighlights, sentweight).
FILTERS = {
'average dependency length': getdeplen,
'd-level': lambda i: (None, treebanktransforms.dlevel(i.tree)),
'rare words': lambda i: (list(~pandas.Index(
t.lower() for t in i.sent
).isin(WORDLIST)
& pandas.Series([ # filter names
'eigen' not in n.source[treebank.MORPH]
for n in
sorted(i.tree.subtrees(lambda n: isinstance(n[0], int)),
key=lambda n: n[0])])
), None),
'PP/REL modifiers': getmodifiers,
'punctuation': lambda i:
(None, max('.,\'"?!(:;'.find(t) + 1 for t in i.sent)),
'direct speech': lambda i:
(None, re.match(r"^- .*$|(?:^|.* )['\"](?: .*|$)",
' '.join(i.sent)) is not None),
}
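# A hypothetical additional filter obeying the same contract would be, e.g.:
#   'sentence length': lambda i: (None, len(i.sent)),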
def torgb(val, mappable):
"""Return hexadecimal HTML color string."""
return '#%02x%02x%02x' % mappable.to_rgba(val, bytes=True)[:3]
def charvalues(sent, values):
"""Project token values to character values.
>>> sorted(charvalues(['The', 'cat', 'is', 'on', 'the', 'mat'],
... [0, 0, 1, 1, 0, 1]))
[0, 1, 2, 3, 8, 9, 10, 14, 15, 16, 17]
"""
assert len(sent) == len(values)
result = []
for a, b in zip(sent, values):
result.extend([b] * (len(a) + 1))
return result
# http://flask.pocoo.org/snippets/8/
def check_auth(username, password):
"""This function is called to check if a username / password
combination is valid."""
return PASSWD is None or (username in PASSWD
and password == PASSWD[username])
def authenticate():
"""Sends a 401 response that enables basic auth."""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
"""Decorator to require basic authentication for route."""
@wraps(f)
def decorated(*args, **kwargs):
"""This docstring intentionally left blank."""
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# end snipppet
def applyhighlight(sent, high1, high2, colorvec=None):
"""Return a version of sent where given char. indices are highlighted."""
cur = None
start = 0
out = []
for n, _ in enumerate(sent):
if colorvec is not None:
if cur != COLORS.get(colorvec[n], 'gray'):
out.append(sent[start:n])
if cur is not None:
out.append('</font>')
out.append('<font color=%s>' % COLORS.get(colorvec[n], 'gray'))
start = n
cur = COLORS.get(colorvec[n], 'gray')
elif n in high1:
if cur != 'red':
out.append(sent[start:n])
if cur is not None:
out.append('</span>')
out.append('<span class=r>')
start = n
cur = 'red'
elif n in high2:
if cur != 'blue':
out.append(sent[start:n])
if cur is not None:
out.append('</span>')
out.append('<span class=b>')
start = n
cur = 'blue'
else:
if cur is not None:
out.append(sent[start:n])
out.append('</span>')
start = n
cur = None
out.append(sent[start:])
if cur is not None:
out.append('</font>')
return ''.join(out)
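# addsentweight normalizes a (wordhighlights, sentweight) filter result,
# deriving the sentence weight from the highlight vector when none was given.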
def addsentweight(x):
wordhighlights, sentweight = x
if sentweight is None:
return wordhighlights, sum(wordhighlights)
return x
@APP.route('/browse')
@requires_auth
def browsetrees():
"""Browse through trees in a file."""
chunk = 20 # number of trees to fetch for one request
if 'text' in request.args and 'sent' in request.args:
textno = int(request.args['text'])
sentno = int(request.args['sent'])
start = max(1, sentno - sentno % chunk)
stop = start + chunk
nofunc = 'nofunc' in request.args
nomorph = 'nomorph' in request.args
filename = os.path.join(CORPUS_DIR, TEXTS[textno] + '.export')
trees = CORPORA[filename].itertrees(start, stop)
results = ['<pre id="t%s"%s>%s\n%s</pre>' % (n,
' style="display: none; "' if 'ajax' in request.args else '',
', '.join('%s: %.3g' % (f, addsentweight(FILTERS[f](item))[1])
for f in sorted(FILTERS)),
DrawTree(item.tree, item.sent).text(
unicodelines=True, html=True))
for n, (_key, item) in enumerate(trees, start)]
if 'ajax' in request.args:
return '\n'.join(results)
prevlink = '<a id=prev>prev</a>'
if sentno > chunk:
prevlink = '<a href="browse?text=%d;sent=%d" id=prev>prev</a>' % (
textno, sentno - chunk + 1)
nextlink = '<a id=next>next</a>'
nextlink = '<a href="browse?text=%d;sent=%d" id=next>next</a>' % (
textno, sentno + chunk + 1)
return render_template('browse.html', textno=textno, sentno=sentno,
text=TEXTS[textno], totalsents=1000,
trees=results, prevlink=prevlink, nextlink=nextlink,
chunk=chunk, nofunc=nofunc, nomorph=nomorph,
mintree=start, maxtree=stop)
return '<h1>Browse through trees</h1>\n<ol>\n%s</ol>\n' % '\n'.join(
'<li><a href="browse?text=%d;sent=1;nomorph">%s</a> ' % (n, text)
for n, text in enumerate(TEXTS))
@APP.route('/')
@APP.route('/browsesents')
@requires_auth
def browsesents():
"""Browse through sentences in a file; highlight selectable features."""
chunk = 20 # number of sentences per page
if 'text' in request.args and 'sent' in request.args:
textno = int(request.args['text'])
sentno = int(request.args['sent'])
sentno = max(chunk // 2 + 1, sentno)
start = max(1, sentno - chunk // 2)
stop = start + chunk
filename = os.path.join(CORPUS_DIR, TEXTS[textno] + '.export')
feat = request.args.get('feat', next(iter(FILTERS)))
trees = list(CORPORA[filename].itertrees(start, stop))
results = []
values = [addsentweight(FILTERS[feat](item))
for n, (_key, item) in enumerate(trees, start)]
norm = matplotlib.colors.Normalize(
vmin=0, vmax=max(a for _, a in values) * 2)
mappable = cm.ScalarMappable(norm, 'YlOrBr')
for n, ((_key, item), (wordhighlights, sentweight)) in enumerate(
zip(trees, values), start):
if sentweight is None:
sentweight = sum(wordhighlights)
if wordhighlights is not None:
xsent = applyhighlight(
' '.join(item.sent), None, None,
colorvec=charvalues(item.sent, wordhighlights))
else:
xsent = ' '.join(item.sent)
results.append(
'<a href="browse?text=%d;sent=%d" '
'style="text-decoration: none; color: black;">'
'<span style="background: %s; " title="%s: %.3g">'
' %s </span></a>' % (textno, n,
torgb(sentweight, mappable), feat, sentweight, xsent))
legend = 'Feature: [ %s ]<br>' % ', '.join(f if f == feat
else ('<a href="browsesents?text=%d;sent=%d;feat=%s">'
'%s</a>' % (textno, sentno, f, f))
for f in sorted(FILTERS))
legend += 'Legend: ' + ''.join(
'<span style="background-color: %s; width: 30px; '
'display: inline-block; text-align: center; ">'
'%d</span>' % (torgb(n, mappable), n)
for n in range(0,
int(math.ceil(max(a for _, a in values))) + 1))
prevlink = '<a id=prev>prev</a>'
if sentno > chunk:
prevlink = (
'<a href="browsesents?text=%d;sent=%d;feat=%s" id=prev>'
'prev</a>' % (textno, sentno - chunk, feat))
nextlink = '<a id=next>next</a>'
nextlink = ('<a href="browsesents?text=%d;sent=%d;feat=%s" id=next>'
'next</a>' % (textno, sentno + chunk, feat))
return render_template('browsesents.html', textno=textno,
sentno=sentno, text=TEXTS[textno],
totalsents='??', # FIXME
sents=results, prevlink=prevlink, nextlink=nextlink,
chunk=chunk, mintree=start, legend=legend,
query=request.args.get('query', ''),
engine='')
return render_template('browsemain.html',
texts=TEXTS)
def querydict(queries):
"""Return an OrderedDict of names and queries.
name is abbreviated query if not given."""
result = OrderedDict()
for line in (x for x in queries.splitlines() if x.strip()):
if ':' in line and line[:line.index(':')].isalnum():
name, query = line.split(':', 1)
else:
name = line[:100] + ('' if len(line) < 100 else '...')
query = line
if '\t' in query:
normquery, query = query.split('\t')
else:
normquery = None
result[name] = normquery, query
return result
def getcorpus():
"""Get list of files and number of lines in them."""
files = sorted(glob.glob(os.path.join(CORPUS_DIR, '*.export')))
assert files, ('no corpus files with extension .export '
'found.')
texts = [os.path.splitext(os.path.basename(a))[0] for a in files]
corpora = {filename: treebank.NegraCorpusReader(filename,
headrules=HEADRULES, punct='move')
for filename in files}
if os.path.exists('metadata.csv'):
metadata = pandas.read_csv('metadata.csv', index_col=0)
assert set(metadata.index) == set(texts), (
'metadata.csv does not match list of files.\n'
'only in metadata: %s\nonly in files: %s' % (
set(metadata.index) - set(texts),
set(texts) - set(metadata.index)))
metadata = metadata.loc[texts]
else:
metadata = None
return texts, corpora, metadata
class QueryStringRedirectMiddleware(object):
"""Support ; as query delimiter.
http://flask.pocoo.org/snippets/43/"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
qs = environ.get('QUERY_STRING', '')
environ['QUERY_STRING'] = qs.replace(';', '&')
return self.application(environ, start_response)
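# e.g. a request for '/browse?text=1;sent=20' reaches Flask as
# '/browse?text=1&sent=20' after this middleware rewrites the query string.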
APP.wsgi_app = QueryStringRedirectMiddleware(APP.wsgi_app)
log.info('loading corpus.')
if STANDALONE:
from getopt import gnu_getopt, GetoptError
try:
opts, _args = gnu_getopt(sys.argv[1:], '',
['port=', 'ip=', 'numproc=', 'debug'])
opts = dict(opts)
except GetoptError as err:
print('error: %r' % err, file=sys.stderr)
sys.exit(2)
DEBUG = '--debug' in opts
# NB: load corpus regardless of whether running standalone:
(TEXTS, CORPORA, METADATA) = getcorpus()
log.info('corpus loaded.')
try:
with open('treesearchpasswd.txt', 'rt') as fileobj:
PASSWD = {a.strip(): b.strip() for a, b
in (line.split(':', 1) for line in fileobj)}
log.info('password protection enabled.')
except IOError:
log.info('no password protection.')
if STANDALONE:
APP.run(use_reloader=False,
host=opts.get('--ip', '0.0.0.0'),
port=int(opts.get('--port', 5003)),
debug=DEBUG)
| gpl-2.0 | -5,266,283,117,673,447,000 | 31.674541 | 79 | 0.660053 | false |
cloud-rocket/python-OBD | obd/decoders.py | 1 | 9011 |
########################################################################
# #
# python-OBD: A python OBD-II serial module derived from pyobd #
# #
# Copyright 2004 Donour Sizemore ([email protected]) #
# Copyright 2009 Secons Ltd. (www.obdtester.com) #
# Copyright 2009 Peter J. Creath #
# Copyright 2015 Brendan Whitfield ([email protected]) #
# #
########################################################################
# #
# decoders.py #
# #
# This file is part of python-OBD (a derivative of pyOBD) #
# #
# python-OBD is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# python-OBD is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with python-OBD. If not, see <http://www.gnu.org/licenses/>. #
# #
########################################################################
import math
from .utils import *
from .codes import *
from .debug import debug
'''
All decoders take the form:
def <name>(_hex):
...
return (<value>, <unit>)
'''
# todo
def todo(_hex):
return (_hex, Unit.NONE)
# hex in, hex out
def noop(_hex):
return (_hex, Unit.NONE)
# hex in, bitstring out
def pid(_hex):
v = bitstring(_hex, len(_hex) * 4)
return (v, Unit.NONE)
'''
Sensor decoders
Return Value object with value and units
'''
def count(_hex):
v = unhex(_hex)
return (v, Unit.COUNT)
# 0 to 100 %
def percent(_hex):
v = unhex(_hex[0:2])
v = v * 100.0 / 255.0
return (v, Unit.PERCENT)
# -100 to 100 %
def percent_centered(_hex):
v = unhex(_hex[0:2])
v = (v - 128) * 100.0 / 128.0
return (v, Unit.PERCENT)
# -40 to 215 C
def temp(_hex):
v = unhex(_hex)
v = v - 40
return (v, Unit.C)
# -40 to 6513.5 C
def catalyst_temp(_hex):
v = unhex(_hex)
v = (v / 10.0) - 40
return (v, Unit.C)
# -128 to 128 mA
def current_centered(_hex):
v = unhex(_hex[4:8])
v = (v / 256.0) - 128
return (v, Unit.MA)
# 0 to 1.275 volts
def sensor_voltage(_hex):
v = unhex(_hex[0:2])
v = v / 200.0
return (v, Unit.VOLT)
# 0 to 8 volts
def sensor_voltage_big(_hex):
v = unhex(_hex[4:8])
v = (v * 8.0) / 65535
return (v, Unit.VOLT)
# 0 to 765 kPa
def fuel_pressure(_hex):
v = unhex(_hex)
v = v * 3
return (v, Unit.KPA)
# 0 to 255 kPa
def pressure(_hex):
v = unhex(_hex)
return (v, Unit.KPA)
# 0 to 5177 kPa
def fuel_pres_vac(_hex):
v = unhex(_hex)
v = v * 0.079
return (v, Unit.KPA)
# 0 to 655,350 kPa
def fuel_pres_direct(_hex):
v = unhex(_hex)
v = v * 10
return (v, Unit.KPA)
# -8192 to 8192 Pa
def evap_pressure(_hex):
# decode the twos complement
a = twos_comp(unhex(_hex[0:2]), 8)
b = twos_comp(unhex(_hex[2:4]), 8)
v = ((a * 256.0) + b) / 4.0
return (v, Unit.PA)
# 0 to 327.675 kPa
def abs_evap_pressure(_hex):
v = unhex(_hex)
v = v / 200.0
return (v, Unit.KPA)
# -32767 to 32768 Pa
def evap_pressure_alt(_hex):
v = unhex(_hex)
v = v - 32767
return (v, Unit.PA)
# 0 to 16,383.75 RPM
def rpm(_hex):
v = unhex(_hex)
v = v / 4.0
return (v, Unit.RPM)
# 0 to 255 KPH
def speed(_hex):
v = unhex(_hex)
return (v, Unit.KPH)
# -64 to 63.5 degrees
def timing_advance(_hex):
v = unhex(_hex)
v = (v - 128) / 2.0
return (v, Unit.DEGREES)
# -210 to 301 degrees
def inject_timing(_hex):
v = unhex(_hex)
v = (v - 26880) / 128.0
return (v, Unit.DEGREES)
# 0 to 655.35 grams/sec
def maf(_hex):
v = unhex(_hex)
v = v / 100.0
return (v, Unit.GPS)
# 0 to 2550 grams/sec
def max_maf(_hex):
v = unhex(_hex[0:2])
v = v * 10
return (v, Unit.GPS)
# 0 to 65535 seconds
def seconds(_hex):
v = unhex(_hex)
return (v, Unit.SEC)
# 0 to 65535 minutes
def minutes(_hex):
v = unhex(_hex)
return (v, Unit.MIN)
# 0 to 65535 km
def distance(_hex):
v = unhex(_hex)
return (v, Unit.KM)
# 0 to 3212 Liters/hour
def fuel_rate(_hex):
v = unhex(_hex)
v = v * 0.05
return (v, Unit.LPH)
'''
Special decoders
Return objects, lists, etc
'''
def status(_hex):
bits = bitstring(_hex, 32)
output = Status()
output.MIL = bitToBool(bits[0])
output.DTC_count = unbin(bits[1:8])
output.ignition_type = IGNITION_TYPE[unbin(bits[12])]
output.tests.append(Test("Misfire", \
bitToBool(bits[15]), \
bitToBool(bits[11])))
output.tests.append(Test("Fuel System", \
bitToBool(bits[14]), \
bitToBool(bits[10])))
output.tests.append(Test("Components", \
bitToBool(bits[13]), \
bitToBool(bits[9])))
# different tests for different ignition types
if(output.ignition_type == IGNITION_TYPE[0]): # spark
for i in range(8):
if SPARK_TESTS[i] is not None:
t = Test(SPARK_TESTS[i], \
bitToBool(bits[(2 * 8) + i]), \
bitToBool(bits[(3 * 8) + i]))
output.tests.append(t)
elif(output.ignition_type == IGNITION_TYPE[1]): # compression
for i in range(8):
if COMPRESSION_TESTS[i] is not None:
t = Test(COMPRESSION_TESTS[i], \
bitToBool(bits[(2 * 8) + i]), \
bitToBool(bits[(3 * 8) + i]))
output.tests.append(t)
return (output, Unit.NONE)
def fuel_status(_hex):
v = unhex(_hex[0:2]) # todo, support second fuel system
if v <= 0:
debug("Invalid fuel status response (v <= 0)", True)
return (None, Unit.NONE)
i = math.log(v, 2) # only a single bit should be on
if i % 1 != 0:
debug("Invalid fuel status response (multiple bits set)", True)
return (None, Unit.NONE)
i = int(i)
if i >= len(FUEL_STATUS):
debug("Invalid fuel status response (no table entry)", True)
return (None, Unit.NONE)
return (FUEL_STATUS[i], Unit.NONE)
def air_status(_hex):
v = unhex(_hex)
if v <= 0:
debug("Invalid air status response (v <= 0)", True)
return (None, Unit.NONE)
i = math.log(v, 2) # only a single bit should be on
if i % 1 != 0:
debug("Invalid air status response (multiple bits set)", True)
return (None, Unit.NONE)
i = int(i)
if i >= len(AIR_STATUS):
debug("Invalid air status response (no table entry)", True)
return (None, Unit.NONE)
return (AIR_STATUS[i], Unit.NONE)
def obd_compliance(_hex):
i = unhex(_hex)
v = "Error: Unknown OBD compliance response"
if i < len(OBD_COMPLIANCE):
v = OBD_COMPLIANCE[i]
return (v, Unit.NONE)
def fuel_type(_hex):
i = unhex(_hex)
v = "Error: Unknown fuel type response"
if i < len(FUEL_TYPES):
v = FUEL_TYPES[i]
return (v, Unit.NONE)
# converts 2 bytes of hex into a DTC code
def single_dtc(_hex):
if len(_hex) != 4:
return None
if _hex == "0000":
return None
bits = bitstring(_hex[0], 4)
dtc = ""
dtc += ['P', 'C', 'B', 'U'][unbin(bits[0:2])]
dtc += str(unbin(bits[2:4]))
dtc += _hex[1:4]
return dtc
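# worked example of the decoding above: single_dtc("0133") -> "P0133"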
# converts a frame of 2-byte DTCs into a list of DTCs
# example input = "010480034123"
#                  [  ][  ][  ]
def dtc(_hex):
codes = []
for n in range(0, len(_hex), 4):
dtc = single_dtc(_hex[n:n+4])
if dtc is not None:
# pull a description if we have one
desc = "Unknown error code"
if dtc in DTC:
desc = DTC[dtc]
codes.append( (dtc, desc) )
return (codes, Unit.NONE)
| gpl-2.0 | 8,309,814,910,839,828,000 | 23.687671 | 72 | 0.487182 | false |
ryneches/SuchTree | SuchTree/tests/test_SuchTree.py | 1 | 3768 | from __future__ import print_function
import pytest
from pytest import approx
from SuchTree import SuchTree
from dendropy import Tree
from itertools import combinations, chain
import numpy
try :
import networkx
has_networkx = True
except ImportError :
has_networkx = False
test_tree = 'SuchTree/tests/test.tree'
dpt = Tree.get( file=open(test_tree), schema='newick' )
dpt.resolve_polytomies()
for n,node in enumerate( dpt.inorder_node_iter() ) :
node.label = n
def test_init() :
T = SuchTree( test_tree )
assert type(T) == SuchTree
def test_get_children() :
T = SuchTree( test_tree )
for node in dpt.inorder_node_iter() :
if not node.taxon :
left, right = [ n.label for n in node.child_nodes() ]
else :
left, right = -1, -1
L,R = T.get_children( node.label )
assert L == left
assert R == right
def test_get_distance_to_root() :
T = SuchTree( test_tree )
for leaf in dpt.leaf_node_iter() :
assert T.get_distance_to_root( leaf.label ) == approx( leaf.distance_from_root(), 0.001 )
def test_distance() :
T = SuchTree( test_tree )
for line in open( 'SuchTree/tests/test.matrix' ) :
a,b,d1 = line.split()
d1 = float(d1)
d2 = T.distance( a, b )
assert d1 == approx( d2, 0.001 )
def test_distances() :
T = SuchTree( test_tree )
ids = []
d1 = []
for line in open( 'SuchTree/tests/test.matrix' ) :
a,b,d = line.split()
d1.append( float(d) )
A = T.leafs[a]
B = T.leafs[b]
ids.append( (A,B) )
result = T.distances( numpy.array( ids, dtype=numpy.int64 ) )
for D1,D2 in zip( d1,result ) :
assert D1 == approx( D2, 0.001 )
def test_distances_by_name() :
T = SuchTree( test_tree )
ids = []
d1 = []
for line in open( 'SuchTree/tests/test.matrix' ) :
a,b,d = line.split()
d1.append( float(d) )
ids.append( (a,b) )
result = T.distances_by_name( ids )
for D1,D2 in zip( d1,result ) :
assert D1 == approx( D2, 0.001 )
def test_get_leafs() :
T = SuchTree( test_tree )
assert set( list(T.get_leafs( T.root )) ) == set( T.leafs.values() )
def test_hierarchy() :
T = SuchTree( test_tree )
all_leafs = set( T.get_leafs( T.root ) )
for i in T.get_internal_nodes() :
some_leafs = set( T.get_leafs( i ) )
assert some_leafs <= all_leafs
def test_adjacency() :
T = SuchTree( test_tree )
aj, leaf_ids = T.adjacency( T.root ).values()
leaf_ids = list( leaf_ids )
for node in chain(T.leafs.values(), list(T.get_internal_nodes() )):
if node == T.root : continue # skip the root node
parent = T.get_parent( node )
distance = T.distance( node, parent )
i,j = leaf_ids.index( node ), leaf_ids.index( parent )
        print( node, parent, ':', i, j, ' :: ', aj[i,j], distance )
        assert aj[i,j] == approx( distance, 0.001 )
def test_get_descendant_nodes() :
T = SuchTree( test_tree )
A = set( T.get_descendant_nodes( T.root ) )
B = set( T.get_leafs( T.root ) )
C = set( T.get_internal_nodes() )
assert A == B | C
def test_is_ancestor() :
T = SuchTree( test_tree )
assert T.length - 1 == sum( map( lambda x : T.is_ancestor( T.root, x ),
T.get_descendant_nodes( T.root ) ) )
assert 1 - T.length == sum( map( lambda x : T.is_ancestor( x, T.root ),
T.get_descendant_nodes( T.root ) ) )
@pytest.mark.skipif(not has_networkx, reason="networkx not installed")
def test_networkx() :
T = SuchTree( test_tree )
g = networkx.graph.Graph()
g.add_nodes_from( T.nodes_data() )
g.add_edges_from( T.edges_data() )
assert set( g.nodes() ) == set( T.get_nodes() )
| bsd-3-clause | -5,152,190,606,593,604,000 | 30.4 | 97 | 0.573779 | false |
MERegistro/meregistro | meregistro/apps/seguridad/models/Rol.py | 1 | 1354 | # -*- coding: UTF-8 -*-
from django.db import models
from apps.seguridad.models import Credencial, TipoAmbito
class Rol(models.Model):
ROL_ADMIN_NACIONAL = 'AdminNacional'
ROL_ADMIN_SEGURIDAD = 'AdminSeguridad'
ROL_REFERENTE_JURISDICCIONAL = 'ReferenteJurisdiccional'
ROL_REFERENTE_INSTITUCIONAL = 'ReferenteInstitucional'
nombre = models.CharField(max_length=40)
descripcion = models.CharField(max_length=255)
credenciales = models.ManyToManyField(Credencial, related_name='roles')
tipos_ambito_asignable = models.ManyToManyField(TipoAmbito, related_name='roles')
roles_asignables = models.ManyToManyField('self', related_name='roles_asignadores', symmetrical=False)
path = models.CharField(max_length=255)
padre = models.ForeignKey('self', null=True, blank=True)
class Meta:
app_label = 'seguridad'
def __unicode__(self):
return self.descripcion
    def save(self, *args, **kwargs):
        # NOTE: self.id must already be set for the computed path to be meaningful
        if self.padre is None:
            padre_path = '/'
        else:
            padre_path = self.padre.path
        self.path = padre_path + str(self.id) + '/'
        models.Model.save(self, *args, **kwargs)
def asigna(self, rol):
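        # True when ``rol`` is among the roles this role is allowed to assign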
return bool(self.roles_asignables.filter(id=rol.id))
def asignableAAmbito(self, ambito):
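        # True when this role can be assigned within an ambito of the given type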
return bool(self.tipos_ambito_asignable.filter(id=ambito.tipo.id))
| bsd-3-clause | 5,661,577,332,660,196,000 | 31.238095 | 106 | 0.679468 | false |
hasgeek/funnel | tests/unit/models/test_user_User.py | 1 | 15870 | from datetime import timedelta
import pytest
from coaster.utils import utcnow
import funnel.models as models
def test_user(db_session):
"""Test for creation of user object from User model."""
user = models.User(username='hrun', fullname="Hrun the Barbarian")
db_session.add(user)
db_session.commit()
hrun = models.User.get(username='hrun')
assert isinstance(hrun, models.User)
assert user.username == 'hrun'
assert user.fullname == "Hrun the Barbarian"
assert user.state.ACTIVE
assert hrun == user
def test_user_pickername(user_twoflower, user_rincewind):
"""Test to verify pickername contains fullname and optional username."""
assert user_twoflower.pickername == "Twoflower"
assert user_rincewind.pickername == "Rincewind (@rincewind)"
def test_user_is_profile_complete(db_session, user_twoflower, user_rincewind):
"""
Test to check if user profile is complete.
    That is, fullname, username, and email are present.
"""
# Both fixtures start out incomplete
assert user_twoflower.is_profile_complete() is False
assert user_rincewind.is_profile_complete() is False
# Rincewind claims an email address, but it is not verified
db_session.add(
models.UserEmailClaim(user=user_rincewind, email='[email protected]')
)
db_session.commit()
assert user_rincewind.is_profile_complete() is False
# Rincewind's profile is complete when a verified email address is added
user_rincewind.add_email('[email protected]')
assert user_rincewind.is_profile_complete() is True
# Email is insufficient for Twoflower
user_twoflower.add_email('[email protected]')
assert user_twoflower.is_profile_complete() is False
# Twoflower also needs a username
user_twoflower.username = 'twoflower'
assert user_twoflower.is_profile_complete() is True
def test_user_organization_owned(user_ridcully, org_uu):
"""Test for verifying organizations a user is a owner of."""
assert list(user_ridcully.organizations_as_owner) == [org_uu]
def test_user_email(db_session, user_twoflower):
"""Add and retrieve an email address."""
assert user_twoflower.email == ''
useremail = user_twoflower.add_email('[email protected]')
assert isinstance(useremail, models.UserEmail)
db_session.commit()
assert useremail.primary is False
# When there is no primary, accessing the `email` property will promote existing
assert user_twoflower.email == useremail
assert useremail.primary is True
useremail2 = user_twoflower.add_email( # type: ignore[unreachable]
'[email protected]', primary=True
)
db_session.commit()
# The primary has changed
assert user_twoflower.email == useremail2
assert useremail.primary is False
assert useremail2.primary is True
def test_user_del_email(db_session, user_twoflower):
"""Delete an email address from a user's account."""
assert user_twoflower.primary_email is None
assert len(user_twoflower.emails) == 0
user_twoflower.add_email('[email protected]', primary=True)
user_twoflower.add_email('[email protected]')
user_twoflower.add_email('[email protected]')
db_session.commit()
assert len(user_twoflower.emails) == 3
assert user_twoflower.primary_email is not None
assert str(user_twoflower.primary_email) == '[email protected]' # type: ignore[unreachable]
assert {str(e) for e in user_twoflower.emails} == {
'[email protected]',
'[email protected]',
'[email protected]',
}
# Delete a non-primary email address. It will be removed
user_twoflower.del_email('[email protected]')
db_session.commit()
assert len(user_twoflower.emails) == 2
assert user_twoflower.primary_email is not None
assert str(user_twoflower.primary_email) == '[email protected]'
assert {str(e) for e in user_twoflower.emails} == {
'[email protected]',
'[email protected]',
}
# Delete a primary email address. The next available address will be made primary
user_twoflower.del_email('[email protected]')
db_session.commit()
assert len(user_twoflower.emails) == 1
assert user_twoflower.primary_email is not None
assert str(user_twoflower.primary_email) == '[email protected]'
assert {str(e) for e in user_twoflower.emails} == {
'[email protected]',
}
# Delete last remaining email address. Primary will be removed
user_twoflower.del_email('[email protected]')
db_session.commit()
assert len(user_twoflower.emails) == 0
assert user_twoflower.primary_email is None
assert user_twoflower.email == ''
def test_user_phone(db_session, user_twoflower):
"""Test to retrieve UserPhone property phone."""
assert user_twoflower.phone == ''
userphone = user_twoflower.add_phone('+12345678900')
assert isinstance(userphone, models.UserPhone)
db_session.commit()
assert userphone.primary is False
# When there is no primary, accessing the `phone` property will promote existing
assert user_twoflower.phone == userphone
assert userphone.primary is True
userphone2 = user_twoflower.add_phone( # type: ignore[unreachable]
'+12345678901', primary=True
)
db_session.commit()
# The primary has changed
assert user_twoflower.phone == userphone2
assert userphone.primary is False
assert userphone2.primary is True
def test_user_del_phone(db_session, user_twoflower):
"""Delete an phone address from a user's account."""
assert user_twoflower.primary_phone is None
assert len(user_twoflower.phones) == 0
user_twoflower.add_phone('+12345678900', primary=True)
user_twoflower.add_phone('+12345678901')
user_twoflower.add_phone('+12345678902')
db_session.commit()
assert len(user_twoflower.phones) == 3
assert user_twoflower.primary_phone is not None
assert str(user_twoflower.primary_phone) == '+12345678900' # type: ignore[unreachable]
assert {str(e) for e in user_twoflower.phones} == {
'+12345678900',
'+12345678901',
'+12345678902',
}
# Delete a non-primary phone address. It will be removed
user_twoflower.del_phone('+12345678902')
db_session.commit()
assert len(user_twoflower.phones) == 2
assert user_twoflower.primary_phone is not None
assert str(user_twoflower.primary_phone) == '+12345678900'
assert {str(e) for e in user_twoflower.phones} == {
'+12345678900',
'+12345678901',
}
    # Delete a primary phone number. The next available number will be made primary
user_twoflower.del_phone('+12345678900')
db_session.commit()
assert len(user_twoflower.phones) == 1
assert user_twoflower.primary_phone is not None
assert str(user_twoflower.primary_phone) == '+12345678901'
assert {str(e) for e in user_twoflower.phones} == {
'+12345678901',
}
    # Delete the last remaining phone number. Primary will be removed
user_twoflower.del_phone('+12345678901')
db_session.commit()
assert len(user_twoflower.phones) == 0
assert user_twoflower.primary_phone is None
assert user_twoflower.phone == ''
def test_user_autocomplete(
db_session, user_twoflower, user_rincewind, user_dibbler, user_librarian
):
"""
Test for User autocomplete method.
    Queries valid users defined in fixtures, as well as input that should
    return no matches.
"""
user_rincewind.add_email('[email protected]')
db_session.commit()
# A typical lookup with part of someone's name will find matches
assert models.User.autocomplete('Dib') == [user_dibbler]
# Spurious characters like `[` and `]` are ignored
assert models.User.autocomplete('[tw]') == [user_twoflower]
# Multiple users with the same starting character(s), sorted alphabetically
# Both users with and without usernames are found
assert user_librarian.fullname.startswith('The') # The `The` prefix is tested here
assert user_twoflower.username is None
assert user_librarian.username is not None
assert models.User.autocomplete('t') == [user_librarian, user_twoflower]
# Lookup by email address
assert models.User.autocomplete('[email protected]') == [user_rincewind]
# More spurious characters
assert models.User.autocomplete('[]twofl') == [user_twoflower]
# Empty searches
assert models.User.autocomplete('@[') == []
assert models.User.autocomplete('[[]]') == []
assert models.User.autocomplete('[%') == []
# TODO: Test for @username searches against external ids (requires fixtures)
@pytest.mark.parametrize('defercols', [False, True])
def test_user_all(
db_session,
user_twoflower,
user_rincewind,
user_ridcully,
user_dibbler,
user_death,
user_mort,
defercols,
):
"""Retrieve all users matching specified criteria."""
    # Some fixtures are not used directly below; the test verifies that they
    # won't show up in the query results unless specifically asked for
db_session.commit() # Commit required to generate UUID (userid/buid)
# A parameter is required
with pytest.raises(TypeError):
models.User.all()
with pytest.raises(TypeError):
models.User.all(defercols=True)
# Scenario 1: Lookup by buids only
assert set(
models.User.all(
buids=[user_twoflower.buid, user_rincewind.buid], defercols=defercols
)
) == {
user_twoflower,
user_rincewind,
}
# Scenario 2: lookup by buid or username
assert (
set(
models.User.all(
buids=[user_twoflower.buid, user_rincewind.buid],
usernames=[user_ridcully.username, user_dibbler.username],
defercols=defercols,
)
)
== {user_twoflower, user_rincewind, user_ridcully, user_dibbler}
)
# Scenario 3: lookup by usernames only
assert (
set(
models.User.all(
usernames=[user_ridcully.username, user_dibbler.username],
defercols=defercols,
)
)
== {user_ridcully, user_dibbler}
)
# Scenario 4: querying for a merged user buid
models.merge_users(user_death, user_rincewind)
db_session.commit()
assert set(
models.User.all(
buids=[user_twoflower.buid, user_rincewind.buid], defercols=defercols
)
) == {
user_twoflower,
user_death,
}
def test_user_add_email(db_session, user_rincewind):
"""Test to add email address for a user."""
# scenario 1: if primary flag is True and user has no existing email
email1 = '[email protected]'
useremail1 = user_rincewind.add_email(email1, primary=True)
db_session.commit()
assert user_rincewind.email == useremail1
assert useremail1.email == email1
assert useremail1.primary is True
# scenario 2: when primary flag is True but user has existing primary email
email2 = '[email protected]'
useremail2 = user_rincewind.add_email(email2, primary=True)
db_session.commit()
assert useremail2.email == email2
assert useremail2.primary is True
assert useremail1.primary is False
assert user_rincewind.email == useremail2 # type: ignore[unreachable]
    # scenario 3: when primary flag is True and the user already has that email
useremail3 = user_rincewind.add_email(email1, primary=True)
db_session.commit()
assert useremail3 == useremail1
assert useremail3.primary is True
assert useremail2.primary is False
def test_make_email_primary(user_rincewind):
"""Test to make an email primary for a user."""
email = '[email protected]'
useremail = user_rincewind.add_email(email)
assert useremail.email == email
assert useremail.primary is False
assert user_rincewind.primary_email is None
user_rincewind.primary_email = useremail
assert useremail.primary is True
def test_user_password(user_twoflower):
"""Test to set user password."""
# User account starts out with no password
assert user_twoflower.pw_hash is None
# User account can set a password
user_twoflower.password = 'test-password'
assert user_twoflower.password_is('test-password') is True
assert user_twoflower.password_is('wrong-password') is False
def test_user_password_has_expired(db_session, user_twoflower):
"""Test to check if password for a user has expired."""
assert user_twoflower.pw_hash is None
user_twoflower.password = 'test-password'
db_session.commit() # Required to set pw_expires_at and pw_set_at
assert user_twoflower.pw_expires_at > user_twoflower.pw_set_at
assert user_twoflower.password_has_expired() is False
user_twoflower.pw_expires_at = utcnow() - timedelta(seconds=1)
assert user_twoflower.password_has_expired() is True
def test_password_hash_upgrade(user_twoflower):
"""Test for password hash upgrade."""
# pw_hash contains bcrypt.hash('password')
user_twoflower.pw_hash = (
'$2b$12$q/TiZH08kbgiUk2W0I99sOaW5hKQ1ETgJxoAv8TvV.5WxB3dYQINO'
)
assert user_twoflower.pw_hash.startswith('$2b$')
assert not user_twoflower.password_is('incorrect')
assert user_twoflower.pw_hash.startswith('$2b$')
assert not user_twoflower.password_is('incorrect', upgrade_hash=True)
assert user_twoflower.pw_hash.startswith('$2b$')
assert user_twoflower.password_is('password')
assert user_twoflower.pw_hash.startswith('$2b$')
assert user_twoflower.password_is('password', upgrade_hash=True)
# Transparent upgrade to Argon2 after a successful password validation
assert user_twoflower.pw_hash.startswith('$argon2id$')
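# The transparent upgrade above follows the standard passlib CryptContext
# pattern (assumed here, not shown in this file): verify_and_update()
# checks the legacy bcrypt hash and re-hashes with the preferred Argon2
# scheme in a single step.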
def test_password_not_truncated(user_twoflower):
"""Argon2 passwords are not truncated at up to 1000 characters."""
# Bcrypt passwords are truncated at 72 characters, making larger length limits
# pointless. Argon2 passwords are not truncated for a very large size. Passlib has
# a default max size of 4096 chars.
# https://passlib.readthedocs.io/en/stable/lib/passlib.exc.html#passlib.exc.PasswordSizeError
user_twoflower.password = '1' * 999 + 'a'
assert user_twoflower.password_is('1' * 999 + 'a')
assert not user_twoflower.password_is('1' * 999 + 'b')
def test_user_merged_user(db_session, user_death, user_rincewind):
"""Test for checking if user had a old id."""
db_session.commit()
assert user_death.state.ACTIVE
assert user_rincewind.state.ACTIVE
models.merge_users(user_death, user_rincewind)
assert user_death.state.ACTIVE
assert user_rincewind.state.MERGED
assert {o.uuid for o in user_death.oldids} == {user_rincewind.uuid}
def test_user_get(db_session, user_twoflower, user_rincewind, user_death):
"""Test for User's get method."""
# scenario 1: if both username and buid not passed
db_session.commit()
with pytest.raises(TypeError):
models.User.get()
# scenario 2: if buid is passed
lookup_by_buid = models.User.get(buid=user_twoflower.buid)
assert lookup_by_buid == user_twoflower
# scenario 3: if username is passed
lookup_by_username = models.User.get(username='rincewind')
assert lookup_by_username == user_rincewind
# scenario 4: if defercols is set to True
lookup_by_username = models.User.get(username='rincewind', defercols=True)
assert lookup_by_username == user_rincewind
# scenario 5: when user.state.MERGED
assert user_rincewind.state.ACTIVE
models.merge_users(user_death, user_rincewind)
assert user_rincewind.state.MERGED
lookup_by_buid = models.User.get(buid=user_rincewind.buid)
assert lookup_by_buid == user_death
| agpl-3.0 | 4,014,777,160,011,535,000 | 35.150342 | 100 | 0.692691 | false |
jtratner/sudoku-fuzzer-udacity | fuzz_solver.py | 1 | 9025 |
# Use a different solved board to generate different tests.
valid = [[5,3,4,6,7,8,9,1,2],
[6,7,2,1,9,5,3,4,8],
[1,9,8,3,4,2,5,6,7],
[8,5,9,7,6,1,4,2,3],
[4,2,6,8,5,3,7,9,1],
[7,1,3,9,2,4,8,5,6],
[9,6,1,5,3,7,2,8,4],
[2,8,7,4,1,9,6,3,5],
[3,4,5,2,8,6,1,7,9]]
# test cases with no solution
no_soln1 = [
[1,2,3,4,5,6,7,8,0],
[0,0,0,0,0,0,0,0,9],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
no_soln2 = [
[1, 2, 3, 0, 0, 0, 0, 0, 0],
[4, 5, 0, 0, 0, 0, 6, 0, 0],
[0, 0, 0, 6, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]
import random, time
squares = [(i,j) for i in range(9) for j in range(9)]
units = dict(((i,j), [[(i,k) for k in range(9)]] +
[[(k,j) for k in range(9)]] +
[[(k,l) for k in range(i/3*3, i/3*3+3) for l in range(j/3*3, j/3*3+3)]])
for (i,j) in squares)
peers = dict((s, set(sum(units[s], [])) - set([s]))
for s in squares)
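# For example, units[(0, 0)] holds three lists -- row 0, column 0, and the
# top-left 3x3 box -- so every square belongs to exactly three units, and
# peers[(0, 0)] is the set of the 20 other squares sharing a unit with it.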
def erase(board, i, j, d):
if d not in board[i][j]:
return board
board[i][j] = board[i][j].replace(d, '')
if len(board[i][j]) == 0:
return False # contradiction
elif len(board[i][j]) == 1:
d2 = board[i][j]
if not all(erase(board, i1, j1, d2) for (i1, j1) in peers[i,j]):
return False
for unit in units[(i,j)]:
numplaces = [(i1, j1) for (i1, j1) in unit if d in board[i1][j1]]
if len(numplaces) == 0:
return False
elif len(numplaces) == 1:
if not assign(board, numplaces[0][0], numplaces[0][1], d):
return False
return board
def assign(board, i, j, d):
if all(erase(board, i, j, d2) for d2 in board[i][j].replace(d, '')):
return board
else:
return False
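# Illustrative walk-through (not in the original): assign(board, 0, 0, '5')
# erases every candidate except '5' from board[0][0]; each erase() call
# propagates, deleting '5' from all 20 peers and recursing whenever a cell
# drops to a single candidate or a unit has only one place left for a digit.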
def random_constr_prop_sudoku(N):
"""
Generates random sudoku puzzles by filling in cells while checking for
constraint violations. If a constraint is violated, random sudoku is called again.
"""
board = [['123456789' for _ in range(9)] for _ in range(9)]
cells = [s for s in squares]
random.shuffle(cells)
for cell in cells:
i,j = cell
if not assign(board, i, j, random.choice(board[i][j])):
break
ds = [board[i][j] for i in range(9) for j in range(9) if len(board[i][j]) == 1]
if len(ds) >= N and len(set(ds)) >= 8:
return [map(lambda v: int(v) if len(v) == 1 else 0, row) for row in board]
return random_constr_prop_sudoku(N)
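# Minimal usage sketch (assumption, not part of the original module):
#
# >>> puzzle = random_constr_prop_sudoku(20)
# >>> sum(cell != 0 for row in puzzle for cell in row) >= 20
# True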
## Contributed by David Froese
def random_froese_puzzle(check_sudoku, N):
"""
Generates random sudoku puzzles by randomly filling entries in the grid and
then calling check sudoku. Assumes check sudoku is running correctly.
"""
nums = range(1, 10)
grid = [[0 for _ in xrange(9)] for _ in xrange(9)] # empty grid
for _ in xrange(N):
i, j = random.randrange(0, 9), random.randrange(0, 9)
grid[i][j] = random.choice(nums)
if check_sudoku(grid) in [None, False]:
grid[i][j] = 0
return grid
return random_froese_puzzle(check_sudoku, N)
def check_random_solns(random_puzzle, solve_sudoku, check_sudoku,
iters, solve_fraction = 0.9):
random.seed()
solved = 0
num_nz = 0
range_mutates = range(17, 20)
for i in range(iters):
# Generate a valid random board
mutates = random.choice(range_mutates)
board = random_puzzle(mutates)
board_nz = 81 - sum(row.count(0) for row in board)
bd = ''.join(''.join(map(str, row)) for row in board)
        # Try to solve it; only a fraction of the random boards is required
        # to succeed (see the assertion below)
start = time.clock()
if solve_sudoku(board) not in [None, False]:
num_nz += board_nz
solved += 1
t = time.clock() - start
if t > 5.0:
print "board[%d] %s with %d non-zeros took (%.2f seconds)" % (i, bd, num_nz, t)
    assert solved > (solve_fraction * iters), "Your solver solved only %d / %d random boards (required more than %.1f%%)." % (solved, iters, 100 * solve_fraction)
print "Your solver completed %d / %d random boards with average #non-zeros=%d generated by %s! Congrats!" % (solved, iters, num_nz/solved, repr(random_puzzle))
return True
# Random strategy 2: Take a valid board and perform transformations
# that do not change validity
# Transposing a grid maintains validity
def transpose(grid):
return map(list, zip(*grid))
# Permutes the row/column with another row/column in the same band
# (e.g. row 6 with one of rows 6-8, row 0 with one of rows 0-2, etc.)
def permute(grid, i, row=True):
if not row: grid = transpose(grid)
j = random.choice(range(i/3*3, i/3*3+3))
grid[j], grid[i] = grid[i], grid[j]
return grid if row else transpose(grid)
# Permutes the row/column blocks (e.g. rows 0-2 with rows 6-8)
def permute_block(grid, i, row=True):
if not row: grid = transpose(grid)
bi = i*3
bj = random.choice(range(3))*3
for offset in range(3):
grid[bi+offset], grid[bj+offset] = grid[bj+offset], grid[bi+offset]
return grid if row else transpose(grid)
# Reflects the board along the horizontal or vertical axis
def reflect(grid, horizontal=True):
if not horizontal: grid = transpose(grid)
for i in range(9): grid[i].reverse()
return grid if horizontal else transpose(grid)
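# These three operations -- transposition, row/column swaps within a band,
# and reflections -- are classic validity-preserving sudoku symmetries:
# each maps a valid solved grid to another valid solved grid, which is why
# random_mutation_sudoku below can scramble `valid` without re-solving it.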
def random_mutation_sudoku(soln, iters=1000):
# generate a valid grid
grid = copy(soln)
choices = [['reflect', horizontal] for horizontal in (True, False)] + [['transpose']] + [['permute', i, row] for row in (True, False) for i in range(9)] + [['permute_block', bi, row] for row in (True, False) for bi in range(3)]
for i in range(iters):
choice = random.choice(choices)
if choice[0] == 'reflect': grid = reflect(grid, *choice[1:])
if choice[0] == 'transpose': grid = transpose(grid)
if choice[0] == 'permute': grid = permute(grid, *choice[1:])
if choice[0] == 'permute_block': grid = permute_block(grid, *choice[1:])
return grid
# Make a copy of a grid so we can modify it without touching the original
def copy(grid):
return map (lambda x: x[:], grid)
# Assert that a solution remains solvable after mutates-many moves are undone.
# Run iters-many tests of this nature.
def fuzz_solution(soln, mutates, iters, check_sudoku, solve_sudoku):
""" fuzzes a given *valid* solution """
random.seed()
for i in range(iters):
board = copy(soln)
# Undo a set of moves. This should leave the board solvable
for mutate in range(mutates):
x = random.randrange(0,9)
y = random.randrange(0,9)
# Might already be 0 in which case we didn't undo "mutates" moves
# but still generated a reasonable test case
board[x][y] = 0
# If this board is invalid the test harness screwed up
assert check_sudoku(board), "Input checker failed with input {board}".format(board=board)
# If it's unsolvable the solver screwed up
assert solve_sudoku(board), "Solver failed to solve board {board}".format(board=board)
return True
def check_no_valid_solns(solve_sudoku, tests=None):
""" runs solver against cases with no solution"""
tests = tests or [no_soln1, no_soln2]
for test in tests:
res = solve_sudoku(test)
        assert res is False, """Solver failed to return False for a valid but unsolvable sudoku.
        Returned {res} instead. Input was: {test}""".format(test=test, res=res)
return True
def fuzz_solver(check_sudoku, solve_sudoku, mutates=10, iters=10, soln=None, tests=None):
soln = soln or valid
# Check that some boards have no valid solutions
if not check_no_valid_solns(solve_sudoku, tests):
return False
# Some boards should have solutions
if not fuzz_solution(soln, mutates, iters, check_sudoku, solve_sudoku):
return False
# Check for solutions exist for majority of random puzzles
# 1. Constraint propagated random board
if not check_random_solns(random_constr_prop_sudoku, solve_sudoku, check_sudoku, iters):
return False
# 2. Random boards accepted by check_sudoku
# (proposed by David Froese)
def random_froese_sudoku(N): return random_froese_puzzle(check_sudoku, N)
if not check_random_solns(random_froese_sudoku, solve_sudoku, check_sudoku, iters):
return False
# 3. Random boards created by mutating a valid board must have solutions
if not all(fuzz_solution(random_mutation_sudoku(soln), mutates, 1, check_sudoku, solve_sudoku) for _ in xrange(iters)):
return False
else:
print "Your solver completed %d randomly generated boards with %d mutations! Congrats!" % (iters, mutates)
return True
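# Hypothetical driver (the module `my_solver` and its two functions are
# placeholders for the solver under test, not part of this file):
#
#     if __name__ == '__main__':
#         from my_solver import check_sudoku, solve_sudoku
#         fuzz_solver(check_sudoku, solve_sudoku, mutates=15, iters=20)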
| mit | -7,949,316,682,714,491,000 | 39.470852 | 231 | 0.601994 | false |
osspeak/osspeak | recognition/commands/monitor.py | 1 | 3535 |
import threading
import collections
import log
import copy
import asyncio
import settings
import clargs
from recognition.actions.library import pywindow
from recognition.commands import loader
from recognition.actions import perform
from communication import topics, pubsub
import time
def create_message_subscriptions(msg_list, command_module_controller):
pubsub.subscribe(topics.RELOAD_COMMAND_MODULE_FILES, lambda: set_message(msg_list, topics.RELOAD_COMMAND_MODULE_FILES))
pubsub.subscribe(topics.RELOAD_GRAMMAR, lambda: set_message(msg_list, topics.RELOAD_GRAMMAR))
pubsub.subscribe(topics.PERFORM_COMMANDS,
lambda grammar_id, words: perform_commands(command_module_controller, grammar_id, words))
def start_watching_user_state():
msg_list = [None]
command_module_file_pattern = settings.settings['file_pattern']
module_loader = loader.StaticFileCommandModuleLoader(settings.settings['command_directory'], command_module_file_pattern)
command_module_controller = loader.CommandModuleController(module_loader)
command_module_controller.command_modules = command_module_controller.initialize_command_modules()
engine_status_history = collections.deque([], 10)
create_message_subscriptions(msg_list, command_module_controller)
fut = watch_user_system_state(msg_list, command_module_controller)
asyncio.ensure_future(fut)
async def watch_user_system_state(msg_list, command_module_controller):
from recognition.actions.library.stdlib import namespace, engine
previous_window = None
previous_state = None
previous_engine_settings = copy.copy(engine.settings)
initial_load_done = False
while True:
current_state = copy.copy(namespace['state'])
current_window = pywindow.foreground_window().title.lower()
current_engine_settings = copy.copy(engine.settings)
is_different_window = current_window != previous_window
is_different_state = current_state != previous_state
is_different_engine_settings = current_engine_settings != previous_engine_settings
msg = msg_list[0]
if is_different_window or is_different_state or msg:
msg_list[0] = None
new_active_modules = command_module_controller.get_active_modules(current_window)
reload_files = msg == topics.RELOAD_COMMAND_MODULE_FILES
if new_active_modules != command_module_controller.active_command_modules or reload_files:
                initialize_modules = not initial_load_done or reload_files
                command_module_controller.load_modules(current_window, initialize_modules=initialize_modules)
initial_load_done = True
elif msg == topics.RELOAD_GRAMMAR:
raise NotImplementedError
command_module_controller.load_and_send_grammar()
previous_window = current_window
previous_state = current_state
if is_different_engine_settings:
pubsub.publish(topics.SET_ENGINE_SETTINGS, current_engine_settings)
previous_engine_settings = current_engine_settings
await asyncio.sleep(1)
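# Polling note: the 1-second sleep bounds how quickly window/state changes
# are noticed; pubsub callbacks only set msg_list[0], and the message is
# consumed on the loop's next tick.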
def set_message(msg_list, msg):
msg_list[0] = msg
def perform_commands(command_module_controller: loader.CommandModuleController, grammar_id: str, words):
try:
grammar_context = command_module_controller.grammars[grammar_id]
except KeyError:
log.logger.warning(f'Grammar {grammar_id} no longer exists')
return
    perform.perform_commands(grammar_context, words)
| mit | -7,949,316,682,714,491,000 | 47.438356 | 125 | 0.722772 | false |
jamielennox/django-openstack-auth-websso | plugin.py | 1 | 2014 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient.v3 import client as v3_client
from openstack_auth.plugin import base
from openstack_auth import exceptions
from openstack_auth import utils
__all__ = ['FederatedTokenPlugin']
class FederatedTokenPlugin(base.BasePlugin):
"""Authenticate against keystone with an existing token."""
def get_plugin(self, auth_url=None, token=None, project_id=None,
**kwargs):
if not all((auth_url, token)):
return None
if utils.get_keystone_version() >= 3:
return v3_auth.Token(auth_url=auth_url,
token=token,
project_id=project_id,
reauthenticate=False)
else:
return v2_auth.Token(auth_url=auth_url,
token=token,
tenant_id=project_id,
reauthenticate=False)
def list_projects(self, session, auth_plugin, auth_ref=None):
if utils.get_keystone_version() < 3:
msg = _('Cannot list federated tokens from v2 API')
raise exceptions.KeystoneAuthException(msg)
client = v3_client.Client(session=session, auth=auth_plugin)
return client.federation.projects.list()
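    # Rough usage sketch (assumed wiring, not part of this module): horizon
    # obtains an unscoped token from the web-SSO redirect and hands it in:
    #
    #     plugin = FederatedTokenPlugin().get_plugin(auth_url=auth_url,
    #                                                token=unscoped_token)
    #
    # where auth_url and unscoped_token are supplied by the SSO flow.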
| apache-2.0 | -5,599,959,279,583,941,000 | 37.730769 | 75 | 0.644985 | false |
0ps/wfuzz | src/wfuzz/mixins.py | 1 | 1871 |
from .plugin_api.urlutils import parse_url
from .exception import FuzzExceptBadInstall
# python 2 and 3
import sys
if sys.version_info >= (3, 0):
from urllib.parse import urljoin
else:
from urlparse import urljoin
class FuzzRequestSoupMixing(object):
def get_soup(self):
try:
from bs4 import BeautifulSoup
except ImportError:
raise FuzzExceptBadInstall("You need to install beautifulsoup4 first!")
soup = BeautifulSoup(self.content, 'html.parser')
return soup
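    # Example (assumed usage): links = result.get_soup().select('a[href]')
    # -- any BeautifulSoup API is available on the parsed response body.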
class FuzzRequestUrlMixing(object):
# urlparse functions
@property
def urlparse(self):
return parse_url(self.url)
@property
def urlp(self):
return parse_url(self.url)
@property
def pstrip(self):
return self.to_cache_key()
@property
def is_path(self):
if self.code == 200 and self.url[-1] == '/':
return True
elif self.code >= 300 and self.code < 400:
if "Location" in self.headers.response and self.headers.response["Location"][-1] == '/':
return True
elif self.code == 401:
if self.url[-1] == '/':
return True
return False
@property
def recursive_url(self):
if self.code >= 300 and self.code < 400 and "Location" in self.headers.response:
new_url = self.headers.response["Location"]
if not new_url[-1] == '/':
new_url += "/"
# taking into consideration redirections to /xxx/ without full URL
new_url = urljoin(self.url, new_url)
elif self.code == 401 or self.code == 200:
new_url = self.url
if not self.url[-1] == '/':
new_url = "/"
else:
raise Exception("Error generating recursive url")
return new_url + "FUZZ"
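    # Example (illustrative): a 301 for http://host/dir with header
    # "Location: /dir/" yields the recursive payload "http://host/dir/FUZZ".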
| gpl-2.0 | 6,004,915,391,877,477,000 | 27.348485 | 100 | 0.5783 | false |
google-research/google-research | summae/human_and_extractive.py | 1 | 8614 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Human and extractive baseline evaluation.
human_and_extractive \
--data_dir=$ROCSTORIES_DATA \
--eval_subset=test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from rouge import rouge_scorer
from rouge import scoring
from summae import p2s_eval
from summae import util
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '.', 'Data directory.')
flags.DEFINE_string('eval_subset', 'test',
'which subset (valid/test) to eval/decode.')
flags.DEFINE_string('output_dir', '/tmp/12342',
'local directory to save extractive oracle')
flags.DEFINE_string('vocab_file', '',
'Subword vocab file.') # for detok first sentence
my_rouge_scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'],
use_stemmer=True)
def get_extracts(s):
# get 5 sentences as the extractive baselines
sents = s.feature_lists.feature_list['untokenized_sentences'].feature
assert len(sents) == 5
return tuple([sents[i].bytes_list.value[0] for i in range(5)])
def human_ave(summ_list):
"""Average pairwise rouge between two human summaries."""
agg = scoring.BootstrapAggregator()
for s1_id, s1 in enumerate(summ_list):
for s2_id, s2 in enumerate(summ_list):
if s1_id >= s2_id: # only compute for s1_id < s2_id
continue
s2_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(s2), p2s_eval.TRUNC_LEN)
s1_s2_trunc_score = my_rouge_scorer.score(s1, s2_trunc)
agg.add_scores(s1_s2_trunc_score)
agg_ave = agg.aggregate()
score_ave = {
rouge_type: agg_ave[rouge_type].mid for rouge_type in agg_ave # mid=mean
}
nwords_ave = np.mean([p2s_eval.count_words(s) for s in summ_list])
return (score_ave, nwords_ave)
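# With the three human summaries per story this averages ROUGE over the
# three unordered pairs, treating s1 as the reference and the truncated
# first sentence of s2 (at most TRUNC_LEN words) as the prediction.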
def human_max(summ_list):
"""Maximum pairwise rouge between any two human summaries."""
score_max = None
rouge_1r_trunc_max = 0
for s1_id, s1 in enumerate(summ_list):
for s2_id, s2 in enumerate(summ_list):
if s1_id >= s2_id:
continue
s2_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(s2), p2s_eval.TRUNC_LEN)
s1_s2_trunc_score = my_rouge_scorer.score(s1, s2_trunc)
if s1_s2_trunc_score['rouge1'].recall >= rouge_1r_trunc_max:
score_max = s1_s2_trunc_score
rouge_1r_trunc_max = s1_s2_trunc_score['rouge1'].recall
nwords_max = np.max([p2s_eval.count_words(s) for s in summ_list])
return (score_max, nwords_max)
def extract_ave(e, summ_list):
"""Average rouge between ith sentence and human summaries."""
agg = scoring.BootstrapAggregator()
e_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(e),
p2s_eval.TRUNC_LEN) # get_summary_first_sentence may not be necessary
for s in summ_list:
s_e_trunc_score = my_rouge_scorer.score(s, e_trunc)
agg.add_scores(s_e_trunc_score)
agg_ave = agg.aggregate()
score_ave = {
rouge_type: agg_ave[rouge_type].mid for rouge_type in agg_ave # mid=mean
}
nwords_e = p2s_eval.count_words(e)
return (score_ave, nwords_e)
def extract_oracle(extract_list, summ_list):
"""Choose sentence with maximum average rouge."""
# Choose sentence with maximum average rouge.
score_accum = []
for e in extract_list:
e_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(e),
p2s_eval.TRUNC_LEN) # get_summary_first_sentence may not be necessary
accum_rouge_1r_trunc = 0
for s in summ_list:
s_e_trunc_score = my_rouge_scorer.score(s, e_trunc)
      # for computing cumulative rouge
accum_rouge_1r_trunc += s_e_trunc_score['rouge1'].recall
score_accum.append(accum_rouge_1r_trunc)
e_id_o = np.argmax(score_accum)
e_o = extract_list[e_id_o]
# Compute average rouge for the oracle sentence
agg = scoring.BootstrapAggregator()
e_o_trunc = p2s_eval.get_summary_truncated(
p2s_eval.get_summary_first_sentence(e_o),
p2s_eval.TRUNC_LEN) # get_summary_first_sentence may not be necessary
for s in summ_list:
e_o_trunc_score = my_rouge_scorer.score(s, e_o_trunc)
agg.add_scores(e_o_trunc_score)
agg_o = agg.aggregate()
score_o = {
rouge_type: agg_o[rouge_type].mid for rouge_type in agg_o # mid=mean
}
nwords_o = p2s_eval.count_words(e_o)
return (score_o, nwords_o, e_o)
def print_agg_score(label, agg, nwords):
print(
'%s: \n\t rouge-1r-trunc20=%.3f \t rouge-Lr-trunc20=%.3f \t nwords=%.1f' %
(label, agg.aggregate()['rouge1'].mid.recall,
agg.aggregate()['rougeL'].mid.recall, np.mean(nwords)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.io.gfile.mkdir(FLAGS.output_dir)
data_file = os.path.join(
FLAGS.data_dir,
'rocstories_gt.' + six.ensure_str(FLAGS.eval_subset) + '.tfrecord')
seq_ex_list = util.get_seq_exs(data_file)
print('Input data %s' % data_file)
# Human summary baselines.
# We have 3 human summaries for each example, and
# 2 human performance variants:
# 1. 'a': average pairwise rouge between two summaries
# 2. 'm': maximum pairwise rouge between any two summaries
agg_human = {}
nwords_human = {}
for h in ['a', 'm']:
agg_human[h] = scoring.BootstrapAggregator()
nwords_human[h] = []
# Extractive baselines
# 1. '1','2','3','4','5': rouge between ith sentence and human summary
# 2. 'o': for each example, choose sentence with maximum average rouge
agg_extract = {}
nwords_extract = {}
for e in [str(x) for x in list(range(5))] + ['o']:
agg_extract[e] = scoring.BootstrapAggregator()
nwords_extract[e] = []
# human performance
sent2oracle = {}
for ex in seq_ex_list:
summ_list = p2s_eval.get_summaries(ex)
summ_list = [x.decode('utf-8') for x in summ_list]
# human eval
score, nwords = human_ave(summ_list)
agg_human['a'].add_scores(score)
nwords_human['a'].append(nwords)
score, nwords = human_max(summ_list)
agg_human['m'].add_scores(score)
nwords_human['m'].append(nwords)
# extractive eval
extract_list = get_extracts(ex)
extract_list = [x.decode('utf-8') for x in extract_list]
for e_id, e in enumerate(extract_list):
score, nwords = extract_ave(e, summ_list)
agg_extract[str(e_id)].add_scores(score)
nwords_extract[str(e_id)].append(nwords)
score, nwords, e_o = extract_oracle(extract_list, summ_list)
agg_extract['o'].add_scores(score)
nwords_extract['o'].append(nwords)
# save story and oracle sentence for future use
first = p2s_eval.get_first_sentence(ex)
if first in sent2oracle:
logging.fatal('duplicate first sentence: %s', str(first))
sent2oracle[first] = (' '.join(extract_list), e_o) # (story, oracle)
# write each example and the corresponding oracle to disk
tk, _ = util.get_tokenizer_with_special(FLAGS.vocab_file, [])
def detok(s):
return tk.decode(util.strip_after_eos(s))
keys_sorted = sorted(sent2oracle.keys(), key=detok)
out_file = os.path.join(
FLAGS.output_dir, 'rocstories_gt.' + six.ensure_str(FLAGS.eval_subset) +
'.firstsent2oracle.txt')
with tf.gfile.Open(out_file, 'w') as f:
for k in keys_sorted:
f.write('%s\n' % (sent2oracle[k][1]))
# print out rouge scores for human performance
print_agg_score('human average', agg_human['a'], nwords_human['a'])
print_agg_score('human max', agg_human['m'], nwords_human['m'])
for e_id in range(5):
print_agg_score('extractive baseline{}'.format(e_id),
agg_extract[str(e_id)], nwords_extract[str(e_id)])
print_agg_score('extractive oracle', agg_extract['o'], nwords_extract['o'])
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -9,174,223,925,574,483,000 | 34.01626 | 80 | 0.666241 | false |
jpvanhal/cloudsizzle | cloudsizzle/asi/server.py | 1 | 11139 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2010 CloudSizzle Team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import logging
from cloudsizzle.kp import SIBConnection, Triple, bnode, uri
from cloudsizzle import settings
from cloudsizzle.asi.importer import user_to_rdf
from cloudsizzle.asi.service import AbstractService, \
ASIServiceKnowledgeProcessor
from cloudsizzle.asi.asi_friends_connection import \
ASIFriendsConnection as ASIConnection
LOG = logging.getLogger('cloudsizzle.asi.server')
PEOPLE_BASE_URI = 'http://cos.alpha.sizl.org/people/'
class SessionStore(object):
def __init__(self):
self._sessions = {}
def __del__(self):
for ac in self._sessions.itervalues():
ac.close()
def __getitem__(self, key):
return self._sessions[key]
def login(self, username, password):
msg = "Logging in to ASI with username '{0}' and password '{1}'."
LOG.debug(msg.format(username, password))
ac = ASIConnection(
base_url=settings.ASI_BASE_URL,
app_name=settings.ASI_APP_NAME,
app_password=settings.ASI_APP_PASSWORD,
username=username,
password=password)
response = ac.open()
try:
user_id = response['entry']['user_id']
self._sessions[user_id] = ac
LOG.debug("Logged in with user_id {0}!".format(user_id))
return ac.session['entry']
except KeyError:
ac.close()
LOG.warning("Logging in failed: {0}".format(response['messages']))
return response
def logout(self, user_id):
LOG.debug('Logging out user with user_id {0}.'.format(user_id))
try:
ac = self._sessions[user_id]
ac.close()
del self._sessions[user_id]
except KeyError:
msg = 'Logging out failed: user {0} was not logged in.'
LOG.warning(msg.format(user_id))
class AbstractServer(AbstractService):
"""Abstract base class for building the server side of a request-response
type service.
AbstractServer subscribes to service requests and provides a method for
responding to these requests.
"""
def __init__(self, sc):
super(AbstractServer, self).__init__(sc)
@property
def query_triple(self):
return Triple(None, 'rdf:type', self.request_type)
def respond(self, request_id, response):
"""Respond to a service request.
request_id -- The ID of the service request.
response -- A dict containing the response data.
"""
response['rdf:type'] = self.response_type
response['response_to'] = uri(request_id)
LOG.debug(
'Responding to request {0} with {1}.'.format(request_id, response))
response_triples = []
for key, values in response.iteritems():
if not isinstance(values, list):
values = [values]
for value in values:
response_triples.append(Triple(bnode('id'), key, value))
self.sc.insert(response_triples)
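    # Illustrative sketch (assumed data): respond('req-1', {'user_id': '42'})
    # inserts three triples hanging off a fresh blank node -- rdf:type
    # self.response_type, response_to uri('req-1'), and user_id '42'.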
class LoginServer(AbstractServer):
def __init__(self, sc, session_store):
super(LoginServer, self).__init__(sc)
self.session_store = session_store
@property
def name(self):
return 'Login'
def process(self, id_, data):
response = self.session_store.login(data['username'],
data['password'])
self.respond(id_, response)
class LogoutServer(AbstractServer):
def __init__(self, sc, session_store):
super(LogoutServer, self).__init__(sc)
self.session_store = session_store
@property
def name(self):
return 'Logout'
def process(self, id_, data):
self.session_store.logout(data['user_id'])
class RegisterServer(AbstractServer):
def __init__(self, sc):
super(RegisterServer, self).__init__(sc)
@property
def name(self):
return 'Register'
def process(self, id_, data):
with ASIConnection(
base_url=settings.ASI_BASE_URL,
app_name=settings.ASI_APP_NAME,
app_password=settings.ASI_APP_PASSWORD) as ac:
user_info = ac.create_user(
username=data['username'],
password=data['password'],
email=data['email'])
if 'messages' not in user_info:
# Copy user info from ASI to SIB.
triples = user_to_rdf(user_info)
self.sc.insert(triples)
user_id = user_info['id']
response = {'user_id': user_id}
else:
messages = user_info['messages']
response = {'messages': messages}
self.respond(id_, response)
class RejectFriendRequestServer(AbstractServer):
def __init__(self, sc, session_store):
super(RejectFriendRequestServer, self).__init__(sc)
self.session_store = session_store
@property
def name(self):
return 'RejectFriendRequest'
def process(self, id_, data):
user_id = str(data['user_id'])
friend_id = str(data['friend_id'])
try:
ac = self.session_store[user_id]
except KeyError, e:
print e
            response = {'messages': 'not logged in to ASI'}
else:
result = ac.reject_friend_request(friend_id)
user_uri = '%sID#%s' % (PEOPLE_BASE_URI, user_id)
friend_uri = '%sID#%s' % (PEOPLE_BASE_URI, friend_id)
# Remove from my view
remove_triple = Triple(
user_uri,
uri('http://cos.alpha.sizl.org/people#PendingFriend'),
friend_uri)
self.sc.remove(remove_triple)
response = {'result': str(result)}
self.respond(id_, response)
class RemoveFriendsServer(AbstractServer):
def __init__(self, sc, session_store):
super(RemoveFriendsServer, self).__init__(sc)
self.session_store = session_store
@property
def name(self):
return 'RemoveFriends'
def process(self, id_, data):
user_id = str(data['user_id'])
friend_id = str(data['friend_id'])
try:
ac = self.session_store[user_id]
except KeyError, e:
print e
            response = {'messages': 'not logged in to ASI'}
else:
ac.remove_friend(friend_id)
user_uri = '%sID#%s' % (PEOPLE_BASE_URI, user_id)
friend_uri = '%sID#%s' % (PEOPLE_BASE_URI, friend_id)
# Remove from my view
remove_triple1 = Triple(
user_uri,
uri('http://cos.alpha.sizl.org/people#Friend'),
friend_uri)
# Remove from my friend's view
remove_triple2 = Triple(
friend_uri,
uri('http://cos.alpha.sizl.org/people#Friend'),
user_uri)
result = self.sc.remove([remove_triple1, remove_triple2])
response = {'result': str(result)}
self.respond(id_, response)
class AddFriendsServer(AbstractServer):
def __init__(self, sc, session_store):
super(AddFriendsServer, self).__init__(sc)
self.session_store = session_store
@property
def name(self):
return 'AddFriends'
def process(self, id_, data):
user_id = str(data['user_id'])
friend_id = str(data['friend_id'])
try:
ac = self.session_store[user_id]
except KeyError, e:
print e
            response = {'messages': 'not logged in to ASI'}
else:
pending_friends = ac.get_pending_friend_requests()
my_pending_friend_list = []
try:
for pending_friend in pending_friends['entry']:
my_pending_friend_list.append(pending_friend['id'])
except KeyError, e:
print e
result = ac.add_friend(friend_id)
response = {'result': str(result)}
if friend_id in my_pending_friend_list:
user_uri = '%sID#%s' % (PEOPLE_BASE_URI, user_id)
friend_uri = '%sID#%s' % (PEOPLE_BASE_URI, friend_id)
# Remove from my view
remove_triple = Triple(
user_uri,
uri('http://cos.alpha.sizl.org/people#PendingFriend'),
friend_uri)
self.sc.remove(remove_triple)
# Add to friend's view
insert_triple1 = Triple(
friend_uri,
uri('http://cos.alpha.sizl.org/people#Friend'),
user_uri)
# Add to my view
insert_triple2 = Triple(
user_uri,
uri('http://cos.alpha.sizl.org/people#Friend'),
friend_uri)
self.sc.insert([insert_triple1, insert_triple2])
else:
user_uri = '%sID#%s' % (PEOPLE_BASE_URI, user_id)
friend_uri = '%sID#%s' % (PEOPLE_BASE_URI, friend_id)
# Add to friend's view
insert_triple = Triple(
friend_uri,
uri('http://cos.alpha.sizl.org/people#PendingFriend'),
user_uri)
self.sc.insert(insert_triple)
self.respond(id_, response)
def main():
session_store = SessionStore()
with SIBConnection('ASI service server', method='preconfigured') as sc:
services = (
LoginServer(sc, session_store),
LogoutServer(sc, session_store),
RegisterServer(sc),
AddFriendsServer(sc, session_store),
RemoveFriendsServer(sc, session_store),
RejectFriendRequestServer(sc, session_store),
)
asi_server_kp = ASIServiceKnowledgeProcessor(services)
asi_server_kp.start()
try:
raw_input('Press enter to stop.\n')
finally:
asi_server_kp.stop()
if __name__ == '__main__':
main()
| mit | -7,420,965,687,537,019,000 | 30.735043 | 79 | 0.573301 | false |
diplomacy/research | diplomacy_research/__init__.py | 1 | 1696 |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Diplomacy Research """
# Setting up root logger
import os
import logging
import sys
# Adding path to proto/ dir
sys.path.append(os.path.join(os.path.dirname(__file__), 'proto'))
LOGGING_LEVEL = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG}.get(os.environ.get('DIPLOMACY_LOGGING', 'INFO'), logging.INFO)
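# e.g. exporting DIPLOMACY_LOGGING=DEBUG switches this package's root logger
# to debug output; unrecognized values fall back to INFO.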
# Defining root logger
ROOT_LOGGER = logging.getLogger('diplomacy_research')
ROOT_LOGGER.setLevel(LOGGING_LEVEL)
ROOT_LOGGER.propagate = False
# Adding output to stdout by default
STREAM_HANDLER = logging.StreamHandler(sys.stdout)
STREAM_HANDLER.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
STREAM_HANDLER.setFormatter(FORMATTER)
ROOT_LOGGER.addHandler(STREAM_HANDLER)
| mit | 6,087,254,447,154,937,000 | 42.487179 | 103 | 0.655071 | false |
MiczFlor/Booktype | lib/booktype/constants.py | 1 | 22740 |
# This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import os
from django.utils.translation import ugettext_noop
# SSL cert verification during request using 'requests' lib
REQUESTS_VERIFY_SSL_CERT = True
# SECURITY CLASS
BOOKTYPE_BASE_SECURITY_CLASS = 'booktype.utils.security.base.BaseSecurity'
# Should track changes be turned on for the book
BOOK_TRACK_CHANGES = False
# CHAPTER STATUS RELATED
CHAPTER_STATUS_LIST = [
{'name': ugettext_noop('new'), 'color': '#3a87ad'},
{'name': ugettext_noop('needs content'), 'color': '#ff0000'},
{'name': ugettext_noop('completed'), 'color': '#5cb85c'},
{'name': ugettext_noop('to be proofed'), 'color': '#f0ad4e'}
]
CHAPTER_STATUS_DEFAULT = CHAPTER_STATUS_LIST[0]['name']
# IMPORTERS RELATED STUFF
BOOKTYPE_IMPORTERS = {
'epub': ('booktype.importer.epub', 'import_epub'),
'docx': ('booktype.importer.docx', 'import_docx')
}
# Default styles matched so far. We'll add more in the future.
# These constants are used in docimporter.py to correctly
# assign classes to imported elements
DOCX_PARAGRAPH_STYLES_MAP = {
'AuthorName': 'authorname',
'Reference': 'reference',
'Citation': 'bk-cite'
}
# Which elements are considered <h1> style
H1_STYLES = ['heading1']
# Which elements are considered <h2> style
H2_STYLES = ['heading2']
# Which elements are considered <h3> style
H3_STYLES = ['heading3']
# Which elements are considered <h4> style
H4_STYLES = ['heading4']
# Which elements are considered <h5> style
H5_STYLES = ['heading5']
# Which elements are considered <h6> style
H6_STYLES = ['heading6']
# All of our Heading styles
DOCX_HEADING_STYLES = H1_STYLES + H2_STYLES + H3_STYLES + H4_STYLES + H5_STYLES + H6_STYLES
DOCX_HEADING_STYLES_TUPLE = (
('h1', H1_STYLES),
('h2', H2_STYLES),
('h3', H3_STYLES),
('h4', H4_STYLES),
('h5', H5_STYLES),
('h6', H6_STYLES)
)
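# e.g. a .docx paragraph styled "heading2" is imported as an <h2> element.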
# This allows clients to set a custom importer class
DOCX_IMPORTER_CLASS = 'booktype.importer.WordImporter'
# END IMPORTERS STUFF
# SERVER RELATED
THIS_BOOKI_SERVER = os.environ.get('HTTP_HOST', 'booktype-demo.sourcefabric.org')
# ADMINISTRATIVE RELATED
CREATE_BOOK_VISIBLE = True
CREATE_BOOK_LICENSE = ""
FREE_REGISTRATION = True
ADMIN_CREATE_BOOKS = False
ADMIN_IMPORT_BOOKS = False
BOOKTYPE_MAX_USERS = 0
BOOKTYPE_MAX_BOOKS = 0
BOOKTYPE_BOOKS_PER_USER = -1
GROUP_LIST_PAGE_SIZE = 20
USER_LIST_PAGE_SIZE = 20
BOOK_LIST_PAGE_SIZE = 20
# google analytics
USE_GOOGLE_ANALYTICS = False
GOOGLE_ANALYTICS_ID = ''
# reports
REPORTS_EMAIL_FROM = '[email protected]'
REPORTS_EMAIL_USERS = ['[email protected]']
REPORTS_CUSTOM_FONT_PATH = False
MAX_ADDITIONAL_METADATA = 3
# IMPORT RELATED
EPUB_COVER_MIN_DPI = 300
EPUB_COVER_MIN_SIZE = 500
EPUB_COVER_MAX_SIZE = 2800
EPUB_COVER_MAX_PIXELS = 3200000
# PUBLISHING RELATED
PUBLISH_OPTIONS = ['mpdf', 'screenpdf', 'epub3', 'epub2', 'icml', 'docx', 'mobi', 'xhtml']
# mobi conversion
# Options are "kindlegen" or "calibre"
MOBI_CONVERT = "calibre"
KINDLEGEN_PATH = "kindlegen"
CALIBRE_PATH = "ebook-convert"
CALIBRE_ARGS = ""
OBJAVI_URL = "http://objavi.booktype.pro/objavi.cgi"
ESPRI_URL = "http://objavi.booktype.pro/espri.cgi"
# theme plugins
BOOKTYPE_THEME_PLUGINS = {
'custom': 'booktype.apps.themes.convert.custom',
'academic': 'booktype.apps.themes.convert.academic'
}
# define path to module where class ExportBook is located
BOOKTYPE_EXPORT_CLASS_MODULE = 'booktype.apps.export.utils'
EXPORT_WAIT_FOR = 90
# convert constants
CONVERT_EDITOR_WIDTH = 898
XHTML_DOCUMENT_WIDTH = 2480
MOBI_DOCUMENT_WIDTH = 1500
EPUB_DOCUMENT_WIDTH = 1500
# editor stuff here
EDITOR_AUTOSAVE_ENABLED = False # disabled by default
EDITOR_AUTOSAVE_DELAY = 60 # time in seconds
EDITOR_SETTINGS_ROLES_SHOW_PERMISSIONS = 0
# end editor stuff
EPUB_NOT_ALLOWED_TAGS = (
# 'strip' - drop tag, leave content
# 'drop' - drop tag, drop content
# 'replace' - replace tag with 'replacement'
# EXAMPLES:
# {'tag': 'i', 'action': 'strip'},
# {'tag': 'b', 'action': 'drop'},
# {
# 'tag': 'u',
# 'action': 'replace',
# 'replacement': {
# 'tag': 'span',
# 'attrs': (
# ('style', 'text-decoration: underline;'),
# ('class', 'happy'),
# )
# }
# },
)
# According to epubcheck, only the following tags may appear at the first
# level of nesting inside the body tag. A tag that doesn't meet this
# requirement will be replaced with "<p>"
EPUB_AVAILABLE_INBODY_ROOT_TAGS = (
'address', 'blockquote', 'del', 'div', 'dl', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'hr', 'ins', 'noscript', 'ns:svg', 'ol', 'p', 'pre', 'script', 'table', 'ul'
)
# maps each tag to the attributes allowed in it
# required by epubcheck
EPUB_ALLOWED_TAG_ATTRS = (
('ol', ('class', 'dir', 'id', 'lang', 'style', 'title', 'xml:lang')),
)
EXPORT_SETTINGS = {
'mpdf': [
{u'name': u'size', u'value': u'A4'}, {u'name': u'custom_width', u'value': u''},
{u'name': u'custom_height', u'value': u''}, {u'name': u'top_margin', u'value': u'20'},
{u'name': u'side_margin', u'value': u'20'}, {u'name': u'bottom_margin', u'value': u'20'},
{u'name': u'gutter', u'value': u'20'}, {u'name': u'show_header', u'value': u'on'},
{u'name': u'header_margin', u'value': u'10'}, {u'name': u'show_footer', u'value': u'on'},
{u'name': u'footer_margin', u'value': u'10'}, {u'name': u'bleed_size', u'value': u''},
{u'name': u'styling', u'value': u''}, {u'name': u'crop_marks', u'value': u'off'}],
'screenpdf': [
{u'name': u'size', u'value': u'A4'}, {u'name': u'custom_width', u'value': u''},
{u'name': u'custom_height', u'value': u''}, {u'name': u'top_margin', u'value': u'20'},
{u'name': u'side_margin', u'value': u'20'}, {u'name': u'bottom_margin', u'value': u'20'},
{u'name': u'gutter', u'value': u'20'}, {u'name': u'show_header', u'value': u'on'},
{u'name': u'header_margin', u'value': u'10'}, {u'name': u'show_footer', u'value': u'on'},
{u'name': u'footer_margin', u'value': u'10'}, {u'name': u'cover_image', u'value': u' '},
{u'name': u'styling', u'value': u''}],
'epub2': [{u'name': u'cover_image', u'value': u' '}, {u'name': u'styling', u'value': u''}],
'epub3': [{u'name': u'cover_image', u'value': u' '}, {u'name': u'styling', u'value': u''}],
'icml': [{u'name': u'cover_image', u'value': u' '}, {u'name': u'styling', u'value': u''}],
'docx': [{u'name': u'cover_image', u'value': u' '}, {u'name': u'styling', u'value': u''}],
'mobi': [{u'name': u'cover_image', u'value': u' '}, {u'name': u'styling', u'value': u''}],
'xhtml': [{u'name': u'styling', u'value': u''}]
}
INCH_TO_MM = 25.4
PAGE_SIZE_DATA = {
'comicbook': (6.625 * INCH_TO_MM, 10.25 * INCH_TO_MM),
"pocket": (4.25 * INCH_TO_MM, 6.875 * INCH_TO_MM),
"usletter": (8.5 * INCH_TO_MM, 11 * INCH_TO_MM),
"ustrade6x9": (6 * INCH_TO_MM, 9 * INCH_TO_MM),
"ustrade": (6 * INCH_TO_MM, 9 * INCH_TO_MM),
"landscape9x7": (9 * INCH_TO_MM, 7 * INCH_TO_MM),
"square7.5": (7.5 * INCH_TO_MM, 7.5 * INCH_TO_MM),
"royal": (6.139 * INCH_TO_MM, 9.21 * INCH_TO_MM),
"crownquarto": (7.444 * INCH_TO_MM, 9.681 * INCH_TO_MM),
"square8.5": (8.5 * INCH_TO_MM, 8.5 * INCH_TO_MM),
"us5.5x8.5": (5.5 * INCH_TO_MM, 8.5 * INCH_TO_MM),
"digest": (5.5 * INCH_TO_MM, 8.5 * INCH_TO_MM),
"us5x8": (5 * INCH_TO_MM, 8 * INCH_TO_MM),
"us7x10": (7 * INCH_TO_MM, 10 * INCH_TO_MM),
"a5": (148, 210),
"a4": (210, 297),
"a3 (nz tabloid)": (297, 420),
"a2 (nz broadsheet)": (420, 594),
"a1": (594, 841),
"b5": (176, 250),
"b4": (250, 353),
"b3": (353, 500),
"b2": (500, 707),
"b1": (707, 1000),
# Not so sure about next 3
"uk tabloid": (11 * INCH_TO_MM, 17 * INCH_TO_MM),
"uk broadsheet": (18 * INCH_TO_MM, 24 * INCH_TO_MM),
"us broadsheet": (15 * INCH_TO_MM, 22.75 * INCH_TO_MM),
"berliner" : (315, 470),
"foolscap (f4)": (210, 330),
"oamaru broadsheet" :(382, 540),
"oamaru tabloid": (265, 380),
}
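# Worked example: "pocket" is 4.25in x 6.875in, i.e. 4.25 * 25.4 = 107.95 mm
# wide by 6.875 * 25.4 = 174.625 mm tall.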
# These are default options for CSS settings
BOOKTYPE_CSS_BOOK = ('.objavi-chapter{ color: #000; }'
'a { text-decoration:none; color:#000; } '
'h1 .initial{ color: #000; } '
'.objavi-subsection{ display: block; '
'page-break-before: always; '
'/* page-break-after: always;*/ '
'text-transform: uppercase; font-size: 20pt; }'
'body .objavi-subsection:first-child{ '
'page-break-before: avoid; } '
'.objavi-subsection .initial { '
'font-size: 1em; color: #000; }'
'.objavi-subsection-heading { font-size: 20pt; '
'text-align: center; '
'line-height: 300px; font-weight: normal; } '
'h1 { page-break-before: always; } '
'table { float: none; }'
'h1.frontpage{ page-break-after:always; margin-top:70%; '
'font-size: 20pt; '
'text-align: center; page-break-before: avoid; '
'font-weight: normal; }'
'div.copyright{ padding: 1em; } '
'/* TOC ******************************/ '
'table { float: none; } '
'table.toc { font-size: 1.1em; width: 95%; } '
'table.toc td{ vertical-align:top padding-left: 0.5em; } '
'td.chapter { padding: 0 0.5em; text-align: right; } '
'table.toc td.pagenumber { text-align: right; '
'vertical-align:bottom; } '
'td.section { padding-top: 1.1em; font-weight: bold; } '
'/* End TOC **************************/ '
'pre { overflow: hidden; white-space: pre-wrap; } '
'h1 h2 h3 h4 h5 h6{ page-break-after: avoid; '
'page-break-inside: avoid; } '
'.page-break{ page-break-before: always; height: 7em; '
'display: block; } '
'#right-footer { text-align: right; } '
'#left-footer { text-align: left; } '
'a { word-wrap: break-word; } '
'.objavi-no-page-break { page-break-inside: avoid; } '
'.unseen{ z-index: -66; margin-left: -1000pt; }'
'sup {vertical-align:text-top;font-size:0.7em; }'
'img { max-width: 95%; }'
'p { word-wrap: break-word; }'
'li { word-wrap: break-word; }'
'#InsertNote_NoteList { word-wrap: break-word; }')
BOOKTYPE_CSS_BOOKJS = ('/* DOCUMENT */ @page { size: auto;}'
'body { word-break: break-word; -webkit-hyphens: auto;'
'hyphens: auto; font-family: "Liberation Serif";'
'background-color: white;}' '/* CONTENT */'
'img { max-width: 90%; height: auto;'
'image-resolution: from-image;}'
'sup { font-size: 80%;}'
'p { line-height: 130%; word-break: break-word;'
'/* text-align: justify; */'
'text-align: left;}'
'a { color: #000; text-decoration: none; '
'word-wrap: break-word;}'
'ol ul { text-align: justify;}'
'li { margin-left: 1em; word-wrap: break-word; '
'page-break-inside: avoid; windows: 4; orphans: 4;}'
'/* HEADINGS */'
'h1 {}'
'h1 .initial { display: none;}'
'h1 .subtitle {}'
'h1 .author { display: block; margin-top: 0.2in; '
'font-weight: normal;}'
'h1 .comma { font-size: 22pt; display: none;}'
'h2 { page-break-after: avoid;}'
'h3 { page-break-after: avoid;}'
'h4 { page-break-after: avoid;}'
'h5 { font-weight: normal; text-align: left;'
'page-break-after: avoid;}'
'/* CODE BLOCKS */'
'pre { white-space: pre-wrap; /* css-3 */ '
'white-space: -moz-pre-wrap; /* Mozilla since 1999 */'
'white-space: -pre-wrap;/* Opera 4-6 */'
'white-space: -o-pre-wrap; /* Opera 7 */'
'word-wrap: break-word; /* Internet Explorer 5.5+ */'
'widows:4; orphans:4;}'
'code {}'
'/* TOC */'
'#pagination-toc-title { font-size: 20pt; '
'font-weight: 700; text-align: left; '
'padding-bottom: .4in;}'
'.pagination-toc-entry {/* width: 6.2in; */ '
'width: 90%; display: block; padding-bottom: .3in; '
'font-size: 16pt;}'
'.pagination-toc-entry .pagination-toc-pagenumber { '
'font-weight: 400; display: inline-block; '
'vertical-align: text-bottom; font-size: 16pt; '
'float:right; '
'/* SET AUTOMATICALLY */}'
'.pagination-toc-entry.section { font-weight:700; '
'font-size: 16pt; text-transform: uppercase; '
'padding-bottom: .3in;}'
'/* FRONT MATTER */'
'#booktitle { margin-top: 1.7in; font-size: 26pt; '
'font-weight: normal; text-align: center; '
'text-transform: uppercase;}'
'#booksubtitle { font-size: 22px; margin-top: 0.2in; '
'font-weight: normal; text-align: center;}'
'#bookeditors { padding-top: 1.5in; '
'font-weight: normal; text-align: center; '
'font-size: 24pt;}'
'#bookpress { padding-top: 1.8in; font-weight: normal;'
'text-align: center; font-size: 24pt;}'
'#copyrightpage { font-weight: normal; '
'font-size: 18pt; padding-top: 0.2in;}'
'/* HEADER */'
'.pagination-header {font-size: 12pt;'
'font-weight: light;}'
'.pagination-pagenumber {font-size: 12pt;}'
'.pagination-header '
'.pagination-section { display: none; }'
'.pagination-toc-text .initial { display: none; }'
'.pagination-chapter .initial { display: none; }'
'/* MISC */'
'.imagecaption { font-size: 9pt; padding-left: 0.2in;'
'line-height: 18px; text-align: justify;'
'font-weight: normal; display: block;}'
'.pagebreak { -webkit-region-break-after: always;}'
'.pagebreakbefore{'
' -webkit-region-break-before: always;}'
'.objavi-chapter .initial { display: none;}'
'.objavi-subsection { display: none;}'
'.objavi-subsection-heading { '
'line-height: 120px !important; '
'/* work-around to make section title pages no longer '
'than one page */ font-size: 22px; font-weight: bold;'
' text-align: left; display: none;}'
'@media screen { .page { border: solid 1px #000;'
' margin-bottom: .2in; }'
'body { background-color: #efefef; }}'
'#InsertNote_NoteList { word-wrap: break-word;}')
BOOKTYPE_CSS_EBOOK = ('.objavi-chapter{ color: #000; display:none;} '
'a { text-decoration:none; color:#000;} '
'h1 .initial{ color: #000; display:none;} '
'.objavi-subsection{ display: block; '
'page-break-before: always;} '
'body .objavi-subsection:first-child{ '
'page-break-before: avoid;} '
'.objavi-subsection .initial { color: #000; '
'display:none;} .objavi-subsection-heading {'
'font-size: 20pt; text-align: center; '
'line-height: 300px; font-weight: normal;}'
'table { float: none;} h1.frontpage{'
'page-break-after:always; margin-top:70%; '
'font-size: 20pt; text-align: center;'
'page-break-before: avoid; max-width: 700pt; '
'font-weight: normal;} div.copyright{padding: 1em;}'
'/* TOC ******************************/'
'table { float: none;}'
'table.toc { font-size: 1.1em; width: 95%;}'
'table.toc td{ vertical-align:top; padding-left: 0.5em;}'
'td.chapter { padding: 0 0.5em; text-align: right;} '
'table.toc td.pagenumber { text-align: right; '
'vertical-align:bottom;} '
'td.section { padding-top: 1.1em; font-weight: bold;}'
'/* End TOC **************************/ '
'img { max-width: 500px; height: auto;}'
'.objavi-no-page-break {page-break-inside: avoid;} '
'.unseen { z-index: -66; margin-left: -1000pt;} '
'.objavi-subsection-heading{ height:860px; '
'font-size:0px; display:block;}')
BOOKTYPE_CSS_PDF = ('.objavi-subsection{ display: block; '
'page-break-before: always; /* page-break-after: always;*/'
'text-transform: uppercase; font-size: 20pt; } '
'body .objavi-subsection:first-child{ '
'page-break-before: avoid; } '
'.objavi-subsection .initial { font-size: 1em;'
'color: #000; } .objavi-subsection-heading {'
'font-size: 20pt; text-align: center; line-height: 300px;'
'font-weight: normal;} h1 { page-break-before: always; } '
'table { float: none; } '
'h1.frontpage{ page-break-after:always; margin-top:70%; '
'font-size: 20pt; text-align: center; '
'page-break-before: avoid; font-weight: normal; } '
'div.copyright{ padding: 1em; } '
'/* TOC ******************************/ '
'table { float: none; } '
'table.toc { font-size: 1.1em; width: 95%; } '
'table.toc td{ vertical-align:top; padding-left: 0.5em; } '
'td.chapter { padding: 0 0.5em; text-align: right; } '
'table.toc td.pagenumber { text-align: right; '
'vertical-align:bottom; } td.section { padding-top: 1.1em;'
'font-weight: bold; } '
'/* End TOC **************************/ '
'pre { overflow: hidden; white-space: pre-wrap; } '
'h1, h2, h3, h4, h5, h6{ page-break-after: avoid; '
'page-break-inside: avoid; } '
'.page-break{ page-break-before: always; height: 7em;'
'display: block; } a { word-wrap: break-word; } '
'.objavi-no-page-break { page-break-inside: avoid; } '
'/*To force a blank page it is sometimes necessary to '
'add unseen content. Display:none and visibility: hidden'
' do not work -- the renderer realises that they are not '
'there and skips the page. So we add a tiny bit of text '
'beyond the margin of the page. */ '
'.unseen{ z-index: -66; margin-left: -1000pt; }'
'img { max-width: 95%; } p { word-wrap: break-word; }'
'li { word-wrap: break-word; }'
'#InsertNote_NoteList { word-wrap: break-word; } ')
BOOKTYPE_CSS_ODT = ('body {} #book-title { font-size: 64pt; '
'page-break-before: avoid; margin-bottom: 12em; '
'max-width: 700px;} .unseen { display: none;}'
'.chapter { color: #000;} h1 .initial { color: #000; '
'font-size: 2em;} body .subsection:first-child {} '
'h1 { page-break-before: always;} '
'.objavi-subsection{ text-transform: uppercase; '
'font-size: 20pt;} .objavi-subsection .initial { '
'font-size: 1em; color: #000;}'
'.objavi-subsection-heading{ font-size: 36pt; '
'font-weight: bold; page-break-before: always;} '
'table { float: none;} h1.frontpage{ font-size: 64pt; '
'text-align: center; max-width: 700px;} '
'div.copyright{ padding: 1em;} pre { max-width:700px; '
'overflow: hidden;} '
'img { max-width: 700px; height: auto;}')
| agpl-3.0 | 6,846,859,636,964,900,000 | 45.790123 | 97 | 0.497845 | false |
pepitogithub/PythonScripts | musica/drumExFachada.py | 1 | 2964 | try:
    import pygame  # only needed by the graphical front end (_DXFGrafico)
except ImportError:
    pygame = None  # console mode works without pygame
import threading
import drumExMachina
class Fasade:
"""
Matrix
-> reproducir()
-> pausar()
-> salir()
-> volumen-general()
-> tempo()
-> figura()
-> agregar-pista()
-> quitar-pista()
-> activar-pista()
-> desactivar-pista()
Pista
-> nombre()
-> setear-instrumento()
-> canal-midi()
-> volumen()
-> duracion()
-> activar()
-> desactivar()
-> editar-partitura()
-> agregar-seccion()
-> quitar-seccion()
-> modificar-seccion()
Partitura
-> agregar-seccion()
-> quitar-seccion()
-> modificar-seccion()
Seccion
-> duracion()
-> pulsos()
-> ritmo()
-> rotacion()
-> notas()
"""
def __init__(self):
pass
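    # A minimal usage sketch of the API outlined in the docstring above.
    # Hypothetical: none of these methods are implemented on Fasade yet, so
    # the snippet stays commented out.
    #
    #   fachada = Fasade()
    #   fachada.tempo(120)               # set the global tempo
    #   pista = fachada.agregar_pista()  # add a track
    #   pista.setear_instrumento(36)     # e.g. a General MIDI kick drum note
    #   fachada.reproducir()             # start playback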
class _DXFConsola:
salir_codes = [0, "0", "salir", "exit"]
def __init__(self):
pass
def loop(self):
salir = False
while not salir:
user_input = input("> ")
salir = user_input in self.salir_codes
class _DXFGrafico:
def __init__(self, ancho=1200, alto=800):
self.alto = alto
self.ancho = ancho
self.screen = pygame.display.set_mode([self.ancho, self.alto])
pygame.display.set_caption("Drum Ex Machina")
    def loop(self):
        pygame.init()
        salir = False
        while not salir:
            # Poll the event queue so the window can be closed cleanly.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    salir = True
            pygame.draw.rect(self.screen, [255, 0, 0], [75, 10, 50, 20], 1)
pygame.display.flip()
pygame.time.delay(50)
class DrumExFacade:
"""
    Interface for DrumExMachina.
    It has two usage modes: console and graphical.
    """
    def __init__(self, modo='consola', ancho=1200, alto=800):
        self.modo = modo
        # Mode: console | graphical
        self.engine = _DXFConsola() if modo == 'consola' else _DXFGrafico(ancho=ancho, alto=alto)
def loop(self):
DXM_thread = threading.Thread(target=drumExMachina.testeos)
DXM_thread.start()
        self.engine.loop()
        # threading.Thread has no exit() method; wait for the background
        # engine thread to finish once the front end loop returns.
        DXM_thread.join()
if __name__ == "__main__":
    DXF = DrumExFacade("consola")
    DXF_thread = threading.Thread(target=DXF.loop)
    DXF_thread.start()
jeffery9/mixprint_addons | ineco_jasper_report/__openerp__.py | 1 | 1946 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 INECO PARTNERSHIP LIMITED (http://openerp.tititab.com)
# All Right Reserved
#
# Author : Tititab Srisookco ([email protected])
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
"name" : "Ineco Jasper Report Plug-in",
"description" : """
1. Please install M2CRYPTO by sudo apt-get install python-m2crypto
""",
"version" : "0.1",
"depends" : ["base"],
"author" : "INECO",
"category": "Report Tools",
"url": "http://openerp.tititab.com",
"data": [ ],
'update_xml': [
'ineco_jasper_report_view.xml',
'security.xml',
],
'init_xml': [ ],
'demo_xml': [ ],
'test':[ ],
"installable" : True,
"active" : False,
}
| agpl-3.0 | 3,297,264,119,505,475,600 | 36.423077 | 78 | 0.629496 | false |
stvstnfrd/edx-platform | lms/djangoapps/courseware/views/views.py | 1 | 89915 | """
Courseware views functions
"""
import json
import logging
from collections import OrderedDict, namedtuple
from datetime import datetime
import bleach
import requests
import six
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser, User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.db.models import Q, prefetch_related_objects
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from django.template.context_processors import csrf
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.http import urlquote_plus
from django.utils.text import slugify
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop
from django.views.decorators.cache import cache_control
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from django.views.generic import View
from edx_django_utils import monitoring as monitoring_utils
from edx_django_utils.monitoring import set_custom_attribute, set_custom_attributes_for_course_key
from ipware.ip import get_client_ip
from markupsafe import escape
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from pytz import UTC
from requests.exceptions import ConnectionError, Timeout # pylint: disable=redefined-builtin
from rest_framework import status
from rest_framework.decorators import api_view, throttle_classes
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from six import text_type
from web_fragments.fragment import Fragment
from lms.djangoapps.survey import views as survey_views
from common.djangoapps.course_modes.models import CourseMode, get_course_prices
from common.djangoapps.edxmako.shortcuts import marketing_link, render_to_response, render_to_string
from lms.djangoapps.edxnotes.helpers import is_feature_enabled
from lms.djangoapps.ccx.custom_exception import CCXLocatorValidationException
from lms.djangoapps.certificates import api as certs_api
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.commerce.utils import EcommerceService
from lms.djangoapps.course_home_api.toggles import course_home_mfe_dates_tab_is_active
from openedx.features.course_experience.url_helpers import get_learning_mfe_home_url, is_request_from_learning_mfe
from lms.djangoapps.courseware.access import has_access, has_ccx_coach_role
from lms.djangoapps.courseware.access_utils import check_course_open_for_learner, check_public_access
from lms.djangoapps.courseware.courses import (
can_self_enroll_in_course,
course_open_for_self_enrollment,
get_course,
get_course_date_blocks,
get_course_overview_with_access,
get_course_with_access,
get_courses,
get_current_child,
get_permission_for_course_about,
get_studio_url,
sort_by_announcement,
sort_by_start_date
)
from lms.djangoapps.courseware.date_summary import verified_upgrade_deadline_link
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect, Redirect
from lms.djangoapps.courseware.masquerade import setup_masquerade
from lms.djangoapps.courseware.model_data import FieldDataCache
from lms.djangoapps.courseware.models import BaseStudentModuleHistory, StudentModule
from lms.djangoapps.courseware.permissions import ( # lint-amnesty, pylint: disable=unused-import
MASQUERADE_AS_STUDENT,
VIEW_COURSE_HOME,
VIEW_COURSEWARE,
VIEW_XQA_INTERFACE
)
from lms.djangoapps.courseware.user_state_client import DjangoXBlockUserStateClient
from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context
from lms.djangoapps.grades.api import CourseGradeFactory
from lms.djangoapps.instructor.enrollment import uses_shib
from lms.djangoapps.instructor.views.api import require_global_staff
from lms.djangoapps.verify_student.services import IDVerificationService
from openedx.core.djangoapps.catalog.utils import get_programs, get_programs_with_type
from openedx.core.djangoapps.certificates import api as auto_certs_api
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.credit.api import (
get_credit_requirement_status,
is_credit_course,
is_user_eligible_for_credit
)
from openedx.core.djangoapps.enrollments.api import add_enrollment, get_enrollment # lint-amnesty, pylint: disable=unused-import
from openedx.core.djangoapps.enrollments.permissions import ENROLL_IN_COURSE
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangoapps.programs.utils import ProgramMarketingDataExtender
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
from openedx.core.djangoapps.zendesk_proxy.utils import create_zendesk_ticket
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.mobile_utils import is_request_from_mobile_app
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.course_duration_limits.access import generate_course_expired_fragment
from openedx.features.course_experience import DISABLE_UNIFIED_COURSE_TAB_FLAG, course_home_url_name
from openedx.features.course_experience.course_tools import CourseToolsPluginManager
from openedx.features.course_experience.url_helpers import get_legacy_courseware_url
from openedx.features.course_experience.utils import dates_banner_should_display
from openedx.features.course_experience.views.course_dates import CourseDatesFragmentView
from openedx.features.course_experience.waffle import ENABLE_COURSE_ABOUT_SIDEBAR_HTML
from openedx.features.course_experience.waffle import waffle as course_experience_waffle
from openedx.features.enterprise_support.api import data_sharing_consent_required
from common.djangoapps.student.models import CourseEnrollment, UserTestGroup
from common.djangoapps.track import segment
from common.djangoapps.util.cache import cache, cache_if_anonymous
from common.djangoapps.util.db import outer_atomic
from common.djangoapps.util.milestones_helpers import get_prerequisite_courses_display
from common.djangoapps.util.views import ensure_valid_course_key, ensure_valid_usage_key
from xmodule.course_module import COURSE_VISIBILITY_PUBLIC, COURSE_VISIBILITY_PUBLIC_OUTLINE
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
from ..context_processor import user_timezone_locale_prefs
from ..entrance_exams import user_can_skip_entrance_exam
from ..module_render import get_module, get_module_by_usage_id, get_module_for_descriptor
from ..tabs import _get_dynamic_tabs
from ..toggles import COURSEWARE_OPTIMIZED_RENDER_XBLOCK
log = logging.getLogger("edx.courseware")
# Only display the requirements on learner dashboard for
# credit and verified modes.
REQUIREMENTS_DISPLAY_MODES = CourseMode.CREDIT_MODES + [CourseMode.VERIFIED]
CertData = namedtuple(
"CertData", ["cert_status", "title", "msg", "download_url", "cert_web_view_url"]
)
EARNED_BUT_NOT_AVAILABLE_CERT_STATUS = 'earned_but_not_available'
AUDIT_PASSING_CERT_DATA = CertData(
CertificateStatuses.audit_passing,
_('Your enrollment: Audit track'),
_('You are enrolled in the audit track for this course. The audit track does not include a certificate.'),
download_url=None,
cert_web_view_url=None
)
HONOR_PASSING_CERT_DATA = CertData(
CertificateStatuses.honor_passing,
_('Your enrollment: Honor track'),
_('You are enrolled in the honor track for this course. The honor track does not include a certificate.'),
download_url=None,
cert_web_view_url=None
)
INELIGIBLE_PASSING_CERT_DATA = {
CourseMode.AUDIT: AUDIT_PASSING_CERT_DATA,
CourseMode.HONOR: HONOR_PASSING_CERT_DATA
}
GENERATING_CERT_DATA = CertData(
CertificateStatuses.generating,
_("We're working on it..."),
_(
"We're creating your certificate. You can keep working in your courses and a link "
"to it will appear here and on your Dashboard when it is ready."
),
download_url=None,
cert_web_view_url=None
)
INVALID_CERT_DATA = CertData(
CertificateStatuses.invalidated,
_('Your certificate has been invalidated'),
_('Please contact your course team if you have any questions.'),
download_url=None,
cert_web_view_url=None
)
REQUESTING_CERT_DATA = CertData(
CertificateStatuses.requesting,
_('Congratulations, you qualified for a certificate!'),
_("You've earned a certificate for this course."),
download_url=None,
cert_web_view_url=None
)
UNVERIFIED_CERT_DATA = CertData(
CertificateStatuses.unverified,
_('Certificate unavailable'),
_(
u'You have not received a certificate because you do not have a current {platform_name} '
'verified identity.'
).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)),
download_url=None,
cert_web_view_url=None
)
EARNED_BUT_NOT_AVAILABLE_CERT_DATA = CertData(
EARNED_BUT_NOT_AVAILABLE_CERT_STATUS,
_('Your certificate will be available soon!'),
_('After this course officially ends, you will receive an email notification with your certificate.'),
download_url=None,
cert_web_view_url=None
)
def _downloadable_cert_data(download_url=None, cert_web_view_url=None):
return CertData(
CertificateStatuses.downloadable,
_('Your certificate is available'),
_("You've earned a certificate for this course."),
download_url=download_url,
cert_web_view_url=cert_web_view_url
)
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated:
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses_list = []
course_discovery_meanings = getattr(settings, 'COURSE_DISCOVERY_MEANINGS', {})
if not settings.FEATURES.get('ENABLE_COURSE_DISCOVERY'):
courses_list = get_courses(request.user)
if configuration_helpers.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses_list = sort_by_start_date(courses_list)
else:
courses_list = sort_by_announcement(courses_list)
# Add marketable programs to the context.
programs_list = get_programs_with_type(request.site, include_hidden=False)
return render_to_response(
"courseware/courses.html",
{
'courses': courses_list,
'course_discovery_meanings': course_discovery_meanings,
'programs_list': programs_list,
}
)
class PerUserVideoMetadataThrottle(UserRateThrottle):
"""
setting rate limit for yt_video_metadata API
"""
rate = settings.RATE_LIMIT_FOR_VIDEO_METADATA_API
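    # DRF parses `rate` as a "<number>/<period>" string, so the setting above
    # holds something like "10/minute" (illustrative value; the real one is
    # configured in settings.RATE_LIMIT_FOR_VIDEO_METADATA_API).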
@ensure_csrf_cookie
@login_required
@api_view(['GET'])
@throttle_classes([PerUserVideoMetadataThrottle])
def yt_video_metadata(request):
"""
Will hit the youtube API if the key is available in settings
:return: youtube video metadata
"""
video_id = request.GET.get('id', None)
metadata, status_code = load_metadata_from_youtube(video_id, request)
return Response(metadata, status=status_code, content_type='application/json')
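# Example request against this endpoint (the video id value is illustrative):
#
#   GET /courses/yt_video_metadata?id=dQw4w9WgXcQ
#
# On success the response body is the raw YouTube metadata payload returned by
# load_metadata_from_youtube() below.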
def load_metadata_from_youtube(video_id, request):
"""
Get metadata about a YouTube video.
This method is used via the standalone /courses/yt_video_metadata REST API
endpoint, or via the video XBlock as a its 'yt_video_metadata' handler.
"""
metadata = {}
status_code = 500
if video_id and settings.YOUTUBE_API_KEY and settings.YOUTUBE_API_KEY != 'PUT_YOUR_API_KEY_HERE':
yt_api_key = settings.YOUTUBE_API_KEY
yt_metadata_url = settings.YOUTUBE['METADATA_URL']
        yt_timeout = settings.YOUTUBE.get('TEST_TIMEOUT', 1500) / 1000  # convert milliseconds to seconds
headers = {}
http_referer = None
try:
# This raises an attribute error if called from the xblock yt_video_metadata handler, which passes
# a webob request instead of a django request.
http_referer = request.META.get('HTTP_REFERER')
except AttributeError:
# So here, let's assume it's a webob request and access the referer the webob way.
http_referer = request.referer
if http_referer:
headers['Referer'] = http_referer
payload = {'id': video_id, 'part': 'contentDetails', 'key': yt_api_key}
try:
res = requests.get(yt_metadata_url, params=payload, timeout=yt_timeout, headers=headers)
status_code = res.status_code
if res.status_code == 200:
try:
res_json = res.json()
if res_json.get('items', []):
metadata = res_json
else:
logging.warning(u'Unable to find the items in response. Following response '
u'was received: {res}'.format(res=res.text))
except ValueError:
logging.warning(u'Unable to decode response to json. Following response '
u'was received: {res}'.format(res=res.text))
else:
logging.warning(u'YouTube API request failed with status code={status} - '
u'Error message is={message}'.format(status=status_code, message=res.text))
except (Timeout, ConnectionError):
logging.warning(u'YouTube API request failed because of connection time out or connection error')
else:
        logging.warning(u'YouTube API key or video id is None. Please make sure the API key and video id are not None')
return metadata, status_code
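# For reference, a successful lookup (HTTP 200 with items) returns the YouTube
# Data API v3 payload for part=contentDetails, roughly shaped like this sketch
# (field values are illustrative):
#
#   {
#       "kind": "youtube#videoListResponse",
#       "items": [
#           {"id": "dQw4w9WgXcQ",
#            "contentDetails": {"duration": "PT3M33S", "definition": "hd"}}
#       ]
#   }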
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
"""
    This entry point allows for a shorter version of a jump-to URL where just the id of the element
    is passed in. It assumes that the id is unique within the course_id namespace.
"""
course_key = CourseKey.from_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
if len(items) == 0:
raise Http404(
u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
module_id, course_id, request.META.get("HTTP_REFERER", "")
))
if len(items) > 1:
log.warning(
u"Multiple items found with id: %s in course_id: %s. Referer: %s. Using first: %s",
module_id,
course_id,
request.META.get("HTTP_REFERER", ""),
text_type(items[0].location)
)
return jump_to(request, course_id, text_type(items[0].location))
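# For illustration, a link such as /courses/<course_id>/jump_to_id/<module_id>
# (hypothetical URL pattern) resolves the short id to a full usage key via the
# lookup above and then delegates to jump_to() below.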
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
try:
course_key = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(location).replace(course_key=course_key)
except InvalidKeyError:
raise Http404(u"Invalid course_key or usage_key") # lint-amnesty, pylint: disable=raise-missing-from
try:
redirect_url = get_legacy_courseware_url(course_key, usage_key, _request)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(usage_key)) # lint-amnesty, pylint: disable=raise-missing-from
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(usage_key)) # lint-amnesty, pylint: disable=raise-missing-from
return redirect(redirect_url)
@ensure_csrf_cookie
@ensure_valid_course_key
@data_sharing_consent_required
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
# TODO: LEARNER-611: This can be deleted with Course Info removal. The new
# Course Home is using its own processing of last accessed.
def get_last_accessed_courseware(course, request, user):
"""
Returns the courseware module URL that the user last accessed, or None if it cannot be found.
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2
)
course_module = get_module_for_descriptor(
user,
request,
course,
field_data_cache,
course.id,
course=course,
will_recheck_access=True,
)
chapter_module = get_current_child(course_module)
if chapter_module is not None:
section_module = get_current_child(chapter_module)
if section_module is not None:
url = reverse('courseware_section', kwargs={
'course_id': text_type(course.id),
'chapter': chapter_module.url_name,
'section': section_module.url_name
})
return url
return None
course_key = CourseKey.from_string(course_id)
# If the unified course experience is enabled, redirect to the "Course" tab
if not DISABLE_UNIFIED_COURSE_TAB_FLAG.is_enabled(course_key):
return redirect(reverse(course_home_url_name(course_key), args=[course_id]))
with modulestore().bulk_operations(course_key):
course = get_course_with_access(request.user, 'load', course_key)
can_masquerade = request.user.has_perm(MASQUERADE_AS_STUDENT, course)
masquerade, user = setup_masquerade(request, course_key, can_masquerade, reset_masquerade_data=True)
# LEARNER-612: CCX redirect handled by new Course Home (DONE)
# LEARNER-1697: Transition banner messages to new Course Home (DONE)
# if user is not enrolled in a course then app will show enroll/get register link inside course info page.
user_is_enrolled = CourseEnrollment.is_enrolled(user, course.id)
show_enroll_banner = request.user.is_authenticated and not user_is_enrolled
# If the user is not enrolled but this is a course that does not support
# direct enrollment then redirect them to the dashboard.
if not user_is_enrolled and not can_self_enroll_in_course(course_key):
return redirect(reverse('dashboard'))
# LEARNER-170: Entrance exam is handled by new Course Outline. (DONE)
# If the user needs to take an entrance exam to access this course, then we'll need
# to send them to that specific course module before allowing them into other areas
if not user_can_skip_entrance_exam(user, course):
return redirect(reverse('courseware', args=[text_type(course.id)]))
# Construct the dates fragment
dates_fragment = None
if request.user.is_authenticated:
# TODO: LEARNER-611: Remove enable_course_home_improvements
if SelfPacedConfiguration.current().enable_course_home_improvements:
# Shared code with the new Course Home (DONE)
dates_fragment = CourseDatesFragmentView().render_to_fragment(request, course_id=course_id)
# Shared code with the new Course Home (DONE)
# Get the course tools enabled for this user and course
course_tools = CourseToolsPluginManager.get_enabled_course_tools(request, course_key)
        course_homepage_invert_title = configuration_helpers.get_value(
            'COURSE_HOMEPAGE_INVERT_TITLE',
            False
        )
        course_homepage_show_subtitle = configuration_helpers.get_value(
            'COURSE_HOMEPAGE_SHOW_SUBTITLE',
            True
        )
        course_homepage_show_org = configuration_helpers.get_value('COURSE_HOMEPAGE_SHOW_ORG', True)
course_title = course.display_number_with_default
course_subtitle = course.display_name_with_default
if course_homepage_invert_title:
course_title = course.display_name_with_default
course_subtitle = course.display_number_with_default
context = {
'request': request,
'masquerade_user': user,
'course_id': text_type(course_key),
'url_to_enroll': CourseTabView.url_to_enroll(course_key),
'cache': None,
'course': course,
'course_title': course_title,
'course_subtitle': course_subtitle,
'show_subtitle': course_homepage_show_subtitle,
'show_org': course_homepage_show_org,
'can_masquerade': can_masquerade,
'masquerade': masquerade,
'supports_preview_menu': True,
'studio_url': get_studio_url(course, 'course_info'),
'show_enroll_banner': show_enroll_banner,
'user_is_enrolled': user_is_enrolled,
'dates_fragment': dates_fragment,
'course_tools': course_tools,
}
context.update(
get_experiment_user_metadata_context(
course,
user,
)
)
# Get the URL of the user's last position in order to display the 'where you were last' message
context['resume_course_url'] = None
# TODO: LEARNER-611: Remove enable_course_home_improvements
if SelfPacedConfiguration.current().enable_course_home_improvements:
context['resume_course_url'] = get_last_accessed_courseware(course, request, user)
if not check_course_open_for_learner(user, course):
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
context['supports_preview_menu'] = False
return render_to_response('courseware/info.html', context)
class StaticCourseTabView(EdxFragmentView):
"""
View that displays a static course tab with a given name.
"""
@method_decorator(ensure_csrf_cookie)
@method_decorator(ensure_valid_course_key)
def get(self, request, course_id, tab_slug, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Displays a static course tab page with a given name
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
if tab is None:
raise Http404
# Show warnings if the user has limited access
CourseTabView.register_user_access_warning_messages(request, course)
return super(StaticCourseTabView, self).get(request, course=course, tab=tab, **kwargs) # lint-amnesty, pylint: disable=super-with-arguments
def render_to_fragment(self, request, course=None, tab=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Renders the static tab to a fragment.
"""
return get_static_tab_fragment(request, course, tab)
def render_standalone_response(self, request, fragment, course=None, tab=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Renders this static tab's fragment to HTML for a standalone page.
"""
return render_to_response('courseware/static_tab.html', {
'course': course,
'active_page': 'static_tab_{0}'.format(tab['url_slug']),
'tab': tab,
'fragment': fragment,
'disable_courseware_js': True,
})
class CourseTabView(EdxFragmentView):
"""
View that displays a course tab page.
"""
@method_decorator(ensure_csrf_cookie)
@method_decorator(ensure_valid_course_key)
@method_decorator(data_sharing_consent_required)
def get(self, request, course_id, tab_type, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Displays a course tab page that contains a web fragment.
"""
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_with_access(request.user, 'load', course_key)
try:
# Render the page
course_tabs = course.tabs + _get_dynamic_tabs(course, request.user)
tab = CourseTabList.get_tab_by_type(course_tabs, tab_type)
page_context = self.create_page_context(request, course=course, tab=tab, **kwargs)
# Show warnings if the user has limited access
# Must come after masquerading on creation of page context
self.register_user_access_warning_messages(request, course)
set_custom_attributes_for_course_key(course_key)
return super(CourseTabView, self).get(request, course=course, page_context=page_context, **kwargs) # lint-amnesty, pylint: disable=super-with-arguments
except Exception as exception: # pylint: disable=broad-except
return CourseTabView.handle_exceptions(request, course_key, course, exception)
@staticmethod
def url_to_enroll(course_key):
"""
Returns the URL to use to enroll in the specified course.
"""
url_to_enroll = reverse('about_course', args=[text_type(course_key)])
if settings.FEATURES.get('ENABLE_MKTG_SITE'):
url_to_enroll = marketing_link('COURSES')
return url_to_enroll
@staticmethod
def register_user_access_warning_messages(request, course):
"""
Register messages to be shown to the user if they have limited access.
"""
allow_anonymous = check_public_access(course, [COURSE_VISIBILITY_PUBLIC])
if request.user.is_anonymous and not allow_anonymous:
if CourseTabView.course_open_for_learner_enrollment(course):
PageLevelMessages.register_warning_message(
request,
Text(_(u"To see course content, {sign_in_link} or {register_link}.")).format(
sign_in_link=HTML(u'<a href="/login?next={current_url}">{sign_in_label}</a>').format(
sign_in_label=_("sign in"),
current_url=urlquote_plus(request.path),
),
register_link=HTML(u'<a href="/register?next={current_url}">{register_label}</a>').format(
register_label=_("register"),
current_url=urlquote_plus(request.path),
),
),
once_only=True
)
else:
PageLevelMessages.register_warning_message(
request,
Text(_(u"{sign_in_link} or {register_link}.")).format(
sign_in_link=HTML(u'<a href="/login?next={current_url}">{sign_in_label}</a>').format(
sign_in_label=_("Sign in"),
current_url=urlquote_plus(request.path),
),
register_link=HTML(u'<a href="/register?next={current_url}">{register_label}</a>').format(
register_label=_("register"),
current_url=urlquote_plus(request.path),
),
)
)
else:
if not CourseEnrollment.is_enrolled(request.user, course.id) and not allow_anonymous:
# Only show enroll button if course is open for enrollment.
if CourseTabView.course_open_for_learner_enrollment(course):
                    enroll_message = _(u'You must be enrolled in the course to see course content. '
                                       u'{enroll_link_start}Enroll now{enroll_link_end}.')
PageLevelMessages.register_warning_message(
request,
Text(enroll_message).format(
enroll_link_start=HTML('<button class="enroll-btn btn-link">'),
enroll_link_end=HTML('</button>')
)
)
else:
PageLevelMessages.register_warning_message(
request,
Text(_('You must be enrolled in the course to see course content.'))
)
@staticmethod
def course_open_for_learner_enrollment(course):
return (course_open_for_self_enrollment(course.id)
and not course.invitation_only
and not CourseMode.is_masters_only(course.id))
@staticmethod
def handle_exceptions(request, course_key, course, exception):
u"""
Handle exceptions raised when rendering a view.
"""
if isinstance(exception, Redirect) or isinstance(exception, Http404): # lint-amnesty, pylint: disable=consider-merging-isinstance
raise # lint-amnesty, pylint: disable=misplaced-bare-raise
if settings.DEBUG:
raise # lint-amnesty, pylint: disable=misplaced-bare-raise
user = request.user
log.exception(
u"Error in %s: user=%s, effective_user=%s, course=%s",
request.path,
getattr(user, 'real_user', user),
user,
text_type(course_key),
)
try:
return render_to_response(
'courseware/courseware-error.html',
{
'staff_access': has_access(user, 'staff', course),
'course': course,
},
status=500,
)
        except Exception:  # pylint: disable=broad-except
# Let the exception propagate, relying on global config to
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
def create_page_context(self, request, course=None, tab=None, **kwargs):
"""
Creates the context for the fragment's template.
"""
can_masquerade = request.user.has_perm(MASQUERADE_AS_STUDENT, course)
supports_preview_menu = tab.get('supports_preview_menu', False)
if supports_preview_menu:
masquerade, masquerade_user = setup_masquerade(
request,
course.id,
can_masquerade,
reset_masquerade_data=True,
)
request.user = masquerade_user
else:
masquerade = None
context = {
'course': course,
'tab': tab,
'active_page': tab.get('type', None),
'can_masquerade': can_masquerade,
'masquerade': masquerade,
'supports_preview_menu': supports_preview_menu,
'uses_bootstrap': True,
'disable_courseware_js': True,
}
# Avoid Multiple Mathjax loading on the 'user_profile'
if 'profile_page_context' in kwargs:
context['load_mathjax'] = kwargs['profile_page_context'].get('load_mathjax', True)
context.update(
get_experiment_user_metadata_context(
course,
request.user,
)
)
return context
def render_to_fragment(self, request, course=None, page_context=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Renders the course tab to a fragment.
"""
tab = page_context['tab']
return tab.render_to_fragment(request, course, **kwargs)
def render_standalone_response(self, request, fragment, course=None, tab=None, page_context=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Renders this course tab's fragment to HTML for a standalone page.
"""
if not page_context:
page_context = self.create_page_context(request, course=course, tab=tab, **kwargs)
tab = page_context['tab']
page_context['fragment'] = fragment
return render_to_response('courseware/tab-view.html', page_context)
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
return render_to_response('courseware/syllabus.html', {
'course': course,
'staff_access': staff_access,
})
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated:
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
class EnrollStaffView(View):
"""
Displays view for registering in the course to a global staff user.
User can either choose to 'Enroll' or 'Don't Enroll' in the course.
Enroll: Enrolls user in course and redirects to the courseware.
Don't Enroll: Redirects user to course about page.
Arguments:
- request : HTTP request
- course_id : course id
Returns:
- RedirectResponse
"""
template_name = 'enroll_staff.html'
@method_decorator(require_global_staff)
@method_decorator(ensure_valid_course_key)
def get(self, request, course_id):
"""
Display enroll staff view to global staff user with `Enroll` and `Don't Enroll` options.
"""
user = request.user
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_with_access(user, 'load', course_key)
if not registered_for_course(course, user):
context = {
'course': course,
'csrftoken': csrf(request)["csrf_token"]
}
return render_to_response(self.template_name, context)
@method_decorator(require_global_staff)
@method_decorator(ensure_valid_course_key)
def post(self, request, course_id):
"""
Either enrolls the user in course or redirects user to course about page
depending upon the option (Enroll, Don't Enroll) chosen by the user.
"""
_next = six.moves.urllib.parse.quote_plus(request.GET.get('next', 'info'), safe='/:?=')
course_key = CourseKey.from_string(course_id)
enroll = 'enroll' in request.POST
if enroll:
add_enrollment(request.user.username, course_id)
log.info(
u"User %s enrolled in %s via `enroll_staff` view",
request.user.username,
course_id
)
return redirect(_next)
# In any other case redirect to the course about page.
return redirect(reverse('about_course', args=[text_type(course_key)]))
@ensure_csrf_cookie
@ensure_valid_course_key
@cache_if_anonymous()
def course_about(request, course_id):
"""
Display the course's about page.
"""
course_key = CourseKey.from_string(course_id)
# If a user is not able to enroll in a course then redirect
# them away from the about page to the dashboard.
if not can_self_enroll_in_course(course_key):
return redirect(reverse('dashboard'))
# If user needs to be redirected to course home then redirect
if _course_home_redirect_enabled():
return redirect(reverse(course_home_url_name(course_key), args=[text_type(course_key)]))
with modulestore().bulk_operations(course_key):
permission = get_permission_for_course_about()
course = get_course_with_access(request.user, permission, course_key)
course_details = CourseDetails.populate(course)
modes = CourseMode.modes_for_course_dict(course_key)
registered = registered_for_course(course, request.user)
staff_access = bool(has_access(request.user, 'staff', course))
studio_url = get_studio_url(course, 'settings/details')
if request.user.has_perm(VIEW_COURSE_HOME, course):
course_target = reverse(course_home_url_name(course.id), args=[text_type(course.id)])
else:
course_target = reverse('about_course', args=[text_type(course.id)])
show_courseware_link = bool(
(
request.user.has_perm(VIEW_COURSEWARE, course)
) or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
)
# If the ecommerce checkout flow is enabled and the mode of the course is
# professional or no id professional, we construct links for the enrollment
# button to add the course to the ecommerce basket.
ecomm_service = EcommerceService()
ecommerce_checkout = ecomm_service.is_enabled(request.user)
ecommerce_checkout_link = ''
ecommerce_bulk_checkout_link = ''
single_paid_mode = None
if ecommerce_checkout:
if len(modes) == 1 and list(modes.values())[0].min_price:
single_paid_mode = list(modes.values())[0]
else:
# have professional ignore other modes for historical reasons
single_paid_mode = modes.get(CourseMode.PROFESSIONAL)
if single_paid_mode and single_paid_mode.sku:
ecommerce_checkout_link = ecomm_service.get_checkout_page_url(single_paid_mode.sku)
if single_paid_mode and single_paid_mode.bulk_sku:
ecommerce_bulk_checkout_link = ecomm_service.get_checkout_page_url(single_paid_mode.bulk_sku)
registration_price, course_price = get_course_prices(course) # lint-amnesty, pylint: disable=unused-variable
# Used to provide context to message to student if enrollment not allowed
can_enroll = bool(request.user.has_perm(ENROLL_IN_COURSE, course))
invitation_only = course.invitation_only
is_course_full = CourseEnrollment.objects.is_course_full(course)
# Register button should be disabled if one of the following is true:
# - Student is already registered for course
# - Course is already full
# - Student cannot enroll in course
active_reg_button = not (registered or is_course_full or not can_enroll)
is_shib_course = uses_shib(course)
# get prerequisite courses display names
pre_requisite_courses = get_prerequisite_courses_display(course)
# Overview
overview = CourseOverview.get_from_id(course.id)
sidebar_html_enabled = course_experience_waffle().is_enabled(ENABLE_COURSE_ABOUT_SIDEBAR_HTML)
allow_anonymous = check_public_access(course, [COURSE_VISIBILITY_PUBLIC, COURSE_VISIBILITY_PUBLIC_OUTLINE])
context = {
'course': course,
'course_details': course_details,
'staff_access': staff_access,
'studio_url': studio_url,
'registered': registered,
'course_target': course_target,
'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
'course_price': course_price,
'ecommerce_checkout': ecommerce_checkout,
'ecommerce_checkout_link': ecommerce_checkout_link,
'ecommerce_bulk_checkout_link': ecommerce_bulk_checkout_link,
'single_paid_mode': single_paid_mode,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full,
'can_enroll': can_enroll,
'invitation_only': invitation_only,
'active_reg_button': active_reg_button,
'is_shib_course': is_shib_course,
# We do not want to display the internal courseware header, which is used when the course is found in the
# context. This value is therefore explicitly set to render the appropriate header.
'disable_courseware_header': True,
'pre_requisite_courses': pre_requisite_courses,
'course_image_urls': overview.image_urls,
'sidebar_html_enabled': sidebar_html_enabled,
'allow_anonymous': allow_anonymous,
}
return render_to_response('courseware/course_about.html', context)
@ensure_csrf_cookie
@cache_if_anonymous()
def program_marketing(request, program_uuid):
"""
Display the program marketing page.
"""
program_data = get_programs(uuid=program_uuid)
if not program_data:
raise Http404
program = ProgramMarketingDataExtender(program_data, request.user).extend()
program['type_slug'] = slugify(program['type'])
skus = program.get('skus')
ecommerce_service = EcommerceService()
context = {'program': program}
if program.get('is_learner_eligible_for_one_click_purchase') and skus:
context['buy_button_href'] = ecommerce_service.get_checkout_page_url(*skus, program_uuid=program_uuid)
context['uses_bootstrap'] = True
return render_to_response('courseware/program_marketing.html', context)
@login_required
@ensure_csrf_cookie
@ensure_valid_course_key
def dates(request, course_id):
"""
Display the course's dates.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
from lms.urls import COURSE_DATES_NAME, RESET_COURSE_DEADLINES_NAME
course_key = CourseKey.from_string(course_id)
if course_home_mfe_dates_tab_is_active(course_key) and not request.user.is_staff:
microfrontend_url = get_learning_mfe_home_url(course_key=course_key, view_name=COURSE_DATES_NAME)
raise Redirect(microfrontend_url)
# Enable NR tracing for this view based on course
monitoring_utils.set_custom_attribute('course_id', text_type(course_key))
monitoring_utils.set_custom_attribute('user_id', request.user.id)
monitoring_utils.set_custom_attribute('is_staff', request.user.is_staff)
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=False)
masquerade = None
can_masquerade = request.user.has_perm(MASQUERADE_AS_STUDENT, course)
if can_masquerade:
masquerade, masquerade_user = setup_masquerade(
request,
course.id,
can_masquerade,
reset_masquerade_data=True,
)
request.user = masquerade_user
user_is_enrolled = CourseEnrollment.is_enrolled(request.user, course_key)
user_is_staff = bool(has_access(request.user, 'staff', course_key))
# Render the full content to enrolled users, as well as to course and global staff.
# Unenrolled users who are not course or global staff are redirected to the Outline Tab.
if not user_is_enrolled and not user_is_staff:
raise CourseAccessRedirect(reverse('openedx.course_experience.course_home', args=[course_id]))
course_date_blocks = get_course_date_blocks(course, request.user, request,
include_access=True, include_past_dates=True)
learner_is_full_access = not ContentTypeGatingConfig.enabled_for_enrollment(request.user, course_key)
# User locale settings
user_timezone_locale = user_timezone_locale_prefs(request)
user_timezone = user_timezone_locale['user_timezone']
user_language = user_timezone_locale['user_language']
missed_deadlines, missed_gated_content = dates_banner_should_display(course_key, request.user)
context = {
'course': course,
'course_date_blocks': course_date_blocks,
'verified_upgrade_link': verified_upgrade_deadline_link(request.user, course=course),
'learner_is_full_access': learner_is_full_access,
'user_timezone': user_timezone,
'user_language': user_language,
'supports_preview_menu': True,
'can_masquerade': can_masquerade,
'masquerade': masquerade,
'on_dates_tab': True,
'content_type_gating_enabled': ContentTypeGatingConfig.enabled_for_enrollment(
user=request.user,
course_key=course_key,
),
'missed_deadlines': missed_deadlines,
'missed_gated_content': missed_gated_content,
'reset_deadlines_url': reverse(RESET_COURSE_DEADLINES_NAME),
'has_ended': course.has_ended(),
}
return render_to_response('courseware/dates.html', context)
@transaction.non_atomic_requests
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@data_sharing_consent_required
def progress(request, course_id, student_id=None):
""" Display the progress page. """
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
if student_id is not None:
try:
student_id = int(student_id)
# Check for ValueError if 'student_id' cannot be converted to integer.
except ValueError:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
can_masquerade = request.user.has_perm(MASQUERADE_AS_STUDENT, course)
masquerade = None
if student_id is None or student_id == request.user.id:
# This will be a no-op for non-staff users, returning request.user
masquerade, student = setup_masquerade(request, course_key, can_masquerade, reset_masquerade_data=True)
else:
try:
coach_access = has_ccx_coach_role(request.user, course_key)
except CCXLocatorValidationException:
coach_access = False
has_access_on_students_profiles = staff_access or coach_access
# Requesting access to a different student's profile
if not has_access_on_students_profiles:
raise Http404
try:
student = User.objects.get(id=student_id)
except User.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
prefetch_related_objects([student], 'groups')
if request.user.id != student.id:
# refetch the course as the assumed student
course = get_course_with_access(student, 'load', course_key, check_if_enrolled=True)
course_grade = CourseGradeFactory().read(student, course)
courseware_summary = list(course_grade.chapter_grades.values())
studio_url = get_studio_url(course, 'settings/grading')
# checking certificate generation configuration
enrollment_mode, _ = CourseEnrollment.enrollment_mode_for_user(student, course_key)
course_expiration_fragment = generate_course_expired_fragment(student, course)
context = {
'course': course,
'courseware_summary': courseware_summary,
'studio_url': studio_url,
'grade_summary': course_grade.summary,
'can_masquerade': can_masquerade,
'staff_access': staff_access,
'masquerade': masquerade,
'supports_preview_menu': True,
'student': student,
'credit_course_requirements': credit_course_requirements(course_key, student),
'course_expiration_fragment': course_expiration_fragment,
'certificate_data': get_cert_data(student, course, enrollment_mode, course_grade)
}
context.update(
get_experiment_user_metadata_context(
course,
student,
)
)
with outer_atomic():
response = render_to_response('courseware/progress.html', context)
return response
def _downloadable_certificate_message(course, cert_downloadable_status): # lint-amnesty, pylint: disable=missing-function-docstring
if certs_api.has_html_certificates_enabled(course):
if certs_api.get_active_web_certificate(course) is not None:
return _downloadable_cert_data(
download_url=None,
cert_web_view_url=certs_api.get_certificate_url(
course_id=course.id, uuid=cert_downloadable_status['uuid']
)
)
elif not cert_downloadable_status['is_pdf_certificate']:
return GENERATING_CERT_DATA
return _downloadable_cert_data(download_url=cert_downloadable_status['download_url'])
def _missing_required_verification(student, enrollment_mode):
return (
enrollment_mode in CourseMode.VERIFIED_MODES and not IDVerificationService.user_is_verified(student)
)
def _certificate_message(student, course, enrollment_mode): # lint-amnesty, pylint: disable=missing-function-docstring
if certs_api.is_certificate_invalid(student, course.id):
return INVALID_CERT_DATA
cert_downloadable_status = certs_api.certificate_downloadable_status(student, course.id)
if cert_downloadable_status.get('earned_but_not_available'):
return EARNED_BUT_NOT_AVAILABLE_CERT_DATA
if cert_downloadable_status['is_generating']:
return GENERATING_CERT_DATA
if cert_downloadable_status['is_unverified']:
return UNVERIFIED_CERT_DATA
if cert_downloadable_status['is_downloadable']:
return _downloadable_certificate_message(course, cert_downloadable_status)
if _missing_required_verification(student, enrollment_mode):
return UNVERIFIED_CERT_DATA
return REQUESTING_CERT_DATA
def get_cert_data(student, course, enrollment_mode, course_grade=None):
"""Returns students course certificate related data.
Arguments:
student (User): Student for whom certificate to retrieve.
course (Course): Course object for which certificate data to retrieve.
enrollment_mode (String): Course mode in which student is enrolled.
course_grade (CourseGrade): Student's course grade record.
Returns:
returns dict if course certificate is available else None.
"""
cert_data = _certificate_message(student, course, enrollment_mode)
if not CourseMode.is_eligible_for_certificate(enrollment_mode, status=cert_data.cert_status):
return INELIGIBLE_PASSING_CERT_DATA.get(enrollment_mode)
if cert_data.cert_status == EARNED_BUT_NOT_AVAILABLE_CERT_STATUS:
return cert_data
certificates_enabled_for_course = certs_api.cert_generation_enabled(course.id)
if course_grade is None:
course_grade = CourseGradeFactory().read(student, course)
if not auto_certs_api.can_show_certificate_message(course, student, course_grade, certificates_enabled_for_course):
return
if not certs_api.get_active_web_certificate(course) and not auto_certs_api.is_valid_pdf_certificate(cert_data):
return
return cert_data
def credit_course_requirements(course_key, student):
"""Return information about which credit requirements a user has satisfied.
Arguments:
course_key (CourseKey): Identifier for the course.
student (User): Currently logged in user.
Returns: dict if the credit eligibility enabled and it is a credit course
and the user is enrolled in either verified or credit mode, and None otherwise.
"""
# If credit eligibility is not enabled or this is not a credit course,
# short-circuit and return `None`. This indicates that credit requirements
# should NOT be displayed on the progress page.
if not (settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY", False) and is_credit_course(course_key)):
return None
# This indicates that credit requirements should NOT be displayed on the progress page.
enrollment = CourseEnrollment.get_enrollment(student, course_key)
if enrollment and enrollment.mode not in REQUIREMENTS_DISPLAY_MODES:
return None
# Credit requirement statuses for which user does not remain eligible to get credit.
non_eligible_statuses = ['failed', 'declined']
# Retrieve the status of the user for each eligibility requirement in the course.
# For each requirement, the user's status is either "satisfied", "failed", or None.
# In this context, `None` means that we don't know the user's status, either because
# the user hasn't done something (for example, submitting photos for verification)
# or we're waiting on more information (for example, a response from the photo
# verification service).
requirement_statuses = get_credit_requirement_status(course_key, student.username)
# If the user has been marked as "eligible", then they are *always* eligible
# unless someone manually intervenes. This could lead to some strange behavior
# if the requirements change post-launch. For example, if the user was marked as eligible
# for credit, then a new requirement was added, the user will see that they're eligible
# AND that one of the requirements is still pending.
# We're assuming here that (a) we can mitigate this by properly training course teams,
# and (b) it's a better user experience to allow students who were at one time
# marked as eligible to continue to be eligible.
# If we need to, we can always manually move students back to ineligible by
# deleting CreditEligibility records in the database.
if is_user_eligible_for_credit(student.username, course_key):
eligibility_status = "eligible"
# If the user has *failed* any requirements (for example, if a photo verification is denied),
# then the user is NOT eligible for credit.
elif any(requirement['status'] in non_eligible_statuses for requirement in requirement_statuses):
eligibility_status = "not_eligible"
# Otherwise, the user may be eligible for credit, but the user has not
# yet completed all the requirements.
else:
eligibility_status = "partial_eligible"
return {
'eligibility_status': eligibility_status,
'requirements': requirement_statuses,
}
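# A sketch of what credit_course_requirements() returns for an eligible
# learner. The requirement entries come from get_credit_requirement_status();
# the exact fields shown here are illustrative:
#
#   {
#       'eligibility_status': 'eligible',
#       'requirements': [
#           {'namespace': 'grade', 'name': 'grade', 'status': 'satisfied'},
#           {'namespace': 'proctored_exam', 'name': 'exam1', 'status': None},
#       ],
#   }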
def _course_home_redirect_enabled():
"""
Return True value if user needs to be redirected to course home based on value of
`ENABLE_MKTG_SITE` and `ENABLE_COURSE_HOME_REDIRECT feature` flags
Returns: boolean True or False
"""
if configuration_helpers.get_value(
'ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)
) and configuration_helpers.get_value(
'ENABLE_COURSE_HOME_REDIRECT', settings.FEATURES.get('ENABLE_COURSE_HOME_REDIRECT', True)
):
return True
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course_key = CourseKey.from_string(course_id)
try:
usage_key = UsageKey.from_string(location).map_into_course(course_key)
except (InvalidKeyError, AssertionError):
return HttpResponse(escape(_(u'Invalid location.')))
course = get_course_overview_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
user_state_client = DjangoXBlockUserStateClient()
try:
history_entries = list(user_state_client.get_history(student_username, usage_key))
except DjangoXBlockUserStateClient.DoesNotExist:
return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
username=student_username,
location=location
)))
# This is ugly, but until we have a proper submissions API that we can use to provide
# the scores instead, it will have to do.
csm = StudentModule.objects.filter(
module_state_key=usage_key,
student__username=student_username,
course_id=course_key)
scores = BaseStudentModuleHistory.get_history(csm)
if len(scores) != len(history_entries):
log.warning(
u"Mismatch when fetching scores for student "
u"history for course %s, user %s, xblock %s. "
u"%d scores were found, and %d history entries were found. "
u"Matching scores to history entries by date for display.",
course_id,
student_username,
location,
len(scores),
len(history_entries),
)
scores_by_date = {
score.created: score
for score in scores
}
scores = [
scores_by_date[history.updated]
for history in history_entries
]
context = {
'history_entries': history_entries,
'scores': scores,
'username': student_username,
'location': location,
'course_id': text_type(course_key)
}
return render_to_response('courseware/submission_history.html', context)
def get_static_tab_fragment(request, course, tab):
"""
Returns the fragment for the given static tab
"""
loc = course.id.make_usage_key(
tab.type,
tab.url_slug,
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, modulestore().get_item(loc), depth=0
)
tab_module = get_module(
request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path, course=course
)
    logging.debug(u'tab_module = %s', tab_module)
fragment = Fragment()
if tab_module is not None:
try:
fragment = tab_module.render(STUDENT_VIEW, {})
except Exception: # pylint: disable=broad-except
fragment.content = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course=%s, tab=%s", course, tab['url_slug']
)
return fragment
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
"""
    View that, given a course_id, returns a JSON object enumerating all of the LTI endpoints for that course.
The LTI 2.0 result service spec at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
function implements one way of discovering these endpoints, returning a JSON array when accessed.
Arguments:
request (django request object): the HTTP request object that triggered this view function
course_id (unicode): id associated with the course
Returns:
(django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
"""
course_key = CourseKey.from_string(course_id)
try:
course = get_course(course_key, depth=2)
except ValueError:
return HttpResponse(status=404)
anonymous_user = AnonymousUser()
anonymous_user.known = False # make these "noauth" requests like module_render.handle_xblock_callback_noauth
lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
lti_descriptors.extend(modulestore().get_items(course.id, qualifiers={'category': 'lti_consumer'}))
lti_noauth_modules = [
get_module_for_descriptor(
anonymous_user,
request,
descriptor,
FieldDataCache.cache_for_descriptor_descendents(
course_key,
anonymous_user,
descriptor
),
course_key,
course=course
)
for descriptor in lti_descriptors
]
endpoints = [
{
'display_name': module.display_name,
'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
service_name='grade_handler'),
}
for module in lti_noauth_modules
]
return HttpResponse(json.dumps(endpoints), content_type='application/json') # lint-amnesty, pylint: disable=http-response-with-content-type-json, http-response-with-json-dumps
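# Illustrative (hypothetical) response body for a course containing one LTI block:
#     [{
#         "display_name": "Quiz 1",
#         "lti_2_0_result_service_json_endpoint":
#             "https://lms.example.com/courses/.../lti_2_0_result_rest_handler/user/{anon_user_id}",
#         "lti_1_1_result_service_xml_endpoint":
#             "https://lms.example.com/courses/.../grade_handler"
#     }]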
@login_required
def course_survey(request, course_id):
"""
URL endpoint to present a survey that is associated with a course_id
Note that the actual implementation of course survey is handled in the
views.py file in the Survey Djangoapp
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key, check_survey_complete=False)
redirect_url = reverse(course_home_url_name(course.id), args=[course_id])
# if there is no Survey associated with this course,
# then redirect to the course instead
if not course.course_survey_name:
return redirect(redirect_url)
return survey_views.view_student_survey(
request.user,
course.course_survey_name,
course=course,
redirect_url=redirect_url,
is_required=course.course_survey_required,
)
def is_course_passed(student, course, course_grade=None):
"""
    Check the user's passing status for a course. Return True if the user has passed.
Arguments:
student : user object
course : course object
course_grade (CourseGrade) : contains student grade details.
Returns:
returns bool value
"""
if course_grade is None:
course_grade = CourseGradeFactory().read(student, course)
return course_grade.passed
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@require_POST
def generate_user_cert(request, course_id):
"""Start generating a new certificate for the user.
Certificate generation is allowed if:
* The user has passed the course, and
* The user does not already have a pending/completed certificate.
Note that if an error occurs during certificate generation
(for example, if the queue is down), then we simply mark the
certificate generation task status as "error" and re-run
the task with a management command. To students, the certificate
will appear to be "generating" until it is re-run.
Args:
request (HttpRequest): The POST request to this view.
course_id (unicode): The identifier for the course.
Returns:
HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
"""
if not request.user.is_authenticated:
log.info(u"Anon user trying to generate certificate for %s", course_id)
return HttpResponseBadRequest(
_(u'You must be signed in to {platform_name} to create a certificate.').format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
)
student = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key, depth=2)
if not course:
return HttpResponseBadRequest(_("Course is not valid"))
if certs_api.is_using_certificate_allowlist_and_is_on_allowlist(student, course_key):
log.info(f'{course_key} is using allowlist certificates, and the user {student.id} is on its allowlist. '
f'Attempt will be made to generate an allowlist certificate.')
certs_api.generate_allowlist_certificate_task(student, course_key)
return HttpResponse()
if not is_course_passed(student, course):
log.info(u"User %s has not passed the course: %s", student.username, course_id)
return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
certificate_status = certs_api.certificate_downloadable_status(student, course.id)
log.info(
u"User %s has requested for certificate in %s, current status: is_downloadable: %s, is_generating: %s",
student.username,
course_id,
certificate_status["is_downloadable"],
certificate_status["is_generating"],
)
if certificate_status["is_downloadable"]:
return HttpResponseBadRequest(_("Certificate has already been created."))
elif certificate_status["is_generating"]:
return HttpResponseBadRequest(_("Certificate is being created."))
else:
# If the certificate is not already in-process or completed,
# then create a new certificate generation task.
# If the certificate cannot be added to the queue, this will
# mark the certificate with "error" status, so it can be re-run
# with a management command. From the user's perspective,
# it will appear that the certificate task was submitted successfully.
certs_api.generate_user_certificates(student, course.id, course=course, generation_mode='self')
_track_successful_certificate_generation(student.id, course.id)
return HttpResponse()
def _track_successful_certificate_generation(user_id, course_id):
"""
Track a successful certificate generation event.
Arguments:
user_id (str): The ID of the user generating the certificate.
course_id (CourseKey): Identifier for the course.
Returns:
None
"""
event_name = 'edx.bi.user.certificate.generate'
segment.track(user_id, event_name, {
'category': 'certificates',
'label': text_type(course_id)
})
def enclosing_sequence_for_gating_checks(block):
"""
Return the first ancestor of this block that is a SequenceDescriptor.
Returns None if there is no such ancestor. Returns None if you call it on a
SequenceDescriptor directly.
We explicitly test against the three known tag types that map to sequences
(even though two of them have been long since deprecated and are never
    used). We _don't_ test against SequenceDescriptor directly because:
1. A direct comparison on the type fails because we magically mix it into a
SequenceDescriptorWithMixins object.
2. An isinstance check doesn't give us the right behavior because Courses
and Sections both subclass SequenceDescriptor. >_<
Also important to note that some content isn't contained in Sequences at
all. LabXchange uses learning pathways, but even content inside courses like
    `static_tab`, `book`, and `about` lives outside the sequence hierarchy.
"""
seq_tags = ['sequential', 'problemset', 'videosequence']
# If it's being called on a Sequence itself, then don't bother crawling the
# ancestor tree, because all the sequence metadata we need for gating checks
# will happen automatically when rendering the render_xblock view anyway,
# and we don't want weird, weird edge cases where you have nested Sequences
# (which would probably "work" in terms of OLX import).
if block.location.block_type in seq_tags:
return None
ancestor = block
while ancestor and ancestor.location.block_type not in seq_tags:
ancestor = ancestor.get_parent() # Note: CourseDescriptor's parent is None
return ancestor
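# Illustrative (hypothetical) behavior: for a problem nested in a vertical
# inside a sequential, this returns the sequential; called on the sequential
# itself, or on content outside any sequence (e.g. a static_tab), it returns None.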
@require_http_methods(["GET", "POST"])
@ensure_valid_usage_key
@xframe_options_exempt
@transaction.non_atomic_requests
@ensure_csrf_cookie
def render_xblock(request, usage_key_string, check_if_enrolled=True):
"""
Returns an HttpResponse with HTML content for the xBlock with the given usage_key.
The returned HTML is a chromeless rendering of the xBlock (excluding content of the containing courseware).
"""
from lms.urls import RESET_COURSE_DEADLINES_NAME
from openedx.features.course_experience.urls import COURSE_HOME_VIEW_NAME
usage_key = UsageKey.from_string(usage_key_string)
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
course_key = usage_key.course_key
# Gathering metrics to make performance measurements easier.
set_custom_attributes_for_course_key(course_key)
set_custom_attribute('usage_key', usage_key_string)
set_custom_attribute('block_type', usage_key.block_type)
requested_view = request.GET.get('view', 'student_view')
    if requested_view not in ('student_view', 'public_view'):
return HttpResponseBadRequest(
u"Rendering of the xblock view '{}' is not supported.".format(bleach.clean(requested_view, strip=True))
)
staff_access = has_access(request.user, 'staff', course_key)
_course_masquerade, request.user = setup_masquerade(request, course_key, staff_access)
with modulestore().bulk_operations(course_key):
# verify the user has access to the course, including enrollment check
try:
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=check_if_enrolled)
except CourseAccessRedirect:
raise Http404("Course not found.") # lint-amnesty, pylint: disable=raise-missing-from
# get the block, which verifies whether the user has access to the block.
recheck_access = request.GET.get('recheck_access') == '1'
block, _ = get_module_by_usage_id(
request, str(course_key), str(usage_key), disable_staff_debug_info=True, course=course,
will_recheck_access=recheck_access
)
student_view_context = request.GET.dict()
student_view_context['show_bookmark_button'] = request.GET.get('show_bookmark_button', '0') == '1'
student_view_context['show_title'] = request.GET.get('show_title', '1') == '1'
is_learning_mfe = is_request_from_learning_mfe(request)
# Right now, we only care about this in regards to the Learning MFE because it results
# in a bad UX if we display blocks with access errors (repeated upgrade messaging).
# If other use cases appear, consider removing the is_learning_mfe check or switching this
# to be its own query parameter that can toggle the behavior.
student_view_context['hide_access_error_blocks'] = is_learning_mfe and recheck_access
enable_completion_on_view_service = False
completion_service = block.runtime.service(block, 'completion')
if completion_service and completion_service.completion_tracking_enabled():
if completion_service.blocks_to_mark_complete_on_view({block}):
enable_completion_on_view_service = True
student_view_context['wrap_xblock_data'] = {
'mark-completed-on-view-after-delay': completion_service.get_complete_on_view_delay_ms()
}
missed_deadlines, missed_gated_content = dates_banner_should_display(course_key, request.user)
# Some content gating happens only at the Sequence level (e.g. "has this
# timed exam started?").
ancestor_seq = enclosing_sequence_for_gating_checks(block)
if ancestor_seq:
seq_usage_key = ancestor_seq.location
# We have a Descriptor, but I had trouble getting a SequenceModule
# from it (even using ._xmodule to force the conversion) because the
# runtime wasn't properly initialized. This view uses multiple
# runtimes (including Blockstore), so I'm pulling it from scratch
# based on the usage_key. We'll have to watch the performance impact
# of this. :(
seq_module_descriptor, _ = get_module_by_usage_id(
request, str(course_key), str(seq_usage_key), disable_staff_debug_info=True, course=course
)
# I'm not at all clear why get_module_by_usage_id returns the
# descriptor or why I need to manually force it to load the module
# like this manually instead of the proxying working, but trial and
# error has led me here. Hopefully all this weirdness goes away when
# SequenceModule gets converted to an XBlock in:
# https://github.com/edx/edx-platform/pull/25965
seq_module = seq_module_descriptor._xmodule # pylint: disable=protected-access
# If the SequenceModule feels that gating is necessary, redirect
# there so we can have some kind of error message at any rate.
if seq_module.descendants_are_gated():
return redirect(
reverse(
'render_xblock',
kwargs={'usage_key_string': str(seq_module.location)}
)
)
fragment = block.render(requested_view, context=student_view_context)
optimization_flags = get_optimization_flags_for_content(block, fragment)
context = {
'fragment': fragment,
'course': course,
'disable_accordion': True,
'allow_iframing': True,
'disable_header': True,
'disable_footer': True,
'disable_window_wrap': True,
'enable_completion_on_view_service': enable_completion_on_view_service,
'edx_notes_enabled': is_feature_enabled(course, request.user),
'staff_access': staff_access,
'xqa_server': settings.FEATURES.get('XQA_SERVER', 'http://your_xqa_server.com'),
'missed_deadlines': missed_deadlines,
'missed_gated_content': missed_gated_content,
'has_ended': course.has_ended(),
'web_app_course_url': reverse(COURSE_HOME_VIEW_NAME, args=[course.id]),
'on_courseware_page': True,
'verified_upgrade_link': verified_upgrade_deadline_link(request.user, course=course),
'is_learning_mfe': is_learning_mfe,
'is_mobile_app': is_request_from_mobile_app(request),
'reset_deadlines_url': reverse(RESET_COURSE_DEADLINES_NAME),
**optimization_flags,
}
return render_to_response('courseware/courseware-chromeless.html', context)
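# Illustrative (hypothetical) request against this view:
#     GET /xblock/<usage_key>?view=public_view&show_title=0&recheck_access=1
# renders the block chromeless with its title hidden, re-checking access on the
# block itself so the Learning MFE can hide blocks with access errors.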
def get_optimization_flags_for_content(block, fragment):
"""
Return a dict with a set of display options appropriate for the block.
This is going to start in a very limited way.
"""
safe_defaults = {
'enable_mathjax': True
}
# Only run our optimizations on the leaf HTML and ProblemBlock nodes. The
# mobile apps access these directly, and we don't have to worry about
# XBlocks that dynamically load content, like inline discussions.
usage_key = block.location
# For now, confine ourselves to optimizing just the HTMLBlock
if usage_key.block_type != 'html':
return safe_defaults
if not COURSEWARE_OPTIMIZED_RENDER_XBLOCK.is_enabled(usage_key.course_key):
return safe_defaults
inspector = XBlockContentInspector(block, fragment)
flags = dict(safe_defaults)
flags['enable_mathjax'] = inspector.has_mathjax_content()
return flags
class XBlockContentInspector:
"""
Class to inspect rendered XBlock content to determine dependencies.
A lot of content has been written with the assumption that certain
JavaScript and assets are available. This has caused us to continue to
include these assets in the render_xblock view, despite the fact that they
are not used by the vast majority of content.
In order to try to provide faster load times for most users on most content,
this class has the job of detecting certain patterns in XBlock content that
would imply these dependencies, so we know when to include them or not.
"""
def __init__(self, block, fragment):
self.block = block
self.fragment = fragment
def has_mathjax_content(self):
"""
Returns whether we detect any MathJax in the fragment.
Note that this only works for things that are rendered up front. If an
XBlock is capable of modifying the DOM afterwards to inject math content
into the page, this will not catch it.
"""
# The following pairs are used to mark Mathjax syntax in XBlocks. There
# are other options for the wiki, but we don't worry about those here.
MATHJAX_TAG_PAIRS = [
(r"\(", r"\)"),
(r"\[", r"\]"),
("[mathjaxinline]", "[/mathjaxinline]"),
("[mathjax]", "[/mathjax]"),
]
content = self.fragment.body_html()
for (start_tag, end_tag) in MATHJAX_TAG_PAIRS:
if start_tag in content and end_tag in content:
return True
return False
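# Illustrative (hypothetical) inputs to has_mathjax_content(): a fragment body
# containing r"\( x^2 \)" or "[mathjax]E = mc^2[/mathjax]" returns True; a body
# with only a start marker, or with neither marker, returns False.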
# Translators: "percent_sign" is the symbol "%". "platform_name" is a
# string identifying the name of this installation, such as "edX".
FINANCIAL_ASSISTANCE_HEADER = _(
u'{platform_name} now offers financial assistance for learners who want to earn Verified Certificates but'
u' who may not be able to pay the Verified Certificate fee. Eligible learners may receive up to 90{percent_sign} off' # lint-amnesty, pylint: disable=line-too-long
' the Verified Certificate fee for a course.\nTo apply for financial assistance, enroll in the'
' audit track for a course that offers Verified Certificates, and then complete this application.'
' Note that you must complete a separate application for each course you take.\n We plan to use this'
' information to evaluate your application for financial assistance and to further develop our'
' financial assistance program.'
)
def _get_fa_header(header):
    return header.format(
        percent_sign="%",
        platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
    ).split('\n')
FA_INCOME_LABEL = ugettext_noop('Annual Household Income')
FA_REASON_FOR_APPLYING_LABEL = ugettext_noop('Tell us about your current financial situation. Why do you need assistance?') # lint-amnesty, pylint: disable=line-too-long
FA_GOALS_LABEL = ugettext_noop('Tell us about your learning or professional goals. How will a Verified Certificate in this course help you achieve these goals?') # lint-amnesty, pylint: disable=line-too-long
FA_EFFORT_LABEL = ugettext_noop('Tell us about your plans for this course. What steps will you take to help you complete the course work and receive a certificate?') # lint-amnesty, pylint: disable=line-too-long
FA_SHORT_ANSWER_INSTRUCTIONS = _('Use between 1250 and 2500 characters or so in your response.')
@login_required
def financial_assistance(_request):
"""Render the initial financial assistance page."""
return render_to_response('financial-assistance/financial-assistance.html', {
'header_text': _get_fa_header(FINANCIAL_ASSISTANCE_HEADER)
})
@login_required
@require_POST
def financial_assistance_request(request):
"""Submit a request for financial assistance to Zendesk."""
try:
data = json.loads(request.body.decode('utf8'))
# Simple sanity check that the session belongs to the user
# submitting an FA request
username = data['username']
if request.user.username != username:
return HttpResponseForbidden()
course_id = data['course']
course = modulestore().get_course(CourseKey.from_string(course_id))
legal_name = data['name']
email = data['email']
country = data['country']
income = data['income']
reason_for_applying = data['reason_for_applying']
goals = data['goals']
effort = data['effort']
marketing_permission = data['mktg-permission']
ip_address = get_client_ip(request)[0]
except ValueError:
# Thrown if JSON parsing fails
return HttpResponseBadRequest(u'Could not parse request JSON.')
except InvalidKeyError:
# Thrown if course key parsing fails
return HttpResponseBadRequest(u'Could not parse request course key.')
except KeyError as err:
# Thrown if fields are missing
return HttpResponseBadRequest(u'The field {} is required.'.format(text_type(err)))
zendesk_submitted = create_zendesk_ticket(
legal_name,
email,
u'Financial assistance request for learner {username} in course {course_name}'.format(
username=username,
course_name=course.display_name
),
u'Financial Assistance Request',
tags={'course_id': course_id},
# Send the application as additional info on the ticket so
# that it is not shown when support replies. This uses
# OrderedDict so that information is presented in the right
# order.
additional_info=OrderedDict((
('Username', username),
('Full Name', legal_name),
('Course ID', course_id),
(FA_INCOME_LABEL, income),
('Country', country),
('Allowed for marketing purposes', 'Yes' if marketing_permission else 'No'),
(FA_REASON_FOR_APPLYING_LABEL, '\n' + reason_for_applying + '\n\n'),
(FA_GOALS_LABEL, '\n' + goals + '\n\n'),
(FA_EFFORT_LABEL, '\n' + effort + '\n\n'),
('Client IP', ip_address),
)),
group='Financial Assistance',
)
    if zendesk_submitted not in (200, 201):
# The call to Zendesk failed. The frontend will display a
# message to the user.
return HttpResponse(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@login_required
def financial_assistance_form(request):
"""Render the financial assistance application form page."""
user = request.user
enrolled_courses = get_financial_aid_courses(user)
incomes = ['Less than $5,000', '$5,000 - $10,000', '$10,000 - $15,000', '$15,000 - $20,000', '$20,000 - $25,000',
'$25,000 - $40,000', '$40,000 - $55,000', '$55,000 - $70,000', '$70,000 - $85,000',
'$85,000 - $100,000', 'More than $100,000']
annual_incomes = [
{'name': _(income), 'value': income} for income in incomes # lint-amnesty, pylint: disable=translation-of-non-string
]
return render_to_response('financial-assistance/apply.html', {
'header_text': _get_fa_header(FINANCIAL_ASSISTANCE_HEADER),
'student_faq_url': marketing_link('FAQ'),
'dashboard_url': reverse('dashboard'),
'account_settings_url': reverse('account_settings'),
'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
'user_details': {
'email': user.email,
'username': user.username,
'name': user.profile.name,
'country': text_type(user.profile.country.name),
},
'submit_url': reverse('submit_financial_assistance_request'),
'fields': [
{
'name': 'course',
'type': 'select',
'label': _('Course'),
'placeholder': '',
'defaultValue': '',
'required': True,
'options': enrolled_courses,
'instructions': ugettext(
'Select the course for which you want to earn a verified certificate. If'
' the course does not appear in the list, make sure that you have enrolled'
' in the audit track for the course.'
)
},
{
'name': 'income',
'type': 'select',
'label': _(FA_INCOME_LABEL), # lint-amnesty, pylint: disable=translation-of-non-string
'placeholder': '',
'defaultValue': '',
'required': True,
'options': annual_incomes,
'instructions': _('Specify your annual household income in US Dollars.')
},
{
'name': 'reason_for_applying',
'type': 'textarea',
'label': _(FA_REASON_FOR_APPLYING_LABEL), # lint-amnesty, pylint: disable=translation-of-non-string
'placeholder': '',
'defaultValue': '',
'required': True,
'restrictions': {
'min_length': settings.FINANCIAL_ASSISTANCE_MIN_LENGTH,
'max_length': settings.FINANCIAL_ASSISTANCE_MAX_LENGTH
},
'instructions': FA_SHORT_ANSWER_INSTRUCTIONS
},
{
'name': 'goals',
'type': 'textarea',
'label': _(FA_GOALS_LABEL), # lint-amnesty, pylint: disable=translation-of-non-string
'placeholder': '',
'defaultValue': '',
'required': True,
'restrictions': {
'min_length': settings.FINANCIAL_ASSISTANCE_MIN_LENGTH,
'max_length': settings.FINANCIAL_ASSISTANCE_MAX_LENGTH
},
'instructions': FA_SHORT_ANSWER_INSTRUCTIONS
},
{
'name': 'effort',
'type': 'textarea',
'label': _(FA_EFFORT_LABEL), # lint-amnesty, pylint: disable=translation-of-non-string
'placeholder': '',
'defaultValue': '',
'required': True,
'restrictions': {
'min_length': settings.FINANCIAL_ASSISTANCE_MIN_LENGTH,
'max_length': settings.FINANCIAL_ASSISTANCE_MAX_LENGTH
},
'instructions': FA_SHORT_ANSWER_INSTRUCTIONS
},
{
'placeholder': '',
'name': 'mktg-permission',
'label': _(
'I allow edX to use the information provided in this application '
'(except for financial information) for edX marketing purposes.'
),
'defaultValue': '',
'type': 'checkbox',
'required': False,
'instructions': '',
'restrictions': {}
}
],
})
def get_financial_aid_courses(user):
""" Retrieve the courses eligible for financial assistance. """
financial_aid_courses = []
for enrollment in CourseEnrollment.enrollments_for_user(user).order_by('-created'):
if enrollment.mode != CourseMode.VERIFIED and \
enrollment.course_overview and \
enrollment.course_overview.eligible_for_financial_aid and \
CourseMode.objects.filter(
Q(_expiration_datetime__isnull=True) | Q(_expiration_datetime__gt=datetime.now(UTC)),
course_id=enrollment.course_id,
mode_slug=CourseMode.VERIFIED).exists():
financial_aid_courses.append(
{
'name': enrollment.course_overview.display_name,
'value': text_type(enrollment.course_id)
}
)
return financial_aid_courses
| agpl-3.0 | 646,148,226,935,186,800 | 41.980402 | 212 | 0.656131 | false |
CasataliaLabs/biscuit_drishtiman | v4l_capture_example.py | 1 | 1279 | import Image
import select
import v4l2capture
import numpy
import pylab
import time
# Open the video device.
# NOTE: v4l2capture normally expects a V4L2 device node such as /dev/video0;
# passing an HTTP MJPEG URL (as below) will only work if something on the
# system exposes that stream as a V4L2 device.
#~ video = v4l2capture.Video_device("/dev/video0")
video = v4l2capture.Video_device("http://admin:@192.168.1.105/snapshot.cgi?.mjpeg")
# Suggest an image size to the device. The device may choose and
# return another size if it doesn't support the suggested one.
size_x, size_y = video.set_format(800, 448)
# Create a buffer to store image data in. This must be done before
# calling 'start' if v4l2capture is compiled with libv4l2. Otherwise
# raises IOError.
video.create_buffers(1)
# Send the buffer to the device. Some devices require this to be done
# before calling 'start'.
video.queue_all_buffers()
# Start the device. This lights the LED if it's a camera that has one.
video.start()
# Wait for the device to fill the buffer.
select.select((video,), (), ())
# The rest is easy :-)
image_data = video.read()
video.close()
#~ image = Image.fromstring("L", (size_x, size_y), image_data)
image = Image.fromstring("RGB", (size_x, size_y), image_data)
imageNumpy = numpy.asarray(image)
pylab.imshow(imageNumpy)
pylab.show()
#~ a = input('test')
#time.sleep(4)
#image.save("image.jpg")
#print "Saved image.jpg (Size: " + str(size_x) + " x " + str(size_y) + ")"
| gpl-3.0 | 5,290,132,691,312,513,000 | 28.744186 | 83 | 0.713839 | false |
vipod/pyzimbra | pyzimbra/soap_auth.py | 2 | 6473 | # -*- coding: utf-8 -*-
"""
################################################################################
# Copyright (c) 2010, Ilgar Mashayev
#
# E-mail: [email protected]
# Website: http://github.com/ilgarm/pyzimbra
################################################################################
# This file is part of pyzimbra.
#
# Pyzimbra is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyzimbra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyzimbra. If not, see <http://www.gnu.org/licenses/>.
################################################################################
Soap related methods and classes.
@author: ilgar
"""
from pyzimbra import zconstant, sconstant, util
from pyzimbra.auth import AuthException, AuthToken, Authenticator
from pyzimbra.soap import SoapException
from time import time
import SOAPpy
import hashlib
import hmac
import logging
class SoapAuthenticator(Authenticator):
"""
Soap authenticator.
"""
# --------------------------------------------------------------- properties
# -------------------------------------------------------------------- bound
def __init__(self):
Authenticator.__init__(self)
self.log = logging.getLogger(__name__)
# ------------------------------------------------------------------ unbound
def authenticate_admin(self, transport, account_name, password):
"""
Authenticates administrator using username and password.
"""
Authenticator.authenticate_admin(self, transport, account_name, password)
auth_token = AuthToken()
auth_token.account_name = account_name
params = {sconstant.E_NAME: account_name,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating admin %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated admin %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def authenticate(self, transport, account_name, password=None):
"""
Authenticates account using soap method.
"""
Authenticator.authenticate(self, transport, account_name, password)
        if password is None:
return self.pre_auth(transport, account_name)
else:
return self.auth(transport, account_name, password)
def auth(self, transport, account_name, password):
"""
Authenticates using username and password.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
if type(res) is tuple:
auth_token.token = res[0].authToken
else:
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
def pre_auth(self, transport, account_name):
"""
Authenticates using username and domain key.
"""
auth_token = AuthToken()
auth_token.account_name = account_name
domain = util.get_domain(account_name)
        if domain is None:
            raise AuthException('Invalid auth token account')
        domain_key = self.domains.get(domain)
        if domain_key is None:
            raise AuthException('Invalid domain key for domain %s' % domain)
self.log.debug('Initialized domain key for account %s'
% account_name)
expires = 0
timestamp = int(time() * 1000)
pak = hmac.new(domain_key, '%s|%s|%s|%s' %
(account_name, sconstant.E_NAME, expires, timestamp),
hashlib.sha1).hexdigest()
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
attrs = {sconstant.A_TIMESTAMP: timestamp, sconstant.A_EXPIRES: expires}
preauth = SOAPpy.Types.stringType(data=pak,
name=sconstant.E_PREAUTH,
attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PREAUTH: preauth}
self.log.debug('Authenticating account %s using domain key'
% account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token
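# Minimal usage sketch (hypothetical transport setup and credentials; see
# pyzimbra.soap for the actual transport API):
#
#     auth = SoapAuthenticator()
#     token = auth.authenticate(transport, '[email protected]', 'secret')
#     print token.token, token.session_id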
| lgpl-3.0 | -5,685,805,696,362,786,000 | 34.371585 | 81 | 0.549359 | false |
beeftornado/sentry | src/sentry/api/endpoints/group_events.py | 1 | 5127 | from __future__ import absolute_import
import six
from datetime import timedelta
from django.utils import timezone
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from functools import partial
from sentry import eventstore
from sentry.api.base import EnvironmentMixin
from sentry.api.bases import GroupEndpoint
from sentry.api.event_search import get_filter, InvalidSearchQuery
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.helpers.environments import get_environments
from sentry.api.helpers.events import get_direct_hit_response
from sentry.api.serializers import EventSerializer, serialize, SimpleEventSerializer
from sentry.api.paginator import GenericOffsetPaginator
from sentry.api.utils import get_date_range_from_params, InvalidParams
from sentry.search.utils import InvalidQuery, parse_query
class NoResults(Exception):
pass
class GroupEventsError(Exception):
pass
class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
def get(self, request, group):
"""
List an Issue's Events
``````````````````````
This endpoint lists an issue's events.
:qparam bool full: if this is set to true then the event payload will
include the full event body, including the stacktrace.
Set to 1 to enable.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
try:
environments = get_environments(request, group.project.organization)
query, tags = self._get_search_query_and_tags(request, group, environments)
except InvalidQuery as exc:
return Response({"detail": six.text_type(exc)}, status=400)
except (NoResults, ResourceDoesNotExist):
return Response([])
try:
start, end = get_date_range_from_params(request.GET, optional=True)
except InvalidParams as e:
raise ParseError(detail=six.text_type(e))
try:
return self._get_events_snuba(request, group, environments, query, tags, start, end)
except GroupEventsError as exc:
raise ParseError(detail=six.text_type(exc))
def _get_events_snuba(self, request, group, environments, query, tags, start, end):
default_end = timezone.now()
default_start = default_end - timedelta(days=90)
params = {
"group_ids": [group.id],
"project_id": [group.project_id],
"organization_id": group.project.organization_id,
"start": start if start else default_start,
"end": end if end else default_end,
}
direct_hit_resp = get_direct_hit_response(request, query, params, "api.group-events")
if direct_hit_resp:
return direct_hit_resp
if environments:
params["environment"] = [env.name for env in environments]
full = request.GET.get("full", False)
try:
snuba_filter = get_filter(request.GET.get("query", None), params)
except InvalidSearchQuery as e:
raise ParseError(detail=six.text_type(e))
snuba_filter.conditions.append(["event.type", "!=", "transaction"])
data_fn = partial(eventstore.get_events, referrer="api.group-events", filter=snuba_filter)
serializer = EventSerializer() if full else SimpleEventSerializer()
return self.paginate(
request=request,
on_results=lambda results: serialize(results, request.user, serializer),
paginator=GenericOffsetPaginator(data_fn=data_fn),
)
def _get_search_query_and_tags(self, request, group, environments=None):
raw_query = request.GET.get("query")
if raw_query:
query_kwargs = parse_query([group.project], raw_query, request.user, environments)
query = query_kwargs.pop("query", None)
tags = query_kwargs.pop("tags", {})
else:
query = None
tags = {}
if environments:
            env_names = {env.name for env in environments}
if "environment" in tags:
# If a single environment was passed as part of the query, then
# we'll just search for that individual environment in this
# query, even if more are selected.
if tags["environment"] not in env_names:
# An event can only be associated with a single
# environment, so if the environments associated with
# the request don't contain the environment provided as a
# tag lookup, the query cannot contain any valid results.
raise NoResults
else:
# XXX: Handle legacy backends here. Just store environment as a
# single tag if we only have one so that we don't break existing
# usage.
tags["environment"] = list(env_names) if len(env_names) > 1 else env_names.pop()
return query, tags
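# Illustrative (hypothetical) outcome: a raw query such as
#     'environment:prod database error'
# parsed for a request whose selected environments include "prod" would yield
# roughly query='database error' and tags={'environment': 'prod'}.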
| bsd-3-clause | 4,531,243,865,721,410,000 | 38.744186 | 98 | 0.634484 | false |
vygr/Python-PCB | pcb.py | 1 | 3869 | #!/opt/local/bin/pypy -tt
# -*- coding: utf-8 -*-
#Copyright (C) 2014 Chris Hinsley All Rights Reserved
import sys, argparse, router
from copy import deepcopy
from ast import literal_eval
from mymath import *
def main():
parser = argparse.ArgumentParser(description = 'Pcb layout optimizer.', formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('infile', nargs = '?', type = argparse.FileType('r'), default = sys.stdin, help = 'filename, default stdin')
parser.add_argument('--t', nargs = 1, type = int, default = [600], help = 'timeout in seconds, default 600')
parser.add_argument('--v', nargs = 1, type = int, default = [0], choices = range(0, 2), help = 'verbosity level 0..1, default 0')
parser.add_argument('--s', nargs = 1, type = int, default = [1], help = 'number of samples, default 1')
parser.add_argument('--r', nargs = 1, type = int, default = [1], choices = range(1, 5), help = 'grid resolution 1..4, default 1')
parser.add_argument('--z', nargs = 1, type = int, default = [0], choices = range(0, 2), help = 'minimize vias 0..1, default 0')
parser.add_argument('--d', nargs = 1, type = int, default = [0], choices = range(0, 6), \
help = 'distance metric 0..5, default 0.\n' \
'0 -> manhattan\n1 -> squared_euclidean\n2 -> euclidean\n3 -> chebyshev\n4 -> reciprocal\n5 -> random')
parser.add_argument('--fr', nargs = 1, type = int, default = [2], choices = range(1, 6), help = 'flood range 1..5, default 2')
parser.add_argument('--xr', nargs = 1, type = int, default = [1], choices = range(0, 6), help = 'even layer x range 0..5, default 1')
parser.add_argument('--yr', nargs = 1, type = int, default = [1], choices = range(0, 6), help = 'odd layer y range 0..5, default 1')
args = parser.parse_args()
flood_range = args.fr[0]
flood_range_x_even_layer = args.xr[0]
flood_range_y_odd_layer = args.yr[0]
path_range = flood_range + 0
path_range_x_even_layer = flood_range_x_even_layer + 0
path_range_y_odd_layer = flood_range_y_odd_layer + 0
routing_flood_vectors = [[(x, y, 0) for x in xrange(-flood_range_x_even_layer, flood_range_x_even_layer + 1) for y in xrange(-flood_range, flood_range + 1) \
if length_2d((x, y)) > 0.1 and length_2d((x, y)) <= flood_range] + [(0, 0, -1), (0, 0, 1)], \
[(x, y, 0) for x in xrange(-flood_range, flood_range + 1) for y in xrange(-flood_range_y_odd_layer, flood_range_y_odd_layer + 1) \
if length_2d((x, y)) > 0.1 and length_2d((x, y)) <= flood_range] + [(0, 0, -1), (0, 0, 1)]]
routing_path_vectors = [[(x, y, 0) for x in xrange(-path_range_x_even_layer, path_range_x_even_layer + 1) for y in xrange(-path_range, path_range + 1) \
if length_2d((x, y)) > 0.1 and length_2d((x, y)) <= path_range] + [(0, 0, -1), (0, 0, 1)], \
[(x, y, 0) for x in xrange(-path_range, path_range + 1) for y in xrange(-path_range_y_odd_layer, path_range_y_odd_layer + 1) \
if length_2d((x, y)) > 0.1 and length_2d((x, y)) <= path_range] + [(0, 0, -1), (0, 0, 1)]]
dfunc = [manhattan_distance, squared_euclidean_distance, euclidean_distance, \
chebyshev_distance, reciprical_distance, random_distance][args.d[0]]
dimensions = literal_eval(args.infile.readline().strip())
pcb = router.Pcb(dimensions, routing_flood_vectors, routing_path_vectors, dfunc, args.r[0], args.v[0], args.z[0])
for line in args.infile:
track = literal_eval(line.strip())
if not track:
break
pcb.add_track(track)
args.infile.close()
pcb.print_pcb()
best_cost = None
best_pcb = None
for i in xrange(args.s[0]):
if not pcb.route(args.t[0]):
pcb.shuffle_netlist()
continue
cost = pcb.cost()
if best_cost == None or cost < best_cost:
best_cost = cost
best_pcb = deepcopy(pcb)
pcb.shuffle_netlist()
if best_pcb != None:
best_pcb.print_netlist()
best_pcb.print_stats()
else:
print []
if __name__ == '__main__':
main()
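# Example invocation (hypothetical input file produced for this router):
#     ./pcb.py --t 300 --s 4 --d 2 netlist.pcb
# The input's first line holds the board dimensions; each subsequent line holds
# one track, with an empty track terminating the list.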
| gpl-2.0 | -8,110,803,568,652,330,000 | 51.283784 | 158 | 0.632722 | false |
CanalTP/kirin | tests/integration/piv_worker_test.py | 1 | 6425 | # coding: utf8
#
# Copyright (c) 2020, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# [matrix] channel #navitia:matrix.org (https://app.element.io/#/room/#navitia:matrix.org)
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from kirin.command.piv_worker import PivWorker, PIV_LOCK_NAME, PIV_WORKER_REDIS_TIMEOUT_LOCK
from kirin.core.model import RealTimeUpdate
from kirin.piv.piv import get_piv_contributor
from tests.integration.conftest import PIV_CONTRIBUTOR_ID, PIV_EXCHANGE_NAME, PIV_QUEUE_NAME
from amqp.exceptions import NotFound
from kombu import Connection, Exchange
import pytest
import logging
import threading
from retrying import retry
from mock import patch
from datetime import datetime
import time
@retry(stop_max_delay=20000, wait_exponential_multiplier=100)
def wait_until(predicate):
assert predicate()
def is_exchange_created(connection, exchange_name, exchange_type="fanout"):
try:
channel = connection.channel()
channel.exchange_declare(exchange_name, exchange_type, nowait=False, passive=True)
except NotFound:
return False
    except Exception:
        raise  # propagate unexpected errors unchanged
return True
def is_queue_created(connection, queue_name):
try:
channel = connection.channel()
channel.queue_declare(queue=queue_name, nowait=False, passive=True)
except NotFound:
return False
    except Exception:
        raise  # propagate unexpected errors unchanged
return True
@pytest.fixture(scope="session", autouse=True)
def broker_connection(rabbitmq_docker_fixture):
return Connection(rabbitmq_docker_fixture.url)
@pytest.fixture(scope="session", autouse=True)
def mq_handler(rabbitmq_docker_fixture, broker_connection):
return rabbitmq_docker_fixture.create_rabbitmq_handler(PIV_EXCHANGE_NAME, "fanout")
def create_exchange(broker_connection, exchange_name):
exchange = Exchange(
exchange_name,
durable=True,
delivery_mode=2,
type=str("fanout"),
auto_delete=False,
no_declare=False,
)
exchange.declare(channel=broker_connection.channel())
# Use scope 'function' so the Exchange is recreated for every test.
# It is useful because some tests are deleting the Exchange.
@pytest.fixture(scope="function", autouse=True)
def init_piv_exchange(broker_connection):
create_exchange(broker_connection, PIV_EXCHANGE_NAME)
assert is_exchange_created(broker_connection, PIV_EXCHANGE_NAME)
def launch_piv_worker(pg_docker_fixture):
from kirin import app
from tests.conftest import init_flask_db
from tests.integration.conftest import PIV_CONTRIBUTOR_ID
with app.app_context():
# re-init the db by overriding the db_url
init_flask_db(pg_docker_fixture)
contributor = get_piv_contributor(PIV_CONTRIBUTOR_ID)
with PivWorker(contributor) as worker:
worker.run()
class PivWorkerTest:
def __init__(self, test_client, broker_url, broker_connection, pg_docker_fixture):
self.test_client = test_client
self.broker_url = broker_url
self.broker_connection = broker_connection
self.pg_docker_fixture = pg_docker_fixture
self.last_lock_update = datetime.now()
def __enter__(self):
# Launch a PivWorker
self.thread = threading.Thread(target=launch_piv_worker, args=(self.pg_docker_fixture,))
self.thread.start()
wait_until(lambda: self.thread.is_alive())
# Check that PivWorker is ready (a good hint is when queue is created)
wait_until(lambda: is_queue_created(self.broker_connection, PIV_QUEUE_NAME))
def __exit__(self, type, value, traceback):
# Remove the contributor
self.test_client.delete("/contributors/{}".format(PIV_CONTRIBUTOR_ID))
# PivWorker should die eventually when no PIV contributors is available
wait_until(lambda: not self.thread.is_alive())
def test_mq_message_received_and_stored(
test_client, pg_docker_fixture, rabbitmq_docker_fixture, broker_connection, mq_handler
):
with PivWorkerTest(test_client, rabbitmq_docker_fixture.url, broker_connection, pg_docker_fixture):
# Check that PivWorker is creating the queue
wait_until(lambda: is_queue_created(broker_connection, PIV_QUEUE_NAME))
# Check that MQ message is received and stored in DB
mq_handler.publish(str('{"key": "Some valid JSON"}'), PIV_CONTRIBUTOR_ID)
wait_until(lambda: RealTimeUpdate.query.count() == 1)
def test_redis_lock_update(
test_client,
pg_docker_fixture,
rabbitmq_docker_fixture,
broker_connection,
mq_handler,
):
logger = logging.getLogger("kirin.command.piv_worker")
with patch.object(logger, "debug") as mock_debug:
with PivWorkerTest(test_client, rabbitmq_docker_fixture.url, broker_connection, pg_docker_fixture):
# Check that PivWorker is creating the queue
wait_until(lambda: is_queue_created(broker_connection, PIV_QUEUE_NAME))
time.sleep((PIV_WORKER_REDIS_TIMEOUT_LOCK.total_seconds() // 5) + 1)
mq_handler.publish(str('{"key": "Some valid JSON"}'), PIV_CONTRIBUTOR_ID)
wait_until(lambda: RealTimeUpdate.query.count() == 1)
# Check lock refreshed
mock_debug.assert_any_call("lock {%s} updated", PIV_LOCK_NAME)
| agpl-3.0 | 5,102,547,780,023,800,000 | 36.138728 | 107 | 0.710661 | false |
notnownikki/quatloo | quatloo/qanda/tests.py | 1 | 7688 | from django.test import TestCase, SimpleTestCase
from qanda.parser import parse_qa, ParserError
from qanda.models import Question, Answer
from qanda.factory import get_question, get_answer
class QAParserTestCase(SimpleTestCase):
def test_extract_question_and_url(self):
qa = 'How do I do the thing? http://learntodoathing.com/'
expected = {
'question': 'How do I do the thing?',
'keywords': '',
'url': 'http://learntodoathing.com/',
'answer': ''
}
self.assertEqual(parse_qa(qa), expected)
def test_extract_question_and_answer(self):
qa = 'How do I do the thing? You must believe you can do the thing.'
expected = {
'question': 'How do I do the thing?',
'keywords': '',
'url': '',
'answer': 'You must believe you can do the thing.'
}
self.assertEqual(parse_qa(qa), expected)
def test_extract_question_answer_url(self):
qa = 'How do I do the thing? Believe you can! http://doathing.com/'
expected = {
'question': 'How do I do the thing?',
'keywords': '',
'url': 'http://doathing.com/',
'answer': 'Believe you can!'
}
self.assertEqual(parse_qa(qa), expected)
def test_questions_and_answers_can_talk_about_http(self):
qa = 'How do I redirect from https to https? Just redirect from http to https. http://beggingthequestion.com/'
expected = {
'question': 'How do I redirect from https to https?',
'keywords': '',
'url': 'http://beggingthequestion.com/',
'answer': 'Just redirect from http to https.'
}
self.assertEqual(parse_qa(qa), expected)
def test_keywords_are_added_to_the_question(self):
qa = 'How do I change the theme? (themes, style, styles) Use our handy tool! https://example.com/'
expected = {
'question': 'How do I change the theme?',
'keywords': 'themes, style, styles',
'url': 'https://example.com/',
'answer': 'Use our handy tool!'
}
self.assertEqual(parse_qa(qa), expected)
def test_fields_stripped_of_whitespace(self):
qa = ' How do I do the thing ? Believe you can! http://doathing.com/ '
expected = {
'question': 'How do I do the thing?',
'keywords': '',
'url': 'http://doathing.com/',
'answer': 'Believe you can!'
}
self.assertEqual(parse_qa(qa), expected)
def test_only_question(self):
qa = 'How do I do the thing?'
expected = {
'question': 'How do I do the thing?',
'keywords': '',
'url': '',
'answer': ''
}
self.assertEqual(parse_qa(qa), expected)
def test_no_question(self):
qa = 'I liek chocolate milk'
self.assertRaises(ParserError, parse_qa, qa)
class QuestionFactoryTestCase(TestCase):
def test_new_question(self):
question = get_question(
question_txt='How do I do a thing?', keywords='stuff, things')
self.assertEqual(question.question, 'How do I do a thing?')
self.assertEqual(question.keywords, 'stuff, things')
def test_update_question_keywords(self):
question = Question.objects.create(
question='How do I do a thing?', keywords='things, stuff')
question = get_question(
question_txt='How do I do a thing?', keywords='jam, cakes')
self.assertEqual(question.keywords, 'cakes, jam, stuff, things')
class AnswerFactoryTestCase(TestCase):
def test_new_answer(self):
question = Question.objects.create(
question='How do I do a thing?', keywords='things, stuff')
answer = get_answer(
question=question, url='http://example.com/',
answer_txt='Here is an example.')
self.assertEqual(answer.question, question)
self.assertEqual(answer.url, 'http://example.com/')
self.assertEqual(answer.answer, 'Here is an example.')
self.assertEqual(Answer.objects.all().count(), 1)
def test_new_answer_with_empty_url(self):
question = Question.objects.create(
question='How do I do a thing?', keywords='things, stuff')
answer = get_answer(
question=question, answer_txt='Here is an example.', url='')
self.assertEqual(answer.question, question)
self.assertEqual(answer.url, '')
self.assertEqual(answer.answer, 'Here is an example.')
self.assertEqual(Answer.objects.all().count(), 1)
def test_additional_answer(self):
question = Question.objects.create(
question='How do I do a thing?', keywords='things, stuff')
Answer.objects.create(
question=question, url='http://example.com/',
answer='Here is an example.')
answer = get_answer(
question=question, url='http://other-example.com/',
answer_txt='Here is another example.')
self.assertEqual(answer.question, question)
self.assertEqual(answer.url, 'http://other-example.com/')
self.assertEqual(answer.answer, 'Here is another example.')
self.assertEqual(Answer.objects.all().count(), 2)
def test_answer_text_updated(self):
question = Question.objects.create(
question='How do I do a thing?', keywords='things, stuff')
Answer.objects.create(
question=question, url='http://example.com/',
answer='Old answer')
answer = get_answer(
question=question, url='http://example.com/',
answer_txt='Here is an example.')
self.assertEqual(answer.question, question)
self.assertEqual(answer.url, 'http://example.com/')
self.assertEqual(answer.answer, 'Here is an example.')
self.assertEqual(Answer.objects.all().count(), 1)
def test_answer_text_not_updated_if_blank(self):
question = Question.objects.create(
question='How do I do a thing?', keywords='things, stuff')
Answer.objects.create(
question=question, url='http://example.com/',
answer='Old answer')
answer = get_answer(
question=question, url='http://example.com/',
answer_txt='')
self.assertEqual(answer.question, question)
self.assertEqual(answer.url, 'http://example.com/')
self.assertEqual(answer.answer, 'Old answer')
self.assertEqual(Answer.objects.all().count(), 1)
class QuestionMatchTestCase(SimpleTestCase):
"""
We need to override Django's transaction handling to make sure
the fulltext index is used when we insert test data.
"""
allow_database_queries = True
def tearDown(self):
super(QuestionMatchTestCase, self).tearDown()
Question.objects.all().delete()
def test_match_against_question(self):
question = Question(
question='How do I make a widget?',
keywords='custom, widgets, easteregg')
question.save()
questions = Question.match('I want to make a widget.')
self.assertEqual(1, len(list(questions)))
self.assertEqual(question.id, questions[0].id)
def test_match_against_keywords(self):
question = Question(
question='How do I make a widget?',
keywords='custom, widgets, easteregg')
question.save()
questions = Question.match('Show an easteregg please.')
self.assertEqual(1, len(list(questions)))
self.assertEqual(question.id, questions[0].id)
| gpl-3.0 | -1,262,025,947,253,863,000 | 39.463158 | 119 | 0.598725 | false |
AntonelliLab/seqcap_processor | bin/aTRAM-master/atram.py | 1 | 8421 | #!/usr/bin/env python3
"""
Start atram.
This wrapper module parses the input arguments and passes them to the module
that does the actual processing (core_atram.py).
"""
import os
import argparse
import textwrap
import lib.db as db
import lib.log as log
import lib.bio as bio
import lib.util as util
import lib.blast as blast
import lib.assembler as assembly
from lib.core_atram import assemble
def parse_command_line():
"""Process command-line arguments."""
description = """
This takes a query sequence and a blast database built with the
atram_preprocessor.py script and builds assemblies.
If you specify more than one query sequence and/or more than one blast
database then aTRAM will build one assembly for each query/blast
DB pair.
NOTE: You may use a text file to hold the command-line arguments
like: @/path/to/args.txt. This is particularly useful when specifying
multiple blast databases or multiple query sequences.
"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(description))
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(db.ATRAM_VERSION))
group = parser.add_argument_group('required arguments')
group.add_argument(
'-b', '--blast-db', '--sra', '--db', '--database',
required=True, metavar='DB', nargs='+',
help="""This needs to match the DB prefix you entered for
atram_preprocessor.py. You may repeat this argument to run the
--query sequence(s) against multiple blast databases.""")
group.add_argument(
'-q', '--query', '--target', '--probe', required=False, nargs='+',
help="""The path to the fasta file with sequences of interest. You may
        repeat this argument. If you do, then each --query sequence file
will be run against every --blast-db.""")
group.add_argument(
'-Q', '--query-split', '--target-split', required=False, nargs='+',
help="""The path to the fasta file with multiple sequences of interest.
This will take every sequence in the fasta file and treat it as if
it were its own --query argument. So every sequence in
--query-split will be run against every --blast-db.""")
group.add_argument(
'-o', '--output-prefix', required=True,
help="""This is the prefix of all of the output files. So you can
identify different blast output file sets. You may include a
directory as part of the prefix. aTRAM will add suffixes to
differentiate output files.""")
group.add_argument(
'-a', '--assembler', default='none',
choices=['abyss', 'trinity', 'velvet', 'spades', 'none'],
help="""Which assembler to use. Choosing "none" (the default) will do
a single blast run and stop before any assembly.""")
group.add_argument(
'-i', '--iterations', type=int, default=5, metavar='N',
help="""The number of pipeline iterations. The default is "5".""")
group.add_argument(
'-p', '--protein', action='store_true',
help="""Are the query sequences protein? aTRAM will guess if you skip
this argument.""")
group.add_argument(
'--fraction', type=float, default=1.0,
help="""Use only the specified fraction of the aTRAM database. The
default is 1.0.""")
cpus = min(10, os.cpu_count() - 4 if os.cpu_count() > 4 else 1)
group.add_argument(
'--cpus', '--processes', '--max-processes', type=int, default=cpus,
help="""Number of CPU processors to use. This will also be used for
the assemblers when possible. We will use {} out of {} CPUs.
""".format(cpus, os.cpu_count()))
group.add_argument('--log-file', help="""Log file (full path)".""")
group.add_argument(
'--log-level', choices=['debug', 'info', 'error'], default='info',
help="""Log messages of the given level (or above). 'debug' shows the
most messages and 'error' shows the least. The default is
'info'""")
group.add_argument(
'--path',
help="""If the assembler or blast you want to use is not in your $PATH\
then use this to prepend directories to your path.""")
group.add_argument(
'-t', '--temp-dir', metavar='DIR',
help="""Place temporary files in this directory. All files will be
deleted after aTRAM completes. The directory must exist.""")
group.add_argument(
'--keep-temp-dir', action='store_true',
help="""This flag will keep the temporary files in the --temp-dir
around for debugging.""")
group.add_argument(
'-T', '--timeout', metavar='SECONDS', default=600, type=int,
help="""How many seconds to wait for an assembler or BLAST before
stopping the run. To wait forever set this to 0. The default
is "600" (10 minutes).""")
group = parser.add_argument_group(
'optional values for blast-filtering contigs')
group.add_argument(
'--no-filter', action='store_true',
help="""Do not filter the assembled contigs. This will: set both the
--bit-score and --contig-length to 0""")
group.add_argument(
'--bit-score', type=float, default=70.0, metavar='SCORE',
help="""Remove contigs that have a value less than this. The default
is "70.0". This is turned off by the --no-filter argument.""")
group.add_argument(
'--contig-length', '--length', type=int, default=100,
help="""Remove blast hits that are shorter than this length. The
default is "100". This is turned off by the --no-filter argument.
""")
blast.command_line_args(parser)
assembly.command_line_args(parser)
args = vars(parser.parse_args())
check_query_args(args)
blast.check_args(args)
# Set defaults and adjust arguments based on other arguments
args['cov_cutoff'] = assembly.default_cov_cutoff(args['cov_cutoff'])
args['blast_db'] = blast.touchup_blast_db_names(args['blast_db'])
args['kmer'] = assembly.default_kmer(args['kmer'], args['assembler'])
args['max_target_seqs'] = blast.default_max_target_seqs(
args['max_target_seqs'], args['blast_db'], args['max_memory'])
    # A timeout of 0 means "wait forever", which we represent internally as
    # None (note that None and 0 are deliberately distinct here)
    args['timeout'] = max(0, args['timeout'])
    if not args['timeout']:
        args['timeout'] = None
setup_blast_args(args)
set_protein_arg(args)
setup_path_arg(args)
find_programs(args)
util.temp_dir_exists(args['temp_dir'], args.get('debug_dir'))
util.set_blast_batch_size(args['batch_size'])
return args
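

# A minimal sketch (not part of aTRAM) of the @file mechanism the parser
# above relies on via fromfile_prefix_chars='@'. The file name and its
# contents here are hypothetical.
def _demo_fromfile_args():
    """Show how argparse expands @args.txt into regular argv tokens."""
    demo = argparse.ArgumentParser(fromfile_prefix_chars='@')
    demo.add_argument('-b', '--blast-db', nargs='+')
    # If args.txt contains one token per line:
    #   -b
    #   db_prefix_1
    # then parse_args(['@args.txt']) is equivalent to
    # parse_args(['-b', 'db_prefix_1']).
    return demo.parse_args(['@args.txt'])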
def setup_path_arg(args):
"""Prepend to PATH environment variable if requested."""
if args['path']:
os.environ['PATH'] = '{}:{}'.format(args['path'], os.environ['PATH'])
def setup_blast_args(args):
"""Set up the blast args."""
if args['no_filter']:
args['bit_score'] = 0
args['contig_length'] = 0
def check_query_args(args):
"""Validate the query arguments."""
if not args.get('query') and not args.get('query_split'):
err = 'You must have at least one --query or --query-split argument.'
log.fatal(err)
def set_protein_arg(args):
"""Set up the protein argument."""
if not args['protein'] and args['query']:
args['protein'] = bio.fasta_file_has_protein(args['query'])
def find_programs(args):
"""Make sure we can find the programs needed by the assembler and blast."""
blast.find_program('makeblastdb')
blast.find_program('tblastn')
blast.find_program('blastn')
    # abyss uses bwa only when long reads are enabled
    assembly.find_program(
        'abyss', 'bwa', args['assembler'], not args['no_long_reads'])
    # check for Trinity itself, then for bowtie2 when it was requested
    assembly.find_program('trinity', 'Trinity', args['assembler'])
    assembly.find_program(
        'trinity', 'Trinity', args['assembler'], args['bowtie2'])
assembly.find_program('velvet', 'velveth', args['assembler'])
assembly.find_program('velvet', 'velvetg', args['assembler'])
assembly.find_program('spades', 'spades.py', args['assembler'])
if __name__ == '__main__':
ARGS = parse_command_line()
assemble(ARGS)
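
# Example invocation (hedged; the DB prefix and paths are placeholders,
# not files shipped with this repository):
#   python atram.py -b path/to/db_prefix -q query.fasta \
#       -o output/run1 -a velvet -i 5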
| mit | -5,189,248,304,416,858,000 | 36.762332 | 79 | 0.624273 | false |
AmericanResearchInstitute/poweru-server | pr_services/user_system/organization_email_domain_manager.py | 1 | 1902 | """
OrgEmailDomain manager class
@author Chris Church <[email protected]>
@copyright Copyright 2011 American Research Institute, Inc.
"""
from pr_services.object_manager import ObjectManager
from pr_services.rpc.service import service_method
import facade
class OrgEmailDomainManager(ObjectManager):
"""
Manage mappings between email domain and automatic organization and role
assignment.
"""
def __init__(self):
""" constructor """
ObjectManager.__init__(self)
self.getters.update({
'email_domain' : 'get_general',
'organization' : 'get_foreign_key',
'role' : 'get_foreign_key',
'effective_role' : 'get_foreign_key',
'effective_role_name' : 'get_general',
})
self.setters.update({
'email_domain' : 'set_general',
'organization' : 'set_foreign_key',
'role' : 'set_foreign_key',
})
self.my_django_model = facade.models.OrgEmailDomain
@service_method
def create(self, auth_token, email_domain, organization, role=None):
"""
Create a new OrgEmailDomain mapping
@param email_domain domain name to look for in user's email address
@param organization organization to be assigned
@param role role to be assigned within organization
@return a reference to the newly created OrgEmailDomain
"""
organization_object = self._find_by_id(organization, facade.models.Organization)
role_object = self._find_by_id(role, facade.models.OrgRole) if role else None
obj = self.my_django_model.objects.create(email_domain=email_domain, organization=organization_object, role=role_object)
self.authorizer.check_create_permissions(auth_token, obj)
return obj
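
# A hedged usage sketch (the variable names below are illustrative, not
# part of this module):
#   manager = OrgEmailDomainManager()
#   mapping = manager.create(auth_token, 'example.com', org_pk, role_pk)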
# vim:tabstop=4 shiftwidth=4 expandtab
| bsd-3-clause | 8,118,541,161,619,871,000 | 33.581818 | 128 | 0.640904 | false |
ShuboshaKuro/SimpleGameEngine | Test.py | 1 | 1251 | import numpy as np
import os
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# has to change whenever noise_width and noise_height change in the PerlinNoise.hpp file
DIMENSION1 = 200
DIMENSION2 = 200
# works if the working directory is set
path = os.path.dirname(os.path.realpath(__file__))
FILENAME = path + "\input0.txt"
if __name__ == '__main__':
    with open(FILENAME, 'r') as source:
        noise = np.fromstring(source.read(), sep=" ",
                              dtype=float).reshape(DIMENSION2, DIMENSION1)
# Build a grid by the 2 dimensions
Xr = np.arange(DIMENSION1)
Yr = np.arange(DIMENSION2)
X, Y = np.meshgrid(Xr, Yr)
# Build a figure with 2 subplots, the first is 3D
fig = plt.figure()
fig.suptitle("3D and 2D heighmap")
colormap = 'coolwarm'
ax = fig.add_subplot(2, 1, 1, projection='3d')
surf = ax.plot_surface(X, Y, noise, rstride=1, cstride=1, cmap=colormap, linewidth=0, antialiased=False)
ax2 = fig.add_subplot(2, 1, 2)
im = ax2.imshow(noise, cmap=colormap, interpolation='nearest')
# swap the Y axis so it aligns with the 3D plot
ax2.invert_yaxis()
# add an explanatory colour bar
plt.colorbar(im, orientation='horizontal')
# Show the image
plt.show()
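
    # To generate a compatible input0.txt for a quick test (an assumption:
    # the real file comes from the engine's PerlinNoise output), one could:
    #   np.savetxt(FILENAME, np.random.rand(DIMENSION2, DIMENSION1))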
| mit | 6,276,378,628,629,430,000 | 27.431818 | 108 | 0.67466 | false |
OCA/account-financial-tools | account_journal_lock_date/tests/test_journal_lock_date.py | 1 | 8511 | # Copyright 2017 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from datetime import date, timedelta
from odoo import tools
from odoo.modules import get_module_resource
from odoo.tests import common
from ..exceptions import JournalLockDateError
class TestJournalLockDate(common.TransactionCase):
def setUp(self):
super(TestJournalLockDate, self).setUp()
tools.convert_file(
self.cr,
"account",
get_module_resource("account", "test", "account_minimal_test.xml"),
{},
"init",
False,
"test",
)
self.account_move_obj = self.env["account.move"]
self.account_move_line_obj = self.env["account.move.line"]
self.company_id = self.ref("base.main_company")
self.partner = self.browse_ref("base.res_partner_12")
self.account = self.browse_ref("account.a_recv")
self.account2 = self.browse_ref("account.a_expense")
self.journal = self.browse_ref("account.bank_journal")
def test_journal_lock_date(self):
self.env.user.write({"groups_id": [(3, self.ref("base.group_system"))]})
self.env.user.write(
{"groups_id": [(3, self.ref("account.group_account_manager"))]}
)
self.assertFalse(self.env.user.has_group("account.group_account_manager"))
# create a move and post it
move = self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move.post()
# lock journal, set 'Lock Date for Non-Advisers'
self.journal.period_lock_date = date.today() + timedelta(days=2)
# Test that the move cannot be created, written, or cancelled
with self.assertRaises(JournalLockDateError):
self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
with self.assertRaises(JournalLockDateError):
move.write({"name": "TEST"})
# allow cancel posted move
self.journal.update_posted = True
with self.assertRaises(JournalLockDateError):
move.button_cancel()
# create a move after the 'Lock Date for Non-Advisers' and post it
move3 = self.account_move_obj.create(
{
"date": self.journal.period_lock_date + timedelta(days=3),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move3.post()
def test_journal_lock_date_adviser(self):
""" The journal lock date is ignored for Advisers """
self.env.user.write(
{"groups_id": [(4, self.ref("account.group_account_manager"))]}
)
self.assertTrue(self.env.user.has_group("account.group_account_manager"))
# create a move and post it
move = self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move.post()
# lock journal. Set 'Lock Date'
self.journal.fiscalyear_lock_date = date.today() + timedelta(days=2)
# lock journal. Set 'Lock Date for Non-Advisers'
self.journal.period_lock_date = date.today() + timedelta(days=4)
# Advisers cannot create, write, or cancel moves before 'Lock Date'
with self.assertRaises(JournalLockDateError):
self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
with self.assertRaises(JournalLockDateError):
move.write({"name": "TEST"})
# allow cancel posted move
self.journal.update_posted = True
with self.assertRaises(JournalLockDateError):
move.button_cancel()
# Advisers can create movements on a date after the 'Lock Date'
# even if that date is before and inclusive of
# the 'Lock Date for Non-Advisers' (self.journal.period_lock_date)
move3 = self.account_move_obj.create(
{
"date": self.journal.period_lock_date,
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move3.post()
| agpl-3.0 | 2,972,015,974,190,809,000 | 34.911392 | 82 | 0.364117 | false |
ros2/rclpy | rclpy/rclpy/action/client.py | 1 | 22966 | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import uuid
import weakref
from action_msgs.msg import GoalStatus
from action_msgs.srv import CancelGoal
from rclpy.executors import await_or_execute
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
from rclpy.qos import qos_profile_action_status_default
from rclpy.qos import qos_profile_services_default
from rclpy.qos import QoSProfile
from rclpy.task import Future
from rclpy.type_support import check_for_type_support
from rclpy.waitable import NumberOfEntities, Waitable
from unique_identifier_msgs.msg import UUID
class ClientGoalHandle():
"""Goal handle for working with Action Clients."""
def __init__(self, action_client, goal_id, goal_response):
self._action_client = action_client
self._goal_id = goal_id
self._goal_response = goal_response
self._status = GoalStatus.STATUS_UNKNOWN
def __eq__(self, other):
return self._goal_id == other.goal_id
def __ne__(self, other):
return self._goal_id != other.goal_id
def __repr__(self):
return 'ClientGoalHandle <id={0}, accepted={1}, status={2}>'.format(
self.goal_id.uuid,
self.accepted,
self.status)
@property
def goal_id(self):
return self._goal_id
@property
def stamp(self):
return self._goal_response.stamp
@property
def accepted(self):
return self._goal_response.accepted
@property
def status(self):
return self._status
def cancel_goal(self):
"""
Send a cancel request for the goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:return: The cancel response.
"""
return self._action_client._cancel_goal(self)
def cancel_goal_async(self):
"""
        Asynchronously request that the goal be canceled.
:return: a Future instance that completes when the server responds.
:rtype: :class:`rclpy.task.Future` instance
"""
return self._action_client._cancel_goal_async(self)
def get_result(self):
"""
Request the result for the goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:return: The result response.
"""
return self._action_client._get_result(self)
def get_result_async(self):
"""
Asynchronously request the goal result.
:return: a Future instance that completes when the result is ready.
:rtype: :class:`rclpy.task.Future` instance
"""
return self._action_client._get_result_async(self)
class ActionClient(Waitable):
"""ROS Action client."""
def __init__(
self,
node,
action_type,
action_name,
*,
callback_group=None,
goal_service_qos_profile=qos_profile_services_default,
result_service_qos_profile=qos_profile_services_default,
cancel_service_qos_profile=qos_profile_services_default,
feedback_sub_qos_profile=QoSProfile(depth=10),
status_sub_qos_profile=qos_profile_action_status_default
):
"""
Create an ActionClient.
:param node: The ROS node to add the action client to.
:param action_type: Type of the action.
:param action_name: Name of the action.
Used as part of the underlying topic and service names.
:param callback_group: Callback group to add the action client to.
If None, then the node's default callback group is used.
:param goal_service_qos_profile: QoS profile for the goal service.
:param result_service_qos_profile: QoS profile for the result service.
:param cancel_service_qos_profile: QoS profile for the cancel service.
:param feedback_sub_qos_profile: QoS profile for the feedback subscriber.
:param status_sub_qos_profile: QoS profile for the status subscriber.
"""
if callback_group is None:
callback_group = node.default_callback_group
super().__init__(callback_group)
# Import the typesupport for the action module if not already done
check_for_type_support(action_type)
self._node = node
self._action_type = action_type
self._action_name = action_name
with node.handle:
self._client_handle = _rclpy.ActionClient(
node.handle,
action_type,
action_name,
goal_service_qos_profile.get_c_qos_profile(),
result_service_qos_profile.get_c_qos_profile(),
cancel_service_qos_profile.get_c_qos_profile(),
feedback_sub_qos_profile.get_c_qos_profile(),
status_sub_qos_profile.get_c_qos_profile()
)
self._is_ready = False
# key: UUID in bytes, value: weak reference to ClientGoalHandle
self._goal_handles = {}
# key: goal request sequence_number, value: Future for goal response
self._pending_goal_requests = {}
# key: goal request sequence_number, value: UUID
self._sequence_number_to_goal_id = {}
# key: cancel request sequence number, value: Future for cancel response
self._pending_cancel_requests = {}
# key: result request sequence number, value: Future for result response
self._pending_result_requests = {}
# key: UUID in bytes, value: callback function
self._feedback_callbacks = {}
callback_group.add_entity(self)
self._node.add_waitable(self)
def _generate_random_uuid(self):
return UUID(uuid=list(uuid.uuid4().bytes))
def _remove_pending_request(self, future, pending_requests):
"""
Remove a future from the list of pending requests.
This prevents a future from receiving a request and executing its done callbacks.
:param future: a future returned from one of :meth:`send_goal_async`,
:meth:`_cancel_goal_async`, or :meth:`_get_result_async`.
:type future: rclpy.task.Future
:param pending_requests: The list of pending requests.
:type pending_requests: dict
:return: The sequence number associated with the removed future, or
None if the future was not found in the list.
"""
for seq, req_future in list(pending_requests.items()):
if future == req_future:
try:
del pending_requests[seq]
except KeyError:
pass
else:
self.remove_future(future)
return seq
return None
def _remove_pending_goal_request(self, future):
seq = self._remove_pending_request(future, self._pending_goal_requests)
if seq in self._sequence_number_to_goal_id:
del self._sequence_number_to_goal_id[seq]
def _remove_pending_cancel_request(self, future):
self._remove_pending_request(future, self._pending_cancel_requests)
def _remove_pending_result_request(self, future):
self._remove_pending_request(future, self._pending_result_requests)
# Start Waitable API
def is_ready(self, wait_set):
"""Return True if one or more entities are ready in the wait set."""
ready_entities = self._client_handle.is_ready(wait_set)
self._is_feedback_ready = ready_entities[0]
self._is_status_ready = ready_entities[1]
self._is_goal_response_ready = ready_entities[2]
self._is_cancel_response_ready = ready_entities[3]
self._is_result_response_ready = ready_entities[4]
return any(ready_entities)
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
data = {}
if self._is_goal_response_ready:
taken_data = self._client_handle.take_goal_response(
self._action_type.Impl.SendGoalService.Response)
# If take fails, then we get (None, None)
if all(taken_data):
data['goal'] = taken_data
if self._is_cancel_response_ready:
taken_data = self._client_handle.take_cancel_response(
self._action_type.Impl.CancelGoalService.Response)
# If take fails, then we get (None, None)
if all(taken_data):
data['cancel'] = taken_data
if self._is_result_response_ready:
taken_data = self._client_handle.take_result_response(
self._action_type.Impl.GetResultService.Response)
# If take fails, then we get (None, None)
if all(taken_data):
data['result'] = taken_data
if self._is_feedback_ready:
taken_data = self._client_handle.take_feedback(
self._action_type.Impl.FeedbackMessage)
# If take fails, then we get None
if taken_data is not None:
data['feedback'] = taken_data
if self._is_status_ready:
taken_data = self._client_handle.take_status(
self._action_type.Impl.GoalStatusMessage)
# If take fails, then we get None
if taken_data is not None:
data['status'] = taken_data
return data
async def execute(self, taken_data):
"""
Execute work after data has been taken from a ready wait set.
This will set results for Future objects for any received service responses and
call any user-defined callbacks (e.g. feedback).
"""
if 'goal' in taken_data:
sequence_number, goal_response = taken_data['goal']
if sequence_number in self._sequence_number_to_goal_id:
goal_handle = ClientGoalHandle(
self,
self._sequence_number_to_goal_id[sequence_number],
goal_response)
if goal_handle.accepted:
goal_uuid = bytes(goal_handle.goal_id.uuid)
if goal_uuid in self._goal_handles:
raise RuntimeError(
'Two goals were accepted with the same ID ({})'.format(goal_handle))
self._goal_handles[goal_uuid] = weakref.ref(goal_handle)
self._pending_goal_requests[sequence_number].set_result(goal_handle)
else:
self._node.get_logger().warning(
'Ignoring unexpected goal response. There may be more than '
f"one action server for the action '{self._action_name}'"
)
if 'cancel' in taken_data:
sequence_number, cancel_response = taken_data['cancel']
if sequence_number in self._pending_cancel_requests:
self._pending_cancel_requests[sequence_number].set_result(cancel_response)
else:
self._node.get_logger().warning(
'Ignoring unexpected cancel response. There may be more than '
f"one action server for the action '{self._action_name}'"
)
if 'result' in taken_data:
sequence_number, result_response = taken_data['result']
if sequence_number in self._pending_result_requests:
self._pending_result_requests[sequence_number].set_result(result_response)
else:
self._node.get_logger().warning(
'Ignoring unexpected result response. There may be more than '
f"one action server for the action '{self._action_name}'"
)
if 'feedback' in taken_data:
feedback_msg = taken_data['feedback']
goal_uuid = bytes(feedback_msg.goal_id.uuid)
# Call a registered callback if there is one
if goal_uuid in self._feedback_callbacks:
await await_or_execute(self._feedback_callbacks[goal_uuid], feedback_msg)
if 'status' in taken_data:
# Update the status of all goal handles maintained by this Action Client
for status_msg in taken_data['status'].status_list:
goal_uuid = bytes(status_msg.goal_info.goal_id.uuid)
status = status_msg.status
if goal_uuid in self._goal_handles:
goal_handle = self._goal_handles[goal_uuid]()
if goal_handle is not None:
goal_handle._status = status
# Remove "done" goals from the list
if (GoalStatus.STATUS_SUCCEEDED == status or
GoalStatus.STATUS_CANCELED == status or
GoalStatus.STATUS_ABORTED == status):
del self._goal_handles[goal_uuid]
else:
# Weak reference is None
del self._goal_handles[goal_uuid]
def get_num_entities(self):
"""Return number of each type of entity used in the wait set."""
num_entities = self._client_handle.get_num_entities()
return NumberOfEntities(*num_entities)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self._client_handle.add_to_waitset(wait_set)
# End Waitable API
def send_goal(self, goal, **kwargs):
"""
Send a goal and wait for the result.
Do not call this method in a callback or a deadlock may occur.
See :meth:`send_goal_async` for more info about keyword arguments.
Unlike :meth:`send_goal_async`, this method returns the final result of the
action (not a goal handle).
:param goal: The goal request.
:type goal: action_type.Goal
:return: The result response.
:rtype: action_type.Result
:raises: TypeError if the type of the passed goal isn't an instance of
the Goal type of the provided action when the service was
constructed.
"""
if not isinstance(goal, self._action_type.Goal):
raise TypeError()
event = threading.Event()
def unblock(future):
nonlocal event
event.set()
send_goal_future = self.send_goal_async(goal, **kwargs)
send_goal_future.add_done_callback(unblock)
event.wait()
if send_goal_future.exception() is not None:
raise send_goal_future.exception()
goal_handle = send_goal_future.result()
result = self._get_result(goal_handle)
return result
def send_goal_async(self, goal, feedback_callback=None, goal_uuid=None):
"""
Send a goal and asynchronously get the result.
The result of the returned Future is set to a ClientGoalHandle when receipt of the goal
is acknowledged by an action server.
:param goal: The goal request.
:type goal: action_type.Goal
:param feedback_callback: Callback function for feedback associated with the goal.
:type feedback_callback: function
:param goal_uuid: Universally unique identifier for the goal.
If None, then a random UUID is generated.
:type: unique_identifier_msgs.UUID
:return: a Future instance to a goal handle that completes when the goal request
has been accepted or rejected.
:rtype: :class:`rclpy.task.Future` instance
:raises: TypeError if the type of the passed goal isn't an instance of
the Goal type of the provided action when the service was
constructed.
"""
if not isinstance(goal, self._action_type.Goal):
raise TypeError()
request = self._action_type.Impl.SendGoalService.Request()
request.goal_id = self._generate_random_uuid() if goal_uuid is None else goal_uuid
request.goal = goal
sequence_number = self._client_handle.send_goal_request(request)
if sequence_number in self._pending_goal_requests:
raise RuntimeError(
'Sequence ({}) conflicts with pending goal request'.format(sequence_number))
if feedback_callback is not None:
# TODO(jacobperron): Move conversion function to a general-use package
goal_uuid = bytes(request.goal_id.uuid)
self._feedback_callbacks[goal_uuid] = feedback_callback
future = Future()
self._pending_goal_requests[sequence_number] = future
self._sequence_number_to_goal_id[sequence_number] = request.goal_id
future.add_done_callback(self._remove_pending_goal_request)
# Add future so executor is aware
self.add_future(future)
return future
def _cancel_goal(self, goal_handle):
"""
Send a cancel request for an active goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:param goal_handle: Handle to the goal to cancel.
:type goal_handle: :class:`ClientGoalHandle`
:return: The cancel response.
"""
event = threading.Event()
def unblock(future):
nonlocal event
event.set()
future = self._cancel_goal_async(goal_handle)
future.add_done_callback(unblock)
event.wait()
if future.exception() is not None:
raise future.exception()
return future.result()
def _cancel_goal_async(self, goal_handle):
"""
Send a cancel request for an active goal and asynchronously get the result.
:param goal_handle: Handle to the goal to cancel.
:type goal_handle: :class:`ClientGoalHandle`
:return: a Future instance that completes when the cancel request has been processed.
:rtype: :class:`rclpy.task.Future` instance
"""
if not isinstance(goal_handle, ClientGoalHandle):
raise TypeError(
'Expected type ClientGoalHandle but received {}'.format(type(goal_handle)))
cancel_request = CancelGoal.Request()
cancel_request.goal_info.goal_id = goal_handle.goal_id
sequence_number = self._client_handle.send_cancel_request(cancel_request)
if sequence_number in self._pending_cancel_requests:
raise RuntimeError(
'Sequence ({}) conflicts with pending cancel request'.format(sequence_number))
future = Future()
self._pending_cancel_requests[sequence_number] = future
future.add_done_callback(self._remove_pending_cancel_request)
# Add future so executor is aware
self.add_future(future)
return future
def _get_result(self, goal_handle):
"""
Request the result for an active goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:param goal_handle: Handle to the goal to get the result for.
:type goal_handle: :class:`ClientGoalHandle`
:return: The result response.
"""
event = threading.Event()
def unblock(future):
nonlocal event
event.set()
future = self._get_result_async(goal_handle)
future.add_done_callback(unblock)
event.wait()
if future.exception() is not None:
raise future.exception()
return future.result()
def _get_result_async(self, goal_handle):
"""
Request the result for an active goal asynchronously.
        :param goal_handle: Handle to the goal to get the result for.
:type goal_handle: :class:`ClientGoalHandle`
:return: a Future instance that completes when the get result request has been processed.
:rtype: :class:`rclpy.task.Future` instance
"""
if not isinstance(goal_handle, ClientGoalHandle):
raise TypeError(
'Expected type ClientGoalHandle but received {}'.format(type(goal_handle)))
result_request = self._action_type.Impl.GetResultService.Request()
result_request.goal_id = goal_handle.goal_id
sequence_number = self._client_handle.send_result_request(result_request)
if sequence_number in self._pending_result_requests:
raise RuntimeError(
'Sequence ({}) conflicts with pending result request'.format(sequence_number))
future = Future()
self._pending_result_requests[sequence_number] = future
future.add_done_callback(self._remove_pending_result_request)
# Add future so executor is aware
self.add_future(future)
return future
def server_is_ready(self):
"""
Check if there is an action server ready to process requests from this client.
:return: True if an action server is ready, False otherwise.
"""
with self._node.handle:
return self._client_handle.is_action_server_available()
def wait_for_server(self, timeout_sec=None):
"""
        Wait for an action server to be ready.
Returns as soon as an action server is ready for this client.
:param timeout_sec: Number of seconds to wait until an action server is available.
If None, then wait indefinitely.
:return: True if an action server is available, False if the timeout is exceeded.
"""
# TODO(jacobperron): Remove arbitrary sleep time and return as soon as server is ready
# See https://github.com/ros2/rclpy/issues/58
sleep_time = 0.25
if timeout_sec is None:
timeout_sec = float('inf')
while self._node.context.ok() and not self.server_is_ready() and timeout_sec > 0.0:
time.sleep(sleep_time)
timeout_sec -= sleep_time
return self.server_is_ready()
def destroy(self):
"""Destroy the underlying action client handle."""
if self._client_handle is None:
return
with self._node.handle:
self._client_handle.destroy_when_not_in_use()
self._node.remove_waitable(self)
self._client_handle = None
def __del__(self):
"""Destroy the underlying action client handle."""
self.destroy()
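

# A minimal usage sketch (hedged): 'Fibonacci' is an assumed action type
# from the ROS 2 tutorials (action_tutorials_interfaces), not something
# this module provides.
#
#   import rclpy
#   from rclpy.node import Node
#   from action_tutorials_interfaces.action import Fibonacci
#
#   rclpy.init()
#   node = Node('fibonacci_client')
#   client = ActionClient(node, Fibonacci, 'fibonacci')
#   client.wait_for_server()
#   goal = Fibonacci.Goal()
#   goal.order = 5
#   future = client.send_goal_async(goal)
#   rclpy.spin_until_future_complete(node, future)
#   goal_handle = future.result()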
| apache-2.0 | -5,743,371,639,841,132,000 | 38.057823 | 97 | 0.613254 | false |
mmalyska/eve-wspace | evewspace/Map/views.py | 1 | 36404 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, timedelta
import json
import csv
import pytz
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import Group, Permission
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from Map.models import *
from Map import utils, signals
from core.utils import get_config
# Decorator to check map permissions. Takes request and map_id
# Permissions are 0 = None, 1 = View, 2 = Change
# When used without a permission=x specification, requires Change access
def require_map_permission(permission=2):
def _dec(view_func):
def _view(request, map_id, *args, **kwargs):
current_map = get_object_or_404(Map, pk=map_id)
if current_map.get_permission(request.user) < permission:
raise PermissionDenied
else:
return view_func(request, map_id, *args, **kwargs)
_view.__name__ = view_func.__name__
_view.__doc__ = view_func.__doc__
_view.__dict__ = view_func.__dict__
return _view
return _dec
@login_required
@require_map_permission(permission=1)
def get_map(request, map_id):
"""Get the map and determine if we have permissions to see it.
If we do, then return a TemplateResponse for the map. If map does not
exist, return 404. If we don't have permission, return PermissionDenied.
"""
current_map = get_object_or_404(Map, pk=map_id)
context = {
'map': current_map,
'access': current_map.get_permission(request.user),
}
return TemplateResponse(request, 'map.html', context)
@login_required
@require_map_permission(permission=1)
def map_checkin(request, map_id):
# Initialize json return dict
json_values = {}
current_map = get_object_or_404(Map, pk=map_id)
    # AJAX requests should post a JSON datetime called loadtime
    # back that we use to get recent logs.
if 'loadtime' not in request.POST:
return HttpResponse(json.dumps({'error': "No loadtime"}),
mimetype="application/json")
time_string = request.POST['loadtime']
load_time = datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S.%f")
load_time = load_time.replace(tzinfo=pytz.utc)
if request.is_igb_trusted:
dialog_html = _checkin_igb_trusted(request, current_map)
if dialog_html is not None:
json_values.update({'dialogHTML': dialog_html})
log_list = MapLog.objects.filter(timestamp__gt=load_time,
visible=True,
map=current_map)
log_string = render_to_string('log_div.html', {'logs': log_list})
json_values.update({'logs': log_string})
return HttpResponse(json.dumps(json_values), mimetype="application/json")
@login_required
@require_map_permission(permission=1)
def map_refresh(request, map_id):
"""
Returns an HttpResponse with the updated systemJSON for an asynchronous
map refresh.
"""
if not request.is_ajax():
raise PermissionDenied
current_map = get_object_or_404(Map, pk=map_id)
result = [
datetime.strftime(datetime.now(pytz.utc),
"%Y-%m-%d %H:%M:%S.%f"),
utils.MapJSONGenerator(current_map,
request.user).get_systems_json()
]
return HttpResponse(json.dumps(result))
def _checkin_igb_trusted(request, current_map):
"""
Runs the specific code for the case that the request came from an igb that
trusts us, returns None if no further action is required, returns a string
containing the html for a system add dialog if we detect that a new system
needs to be added
"""
current_system = System.objects.get(name=request.eve_systemname)
old_system = None
result = None
threshold = datetime.now(pytz.utc) - timedelta(minutes=5)
recently_active = request.user.locations.filter(
timestamp__gt=threshold,
charactername=request.eve_charname
).all()
if recently_active.count():
old_system = request.user.locations.get(
charactername=request.eve_charname
).system
    # Conditions for the system to be automagically added to the map.
if (
old_system in current_map
and current_system not in current_map
and not _is_moving_from_kspace_to_kspace(old_system, current_system)
and recently_active.count()
):
context = {
'oldsystem': current_map.systems.filter(
system=old_system).all()[0],
'newsystem': current_system,
'wormholes': utils.get_possible_wh_types(old_system,
current_system),
}
result = render_to_string('igb_system_add_dialog.html', context,
context_instance=RequestContext(request))
current_system.add_active_pilot(request.user, request.eve_charname,
request.eve_shipname,
request.eve_shiptypename)
return result
def _is_moving_from_kspace_to_kspace(old_system, current_system):
"""
returns whether we are moving through kspace
:param old_system:
:param current_system:
:return:
"""
return old_system.is_kspace() and current_system.is_kspace()
def get_system_context(ms_id):
map_system = get_object_or_404(MapSystem, pk=ms_id)
#If map_system represents a k-space system get the relevant KSystem object
if map_system.system.is_kspace():
system = map_system.system.ksystem
else:
system = map_system.system.wsystem
scan_threshold = datetime.now(pytz.utc) - timedelta(
hours=int(get_config("MAP_SCAN_WARNING", None).value)
)
interest_offset = int(get_config("MAP_INTEREST_TIME", None).value)
interest_threshold = (datetime.now(pytz.utc)
- timedelta(minutes=interest_offset))
scan_warning = system.lastscanned < scan_threshold
if interest_offset > 0:
interest = (map_system.interesttime and
map_system.interesttime > interest_threshold)
else:
interest = map_system.interesttime
# Include any SiteTracker fleets that are active
st_fleets = map_system.system.stfleets.filter(ended=None).all()
return {'system': system, 'mapsys': map_system,
'scanwarning': scan_warning, 'isinterest': interest,
'stfleets': st_fleets}
@login_required
@require_map_permission(permission=2)
def add_system(request, map_id):
"""
AJAX view to add a system to a current_map. Requires POST containing:
topMsID: map_system ID of the parent map_system
bottomSystem: Name of the new system
topType: WormholeType name of the parent side
bottomType: WormholeType name of the new side
timeStatus: Wormhole time status integer value
massStatus: Wormhole mass status integer value
topBubbled: 1 if Parent side bubbled
bottomBubbled: 1 if new side bubbled
friendlyName: Friendly name for the new map_system
"""
if not request.is_ajax():
raise PermissionDenied
try:
# Prepare data
current_map = Map.objects.get(pk=map_id)
top_ms = MapSystem.objects.get(pk=request.POST.get('topMsID'))
bottom_sys = System.objects.get(
name=request.POST.get('bottomSystem')
)
top_type = WormholeType.objects.get(
name=request.POST.get('topType')
)
bottom_type = WormholeType.objects.get(
name=request.POST.get('bottomType')
)
time_status = int(request.POST.get('timeStatus'))
mass_status = int(request.POST.get('massStatus'))
top_bubbled = "1" == request.POST.get('topBubbled')
bottom_bubbled = "1" == request.POST.get('bottomBubbled')
# Add System
bottom_ms = current_map.add_system(
request.user, bottom_sys,
request.POST.get('friendlyName'), top_ms
)
# Add Wormhole
bottom_ms.connect_to(top_ms, top_type, bottom_type, top_bubbled,
bottom_bubbled, time_status, mass_status)
return HttpResponse()
except ObjectDoesNotExist:
return HttpResponse(status=400)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def remove_system(request, map_id, ms_id):
"""
Removes the supplied map_system from a map.
"""
system = get_object_or_404(MapSystem, pk=ms_id)
system.remove_system(request.user)
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_details(request, map_id, ms_id):
"""
Returns a html div representing details of the System given by ms_id in
map map_id
"""
if not request.is_ajax():
raise PermissionDenied
return render(request, 'system_details.html', get_system_context(ms_id))
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_menu(request, map_id, ms_id):
"""
Returns the html for system menu
"""
if not request.is_ajax():
raise PermissionDenied
return render(request, 'system_menu.html', get_system_context(ms_id))
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_tooltips(request, map_id):
"""
Returns the system tooltips for map_id
"""
if not request.is_ajax():
raise PermissionDenied
ms_list = MapSystem.objects.filter(map_id=map_id)\
.select_related('parent_wormhole', 'system__region')\
.iterator()
return render(request, 'system_tooltip.html', {'map_systems': ms_list})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def wormhole_tooltips(request, map_id):
"""Takes a POST request from AJAX with a Wormhole ID and renders the
wormhole tooltip for that ID to response.
"""
if not request.is_ajax():
raise PermissionDenied
cur_map = get_object_or_404(Map, pk=map_id)
ms_list = MapSystem.objects.filter(map=cur_map).all()
whs = Wormhole.objects.filter(top__in=ms_list).all()
return render(request, 'wormhole_tooltip.html', {'wormholes': whs})
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def collapse_system(request, map_id, ms_id):
"""
Mark the system as collapsed.
"""
if not request.is_ajax():
raise PermissionDenied
map_sys = get_object_or_404(MapSystem, pk=ms_id)
parent_wh = map_sys.parent_wormhole
parent_wh.collapsed = True
parent_wh.save()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def resurrect_system(request, map_id, ms_id):
"""
Unmark the system as collapsed.
"""
if not request.is_ajax():
raise PermissionDenied
map_sys = get_object_or_404(MapSystem, pk=ms_id)
parent_wh = map_sys.parent_wormhole
parent_wh.collapsed = False
parent_wh.save()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def mark_scanned(request, map_id, ms_id):
"""Takes a POST request from AJAX with a system ID and marks that system
as scanned.
"""
if request.is_ajax():
map_system = get_object_or_404(MapSystem, pk=ms_id)
map_system.system.lastscanned = datetime.now(pytz.utc)
map_system.system.save()
return HttpResponse()
else:
raise PermissionDenied
# noinspection PyUnusedLocal
@login_required()
def manual_location(request, map_id, ms_id):
"""Takes a POST request form AJAX with a System ID and marks the user as
being active in that system.
"""
if request.is_ajax():
map_system = get_object_or_404(MapSystem, pk=ms_id)
map_system.system.add_active_pilot(request.user, "OOG Browser",
"Unknown", "Uknown")
return HttpResponse()
else:
raise PermissionDenied
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def set_interest(request, map_id, ms_id):
"""Takes a POST request from AJAX with an action and marks that system
as having either utcnow or None as interesttime. The action can be either
"set" or "remove".
"""
if request.is_ajax():
action = request.POST.get("action", "none")
if action == "none":
raise Http404
system = get_object_or_404(MapSystem, pk=ms_id)
if action == "set":
system.interesttime = datetime.now(pytz.utc)
system.save()
return HttpResponse()
if action == "remove":
system.interesttime = None
system.save()
return HttpResponse()
return HttpResponse(status=418)
else:
raise PermissionDenied
def _update_sig_from_tsv(signature, row):
COL_SIG = 0
COL_SIG_TYPE = 3
COL_SIG_GROUP = 2
COL_SIG_SCAN_GROUP = 1
COL_SIG_STRENGTH = 4
COL_DISTANCE = 5
info = row[COL_SIG_TYPE]
updated = False
sig_type = None
if (row[COL_SIG_SCAN_GROUP] == "Cosmic Signature"
or row[COL_SIG_SCAN_GROUP] == "Cosmic Anomaly"
):
try:
sig_type = SignatureType.objects.get(
longname=row[COL_SIG_GROUP])
        except ObjectDoesNotExist:
sig_type = None
else:
sig_type = None
if info and sig_type:
updated = True
if sig_type:
signature.sigtype = sig_type
signature.updated = updated or signature.updated
if info:
signature.info = info
return signature
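
# A sample line from the in-game scanner paste that bulk_sig_import expects
# (tab-separated; these values are illustrative only):
#   ABC-123\tCosmic Signature\tWormhole\tUnstable Wormhole\t12.5%\t4.2 AU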
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def bulk_sig_import(request, map_id, ms_id):
"""
GET gets a bulk signature import form. POST processes it, creating sigs
with blank info and type for each sig ID detected.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
k = 0
if request.method == 'POST':
reader = csv.reader(request.POST.get('paste', '').decode(
'utf-8').splitlines(), delimiter="\t")
COL_SIG = 0
COL_STRENGTH = 4
for row in reader:
# To prevent pasting of POSes into the sig importer, make sure
# the strength column is present
try:
test_var = row[COL_STRENGTH]
except IndexError:
return HttpResponse('A valid signature paste was not found',
status=400)
if k < 75:
sig_id = utils.convert_signature_id(row[COL_SIG])
sig = Signature.objects.get_or_create(sigid=sig_id,
modified_by=request.user,
system=map_system.system)[0]
sig = _update_sig_from_tsv(sig, row)
sig.modified_by = request.user
sig.save()
signals.signature_update.send_robust(sig, user=request.user,
map=map_system.map,
signal_strength=row[COL_STRENGTH])
k += 1
map_system.map.add_log(request.user,
"Imported %s signatures for %s(%s)."
% (k, map_system.system.name,
map_system.friendlyname), True)
map_system.system.lastscanned = datetime.now(pytz.utc)
map_system.system.save()
return HttpResponse()
else:
return TemplateResponse(request, "bulk_sig_form.html",
{'mapsys': map_system})
@login_required
@require_map_permission(permission=2)
def toggle_sig_owner(request, map_id, ms_id, sig_id=None):
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.toggle_ownership(request.user)
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_signature(request, map_id, ms_id, sig_id=None):
"""
GET gets a pre-filled edit signature form.
POST updates the signature with the new information and returns a
blank add form.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
action = None
    if sig_id is not None:
signature = get_object_or_404(Signature, pk=sig_id)
created = False
if not signature.owned_by:
signature.toggle_ownership(request.user)
if request.method == 'POST':
form = SignatureForm(request.POST)
if form.is_valid():
ingame_id = utils.convert_signature_id(form.cleaned_data['sigid'])
            if sig_id is None:
signature, created = Signature.objects.get_or_create(
system=map_system.system, sigid=ingame_id)
signature.sigid = ingame_id
signature.updated = True
signature.info = form.cleaned_data['info']
if request.POST['sigtype'] != '':
sigtype = form.cleaned_data['sigtype']
else:
sigtype = None
signature.sigtype = sigtype
signature.modified_by = request.user
signature.save()
map_system.system.lastscanned = datetime.now(pytz.utc)
map_system.system.save()
if created:
action = 'Created'
else:
action = 'Updated'
if signature.owned_by:
signature.toggle_ownership(request.user)
map_system.map.add_log(request.user,
"%s signature %s in %s (%s)" %
(action, signature.sigid, map_system.system.name,
map_system.friendlyname))
signals.signature_update.send_robust(signature, user=request.user,
map=map_system.map)
else:
return TemplateResponse(request, "edit_sig_form.html",
{'form': form,
'system': map_system, 'sig': signature})
form = SignatureForm()
    if sig_id is None or action == 'Updated':
return TemplateResponse(request, "add_sig_form.html",
{'form': form, 'system': map_system})
else:
return TemplateResponse(request, "edit_sig_form.html",
{'form': SignatureForm(instance=signature),
'system': map_system, 'sig': signature})
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=1)
def get_signature_list(request, map_id, ms_id):
"""
Determines the proper escalationThreshold time and renders
system_signatures.html
"""
if not request.is_ajax():
raise PermissionDenied
system = get_object_or_404(MapSystem, pk=ms_id)
escalation_downtimes = int(get_config("MAP_ESCALATION_BURN",
request.user).value)
return TemplateResponse(request, "system_signatures.html",
{'system': system,
'downtimes': escalation_downtimes})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def mark_signature_cleared(request, map_id, ms_id, sig_id):
"""
Marks a signature as having its NPCs cleared.
"""
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.clear_rats()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def escalate_site(request, map_id, ms_id, sig_id):
"""
Marks a site as having been escalated.
"""
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.escalate()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def activate_signature(request, map_id, ms_id, sig_id):
"""
Marks a site activated.
"""
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.activate()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def delete_signature(request, map_id, ms_id, sig_id):
"""
Deletes a signature.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
sig = get_object_or_404(Signature, pk=sig_id)
sig.delete()
map_system.map.add_log(request.user, "Deleted signature %s in %s (%s)."
% (sig.sigid, map_system.system.name,
map_system.friendlyname))
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def manual_add_system(request, map_id, ms_id):
"""
A GET request gets a blank add system form with the provided MapSystem
as top system. The form is then POSTed to the add_system view.
"""
top_map_system = get_object_or_404(MapSystem, pk=ms_id)
systems = System.objects.all()
wormholes = WormholeType.objects.all()
return render(request, 'add_system_box.html',
{'topMs': top_map_system, 'sysList': systems,
'whList': wormholes})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_system(request, map_id, ms_id):
"""
A GET request gets the edit system dialog pre-filled with current
information.
A POST request saves the posted data as the new information.
POST values are friendlyName, info, and occupied.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
if request.method == 'GET':
occupied = map_system.system.occupied.replace("<br />", "\n")
info = map_system.system.info.replace("<br />", "\n")
return TemplateResponse(request, 'edit_system.html',
{'mapsys': map_system,
'occupied': occupied, 'info': info}
)
if request.method == 'POST':
map_system.friendlyname = request.POST.get('friendlyName', '')
if (
(map_system.system.info != request.POST.get('info', '')) or
(map_system.system.occupied !=
request.POST.get('occupied', ''))
):
map_system.system.info = request.POST.get('info', '')
map_system.system.occupied = request.POST.get('occupied', '')
map_system.system.save()
map_system.save()
map_system.map.add_log(request.user, "Edited System: %s (%s)"
% (map_system.system.name,
map_system.friendlyname))
return HttpResponse()
raise PermissionDenied
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_wormhole(request, map_id, wh_id):
"""
A GET request gets the edit wormhole dialog pre-filled with current info.
A POST request saves the posted data as the new info.
POST values are topType, bottomType, massStatus, timeStatus, topBubbled,
and bottomBubbled.
"""
if not request.is_ajax():
raise PermissionDenied
wormhole = get_object_or_404(Wormhole, pk=wh_id)
if request.method == 'GET':
return TemplateResponse(request, 'edit_wormhole.html',
{'wormhole': wormhole}
)
if request.method == 'POST':
wormhole.mass_status = int(request.POST.get('massStatus', 0))
wormhole.time_status = int(request.POST.get('timeStatus', 0))
wormhole.top_type = get_object_or_404(
WormholeType,
name=request.POST.get('topType', 'K162')
)
wormhole.bottom_type = get_object_or_404(
WormholeType,
name=request.POST.get('bottomType', 'K162')
)
wormhole.top_bubbled = request.POST.get('topBubbled', '1') == '1'
wormhole.bottom_bubbled = request.POST.get('bottomBubbled', '1') == '1'
wormhole.save()
wormhole.map.add_log(request.user,
("Updated the wormhole between %s(%s) and %s(%s)."
% (wormhole.top.system.name,
wormhole.top.friendlyname,
wormhole.bottom.system.name,
wormhole.bottom.friendlyname)))
return HttpResponse()
    raise PermissionDenied
@permission_required('Map.add_map')
def create_map(request):
"""
This function creates a map and then redirects to the new map.
"""
if request.method == 'POST':
form = MapForm(request.POST)
if form.is_valid():
new_map = form.save()
new_map.add_log(request.user, "Created the %s map." % new_map.name)
new_map.add_system(request.user, new_map.root, "Root", None)
return HttpResponseRedirect(reverse('Map.views.get_map',
kwargs={'map_id': new_map.pk}))
else:
return TemplateResponse(request, 'new_map.html', {'form': form})
else:
form = MapForm
return TemplateResponse(request, 'new_map.html', {'form': form, })
def _sort_destinations(destinations):
"""
Takes a list of destination tuples and returns the same list, sorted in order of the jumps.
"""
results = []
onVal = 0
for dest in destinations:
if len(results) == 0:
results.append(dest)
else:
while onVal <= len(results):
if onVal == len(results):
results.append(dest)
onVal = 0
break
else:
if dest[1] > results[onVal][1]:
onVal += 1
else:
results.insert(onVal, dest)
onVal = 0
break
return results
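
# Note: a more idiomatic equivalent of _sort_destinations (a sketch that
# preserves the ascending jump-count ordering used above) would be:
#   sorted(destinations, key=lambda dest: dest[1])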
# noinspection PyUnusedLocal
@require_map_permission(permission=1)
def destination_list(request, map_id, ms_id):
"""
Returns the destinations of interest tuple for K-space systems and
a blank response for w-space systems.
"""
if not request.is_ajax():
raise PermissionDenied
destinations = Destination.objects.filter(Q(user=None) |
Q(user=request.user))
map_system = get_object_or_404(MapSystem, pk=ms_id)
try:
system = KSystem.objects.get(pk=map_system.system.pk)
rf = utils.RouteFinder()
result = []
for destination in destinations:
result.append((destination.system,
rf.route_length(system,
destination.system) - 1,
round(rf.ly_distance(system,
destination.system), 3)
))
except ObjectDoesNotExist:
return HttpResponse()
return render(request, 'system_destinations.html',
{'system': system, 'destinations': _sort_destinations(result)})
# noinspection PyUnusedLocal
def site_spawns(request, map_id, ms_id, sig_id):
"""
Returns the spawns for a given signature and system.
"""
sig = get_object_or_404(Signature, pk=sig_id)
spawns = SiteSpawn.objects.filter(sigtype=sig.sigtype).all()
    if spawns and spawns[0].sysclass != 0:
spawns = SiteSpawn.objects.filter(sigtype=sig.sigtype,
sysclass=sig.system.sysclass).all()
return render(request, 'site_spawns.html', {'spawns': spawns})
#########################
#Settings Views #
#########################
@permission_required('Map.map_admin')
def general_settings(request):
"""
Returns and processes the general settings section.
"""
npc_threshold = get_config("MAP_NPC_THRESHOLD", None)
pvp_threshold = get_config("MAP_PVP_THRESHOLD", None)
scan_threshold = get_config("MAP_SCAN_WARNING", None)
interest_time = get_config("MAP_INTEREST_TIME", None)
escalation_burn = get_config("MAP_ESCALATION_BURN", None)
if request.method == "POST":
scan_threshold.value = int(request.POST['scanwarn'])
interest_time.value = int(request.POST['interesttimeout'])
pvp_threshold.value = int(request.POST['pvpthreshold'])
npc_threshold.value = int(request.POST['npcthreshold'])
escalation_burn.value = int(request.POST['escdowntimes'])
scan_threshold.save()
interest_time.save()
pvp_threshold.save()
npc_threshold.save()
escalation_burn.save()
return HttpResponse()
return TemplateResponse(
request, 'general_settings.html',
{'npcthreshold': npc_threshold.value,
'pvpthreshold': pvp_threshold.value,
'scanwarn': scan_threshold.value,
'interesttimeout': interest_time.value,
'escdowntimes': escalation_burn.value}
)
@permission_required('Map.map_admin')
def sites_settings(request):
"""
Returns the site spawns section.
"""
return TemplateResponse(request, 'spawns_settings.html',
{'spawns': SiteSpawn.objects.all()})
@permission_required('Map.map_admin')
def add_spawns(request):
"""
Adds a site spawn.
"""
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def delete_spawns(request, spawn_id):
"""
Deletes a site spawn.
"""
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_spawns(request, spawn_id):
"""
Alters a site spawn.
"""
return HttpResponse()
def destination_settings(request, user=None):
"""
Returns the destinations section.
"""
if not user:
dest_list = Destination.objects.filter(user=None)
else:
dest_list = Destination.objects.filter(Q(user=None) |
Q(user=request.user))
return TemplateResponse(request, 'dest_settings.html',
{'destinations': dest_list,
'user_context': user})
def add_destination(request, dest_user=None):
"""
Add a destination.
"""
if not dest_user and not request.user.has_perm('Map.map_admin'):
raise PermissionDenied
system = get_object_or_404(KSystem, name=request.POST['systemName'])
Destination(system=system, user=dest_user).save()
return HttpResponse()
def add_personal_destination(request):
"""
Add a personal destination.
"""
return add_destination(request, dest_user=request.user)
def delete_destination(request, dest_id):
"""
Deletes a destination.
"""
destination = get_object_or_404(Destination, pk=dest_id)
if not request.user.has_perm('Map.map_admin') and not destination.user:
raise PermissionDenied
if destination.user and not request.user == destination.user:
raise PermissionDenied
destination.delete()
return HttpResponse()
@permission_required('Map.map_admin')
def sigtype_settings(request):
"""
Returns the signature types section.
"""
return TemplateResponse(request, 'sigtype_settings.html',
{'sigtypes': SignatureType.objects.all()})
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_sigtype(request, sigtype_id):
"""
Alters a signature type.
"""
return HttpResponse()
@permission_required('Map.map_admin')
def add_sigtype(request):
"""
Adds a signature type.
"""
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def delete_sigtype(request, sigtype_id):
"""
Deletes a signature type.
"""
return HttpResponse()
@permission_required('Map.map_admin')
def map_settings(request, map_id):
"""
Returns and processes the settings section for a map.
"""
subject = get_object_or_404(Map, pk=map_id)
return TemplateResponse(request, 'map_settings_single.html',
{'map': subject})
@permission_required('Map.map_admin')
def delete_map(request, map_id):
"""
Deletes a map.
"""
subject = get_object_or_404(Map, pk=map_id)
subject.delete()
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_map(request, map_id):
"""
Alters a map.
"""
return HttpResponse('[]')
@permission_required('Map.map_admin')
def global_permissions(request):
"""
Returns and processes the global permissions section.
"""
if not request.is_ajax():
raise PermissionDenied
group_list = []
admin_perm = Permission.objects.get(codename="map_admin")
unrestricted_perm = Permission.objects.get(codename="map_unrestricted")
add_map_perm = Permission.objects.get(codename="add_map")
if request.method == "POST":
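        # Hedged note: each checkbox arrives as a POST key of the form
        # '<group_pk>_unrestricted' / '<group_pk>_add' / '<group_pk>_admin';
        # the loop syncs every group's permission set to the submitted form.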
for group in Group.objects.all():
if request.POST.get('%s_unrestricted' % group.pk, None):
if unrestricted_perm not in group.permissions.all():
group.permissions.add(unrestricted_perm)
else:
if unrestricted_perm in group.permissions.all():
group.permissions.remove(unrestricted_perm)
if request.POST.get('%s_add' % group.pk, None):
if add_map_perm not in group.permissions.all():
group.permissions.add(add_map_perm)
else:
if add_map_perm in group.permissions.all():
group.permissions.remove(add_map_perm)
if request.POST.get('%s_admin' % group.pk, None):
if admin_perm not in group.permissions.all():
group.permissions.add(admin_perm)
else:
if admin_perm in group.permissions.all():
group.permissions.remove(admin_perm)
return HttpResponse()
for group in Group.objects.all():
entry = {
'group': group, 'admin': admin_perm in group.permissions.all(),
'unrestricted': unrestricted_perm in group.permissions.all(),
'add_map': add_map_perm in group.permissions.all()
}
group_list.append(entry)
return TemplateResponse(request, 'global_perms.html',
{'groups': group_list})
| gpl-3.0 | -173,141,384,186,333,800 | 33.150094 | 95 | 0.608779 | false |
tedlaz/pyted | tedutil/db_context_manager.py | 1 | 5076 | '''
Module db_context_manager.py
Connect to sqlite database and perform crud functions
'''
import sqlite3
import os
PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print(PATH)
def grup(txtv):
'''
    Transforms a string to uppercase, tailored for Greek text comparison
'''
ar1 = u"αάΆΑβγδεέΈζηήΉθιίϊΊκλμνξοόΌπρσςτυύΎφχψωώΏ"
ar2 = u"ΑΑΑΑΒΓΔΕΕΕΖΗΗΗΘΙΙΙΙΚΛΜΝΞΟΟΟΠΡΣΣΤΥΥΥΦΧΨΩΩΩ"
ftxt = u''
for letter in txtv:
if letter in ar1:
ftxt += ar2[ar1.index(letter)]
else:
ftxt += letter.upper()
return ftxt
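# Illustrative check (assuming a UTF-8 source): grup(u'καλημέρα') returns
# u'ΚΑΛΗΜΕΡΑ' -- accents are stripped while uppercasing, so accented and
# unaccented spellings compare equal.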
class OpenSqlite:
'''
Context manager class
Use it as:
with Open_sqlite(dbfilename) as db:
your code here ...
'''
def __init__(self, dbfile):
self.dbf = dbfile
self.active = False
self.con = None
self.cur = None
def __enter__(self):
self.con = sqlite3.connect(self.dbf)
self.con.create_function("grup", 1, grup)
self.cur = self.con.cursor()
self.active = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.active:
self.cur.close()
self.con.close()
def script(self, sqlscript):
"""Execute an sql script against self.dbf"""
self.con.executescript(sqlscript)
return True
def application_id(self):
'''Get application_id from database file'''
sql = 'PRAGMA application_id;'
try:
rws = self.select(sql)
return rws[0][0]
except:
return -9
def set_application_id(self, idv):
'''Set application_id to database file'''
self.script('PRAGMA application_id = %s;' % idv)
def user_version(self):
'''Get user_version from database file'''
sql = 'PRAGMA user_version;'
try:
rws = self.select(sql)
return rws[0][0]
except:
return -9
def set_user_version(self, version):
'''Set user_version to database file'''
self.script('PRAGMA user_version = %s;' % version)
def select(self, sql):
'''Get a list of tuples with data'''
self.cur.execute(sql)
rows = self.cur.fetchall()
return rows
def select_with_names(self, sql):
'''Get a tuple with column names and a list of tuples with data'''
self.cur.execute(sql)
column_names = tuple([t[0] for t in self.cur.description])
rows = self.cur.fetchall()
return column_names, rows
def select_as_dict(self, sql):
'''Get a list of dictionaries [{}, {}, ...]'''
self.cur.execute(sql)
column_names = [t[0] for t in self.cur.description]
rows = self.cur.fetchall()
diclist = []
for row in rows:
dic = {}
for i, col in enumerate(row):
dic[column_names[i]] = col
diclist.append(dic)
diclen = len(diclist)
if diclen > 0:
return diclist
return [{}]
def select_master_detail_as_dic(self,
idv,
tablemaster,
tabledetail=None,
id_at_end=True):
'''
Get a specific record from table tablemaster with id = idv
If we pass a tabledetail value it gets detail records too
idv : id value of record
tablemaster : Master table name
tabledetail : Detail table name
id_at_end : If True Foreign key is like <masterTable>_id
else is like id_<masterTable>
'''
if id_at_end:
fkeytemplate = '%s_id'
else:
fkeytemplate = 'id_%s'
id_field = fkeytemplate % tablemaster
sql1 = "SELECT * FROM %s WHERE id='%s'" % (tablemaster, idv)
sql2 = "SELECT * FROM %s WHERE %s='%s'" % (tabledetail, id_field, idv)
dic = self.select_as_dict(sql1)[0]
ldic = len(dic)
if ldic == 0:
return dic
if tabledetail:
dic['zlines'] = self.select_as_dict(sql2)
# Remove id_field key
for elm in dic['zlines']:
del elm[id_field]
return dic
if __name__ == '__main__':
DBPATH = '/home/tedlaz/tedfiles/prj/2017/2017a.sql3'
with OpenSqlite(DBPATH) as db:
print(db.select('select * from lmo limit 2;'))
print(db.select_as_dict('select * from vtr_trd limit 10;'))
print(db.select_with_names('select * from lmo limit 2;'))
# print(db.script('PRAGMA application_id = 20170313;'))
print(db.application_id())
print(db.user_version())
print(db.select_master_detail_as_dic(1, 'tr', 'trd', False))
print(db.select_master_detail_as_dic(20, 'tr'))
print(db.select_master_detail_as_dic(200000, 'tr'))
print(db.select_master_detail_as_dic(200000, 'tr', 'trd', False))
| gpl-3.0 | 2,498,087,319,140,315,600 | 30.408805 | 78 | 0.547257 | false |
Erotemic/local | build_scripts/custom_fletch.py | 1 | 5960 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
from os.path import dirname # NOQA
import sys
def disable_packages(pkgname='OpenBLAS'):  # pkgname param assumed; the original referenced an undefined name
if pkgname == 'OpenBLAS':
"""
PKGNAME=OpenBLAS
PKGNAME=Zlib
find build/src/ -iname CMakeCache.txt -delete
rm -rf build/src/$PKGNAME*
rm -rf build/tmp/$PKGNAME*
rm -rf ${CMAKE_BUILD_DIR}/build/src/${PKGNAME}*
rm -rf ${CMAKE_BUILD_DIR}/build/tmp/${PKGNAME}*
REMOVE CMAKE VARS ${PKGNAME}_*
"""
        # Incomplete in the original (bare assignment); kept as a hedged
        # placeholder -- the real value would come from the CMake build dir.
        cmake_build_dir = None
pass
def kwiver():
import utool as ut
ut.codeblock(
r'''
# STARTBLOCK bash
git checkout master
cd ~/code/kwiver
rm -rf ~/code/kwiver/build-py2-nocuda
mkdir -p build-py2-nocuda
cd ~/code/kwiver/build-py2-nocuda
cmake -G "Unix Makefiles" \
-D KWIVER_ENABLE_ARROWS:BOOL=True \
-D KWIVER_ENABLE_C_BINDINGS:BOOL=True \
-D KWIVER_ENABLE_PYTHON:BOOL=True \
-D KWIVER_ENABLE_TESTS:BOOL=True \
-D PYTHON_VERSION=$(python -c "import sys; print(sys.version[0:3])") \
-D fletch_DIR:PATH=~/code/fletch/build-py2-nocuda/ \
~/code/kwiver
''')
def rebase_python3_support():
import utool as ut
ut.codeblock(
r'''
# STARTBLOCK bash
cd ~/code/fletch
git checkout master
# blow away old branch
git branch -D tmp/pre-python3-support
# Recreate the branch
git checkout -b tmp/pre-python3-support
# Merge all prereqs into this branch
git merge dev/find_numpy dev/update-openblas-0.2.20 dev/update-opencv dev/update-vtk dev/update-caffe --no-edit
# or could do it one at a time, but w/e
# git merge dev/find_numpy
# git merge dev/update-openblas-0.2.20 --no-edit
# git merge dev/update-opencv --no-edit
# git merge dev/update-vtk --no-edit
git checkout dev/python3-support
# Find the oldest merge branch after master
# This should be the old tmp/pre-python3-support
OLD_MERGE_POINT=$(python -c "import sys; print(sys.argv[-1])" $(git rev-list --min-parents=2 HEAD ^master))
# Check to make sure its the merge point
git log -n 1 $OLD_MERGE_POINT
echo "$OLD_MERGE_POINT"
# Find the most recent merge
# echo $(python -c "import sys; print(sys.argv[-1])" $(git rev-list --min-parents=1 HEAD ^master))
git checkout tmp/pre-python3-support
git checkout -b tmp/rebased-python3-support
# These should be the relevant python3 commits
git log $OLD_MERGE_POINT..dev/python3-support
# Move all the relevant python3-support commits onto the new pre-python3-support
git cherry-pick $OLD_MERGE_POINT..dev/python3-support
git rebase --onto tmp/rebased-python3-support $OLD_MERGE_POINT
git checkout dev/python3-support
git reset --hard tmp/rebased-python3-support
git push --force
git checkout tmp/pre-python3-support
git push --force
cd ~/code/fletch-expt
git checkout master
git branch -D dev/python3-support
git branch -D tmp/pre-python3-support
git checkout dev/python3-support
# git checkout dev/python3-support
# git checkout -b backup-py3-support
# git checkout dev/python3-support
# git merge --strategy-option=theirs tmp/pre-python3-support
# git rebase -i --strategy-option=theirs tmp/pre-python3-support
# ENDBLOCK bash
''')
pass
def cuda_fletch():
"""
# Find cuda version
nvcc --version
8.0
# Find cudnn version
cat /usr/include/cudnn.h | grep CUDNN_Major -A 2
6.0
ldconfig -p | grep libcuda
ldconfig -p | grep libcudnn
"""
def generate_and_make(repo_dpath, **kwargs):
import utool as ut
cmake_vars = {
# build with
'fletch_BUILD_WITH_PYTHON': True,
'fletch_BUILD_WITH_MATLAB': False,
'fletch_BUILD_WITH_CUDA': False,
'fletch_BUILD_WITH_CUDNN': False,
# select version
'OpenCV_SELECT_VERSION': '3.1.0',
'VTK_SELECT_VERSION': '6.2.0',
'fletch_PYTHON_VERSION': sys.version[0:3],
'PYTHON_EXECUTABLE': sys.executable,
}
ut.update_existing(cmake_vars, kwargs)
DISABLED_LIBS = [ # NOQA
'ITK',
]
VTK_LIBS = [
'VTK',
'TinyXML',
'libxml2',
'Qt',
]
ENABLED_LIBS = [
'Boost', 'Caffe', 'Ceres', 'Eigen', 'FFmpeg', 'GeographicLib',
'GFlags', 'GLog', 'HDF5', 'jom', 'LevelDB', 'libjpeg-turbo', 'libjson',
'libkml', 'libtiff', 'LMDB', 'log4cplus', 'OpenBLAS', 'OpenCV',
'OpenCV_contrib', 'PNG', 'PROJ4', 'Protobuf', 'shapelib', 'Snappy',
'SuiteSparse', 'VXL', 'yasm', 'ZLib',
] + VTK_LIBS
lines = ['cmake -G "Unix Makefiles" -D CMAKE_BUILD_TYPE=RELEASE']
lines += ['-D fletch_ENABLE_{}=True'.format(lib) for lib in ENABLED_LIBS]
lines += ['-D {}={}'.format(key, val) for key, val in cmake_vars.items()]
lines += [repo_dpath]
command = ' '.join(lines)
print(command)
if False:
# import utool as ut
# cmake_retcode = ut.cmd2(command, verbose=True)['ret']
cmake_retcode = os.system(command)
if cmake_retcode == 0:
os.system('make -j9')
if __name__ == '__main__':
r"""
CommandLine:
python ~/local/build_scripts/custom_fletch.py
"""
# repo_dpath = '~/code/fletch'
# repo_dpath = dirname(__file__)
repo_dpath = os.getcwd()
if repo_dpath.endswith('fletch-expt'):
kwargs = dict(
OpenCV_SELECT_VERSION='3.2.0',
VTK_SELECT_VERSION='8.0',
)
generate_and_make(repo_dpath, **kwargs)
elif repo_dpath.endswith('fletch'):
generate_and_make(repo_dpath)
| gpl-3.0 | -2,039,726,112,540,294,700 | 26.850467 | 119 | 0.58104 | false |
vidyar/testing-yml | setup.py | 1 | 1647 | # dockerpty.
#
# Copyright 2014 Chris Corbyn <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import os
def fopen(filename):
return open(os.path.join(os.path.dirname(__file__), filename))
def read(filename):
return fopen(filename).read()
setup(
name='dockerpty',
version='0.1.1',
description='Python library to use the pseudo-tty of a docker container',
long_description=read('README.md'),
url='https://github.com/d11wtq/dockerpty',
author='Chris Corbyn',
author_email='[email protected]',
license='Apache 2.0',
keywords='docker, tty, pty, terminal',
packages=['dockerpty'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Environment :: Console',
'Intended Audience :: Developers',
'Topic :: Terminals',
'Topic :: Terminals :: Terminal Emulators/X Terminals',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| apache-2.0 | -6,114,690,253,438,141,000 | 33.3125 | 77 | 0.681239 | false |
hgiemza/DIRAC | tests/System/dirac-test-production.py | 1 | 4565 | """ This script submits a test production with a filter
"""
import time
import os
import json
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s test directory' % Script.scriptName
] ) )
from DIRAC.Core.Base.Script import parseCommandLine
Script.registerSwitch( "", "UseFilter=", "e.g. True/False" )
parseCommandLine()
from DIRAC import gLogger
from DIRAC.Interfaces.API.Job import Job
from DIRAC.TransformationSystem.Client.Transformation import Transformation
### Needed to test transformations with Filters
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
# Parse the arguments
args = Script.getPositionalArgs()
if ( len( args ) != 1 ):
Script.showHelp()
directory = args[0]
UseFilter = False
for switch in Script.getUnprocessedSwitches():
if switch[0].lower() == "usefilter":
if switch[1] == 'True':
UseFilter = True
#Let's first create the production
prodJobType = 'Merge'
transName = 'testProduction_' + str(int(time.time()))
desc = 'just test'
prodJob = Job()
prodJob._addParameter( prodJob.workflow, 'PRODUCTION_ID', 'string', '00012345', 'ProductionID' )
prodJob._addParameter( prodJob.workflow, 'JOB_ID', 'string', '00006789', 'ProductionJobID' )
prodJob._addParameter( prodJob.workflow, 'eventType', 'string', 'TestEventType', 'Event Type of the production' )
prodJob._addParameter( prodJob.workflow, 'numberOfEvents', 'string', '-1', 'Number of events requested' )
prodJob._addParameter( prodJob.workflow, 'ProcessingType', 'JDL', str( 'Test' ), 'ProductionGroupOrType' )
prodJob._addParameter( prodJob.workflow, 'Priority', 'JDL', str( 9 ), 'UserPriority' )
prodJob.setType( prodJobType )
prodJob.workflow.setName(transName)
prodJob.workflow.setDescrShort( desc )
prodJob.workflow.setDescription( desc )
prodJob.setCPUTime( 86400 )
prodJob.setInputDataPolicy( 'Download' )
prodJob.setExecutable('/bin/ls', '-l')
#Let's submit the production now
#result = prodJob.create()
name = prodJob.workflow.getName()
name = name.replace( '/', '' ).replace( '\\', '' )
prodJob.workflow.toXMLFile( name )
print 'Workflow XML file name is: %s' % name
workflowBody = ''
if os.path.exists( name ):
with open( name, 'r' ) as fopen:
workflowBody = fopen.read()
else:
print 'Could not get workflow body'
# Standard parameters
transformation = Transformation()
transformation.setTransformationName( name )
transformation.setTransformationGroup( 'Test' )
transformation.setDescription( desc )
transformation.setLongDescription( desc )
transformation.setType( 'Merge' )
transformation.setBody( workflowBody )
transformation.setPlugin( 'Standard' )
transformation.setTransformationFamily( 'Test' )
transformation.setGroupSize( 2 )
transformation.setOutputDirectories([ '/dirac/outConfigName/configVersion/LOG/00000000',
'/dirac/outConfigName/configVersion/RAW/00000000',
'/dirac/outConfigName/configVersion/CORE/00000000'])
## Set directory meta data and create a transformation with a meta-data filter
if UseFilter:
fc = FileCatalog()
dm = DataManager()
metaCatalog = 'DIRACFileCatalog'
## Set meta data fields in the DFC
MDFieldDict = {'particle':'VARCHAR(128)', 'timestamp':'VARCHAR(128)'}
for MDField in MDFieldDict.keys():
MDFieldType = MDFieldDict[MDField]
res = fc.addMetadataField( MDField, MDFieldType )
if not res['OK']:
gLogger.error( "Failed to add metadata fields", res['Message'] )
exit( -1 )
## Set directory meta data
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
MDdict1 = {'particle':'gamma', 'timestamp':timestamp}
res = fc.setMetadata( directory, MDdict1 )
if not res['OK']:
gLogger.error( "Failed to set metadata", res['Message'] )
exit( -1 )
## Set the transformation meta data filter
MDdict1b = {'particle':'gamma', 'timestamp':timestamp}
mqJson1b = json.dumps( MDdict1b )
res = transformation.setFileMask( mqJson1b )
if not res['OK']:
gLogger.error( "Failed to set FileMask", res['Message'] )
exit( -1 )
## Create the transformation
result = transformation.addTransformation()
if not result['OK']:
print result
exit(1)
transID = result['Value']
with open('TransformationID', 'w') as fd:
fd.write(str(transID))
print "Created %s, stored in file 'TransformationID'" % transID
| gpl-3.0 | 4,057,335,523,541,933,600 | 34.115385 | 116 | 0.699452 | false |
cournape/Bento | bento/commands/build.py | 1 | 2331 | import os
import os.path as op
from bento.utils.utils \
import \
subst_vars
from bento.installed_package_description \
import \
BuildManifest, build_manifest_meta_from_pkg
from bento._config \
import \
BUILD_MANIFEST_PATH
from bento.commands.core \
import \
Option
from bento.commands.core \
import \
Command
from bento.utils \
import \
cpu_count
class SectionWriter(object):
def __init__(self):
self.sections = {}
def store(self, filename, pkg):
meta = build_manifest_meta_from_pkg(pkg)
p = BuildManifest(self.sections, meta, pkg.executables)
if not op.exists(op.dirname(filename)):
os.makedirs(op.dirname(filename))
p.write(filename)
def jobs_callback(option, opt, value, parser):
setattr(parser.values, option.dest, cpu_count())
class BuildCommand(Command):
long_descr = """\
Purpose: build the project
Usage: bentomaker build [OPTIONS]."""
short_descr = "build the project."
common_options = Command.common_options \
+ [Option("-i", "--inplace",
help="Build extensions in place", action="store_true"),
Option("-j", "--jobs",
help="Parallel builds (yaku build only - EXPERIMENTAL)",
dest="jobs", action="callback", callback=jobs_callback),
Option("-v", "--verbose",
help="Verbose output (yaku build only)",
action="store_true")]
def run(self, ctx):
p = ctx.options_context.parser
o, a = p.parse_args(ctx.command_argv)
if o.help:
p.print_help()
return
ctx.compile()
ctx.post_compile()
def finish(self, ctx):
super(BuildCommand, self).finish(ctx)
n = ctx.build_node.make_node(BUILD_MANIFEST_PATH)
ctx.section_writer.store(n.abspath(), ctx.pkg)
def _config_content(paths):
keys = sorted(paths.keys())
n = max([len(k) for k in keys]) + 2
content = []
for name, value in sorted(paths.items()):
content.append('%s = %r' % (name.upper().ljust(n), subst_vars(value, paths)))
return "\n".join(content)
| bsd-3-clause | 4,304,503,386,611,436,500 | 29.272727 | 90 | 0.557701 | false |
joowani/dtags | dtags/commands/tags.py | 1 | 5372 | import json
import sys
from pathlib import Path
from typing import List, Optional, Set, Tuple
from dtags import style
from dtags.commons import (
dtags_command,
get_argparser,
normalize_tags,
prompt_user,
reverse_map,
)
from dtags.files import get_new_config, load_config_file, save_config_file
USAGE = "tags [-j] [-r] [-y] [-c] [-p] [-t TAG [TAG ...]]"
DESCRIPTION = f"""
Manage directory tags.
examples:
# show all tags
{style.command("tags")}
# show tags in JSON format with -j/--json
{style.command("tags --json")}
# show reverse mapping with -r/--reverse
{style.command("tags --reverse")}
# filter specific tags with -t
{style.command("tags -t foo bar baz")}
# clean invalid directories with -c/--clean
{style.command("tags --clean")}
# purge all tags with -p/--purge
{style.command("tags --purge")}
# skip confirmation prompts with -y/--yes
{style.command("tags --clean --yes")}
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
parser = get_argparser(prog="tags", desc=DESCRIPTION, usage=USAGE)
arg_group = parser.add_mutually_exclusive_group()
parser.add_argument(
"-j",
"--json",
action="store_true",
dest="json",
help="show tags in JSON format",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
help="show tag to directories relationship",
)
parser.add_argument(
"-y",
"--yes",
action="store_true",
dest="yes",
help="assume yes to prompts",
)
arg_group.add_argument(
"-c",
"--clean",
action="store_true",
dest="clean",
help="clean invalid directories",
)
arg_group.add_argument(
"-p",
"--purge",
action="store_true",
dest="purge",
help="purge all tags",
)
arg_group.add_argument(
"-t",
metavar="TAG",
nargs="+",
dest="tags",
help="tag names to filter",
)
parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
if parsed_args.reverse and parsed_args.clean:
parser.error("argument -r/--reverse: not allowed with argument -c/--clean")
elif parsed_args.reverse and parsed_args.purge:
parser.error("argument -r/--reverse: not allowed with argument -p/--purge")
elif parsed_args.json and parsed_args.clean:
parser.error("argument -j/--json: not allowed with argument -c/--clean")
elif parsed_args.json and parsed_args.purge:
parser.error("argument -j/--json: not allowed with argument -p/--purge")
elif parsed_args.clean:
clean_tags(skip_prompts=parsed_args.yes)
elif parsed_args.purge:
purge_tags(skip_prompts=parsed_args.yes)
else:
show_tags(
filters=parsed_args.tags,
in_json=parsed_args.json,
in_reverse=parsed_args.reverse,
)
def show_tags(
filters: Optional[List[str]] = None,
in_json: bool = False,
in_reverse: bool = False,
) -> None:
config = load_config_file()
tag_config = config["tags"]
tag_filters = None if filters is None else normalize_tags(filters)
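    # Four presentation branches: {JSON, plain text} x {dir -> tags,
    # tag -> dirs (reverse)}; filters, when given, restrict the output to
    # mappings that touch at least one requested tag.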
if in_json and in_reverse:
raw_data = {
tag: sorted(dirpath.as_posix() for dirpath in dirpaths)
for tag, dirpaths in reverse_map(tag_config).items()
if not tag_filters or tag in tag_filters
}
print(json.dumps(raw_data, indent=2, sort_keys=True))
elif in_json and not in_reverse:
raw_data = {
dirpath.as_posix(): sorted(tags)
for dirpath, tags in tag_config.items()
if not tag_filters or tags.intersection(tag_filters)
}
print(json.dumps(raw_data, indent=2, sort_keys=True))
elif not in_json and in_reverse:
tag_to_dirpaths = reverse_map(tag_config)
for tag in sorted(tag_to_dirpaths):
if not tag_filters or tag in tag_filters:
print(style.tag(tag))
for dirpath in sorted(tag_to_dirpaths[tag]):
print(" " + style.path(dirpath))
else:
for dirpath, tags in tag_config.items():
if not tag_filters or tags.intersection(tag_filters):
print(style.mapping(dirpath, tags))
def clean_tags(skip_prompts: bool = True) -> None:
config = load_config_file()
tag_config = config["tags"]
diffs: List[Tuple[Path, Set[str]]] = [
(dirpath, tags) for dirpath, tags in tag_config.items() if not dirpath.is_dir()
]
if not diffs:
print("Nothing to clean")
else:
for dirpath, tags in diffs:
print(style.diff(dirpath, del_tags=tags))
del tag_config[dirpath]
if skip_prompts or prompt_user():
save_config_file(config)
print("Tags cleaned successfully")
def purge_tags(skip_prompts: bool = True) -> None:
config = load_config_file()
tag_config = config["tags"]
if not tag_config:
print("Nothing to purge")
else:
for dirpath, tags in tag_config.items():
print(style.diff(dirpath, del_tags=tags))
if skip_prompts or prompt_user():
save_config_file(get_new_config())
print("Tags purged successfully")
| mit | -5,143,990,087,949,404,000 | 28.195652 | 87 | 0.592144 | false |
tonnrueter/pymca_devel | PyMca/TiffStack.py | 1 | 14610 | #/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
#############################################################################*/
__author__ = "V.A. Sole - ESRF Data Analysis"
import sys
import os
import numpy
from PyMca import DataObject
from PyMca import TiffIO
if sys.version > '2.9':
long = int
SOURCE_TYPE = "TiffStack"
class TiffArray(object):
def __init__(self, filelist, shape, dtype, imagestack=True):
self.__fileList = filelist
self.__shape = shape
self.__dtype = dtype
self.__imageStack = imagestack
if imagestack:
self.__nImagesPerFile = int(shape[0]/len(filelist))
else:
self.__nImagesPerFile = int(shape[-1]/len(filelist))
self.__oldFileNumber = -1
def __getitem__(self, args0):
standardSlice = True
indices = []
outputShape = []
scalarArgs = []
args = []
if not hasattr(args0, "__len__"):
args0 = [args0]
for i in range(len(self.__shape)):
if i < len(args0):
args.append(args0[i])
else:
args.append(slice(None, None, None))
for i in range(len(args)):
if isinstance(args[i], slice):
start = args[i].start
stop = args[i].stop
step = args[i].step
if start is None:
start = 0
if stop is None:
stop = self.__shape[i]
if step is None:
step = 1
if step < 1:
raise ValueError("Step must be >= 1 (got %d)" % step)
                # negative indices count from the end of the axis
                if start < 0:
                    start = self.__shape[i] + start
                if stop < 0:
                    stop = self.__shape[i] + stop
if stop == start:
raise ValueError("Zero-length selections are not allowed")
indices.append(list(range(start, stop, step)))
elif type(args[i]) == type([]):
if len(args[i]):
indices.append([int(x) for x in args[i]])
else:
standardSlice = False
elif type(args[i]) in [type(1), type(long(1))]:
start = args[i]
                if start < 0:
                    start = self.__shape[i] + start
                stop = start + 1
                step = 1
args[i] = slice(start, stop, step)
indices.append(list(range(start, stop, step)))
scalarArgs.append(i)
else:
standardSlice = False
if not standardSlice:
print("args = ", args)
            raise NotImplementedError("__getitem__(self, args) only works on slices")
if len(indices) < 3:
print("input args = ", args0)
print("working args = ", args)
print("indices = ", indices)
            raise NotImplementedError("__getitem__(self, args) only works on slices")
outputShape = [len(indices[0]), len(indices[1]), len(indices[2])]
outputArray = numpy.zeros(outputShape, dtype=self.__dtype)
# nbFiles = len(self.__fileList)
nImagesPerFile = self.__nImagesPerFile
if self.__imageStack:
i = 0
rowMin = min(indices[1])
rowMax = max(indices[1])
for imageIndex in indices[0]:
fileNumber = int(imageIndex/nImagesPerFile)
if fileNumber != self.__oldFileNumber:
self.__tmpInstance = TiffIO.TiffIO(self.__fileList[fileNumber],
mode='rb+')
self.__oldFileNumber = fileNumber
imageNumber = imageIndex % nImagesPerFile
imageData = self.__tmpInstance.getData(imageNumber,
rowMin=rowMin,
rowMax=rowMax)
try:
outputArray[i,:,:] = imageData[args[1],args[2]]
except:
print("outputArray[i,:,:].shape =",outputArray[i,:,:].shape)
print("imageData[args[1],args[2]].shape = " , imageData[args[1],args[2]].shape)
print("input args = ", args0)
print("working args = ", args)
print("indices = ", indices)
print("scalarArgs = ", scalarArgs)
raise
i += 1
else:
i = 0
rowMin = min(indices[0])
rowMax = max(indices[0])
for imageIndex in indices[-1]:
fileNumber = int(imageIndex/nImagesPerFile)
if fileNumber != self.__oldFileNumber:
self.__tmpInstance = TiffIO.TiffIO(self.__fileList[fileNumber],
mode='rb+')
self.__oldFileNumber = fileNumber
imageNumber = imageIndex % nImagesPerFile
imageData = self.__tmpInstance.getData(imageNumber,
rowMin=rowMin,
rowMax=rowMax)
outputArray[:,:, i] = imageData[args[0],args[1]]
i += 1
if len(scalarArgs):
finalShape = []
for i in range(len(outputShape)):
if i in scalarArgs:
continue
finalShape.append(outputShape[i])
outputArray.shape = finalShape
return outputArray
def getShape(self):
return self.__shape
shape = property(getShape)
def getDtype(self):
return self.__dtype
dtype = property(getDtype)
def getSize(self):
s = 1
for item in self.__shape:
s *= item
return s
size = property(getSize)
class TiffStack(DataObject.DataObject):
def __init__(self, filelist=None, imagestack=None, dtype=None):
DataObject.DataObject.__init__(self)
self.sourceType = SOURCE_TYPE
if imagestack is None:
self.__imageStack = True
else:
self.__imageStack = imagestack
self.__dtype = dtype
if filelist is not None:
if type(filelist) != type([]):
filelist = [filelist]
if len(filelist) == 1:
self.loadIndexedStack(filelist)
else:
self.loadFileList(filelist)
def loadFileList(self, filelist, dynamic=False, fileindex=0):
if type(filelist) != type([]):
filelist = [filelist]
#retain the file list
self.sourceName = filelist
#the number of files
nbFiles=len(filelist)
#the intance to access the first file
fileInstance = TiffIO.TiffIO(filelist[0])
#the number of images per file
nImagesPerFile = fileInstance.getNumberOfImages()
#get the dimensions from the image itself
tmpImage = fileInstance.getImage(0)
if self.__dtype is None:
self.__dtype = tmpImage.dtype
nRows, nCols = tmpImage.shape
#stack shape
if self.__imageStack:
shape = (nbFiles * nImagesPerFile, nRows, nCols)
else:
shape = (nRows, nCols, nbFiles * nImagesPerFile)
#we can create the stack
if not dynamic:
try:
data = numpy.zeros(shape,
self.__dtype)
except (MemoryError, ValueError):
dynamic = True
if not dynamic:
imageIndex = 0
self.onBegin(nbFiles * nImagesPerFile)
for i in range(nbFiles):
tmpInstance =TiffIO.TiffIO(filelist[i])
for j in range(nImagesPerFile):
tmpImage = tmpInstance.getImage(j)
if self.__imageStack:
data[imageIndex,:,:] = tmpImage
else:
data[:,:,imageIndex] = tmpImage
imageIndex += 1
self.incrProgressBar = imageIndex
self.onProgress(imageIndex)
self.onEnd()
if dynamic:
data = TiffArray(filelist,
shape,
self.__dtype,
imagestack=self.__imageStack)
self.info = {}
self.data = data
shape = self.data.shape
for i in range(len(shape)):
key = 'Dim_%d' % (i+1,)
self.info[key] = shape[i]
if self.__imageStack:
self.info["McaIndex"] = 0
self.info["FileIndex"] = 1
else:
self.info["McaIndex"] = 2
self.info["FileIndex"] = 0
self.info["SourceType"] = SOURCE_TYPE
self.info["SourceName"] = self.sourceName
def loadIndexedStack(self,filename,begin=None,end=None, skip = None, fileindex=0):
#if begin is None: begin = 0
if type(filename) == type([]):
filename = filename[0]
if not os.path.exists(filename):
            raise IOError("File %s does not exist" % filename)
name = os.path.basename(filename)
n = len(name)
i = 1
numbers = ['0', '1', '2', '3', '4', '5',
'6', '7', '8','9']
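        # Scan the basename right-to-left to split it into prefix, numeric
        # index and suffix, e.g. 'scan_0042.tif' -> ('scan_', '0042', '.tif').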
while (i <= n):
c = name[n-i:n-i+1]
if c in numbers:
break
i += 1
suffix = name[n-i+1:]
if len(name) == len(suffix):
#just one file, one should use standard widget
#and not this one.
self.loadFileList(filename, fileindex=fileindex)
else:
nchain = []
while (i<=n):
c = name[n-i:n-i+1]
if c not in numbers:
break
else:
nchain.append(c)
i += 1
number = ""
nchain.reverse()
for c in nchain:
number += c
fformat = "%" + "0%dd" % len(number)
if (len(number) + len(suffix)) == len(name):
prefix = ""
else:
prefix = name[0:n-i+1]
prefix = os.path.join(os.path.dirname(filename),prefix)
if not os.path.exists(prefix + number + suffix):
print("Internal error in TIFFStack")
print("file should exist: %s " % (prefix + number + suffix))
return
i = 0
if begin is None:
begin = 0
testname = prefix+fformat % begin+suffix
while not os.path.exists(prefix+fformat % begin+suffix):
begin += 1
testname = prefix+fformat % begin+suffix
if len(testname) > len(filename):break
i = begin
else:
i = begin
if not os.path.exists(prefix+fformat % i+suffix):
raise ValueError("Invalid start index file = %s" % \
(prefix+fformat % i+suffix))
f = prefix+fformat % i+suffix
filelist = []
while os.path.exists(f):
filelist.append(f)
i += 1
if end is not None:
if i > end:
break
f = prefix+fformat % i+suffix
self.loadFileList(filelist, fileindex=fileindex)
def onBegin(self, n):
pass
def onProgress(self, n):
pass
def onEnd(self):
pass
def test():
from PyMca import StackBase
testFileName = "TiffTest.tif"
nrows = 2000
ncols = 2000
#create a dummy stack with 100 images
nImages = 100
imagestack = True
a = numpy.ones((nrows, ncols), numpy.float32)
if not os.path.exists(testFileName):
print("Creating test filename %s" % testFileName)
tif = TiffIO.TiffIO(testFileName, mode = 'wb+')
for i in range(nImages):
data = (a * i).astype(numpy.float32)
if i == 1:
tif = TiffIO.TiffIO(testFileName, mode = 'rb+')
tif.writeImage(data,
info={'Title':'Image %d of %d' % (i+1, nImages)})
tif = None
stackData = TiffStack(imagestack=imagestack)
stackData.loadFileList([testFileName], dynamic=True)
if 0:
stack = StackBase.StackBase()
stack.setStack(stackData)
print("This should be 0 = %f" % stack.calculateROIImages(0, 0)['ROI'].sum())
print("This should be %f = %f" %\
(a.sum(),stack.calculateROIImages(1, 2)['ROI'].sum()))
if imagestack:
print("%f should be = %f" %\
(stackData.data[0:10,:,:].sum(),
stack.calculateROIImages(0, 10)['ROI'].sum()))
print("Test small ROI 10 should be = %f" %\
stackData.data[10:11,[10],11].sum())
print("Test small ROI 40 should be = %f" %\
stackData.data[10:11,[10,12,14,16],11].sum())
else:
print("%f should be = %f" %\
(stackData.data[:,:, 0:10].sum(),
stack.calculateROIImages(0, 10)['ROI'].sum()))
print("Test small ROI %f" %\
stackData.data[10:11,[29],:].sum())
else:
from PyMca import PyMcaQt as qt
from PyMca import QStackWidget
app = qt.QApplication([])
w = QStackWidget.QStackWidget()
print("Setting stack")
w.setStack(stackData)
w.show()
app.exec_()
if __name__ == "__main__":
test()
| gpl-2.0 | 2,687,976,386,199,071,000 | 35.708543 | 99 | 0.484805 | false |
shzygmyx/Adaboost | boosting.py | 1 | 13043 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 14 14:39:38 2016
@author: Meng Yuxian
This is an implementation of <Improved boosting algorithms using
confidence-rated predictions>, Schapire, 1999.
"""
from math import e, log
import numpy as np
from sklearn.tree import DecisionTreeClassifier
class Adaboost():
"""
    Adaboost(X, y, estimator=DecisionTreeClassifier, itern=20, mode="sign")
Basic Adaboost to solve two-class problem
Parameters
----------
X: numpy 2d array (m samples * n features)
y: numpy 1d array (m samples' label)
estimator: base_estimator of boosting
itern: number of iterations
    mode: "sign" mode outputs labels directly, while "num" mode outputs a
    confidence rate x. The more positive x is, the more likely the label is
    Adaboost.cls0; the more negative x is, the more likely it is not Adaboost.cls0
e.g.
>>> x = np.array([[1,2,3,4],[2,3,4,5],[6,7,8,9],[2,5,7,8]])
>>> y = np.array([1,2,2,1])
>>> clf = Adaboost(x, y, mode = "num")
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([ 27.5707191 , 32.16583895])
>>> clf.cls0
1
>>> clf = Adaboost(x, y, mode = "sign")
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([ 1., 1.])
    Note that the outputs of clf.predict in num mode are positive, so the
    outputs of clf.predict in sign mode are both clf.cls0, which is label 1.
Methods
-------
predict
score
See also
--------
Adaboost
References
----------
<Improved boosting algorithms using confidence-rated predictions>, Schapire
, 1999
"""
def __init__(self, X, y, estimator = DecisionTreeClassifier, itern = 20, mode = "sign"):
self.X = X
self.y = y.copy()
self.estimator = estimator
self.mode = mode
self.itern = itern
self.estimators = [] # estimators produced by boosting algorithm
self.alphas = np.array([]) # weights of each boost estimator
self.m = self.X.shape[0] # number of samples
self.w = np.array([1/self.m] * self.m) # weights of samples
self.cls_list = [] # list used to store classes' name and numbers
self.cls0 = y[0]
for i in range(self.m):
if y[i] not in self.cls_list:
self.cls_list.append(y[i])
if y[i] == self.cls0:
self.y[i] = 1
else:
self.y[i] = -1
if len(self.cls_list) != 2:
raise TypeError(
                '''This Adaboost only supports two-class problems; for
                multiclass problems, please use AdaboostMH.''')
self.train()
def train(self):
m = self.m
for k in range(self.itern):
cls = self.estimator(max_depth = 3, presort = True)
cls.fit(self.X, self.y, sample_weight = self.w)
self.estimators.append(cls)
y_predict = cls.predict(self.X)
error = 0 # number of wrong prediction
for i in range(m):
if y_predict[i] != self.y[i]:
error += self.w[i]
if error == 0:
error += 0.01 # smoothness
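            # Weight of this round's estimator: alpha_t = 0.5*ln((1-e_t)/e_t),
            # where e_t is the weighted error; alpha_t grows as e_t -> 0 and
            # the samples are re-weighted by exp(+/-alpha_t) below.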
alpha = 0.5*log((1-error)/error) # estimator weight
self.alphas = np.append(self.alphas, alpha)
for i in range(m): # update sample weights
if y_predict[i] != self.y[i]:
self.w[i] *= e**alpha
else:
self.w[i] /= e**alpha
self.w /= sum(self.w)
def predict(self, X):
y_predict = np.array([])
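        # Final hypothesis: the alpha-weighted sum of the base estimators'
        # +/-1 votes; "sign" mode thresholds it, "num" mode returns it raw.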
if self.mode == "sign":
for i in range(X.shape[0]):
predict_i = (sum(self.alphas *
np.array([int(self.estimators[k].predict(X[i].reshape(1,-1))) for k in range(len(self.alphas))])))
y_predict = np.append(y_predict, self.transfer(np.sign(predict_i)))
else:
for i in range(X.shape[0]):
predict_i = (sum(self.alphas *
np.array([int(self.estimators[k].predict(X[i].reshape(1,-1))) for k in range(len(self.alphas))])))
y_predict = np.append(y_predict, predict_i)
return y_predict
def transfer(self, l):
"""turn -1/+1 to previous initial label name"""
if l == 1:
return self.cls0
else:
return self.cls_list[1]
def score(self, X_test, y_test):
"""return precision of trained estimator on x_test and y_test"""
y_predict = self.predict(X_test)
error = 0 # error
for i in range(X_test.shape[0]):
if y_predict[i] != y_test[i]:
error += 1
error /= X_test.shape[0]
return 1 - error
class AdaboostMH():
"""
    AdaboostMH(X, y, estimator=DecisionTreeClassifier, itern=20, mode="sign")
Adaboost that could solve multiclass and multilabel problem.
Parameters
----------
X: numpy 2d array (m samples * n features)
y: numpy 1d array (m samples' label)
estimator: base_estimator of boosting
itern: number of iterations
mode: "sign" mode will return label directly when you use predict method,
while "num" mode will return an array of confidence rates x which reflects
how likely the labels i belongs to corresbonding sample j.
the more positive x is, the more likely the label i belongs to sample j;
the more negative x is, the more likely the label i doesn't belong to j.
e.g.
>>> x = np.array([[1,2,3,4],[2,3,4,5],[6,7,8,9],[2,5,7,8]])
>>> y = np.array([[1,2],[2],[3,1],[2,3]])
>>> clf = AdaboostMH(x, y, mode = "num")
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([[ 3.89458577, 3.89458577, 1.14677695],
[-1.45489964, 1.51029301, 7.75042082]])
Methods
-------
predict
score
See also
--------
Adaboost
References
----------
<Improved boosting algorithms using confidence-rated predictions>, Schapire
, 1999
"""
def __init__(self, X, y, estimator = DecisionTreeClassifier, itern = 20, mode = "sign"):
self.X = X
self.y = y
self.estimator = estimator
self.itern = itern
self.mode = mode
self.m = self.X.shape[0] # number of samples
self.cls_list = [] # list used to store classes' name and numbers
# if type(y[0]) != np.ndarray:
# self.y = y.reshape(len(y),-1)
for i in range(self.m):
for cls in self.y[i]:
if cls not in self.cls_list:
self.cls_list.append(cls)
self.k = len(self.cls_list) # number of classes
self.boost = self.train()
def train(self):
X = self.X
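        # AdaBoost.MH reduction: every (sample, class) pair becomes one binary
        # training example whose label is +1 iff the class belongs to the
        # sample, so a single two-class booster handles the multilabel task.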
new_X = [] #from initial problem generate new problem
new_y = []
for i in range(self.m):
for cls in self.cls_list:
new_X.append(list(X[i])+[cls])
if cls in self.y[i]:
new_y.append(1)
else:
new_y.append(-1)
new_X = np.array(new_X)
new_y = np.array(new_y)
boost = Adaboost(new_X, new_y, estimator = self.estimator, itern = self.itern, mode = self.mode)
return boost
def predict(self, X):
"""Use trained model to predict new X
clf.predict(x)
"""
y_predict = []
if self.mode == "sign":
for i in range(X.shape[0]):
y = []
for cls in self.cls_list:
new_X = np.append(X[i], cls).reshape(1,-1)
predict = int(self.boost.predict(new_X))
if predict == 1:
y.append(cls)
y_predict.append(y)
else:
for i in range(X.shape[0]):
y = []
for cls in self.cls_list:
new_X = np.append(X[i], cls).reshape(1,-1)
predict = self.boost.predict(new_X)[0]
y.append(predict)
y_predict.append(y)
y_predict = np.array(y_predict)
return y_predict
def score(self, X_test, y_test):
"""return precision of trained estimator on test dataset X and y"""
if self.mode != "sign":
raise TypeError("score only support sign mode")
y_predict = self.predict(X_test)
error = 0 # error
for i in range(X_test.shape[0]):
for cls in self.cls_list:
if cls in y_test[i]:
if cls not in y_predict[i]:
error += 1
else:
if cls in y_predict[i]:
error += 1
error /= (X_test.shape[0] * self.k)
return 1 - error
class AdaboostMO():
"""
    AdaboostMO(X, y, code_dic=None, estimator=DecisionTreeClassifier, itern=20)
A multiclass version of Adaboost based on output codes to solve singlelabel
problem
Parameters
----------
X: numpy 2d array (m samples * n features)
y: numpy 1d array (m samples' label)
code_dic: dictionary (key:label, value: numpy array of -1/+1)
estimator: base_estimator of boosting
itern: number of iterations
e.g.
>>> x = np.array([[1,2,3,4],[2,3,4,5],[6,7,8,9],[2,5,7,8]])
>>> y = np.array([1,2,3,1])
    >>> code_dic = {1: np.array([1,-1,-1]), 2: np.array([-1,1,-1]),
    ...             3: np.array([-1,-1,1])}
    >>> clf = AdaboostMO(x, y, code_dic=code_dic, itern=15)
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([1,1])
Methods
-------
predict
score
See also
--------
AdaboostMH
References
----------
<Improved boosting algorithms using confidence-rated predictions>, Schapire
, 1999
"""
def __init__(self, X, y, code_dic = None, estimator = DecisionTreeClassifier, itern = 20):
self.X = X
self.y = y
self.estimator = estimator
self.itern = itern
self.m = self.X.shape[0] # number of samples
self.cls_list = [] # list used to store classes' name and numbers
for i in range(self.m):
if y[i] not in self.cls_list:
self.cls_list.append(y[i])
        if code_dic is not None:
            self.k = len(code_dic[self.cls_list[0]]) # dimension of encoding space
        else:
            self.k = len(self.cls_list)
        if code_dic is None: # generate default encode dictionary
code_dic = {}
for i in range(self.k):
code = np.array([-1] * self.k)
code[i] = 1
code_dic[self.cls_list[i]] = code
self.code_dic = code_dic #store {label: array-like code}
self.boost = self.train()
def train(self):
        # encode y, then train as an AdaboostMH problem in "num" mode
        codes = self.encode(self.y)
        y = [[k for k in range(self.k) if codes[i][k] == 1] for i in range(self.m)]
boost = AdaboostMH(self.X, y, estimator = self.estimator, itern = self.itern, mode = "num")
return boost
def encode(self, y):
if not isinstance(y, np.ndarray):
return self.code_dic[y]
return np.array([self.code_dic[i] for i in y])
def decode(self, y):
"""decode an array_like labels"""
decode_y = []
for i in range(len(y)):
for cls in self.code_dic:
if self.code_dic[cls] == i:
decode_y.append(cls)
break
return np.array(decode_y)
def predict(self, X):
"""Use trained model to predict on new X"""
y_predict = []
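        # Output-code decoding: score each candidate class by the inner
        # product of its codeword with the per-bit confidences, then take
        # the argmax.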
for i in range(X.shape[0]):
confidences = self.boost.predict(X[i].reshape(1,-1))[0]
cls_score = [sum(self.encode(cls) * confidences)for cls in self.cls_list]
cls = self.cls_list[cls_score.index(max(cls_score))]
y_predict.append(cls)
return np.array(y_predict)
def score(self, x_test, y_test):
"""return precision of trained estimator on x_test and y_test"""
error = 0
y_predict = self.predict(x_test)
for i in range(len(y_test)):
if y_predict[i] != y_test[i]:
error += 1
return 1 - error/len(y_test)
| gpl-3.0 | 8,261,647,468,071,129,000 | 33.067204 | 131 | 0.499885 | false |
dionhaefner/veros | test/pyom_consistency/eke_test.py | 1 | 3344 | from collections import OrderedDict
import numpy as np
from test_base import VerosPyOMUnitTest
from veros.core import eke
class EKETest(VerosPyOMUnitTest):
nx, ny, nz = 70, 60, 50
extra_settings = {
'enable_cyclic_x': True,
'enable_eke_leewave_dissipation': True,
'enable_eke': True,
'enable_TEM_friction': True,
'enable_eke_isopycnal_diffusion': True,
'enable_store_cabbeling_heat': True,
'enable_eke_superbee_advection': True,
'enable_eke_upwind_advection': True
}
def initialize(self):
for a in ('eke_hrms_k0_min', 'eke_k_max', 'eke_c_k', 'eke_crhin', 'eke_cross',
'eke_lmin', 'K_gm_0', 'K_iso_0', 'c_lee0', 'eke_Ri0', 'eke_Ri1', 'eke_int_diss0',
'kappa_EKE0', 'eke_r_bot', 'eke_c_eps', 'alpha_eke', 'dt_tracer', 'AB_eps'):
self.set_attribute(a, np.random.rand())
for a in ('dxt', 'dxu'):
self.set_attribute(a, np.random.randint(1, 100, size=self.nx + 4).astype(np.float))
for a in ('dyt', 'dyu'):
self.set_attribute(a, np.random.randint(1, 100, size=self.ny + 4).astype(np.float))
for a in ('cosu', 'cost'):
self.set_attribute(a, 2 * np.random.rand(self.ny + 4) - 1.)
for a in ('dzt', 'dzw', 'zw'):
self.set_attribute(a, 100 * np.random.rand(self.nz))
for a in ('eke_topo_hrms', 'eke_topo_lam', 'hrms_k0', 'coriolis_t', 'beta',
'eke_lee_flux', 'eke_bot_flux', 'L_rossby'):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4))
for a in ('eke_len', 'K_diss_h', 'K_diss_gm', 'P_diss_skew', 'P_diss_hmix', 'P_diss_iso',
'kappaM', 'eke_diss_iw', 'eke_diss_tke', 'K_gm', 'flux_east', 'flux_north', 'flux_top',
'L_rhines'):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4, self.nz))
for a in ('eke', 'deke', 'Nsqr', 'u', 'v'):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4, self.nz, 3))
for a in ('maskU', 'maskV', 'maskW', 'maskT'):
self.set_attribute(a, np.random.randint(0, 2, size=(self.nx + 4, self.ny + 4, self.nz)).astype(np.float))
kbot = np.random.randint(1, self.nz, size=(self.nx + 4, self.ny + 4))
# add some islands, but avoid boundaries
kbot[3:-3, 3:-3].flat[np.random.randint(0, (self.nx - 2) * (self.ny - 2), size=10)] = 0
self.set_attribute('kbot', kbot)
self.test_module = eke
veros_args = (self.veros_new.state, )
veros_legacy_args = dict()
self.test_routines = OrderedDict()
self.test_routines['init_eke'] = (veros_args, veros_legacy_args)
self.test_routines['set_eke_diffusivities'] = (veros_args, veros_legacy_args)
self.test_routines['integrate_eke'] = (veros_args, veros_legacy_args)
def test_passed(self, routine):
for f in ('flux_east', 'flux_north', 'flux_top', 'eke', 'deke', 'hrms_k0', 'L_rossby',
'L_rhines', 'eke_len', 'K_gm', 'kappa_gm', 'K_iso', 'sqrteke', 'c_lee', 'c_Ri_diss',
'eke_diss_iw', 'eke_diss_tke', 'eke_lee_flux', 'eke_bot_flux'):
self.check_variable(f)
def test_eke(pyom2_lib, backend):
EKETest(fortran=pyom2_lib, backend=backend).run()
| mit | -7,968,939,652,004,585,000 | 43 | 117 | 0.557715 | false |
google-research/simclr | tf2/data_util.py | 1 | 18220 | # coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation."""
import functools
from absl import flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
"""Randomly apply function func to x with probability p."""
return tf.cond(
tf.less(
tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32),
tf.cast(p, tf.float32)), lambda: func(x), lambda: x)
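# Illustrative use (hypothetical call): flip an image half of the time:
#   image = random_apply(tf.image.flip_left_right, p=0.5, x=image)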
def random_brightness(image, max_delta, impl='simclrv2'):
"""A multiplicative vs additive change of brightness."""
if impl == 'simclrv2':
factor = tf.random.uniform([], tf.maximum(1.0 - max_delta, 0),
1.0 + max_delta)
image = image * factor
elif impl == 'simclrv1':
image = tf.image.random_brightness(image, max_delta=max_delta)
else:
raise ValueError('Unknown impl {} for random brightness.'.format(impl))
return image
def to_grayscale(image, keep_channels=True):
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
"""Distorts the color of the image.
Args:
image: The input image tensor.
strength: the floating number for the strength of the color augmentation.
random_order: A bool, specifying whether to randomize the jittering order.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
brightness = 0.8 * strength
contrast = 0.8 * strength
saturation = 0.8 * strength
hue = 0.2 * strength
if random_order:
return color_jitter_rand(
image, brightness, contrast, saturation, hue, impl=impl)
else:
return color_jitter_nonrand(
image, brightness, contrast, saturation, hue, impl=impl)
def color_jitter_nonrand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is fixed).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x, brightness, contrast, saturation, hue):
"""Apply the i-th transformation."""
if brightness != 0 and i == 0:
x = random_brightness(x, max_delta=brightness, impl=impl)
elif contrast != 0 and i == 1:
x = tf.image.random_contrast(
x, lower=1-contrast, upper=1+contrast)
elif saturation != 0 and i == 2:
x = tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
elif hue != 0:
x = tf.image.random_hue(x, max_delta=hue)
return x
for i in range(4):
image = apply_transform(i, image, brightness, contrast, saturation, hue)
image = tf.clip_by_value(image, 0., 1.)
return image
def color_jitter_rand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is random).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_foo():
if brightness == 0:
return x
else:
return random_brightness(x, max_delta=brightness, impl=impl)
def contrast_foo():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
def saturation_foo():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
def hue_foo():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
x = tf.cond(tf.less(i, 2),
lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
return x
perm = tf.random.shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def _compute_crop_shape(
image_height, image_width, aspect_ratio, crop_proportion):
"""Compute aspect ratio-preserving shape for central crop.
The resulting shape retains `crop_proportion` along one side and a proportion
less than or equal to `crop_proportion` along the other side.
Args:
image_height: Height of image to be cropped.
image_width: Width of image to be cropped.
aspect_ratio: Desired aspect ratio (width / height) of output.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
crop_height: Height of image after cropping.
crop_width: Width of image after cropping.
"""
image_width_float = tf.cast(image_width, tf.float32)
image_height_float = tf.cast(image_height, tf.float32)
def _requested_aspect_ratio_wider_than_image():
crop_height = tf.cast(
tf.math.rint(crop_proportion / aspect_ratio * image_width_float),
tf.int32)
crop_width = tf.cast(
tf.math.rint(crop_proportion * image_width_float), tf.int32)
return crop_height, crop_width
def _image_wider_than_requested_aspect_ratio():
crop_height = tf.cast(
tf.math.rint(crop_proportion * image_height_float), tf.int32)
crop_width = tf.cast(
tf.math.rint(crop_proportion * aspect_ratio * image_height_float),
tf.int32)
return crop_height, crop_width
return tf.cond(
aspect_ratio > image_width_float / image_height_float,
_requested_aspect_ratio_wider_than_image,
_image_wider_than_requested_aspect_ratio)
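# Worked example: for a 400x600 (h x w) image, aspect_ratio=1.0 and
# crop_proportion=0.875, the image is wider than requested, so
# crop_height = crop_width = rint(0.875 * 400) = 350.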
def center_crop(image, height, width, crop_proportion):
"""Crops to center of image and rescales to desired size.
Args:
image: Image Tensor to crop.
height: Height of image to be cropped.
width: Width of image to be cropped.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
A `height` x `width` x channels Tensor holding a central crop of `image`.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
crop_height, crop_width = _compute_crop_shape(
image_height, image_width, height / width, crop_proportion)
offset_height = ((image_height - crop_height) + 1) // 2
offset_width = ((image_width - crop_width) + 1) // 2
image = tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_height, crop_width)
image = tf.image.resize([image], [height, width],
method=tf.image.ResizeMethod.BICUBIC)[0]
return image
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: `Tensor` of image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
must contain a fraction of the supplied image within in this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
(cropped image `Tensor`, distorted bbox `Tensor`).
"""
with tf.name_scope(scope or 'distorted_bounding_box_crop'):
shape = tf.shape(image)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
return image
def crop_and_resize(image, height, width):
"""Make a random crop and resize it to height `height` and width `width`.
Args:
image: Tensor representing the image.
height: Desired image height.
width: Desired image width.
Returns:
A `height` x `width` x channels Tensor holding a random crop of `image`.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
aspect_ratio = width / height
image = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),
area_range=(0.08, 1.0),
max_attempts=100,
scope=None)
return tf.image.resize([image], [height, width],
method=tf.image.ResizeMethod.BICUBIC)[0]
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the given image with separable convolution.
Args:
image: Tensor of shape [height, width, channels] and dtype float to blur.
    kernel_size: Integer Tensor for the size of the blur kernel. This should
be an odd number. If it is an even number, the actual kernel size will be
size + 1.
sigma: Sigma value for gaussian operator.
padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
Returns:
A Tensor representing the blurred image.
"""
radius = tf.cast(kernel_size / 2, dtype=tf.int32)
kernel_size = radius * 2 + 1
x = tf.cast(tf.range(-radius, radius + 1), dtype=tf.float32)
blur_filter = tf.exp(-tf.pow(x, 2.0) /
(2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
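def _demo_gaussian_blur():
  # Usage sketch added for illustration; not part of the original file.
  # Blur a dummy image; an even kernel_size of 4 is widened to 5, as the
  # docstring above describes. Assumes TensorFlow 2.x.
  image = tf.random.uniform([64, 64, 3])
  return gaussian_blur(image, kernel_size=4, sigma=1.0)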
def random_crop_with_resize(image, height, width, p=1.0):
"""Randomly crop and resize an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: Probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
def _transform(image): # pylint: disable=missing-docstring
image = crop_and_resize(image, height, width)
return image
return random_apply(_transform, p=p, x=image)
def random_color_jitter(image, p=1.0, strength=1.0,
impl='simclrv2'):
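  """Randomly jitters image colors, or converts to grayscale (p=0.2).

  Args:
    image: `Tensor` representing an image of arbitrary size.
    p: Probability of applying this transformation.
    strength: Strength of the color jitter.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    A preprocessed image `Tensor`.
  """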
def _transform(image):
color_jitter_t = functools.partial(
color_jitter, strength=strength, impl=impl)
image = random_apply(color_jitter_t, p=0.8, x=image)
return random_apply(to_grayscale, p=0.2, x=image)
return random_apply(_transform, p=p, x=image)
def random_blur(image, height, width, p=1.0):
"""Randomly blur an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
del width
def _transform(image):
sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
return gaussian_blur(
image, kernel_size=height//10, sigma=sigma, padding='SAME')
return random_apply(_transform, p=p, x=image)
def batch_random_blur(images_list, height, width, blur_probability=0.5):
"""Apply efficient batch data transformations.
Args:
images_list: a list of image tensors.
height: the height of image.
width: the width of image.
    blur_probability: the probability to apply the blur operator.
Returns:
Preprocessed feature list.
"""
def generate_selector(p, bsz):
shape = [bsz, 1, 1, 1]
selector = tf.cast(
tf.less(tf.random.uniform(shape, 0, 1, dtype=tf.float32), p),
tf.float32)
return selector
new_images_list = []
for images in images_list:
images_new = random_blur(images, height, width, p=1.)
selector = generate_selector(blur_probability, tf.shape(images)[0])
images = images_new * selector + images * (1 - selector)
images = tf.clip_by_value(images, 0., 1.)
new_images_list.append(images)
return new_images_list
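def _demo_batch_random_blur():
  # Usage sketch added for illustration; not part of the original file.
  # Blur a batch of four dummy 32x32 images, each with 50% probability;
  # outputs stay clipped to [0, 1].
  images = tf.random.uniform([4, 32, 32, 3])
  [blurred] = batch_random_blur([images], height=32, width=32)
  return blurred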
def preprocess_for_train(image,
height,
width,
color_distort=True,
crop=True,
flip=True,
impl='simclrv2'):
"""Preprocesses the given image for training.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
color_distort: Whether to apply the color distortion.
crop: Whether to crop the image.
flip: Whether or not to flip left and right of an image.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
A preprocessed image `Tensor`.
"""
if crop:
image = random_crop_with_resize(image, height, width)
if flip:
image = tf.image.random_flip_left_right(image)
if color_distort:
image = random_color_jitter(image, strength=FLAGS.color_jitter_strength,
impl=impl)
image = tf.reshape(image, [height, width, 3])
image = tf.clip_by_value(image, 0., 1.)
return image
def preprocess_for_eval(image, height, width, crop=True):
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
crop: Whether or not to (center) crop the test images.
Returns:
A preprocessed image `Tensor`.
"""
if crop:
image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION)
image = tf.reshape(image, [height, width, 3])
image = tf.clip_by_value(image, 0., 1.)
return image
def preprocess_image(image, height, width, is_training=False,
color_distort=True, test_crop=True):
"""Preprocesses the given image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
is_training: `bool` for whether the preprocessing is for training.
color_distort: whether to apply the color distortion.
test_crop: whether or not to extract a central crop of the images
(as for standard ImageNet evaluation) during the evaluation.
Returns:
A preprocessed image `Tensor` of range [0, 1].
"""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if is_training:
return preprocess_for_train(image, height, width, color_distort)
else:
return preprocess_for_eval(image, height, width, test_crop)
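def _demo_preprocess_image():
  # Usage sketch added for illustration; not part of the original file.
  # Evaluation-mode preprocessing of a dummy uint8 image; training mode
  # would additionally require the absl FLAGS used above to be parsed.
  image = tf.cast(
      tf.random.uniform([300, 280, 3], maxval=256, dtype=tf.int32), tf.uint8)
  return preprocess_image(image, 224, 224, is_training=False)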
| apache-2.0 | -7,356,768,281,607,679,000 | 34.105973 | 80 | 0.644237 | false |
Daniel-CA/odoo-addons | stock_quant_expiry/models/stock_quant.py | 1 | 1792 | # -*- coding: utf-8 -*-
# Copyright 2017 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class StockQuant(models.Model):
_inherit = 'stock.quant'
@api.multi
@api.depends('lot_id.life_date', 'lot_id.mrp_date')
def _compute_lifespan(self):
for record in self.filtered(lambda x: x.lot_id and
x.lot_id.life_date and x.lot_id.mrp_date):
life_date = fields.Date.from_string(record.lot_id.life_date)
mrp_date = fields.Date.from_string(record.lot_id.mrp_date)
record.lifespan = (life_date - mrp_date).days
def _compute_lifespan_progress(self):
for record in self.filtered(lambda x: x.lot_id and
x.lot_id.life_date and x.lot_id.mrp_date):
life_date = fields.Date.from_string(record.lot_id.life_date)
mrp_date = fields.Date.from_string(record.lot_id.mrp_date)
today = fields.Date.from_string(fields.Date.today())
lifespan = (life_date - mrp_date).days
todayspan = (today - mrp_date).days
if not lifespan:
continue
record.lifespan_progress = float(todayspan) / float(lifespan) * 100
mrp_date = fields.Date(string='Mrp Date', store=True,
related='lot_id.mrp_date')
life_date = fields.Datetime(string='Expiry Date',
related='lot_id.life_date')
lifespan = fields.Integer(string='Lifespan', store=True,
compute='_compute_lifespan')
lifespan_progress = fields.Float(string='Lifespan Progress',
compute='_compute_lifespan_progress')
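# Illustrative sketch (added; not part of the original module): reading the
# computed fields from quants of lot-tracked products, assuming an Odoo 8
# environment `env` is available. Field names are the ones declared above.
#
#   quants = env['stock.quant'].search([('lot_id', '!=', False)])
#   for quant in quants:
#       print quant.lot_id.name, quant.lifespan, quant.lifespan_progress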
| agpl-3.0 | -6,778,287,636,719,805,000 | 43.8 | 79 | 0.580915 | false |
harpribot/deep-summarization | train_scripts/train_script_lstm_stacked_bidirectional_attention.py | 1 | 1043 | import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
from models import lstm_stacked_bidirectional
from helpers import checkpoint
# Get the review summary file
review_summary_file = 'extracted_data/review_summary.csv'
# Initialize Checkpointer to ensure checkpointing
checkpointer = checkpoint.Checkpointer('stackedBidirectional', 'lstm', 'Attention')
checkpointer.steps_per_checkpoint(1000)
checkpointer.steps_per_prediction(1000)
# Do using GRU cell - without attention mechanism
out_file = 'result/stacked_bidirectional/lstm/attention.csv'
checkpointer.set_result_location(out_file)
lstm_net = lstm_stacked_bidirectional.LstmStackedBidirectional(review_summary_file,
checkpointer, attention=True, num_layers=2)
lstm_net.set_parameters(train_batch_size=128, test_batch_size=128, memory_dim=128, learning_rate=0.05)
lstm_net.begin_session()
lstm_net.form_model_graph()
lstm_net.fit()
lstm_net.predict()
lstm_net.store_test_predictions()
| mit | 4,807,616,649,628,322,000 | 44.347826 | 106 | 0.744008 | false |
ScanOC/trunk-player | radio/receivers.py | 1 | 1882 | # receivers.py
import json
import logging
import datetime
from django.dispatch import receiver
from django.contrib.auth.models import User
from pinax.stripe.signals import WEBHOOK_SIGNALS
from radio.models import Plan, StripePlanMatrix, Profile
from pinax.stripe.models import Plan as pinax_Plan
# Get an instance of a logger
logger = logging.getLogger(__name__)
@receiver(WEBHOOK_SIGNALS["invoice.payment_succeeded"])
def handle_payment_succeeded(sender, event, **kwargs):
logger.error('----------------------------------------')
logger.error('Stripe Payment Posted')
logger.error(event.customer)
#logger.error(event.webhook_message)
@receiver(WEBHOOK_SIGNALS["customer.subscription.created"])
def handle_subscription_created(sender, event, **kwargs):
hook_message = event.webhook_message
customer = event.customer
stripe_subscription_end = hook_message['data']['object']['current_period_end']
stripe_subscription_plan_id = hook_message['data']['object']['items']['data'][0]['plan']['id']
user = User.objects.get(username=customer)
user_profile = Profile.objects.get(user=user)
stripe_plan = pinax_Plan.objects.get(stripe_id=stripe_subscription_plan_id)
plan_matrix = StripePlanMatrix.objects.get(stripe_plan=stripe_plan)
user_profile.plan = plan_matrix.radio_plan
user_profile.save()
logger.error('Moving Customer {} to plan {}'.format(user, plan_matrix.radio_plan))
logger.error('Stripe customer.subscription.created {}'.format(event.customer))
end_date = datetime.datetime.fromtimestamp(hook_message['data']['object']['current_period_end']).strftime('%c')
logger.error('END TS {}'.format(end_date))
#logger.error('TESTING {}'.format(hook_message['data']['object']['data'][0]))
logger.error('TESTING ID {}'.format(hook_message['data']['object']['items']['data'][0]['plan']['id']))
| mit | -176,158,808,518,551,600 | 37.408163 | 115 | 0.70457 | false |
Grumbel/rfactorlcd | rfactorlcd/state.py | 1 | 13232 | # rFactor Remote LCD
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import rfactorlcd
class LapTime:
def __init__(self):
self.sector1 = 0
self.sector2 = 0
self.sector3 = 0
@property
def total(self):
return self.sector1 + self.sector2 + self.sector3
class LapTimes(object):
"""Lap time history for a vehicle in a single session"""
def __init__(self):
self.laps = {}
self.current_sector = None
@property
def best_time(self):
        if not self.laps:
            return 0
        else:
            # self.laps is a dict keyed by lap number, so iterate its values
            return min([lap.total for lap in self.laps.values()])
def last_lap(self):
last_lap = -1
last_times = None
for lap, times in self.laps.items():
if lap > last_lap:
last_lap = lap
last_times = times
return last_times
def update(self, state):
"""Update current LapTime history with info from VehicleState"""
if state.sector == 0 and state.total_laps == 0:
pass
elif self.current_sector != state.sector:
self.current_sector = state.sector
if state.sector == 0:
lap = state.total_laps - 1
else:
lap = state.total_laps
if lap in self.laps:
lap_time = self.laps[lap]
else:
lap_time = LapTime()
self.laps[lap] = lap_time
# set the sector time in the LapTime object
if state.sector == 1:
lap_time.sector1 = state.cur_sector1
elif state.sector == 2:
lap_time.sector2 = state.cur_sector2 - state.cur_sector1
elif state.sector == 0:
lap_time.sector3 = state.last_lap_time - state.cur_sector2
else:
logging.error("unknown sector: %d" % state.sector)
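def _demo_lap_times():
    # Illustration added; not part of the original file. Drive
    # LapTimes.update() with a minimal stand-in for VehicleState and read
    # back the recorded sector time.
    class _StubState(object):
        sector = 1
        total_laps = 1
        cur_sector1 = 30.5
        cur_sector2 = 0.0
        last_lap_time = 0.0
    times = LapTimes()
    times.update(_StubState())  # entering sector 1 of lap 1
    return times.laps[1].sector1  # -> 30.5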
class WheelState(object):
def __init__(self):
self.rotation = 0.0
self.suspension_deflection = 0.0
self.ride_height = 0.0
self.tire_load = 0.0
self.lateral_force = 0.0
self.grip_fract = 0.0
self.brake_temp = 0.0
self.pressure = 0.0
self.temperature = [0.0, 0.0, 0.0]
self.wear = 0.0
self.surface_type = 0
self.flat = 0
self.detached = 0
class VehicleState(object):
def __init__(self):
self.is_player = 0
self.control = 0
self.driver_name = ""
self.vehicle_name = ""
self.vehicle_class = ""
self.total_laps = 0
self.sector = 0
self.finish_status = 0
self.lap_dist = 0
self.path_lateral = 0.0
self.track_edge = 0.0
self.in_pits = 0
self.place = 0
self.time_behind_next = 0.0
self.laps_behind_next = 0
self.time_behind_leader = 0.0
self.laps_behind_leader = 0
self.best_sector1 = 0.0
self.best_sector2 = 0.0
self.best_lap_time = 0.0
self.last_sector1 = 0.0
self.last_sector2 = 0.0
self.last_lap_time = 0.0
self.cur_sector1 = 0.0
self.cur_sector2 = 0.0
self.num_pitstops = 0
self.num_penalties = 0
self.lap_start_et = 0.0
self.lap_times = LapTimes()
class rFactorState(object):
def __init__(self):
self.session_id = 0
# telemetry defaults
self.lap_number = 0
self.lap_start_et = 0.0
self.pos = (0.0, 0.0, 0.0)
self.local_vel = (0.0, 0.0, 0.0)
self.local_accel = (0.0, 0.0, 0.0)
self.ori_x = (0.0, 0.0, 0.0)
self.ori_y = (0.0, 0.0, 0.0)
self.ori_z = (0.0, 0.0, 0.0)
self.local_rot = (0.0, 0.0, 0.0)
self.local_rot_accel = (0.0, 0.0, 0.0)
self.gear = 0
self.rpm = 0.0
self.max_rpm = 0.0
self.clutch_rpm = 0.0
self.fuel = 0.0
self.water_temp = 0.0
self.oil_temp = 0.0
self.throttle = 0.0
self.brake = 0.0
self.steering = 0.0
self.clutch = 0.0
self.steering_arm_force = 0.0
self.scheduled_stops = 0
self.overheating = 0
self.detached = 0
self.dent_severity = [0, 0, 0, 0, 0, 0, 0, 0]
self.wheels = [WheelState(), WheelState(), WheelState(), WheelState()]
self.num_vehicles = 0
self.player = None
self.vehicles = []
# info
self.track_name = ""
self.player_name = ""
self.plr_file_name = ""
self.end_e_t = 0.0
self.max_laps = 0
self.lap_dist = 1.0
# score
self.game_phase = 0
self.yellow_flag_state = 0
self.sector_flag = [0, 0, 0]
self.start_light = 0
self.num_red_lights = 0
self.session = 0
self.current_e_t = 0.0
self.ambient_temp = 0.0
self.track_temp = 0.0
# Backward compatibility hacks:
self.speed = 0
self.laptime = "1:23:45"
self.best_lap_driver = ""
@property
def best_lap_time(self):
if self.vehicles != []:
best = self.vehicles[0].best_lap_time
for veh in self.vehicles[1:]:
if veh.best_lap_time < best:
best = veh.best_lap_time
self.best_lap_driver = veh.driver_name # FIXME: hack
return best
else:
return 0
def on_telemetry(self, msg):
self.delta_time = msg.read_float()
self.lap_number = msg.read_int()
self.lap_start_et = msg.read_float()
# missing: mVehicleName[64]
# missing: mTrackName[64]
self.pos = msg.read_vect()
self.local_vel = msg.read_vect()
self.local_accel = msg.read_vect()
self.ori_x = msg.read_vect()
self.ori_y = msg.read_vect()
self.ori_z = msg.read_vect()
self.local_rot = msg.read_vect()
self.local_rot_accel = msg.read_vect()
self.gear = msg.read_int()
self.rpm = msg.read_float()
self.max_rpm = msg.read_float()
self.clutch_rpm = msg.read_float()
self.fuel = msg.read_float()
self.water_temp = msg.read_float()
self.oil_temp = msg.read_float()
self.throttle = msg.read_float()
self.brake = msg.read_float()
self.steering = msg.read_float()
self.clutch = msg.read_float()
self.steering_arm_force = msg.read_float()
self.scheduled_stops = msg.read_char()
self.overheating = msg.read_char()
self.detached = msg.read_char()
self.dent_severity = msg.read_multi_char(8)
self.last_impact_e_t = msg.read_float()
self.last_impact_magnitude = msg.read_float()
self.last_impact_pos = msg.read_vect()
# give speed in km/h
self.speed = -self.local_vel[2] * 3.6
for i in range(0, 4):
self.wheels[i].rotation = msg.read_float()
self.wheels[i].suspension_deflection = msg.read_float()
self.wheels[i].ride_height = msg.read_float()
self.wheels[i].tire_load = msg.read_float()
self.wheels[i].lateral_force = msg.read_float()
self.wheels[i].grip_fract = msg.read_float()
self.wheels[i].brake_temp = msg.read_float()
self.wheels[i].pressure = msg.read_float()
self.wheels[i].temperature = [msg.read_float(),
msg.read_float(),
msg.read_float()]
self.wheels[i].wear = msg.read_float()
# missing: mTerrainName[16]
self.wheels[i].surface_type = msg.read_char()
self.wheels[i].flat = msg.read_char()
self.wheels[i].detached = msg.read_char()
def on_vehicle(self, msg):
self.num_vehicles = msg.read_int()
if self.num_vehicles != len(self.vehicles):
self.vehicles = []
for i in range(self.num_vehicles):
self.vehicles.append(VehicleState())
for i in range(0, self.num_vehicles):
self.vehicles[i].is_player = msg.read_char()
self.vehicles[i].control = msg.read_char()
self.vehicles[i].driver_name = msg.read_string()
self.vehicles[i].vehicle_name = msg.read_string()
self.vehicles[i].vehicle_class = msg.read_string()
self.vehicles[i].total_laps = msg.read_short()
# rFactor numbers sectors 1, 2, 0, convert them to 0, 1, 2
self.vehicles[i].sector = (msg.read_char() + 2) % 3
self.vehicles[i].finish_status = msg.read_char()
self.vehicles[i].lap_dist = msg.read_float()
self.vehicles[i].path_lateral = msg.read_float()
self.vehicles[i].track_edge = msg.read_float()
self.vehicles[i].in_pits = msg.read_char()
self.vehicles[i].place = msg.read_char()
self.vehicles[i].time_behind_next = msg.read_float()
self.vehicles[i].laps_behind_next = msg.read_int()
self.vehicles[i].time_behind_leader = msg.read_float()
self.vehicles[i].laps_behind_leader = msg.read_int()
self.vehicles[i].best_sector1 = msg.read_float()
self.vehicles[i].best_sector2 = msg.read_float()
self.vehicles[i].best_lap_time = msg.read_float()
# these times are only updated going into a new lap
self.vehicles[i].last_sector1 = msg.read_float()
self.vehicles[i].last_sector2 = msg.read_float()
self.vehicles[i].last_lap_time = msg.read_float()
self.vehicles[i].cur_sector1 = msg.read_float()
self.vehicles[i].cur_sector2 = msg.read_float()
self.vehicles[i].num_pitstops = msg.read_short()
self.vehicles[i].num_penalties = msg.read_short()
self.vehicles[i].lap_start_et = msg.read_float()
self.vehicles[i].pos = msg.read_vect()
self.vehicles[i].local_vel = msg.read_vect()
self.vehicles[i].local_accel = msg.read_vect()
self.vehicles[i].ori_x = msg.read_vect()
self.vehicles[i].ori_y = msg.read_vect()
self.vehicles[i].ori_z = msg.read_vect()
self.vehicles[i].local_rot = msg.read_vect()
self.vehicles[i].local_rot_accel = msg.read_vect()
if self.vehicles[i].is_player:
self.player = self.vehicles[i]
self.vehicles[i].lap_times.update(self.vehicles[i])
def on_score(self, msg):
self.game_phase = msg.read_char()
self.yellow_flag_state = msg.read_char()
self.sector_flag = msg.read_multi_char(3)
self.start_light = msg.read_char()
self.num_red_lights = msg.read_char()
self.in_realtime = msg.read_char()
self.session = msg.read_int()
self.current_e_t = msg.read_float()
self.ambient_temp = msg.read_float()
self.track_temp = msg.read_float()
self.dark_cloud = msg.read_float()
self.raining = msg.read_float()
self.wind = msg.read_vect()
self.on_path_wetness = msg.read_float()
self.off_path_wetness = msg.read_float()
def on_info(self, msg):
self.track_name = msg.read_string()
self.player_name = msg.read_string()
self.plr_file_name = msg.read_string()
self.end_e_t = msg.read_float()
self.max_laps = msg.read_int()
self.lap_dist = msg.read_float()
# missing mResultsStream
def on_start_realtime(self, msg):
pass
def on_end_realtime(self, msg):
pass
def on_start_session(self, msg):
self.session_id += 1
self.vehicles = []
logging.info("on_start_session")
def on_end_session(self, msg):
logging.info("on_end_session")
def dispatch_message(self, tag, payload):
msg = rfactorlcd.BinaryDecoder(payload)
if tag == "STSS":
self.on_start_session(msg)
elif tag == "EDSS":
self.on_end_session(msg)
elif tag == "STRT":
self.on_start_realtime(msg)
elif tag == "EDRT":
self.on_end_realtime(msg)
elif tag == "VHCL":
self.on_vehicle(msg)
elif tag == "TLMT":
self.on_telemetry(msg)
elif tag == "SCOR":
self.on_score(msg)
elif tag == "INFO":
self.on_info(msg)
else:
print "error: unhandled tag: %s" % tag
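# Illustrative sketch (added; not part of the original file): how incoming
# rFactor packets would be routed, assuming `payload` bytes were read off the
# wire. Tag names are the ones handled in dispatch_message() above.
#
#   state = rFactorState()
#   state.dispatch_message("TLMT", payload)  # telemetry update
#   state.dispatch_message("SCOR", payload)  # scoring update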
# EOF #
| gpl-3.0 | -7,897,458,366,094,102,000 | 30.504762 | 78 | 0.546025 | false |
ocelot-collab/ocelot | unit_tests/ebeam_test/acc_utils/acc_utils_test.py | 1 | 4045 | """Test of the demo file demos/ebeam/csr_ex.py"""
import os
import sys
import copy
import time
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
REF_RES_DIR = FILE_DIR + '/ref_results/'
from unit_tests.params import *
from acc_utils_conf import *
def test_lattice_transfer_map(lattice, p_array, parameter=None, update_ref_values=False):
"""R matrix calculation test"""
r_matrix = lattice_transfer_map(lattice, 0.0)
if update_ref_values:
return numpy2json(r_matrix)
r_matrix_ref = json2numpy(json_read(REF_RES_DIR + sys._getframe().f_code.co_name + '.json'))
result = check_matrix(r_matrix, r_matrix_ref, TOL, assert_info=' r_matrix - ')
assert check_result(result)
@pytest.mark.parametrize('parameter', [0, 1])
def test_lattice_transfer_map_RT(lattice, p_array, parameter, update_ref_values=False):
"""test R56 and T566 of the chicane"""
r56, t566, u5666, Sref = chicane_RTU(yoke_len=b1.l/b1.angle*np.sin(b1.angle), dip_dist=d1.l*np.cos(b1.angle), r=b1.l/b1.angle, type='c')
lattice = copy.deepcopy(lattice)
if parameter == 1:
for elem in lattice.sequence:
if elem.__class__ == Bend:
elem.tilt = np.pi / 2
lattice.update_transfer_maps()
r_matrix = lattice_transfer_map(lattice, 0.0)
result1 = check_value(r_matrix[4, 5], r56, tolerance=1.0e-14, assert_info=" R56 ")
result2 = check_value(lattice.T[4, 5, 5], t566, tolerance=1.0e-14, assert_info=" T566 ")
assert check_result([result1, result2])
def test_rf2beam(lattice, p_array, parameter=None, update_ref_values=False):
"""
    rf2beam/beam2rf round-trip test: convert RF settings (voltages and
    phases) to beam parameters and back, and check that the original
    values are recovered.
"""
v1 = 0.14746291505994155
phi1 = -11.105280079934298
vh = 0.030763428944485114
phih = 132.9179951484828 - 360
E1, chirp, curvature, skewness = rf2beam(v1, phi1, vh, phih, n=3, freq=1.3e9, E0=0.00675, zeta1=0., zeta2=0.,
zeta3=0.)
v1_r, phi1_r, vh_r, phih_r = beam2rf(E1, chirp, curvature, skewness, n=3, freq=1.3e9, E0=0.00675, zeta1=0., zeta2=0.,
zeta3=0.)
r1 = check_value(v1_r, v1, tolerance=1.0e-8, tolerance_type='relative', assert_info='v1')
r2 = check_value(phi1_r, phi1, tolerance=1.0e-8, tolerance_type='relative', assert_info='phi1')
r3 = check_value(vh_r, vh, tolerance=1.0e-8, tolerance_type='relative', assert_info='vh')
r4 = check_value(phih_r, phih, tolerance=1.0e-8, tolerance_type='relative', assert_info='phih')
assert check_result([r1, r2, r3, r4])
def setup_module(module):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write('### CSR_EX START ###\n\n')
f.close()
def teardown_module(module):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write('### CSR_EX END ###\n\n\n')
f.close()
def setup_function(function):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write(function.__name__)
f.close()
pytest.t_start = time.time()
def teardown_function(function):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write(' execution time is ' + '{:.3f}'.format(time.time() - pytest.t_start) + ' sec\n\n')
f.close()
@pytest.mark.update
def test_update_ref_values(lattice, p_array, cmdopt):
update_functions = []
update_functions.append('test_lattice_transfer_map')
update_function_parameters = {}
parameter = update_function_parameters[cmdopt] if cmdopt in update_function_parameters.keys() else ['']
if cmdopt in update_functions:
for p in parameter:
p_arr = copy.deepcopy(p_array)
result = eval(cmdopt)(lattice, p_arr, p, True)
if os.path.isfile(REF_RES_DIR + cmdopt + str(p) + '.json'):
os.rename(REF_RES_DIR + cmdopt + str(p) + '.json', REF_RES_DIR + cmdopt + str(p) + '.old')
json_save(result, REF_RES_DIR + cmdopt + str(p) + '.json')
| gpl-3.0 | -2,321,862,415,947,859,000 | 32.155738 | 140 | 0.624969 | false |
ellisztamas/faps | faps/pr_unsampled.py | 1 | 2716 | import numpy as np
def pr_unsampled(offspring_diploid, maternal_diploid, allele_freqs, offspring_genotype, maternal_genotype, male_genotype, mu):
"""
Calculate the transitions probability for a given set of parental and offspring
alleles.
Transitipn probabilities are then weight by the probability of drawing the allele
from the population, and the probability that this allele is the true allele, given
observed genotype data and the error rate mu.
ARGUMENTS:
offspring_diploid, maternal_diploid, male_diploid: arrays of diploid genotypes for
the offspring, mothers and fathers.
allele_freqs = vector of population allele frequencies.
offspring_genotype, maternal_genotype, male_genotype: a two-element list of zeroes
and ones indicating the diploid genotype of males, mothers and offspring to be
considered.
mu: point estimate of the genotyping error rate.
RETURNS:
A 3-dimensional array of probabilities indexing offspring, candidate males, and loci.
These are given in linear, rather than log space.
"""
# an array of all possible transition probabilities indexed as [offspring, mother, father].
trans_prob_array = np.array([[[1, 0.5, 0 ],
[0.5,0.25,0 ],
[0, 0, 0 ]],
[[0, 0.5, 1 ],
[0.5,0.5, 0.5],
[1, 0.5, 0 ]],
[[0, 0, 0 ],
[0, 0.25,0.5],
[0, 0.5, 1 ]]])
# the transition probability for the given genotypes.
trans_prob = trans_prob_array[offspring_genotype, maternal_genotype, male_genotype]
# Probabilities that the observed offspring marker data match observed data.
pr_offs = np.zeros([offspring_diploid.shape[0], offspring_diploid.shape[1]])
pr_offs[offspring_diploid == offspring_genotype] = 1-mu
pr_offs[offspring_diploid != offspring_genotype] = mu
# Probabilities that the observed maternal marker data match observed data.
pr_mothers = np.zeros([maternal_diploid.shape[0], maternal_diploid.shape[1]])
pr_mothers[maternal_diploid == maternal_genotype] = 1-mu
pr_mothers[maternal_diploid != maternal_genotype] = mu
# Probability of the father is drawn from population allele frequencies.
    # use == rather than `is`: identity comparison on ints is unreliable
    if male_genotype == 0: pr_males = allele_freqs**2
    if male_genotype == 1: pr_males = allele_freqs*(1-allele_freqs)
    if male_genotype == 2: pr_males = (1-allele_freqs)**2
    return trans_prob * pr_males * pr_mothers * pr_offs
| mit | 5,498,974,815,240,566,000 | 46.666667 | 126 | 0.623343 | false
fbradyirl/home-assistant | homeassistant/components/mochad/light.py | 1 | 4739 | """Support for X10 dimmer over Mochad."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
Light,
PLATFORM_SCHEMA,
)
from homeassistant.components import mochad
from homeassistant.const import CONF_NAME, CONF_PLATFORM, CONF_DEVICES, CONF_ADDRESS
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_BRIGHTNESS_LEVELS = "brightness_levels"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): mochad.DOMAIN,
CONF_DEVICES: [
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): cv.x10_address,
vol.Optional(mochad.CONF_COMM_TYPE): cv.string,
vol.Optional(CONF_BRIGHTNESS_LEVELS, default=32): vol.All(
vol.Coerce(int), vol.In([32, 64, 256])
),
}
],
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up X10 dimmers over a mochad controller."""
devs = config.get(CONF_DEVICES)
add_entities([MochadLight(hass, mochad.CONTROLLER.ctrl, dev) for dev in devs])
return True
class MochadLight(Light):
"""Representation of a X10 dimmer over Mochad."""
def __init__(self, hass, ctrl, dev):
"""Initialize a Mochad Light Device."""
from pymochad import device
self._controller = ctrl
self._address = dev[CONF_ADDRESS]
self._name = dev.get(CONF_NAME, "x10_light_dev_{}".format(self._address))
self._comm_type = dev.get(mochad.CONF_COMM_TYPE, "pl")
self.light = device.Device(ctrl, self._address, comm_type=self._comm_type)
self._brightness = 0
self._state = self._get_device_status()
self._brightness_levels = dev.get(CONF_BRIGHTNESS_LEVELS) - 1
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
def _get_device_status(self):
"""Get the status of the light from mochad."""
with mochad.REQ_LOCK:
status = self.light.get_status().rstrip()
return status == "on"
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def is_on(self):
"""Return true if the light is on."""
return self._state
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_BRIGHTNESS
@property
def assumed_state(self):
"""X10 devices are normally 1-way so we have to assume the state."""
return True
def _calculate_brightness_value(self, value):
return int(value * (float(self._brightness_levels) / 255.0))
def _adjust_brightness(self, brightness):
if self._brightness > brightness:
bdelta = self._brightness - brightness
mochad_brightness = self._calculate_brightness_value(bdelta)
self.light.send_cmd("dim {}".format(mochad_brightness))
self._controller.read_data()
elif self._brightness < brightness:
bdelta = brightness - self._brightness
mochad_brightness = self._calculate_brightness_value(bdelta)
self.light.send_cmd("bright {}".format(mochad_brightness))
self._controller.read_data()
def turn_on(self, **kwargs):
"""Send the command to turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
with mochad.REQ_LOCK:
if self._brightness_levels > 32:
out_brightness = self._calculate_brightness_value(brightness)
self.light.send_cmd("xdim {}".format(out_brightness))
self._controller.read_data()
else:
self.light.send_cmd("on")
self._controller.read_data()
# There is no persistence for X10 modules so a fresh on command
# will be full brightness
if self._brightness == 0:
self._brightness = 255
self._adjust_brightness(brightness)
self._brightness = brightness
self._state = True
    def turn_off(self, **kwargs):
        """Send the command to turn the light off."""
with mochad.REQ_LOCK:
self.light.send_cmd("off")
self._controller.read_data()
# There is no persistence for X10 modules so we need to prepare
# to track a fresh on command will full brightness
if self._brightness_levels == 31:
self._brightness = 0
self._state = False
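# Illustrative note (added; not part of the original file): how
# _calculate_brightness_value() maps a Home Assistant brightness (0-255)
# onto the configured X10 dim levels. With 32 levels (_brightness_levels
# == 31):
#
#   int(255 * (31 / 255.0)) == 31   # full brightness -> top level
#   int(128 * (31 / 255.0)) == 15   # half brightness -> mid level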
| apache-2.0 | 1,578,962,292,659,869,400 | 34.365672 | 84 | 0.603292 | false |
JoErNanO/brianmodel | brianmodel/neuron/neuron.py | 1 | 5023 | #!/usr/bin/python
# coding: utf-8
# #################################################################################
# Copyright (C) 2014 Francesco Giovannini, Neurosys - INRIA CR Nancy - Grand Est
# Authors: Francesco Giovannini
# email: [email protected]
# website: http://neurosys.loria.fr/
# Permission is granted to copy, distribute, and/or modify this program
# under the terms of the GNU General Public License, version 3 or any
# later version published by the Free Software Foundation.
#
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details
# #################################################################################
"""
neuron
~~~~~~~~~~~~~
This module contains an abstraction of a Brian-compatible neuron, represented as a cell with a list of :class:`IonicCurrent`'s.
:copyright 2014 Francesco Giovannini, Neurosys - INRIA CR Nancy - Grand Est
:licence GPLv3, see LICENCE for more details
"""
from utilities import utilities as utilities
import ioniccurrent.ioniccurrentfactory as icf
import yaml
## ***************************************************************************************************************** ##
class Neuron(object):
"""
The :class:`Neuron` represents a biological neuron with a set of properties, and a list of :class:`IonicCurrent`'s flowing through its membrane.
"""
## ************************************************************ ##
def __init__(self, parameters):
"""
Default constructor.
:param area: the area of the soma of the neural cell
:type area: float
:param conductance: the conductance of the neural cell
:type conductance: float
"""
# Initialise attributes
self.parameters = parameters.values()[0]
self.name = parameters.keys()[0]
self.area = self.parameters['area']
self.conductance = self.parameters['conductance']
# Initialise list of defined currents - FG: fixes bug due to having an empty list of defined currents when including all the needed ones
if 'defined' not in self.parameters['currents']: # Keyword 'defined' doesn't exists
self.parameters['currents']['defined'] = []
elif self.parameters['currents']['defined'] is None: # List of 'defined' currents is undefined/empty
self.parameters['currents']['defined'] = []
# Check for parameters specified as include files
for f in self.parameters['currents'].get('included', []):
try:
with open(f) as curr:
# Load included currents from file
includes = yaml.load(curr)
# Add them the list of defined currents
self.parameters['currents'].get('defined', []).extend(includes)
except IOError:
raise IOError('Cannot load current parameter file named ' + f)
# Remove list of includesd currents from dict of currents
self.parameters['currents'].pop('included', [])
# Initialise ionic current factory
self.factory = icf.IonicCurrentFactory()
# Build current list
self.currents = []
for currentParams in self.parameters['currents'].get('defined', []):
tmpCurrent = self.factory.makeIonicCurrent(currentParams, self.area)
self.currents.append(tmpCurrent)
# Store safe string representation of parameters
self._area = utilities.getSafeStringParam(self.area)
self._conductance = utilities.getSafeStringParam(utilities.getSafeStringParam(self.conductance) + ' * ' + self._area)
## ************************************************************ ##
## ************************************************************ ##
def getNeuronString(self):
"""
Generate the string representation of the neural cell model.
"""
res = ""
# Neuron model equation
dvdt = '''dv/dt = ('''
# Add current equations
for curr in self.currents:
dvdt += ''' - ''' + curr.name # Add current name to dvdt equation
res += curr.getIonicCurrentString() # Add current equation to neuron model
dvdt += ''' + I_stim) / ''' + self._conductance + ''' : volt \n''' # Append conductance division
# Check Voltage clamp
if self.parameters.has_key('vClamp') and self.parameters['vClamp']:
dvdt = '''v : volt \n'''
# Stimulus current
istim = '''I_stim : amp'''
# Build final neuron model equation
res = dvdt + res + istim
return res
## ************************************************************ ##
## ***************************************************************************************************************** ##
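# Illustrative sketch (added; not part of the original module): the parameter
# dict this class expects mirrors the YAML layout, e.g. (values hypothetical):
#
#   params = {'PY': {'area': '29e3 * umetre ** 2',
#                    'conductance': '1 * msiemens * cm ** -2',
#                    'currents': {'defined': []}}}
#   print Neuron(params).getNeuronString()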
| gpl-3.0 | 8,160,894,770,479,721,000 | 39.184 | 148 | 0.550468 | false |
ronnyandersson/zignal | examples/ex_chunks.py | 1 | 2576 | '''
Created on 12 Apr 2020
@author: Ronny Andersson ([email protected])
@copyright: (c) 2020 Ronny Andersson
@license: MIT
Demo of how to iterate over an instance of the Audio class, for chunk-based
processing. Typically the chunks have a size that is a power of two, for
example 256, 1024 or 4096. In this example the chunk size is set to 1000
for simplicity in the plots. The sample rate in this example is also set to
a value that enhances the effect of the example, since here a chunk equals
one second of data.
'''
# Standard library
import logging
# Third party
import matplotlib.pyplot as plt
import numpy as np
# Internal
import zignal
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
level='DEBUG',
)
logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("zignal").setLevel(logging.DEBUG)
fs = 1000
# Create various ramp signals, to visualise the chunks better. Not real
# audio, but shows in a plot what the chunks look like
    a1 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=1000//2))
a2 = zignal.Audio(fs=fs, initialdata=np.linspace(0, -1, num=(1000*1)+500))
a3 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000*2)+200))
a = zignal.Audio(fs=fs)
a.append(a1, a2, a3)
print(a)
# We now have 2.2 seconds of audio in three channels. This does not add up
# to even chunk sizes, so padding will have to be done in order to iterate.
#
# Three (3) chunks are expected.
for val in a.iter_chunks(chunksize=1000):
print("------------------------------------------------")
print("shape of data in chunk: %s" % str(val.shape))
print(val)
plt.figure(1)
plt.plot(val[:, 0], ls="-", label="a1")
plt.plot(val[:, 1], ls="--", label="a2")
plt.plot(val[:, 2], ls="-.", label="a3")
plt.grid()
plt.ylim(-1.1, 1.1)
plt.xlabel("samples in chunk")
plt.ylabel("magnitude [lin]")
plt.legend(loc="upper right")
plt.show()
# We can pad beforehand if we know how many samples are missing, then no
# padding will occur inside the iterator
b = a.copy()
b.gain(-20) # just to get a debug logging entry
b.pad(nofsamples=800)
print(b)
for val in b.iter_chunks(chunksize=1000):
print("------------------------------------------------")
print("shape of data in chunk: %s" % str(val.shape))
print(val)
print('-- Done --')
| mit | -1,448,987,454,992,654,300 | 32.025641 | 79 | 0.612189 | false |
modesttree/Projeny | Source/mtm/log/LogStreamConsole.py | 1 | 4086 |
import os
import re
import sys
from mtm.ioc.Inject import Inject
import mtm.util.Util as Util
from mtm.log.Logger import LogType
import shutil
from mtm.util.Assert import *
import mtm.log.ColorConsole as ColorConsole
class AnsiiCodes:
BLACK = "\033[1;30m"
DARKBLACK = "\033[0;30m"
RED = "\033[1;31m"
DARKRED = "\033[0;31m"
GREEN = "\033[1;32m"
DARKGREEN = "\033[0;32m"
YELLOW = "\033[1;33m"
DARKYELLOW = "\033[0;33m"
BLUE = "\033[1;34m"
DARKBLUE = "\033[0;34m"
MAGENTA = "\033[1;35m"
DARKMAGENTA = "\033[0;35m"
CYAN = "\033[1;36m"
DARKCYAN = "\033[0;36m"
WHITE = "\033[1;37m"
DARKWHITE = "\033[0;37m"
END = "\033[0;0m"
class LogStreamConsole:
_log = Inject('Logger')
_sys = Inject('SystemHelper')
_varManager = Inject('VarManager')
_config = Inject('Config')
def __init__(self, verbose, veryVerbose):
self._verbose = verbose or veryVerbose
self._veryVerbose = veryVerbose
self._useColors = self._config.tryGetBool(False, 'LogStreamConsole', 'UseColors')
self._fileStream = None
if self._config.tryGetBool(False, 'LogStreamConsole', 'OutputToFilteredLog'):
self._fileStream = self._getFileStream()
if self._useColors:
self._initColors()
def _initColors(self):
self._defaultColors = ColorConsole.get_text_attr()
self._defaultBg = self._defaultColors & 0x0070
self._defaultFg = self._defaultColors & 0x0007
def log(self, logType, message):
assertIsNotNone(logType)
if logType == LogType.Noise and not self._veryVerbose:
return
if logType == LogType.Debug and not self._verbose:
return
if logType == LogType.Error:
self._output(logType, message, sys.stderr, self._useColors)
else:
self._output(logType, message, sys.stdout, self._useColors)
if self._fileStream:
self._output(logType, message, self._fileStream, False)
def _getFileStream(self):
primaryPath = self._varManager.expand('[LogFilteredPath]')
if not primaryPath:
raise Exception("Could not find path for log file")
previousPath = None
if self._varManager.hasKey('LogFilteredPreviousPath'):
previousPath = self._varManager.expand('[LogFilteredPreviousPath]')
# Keep one old build log
if os.path.isfile(primaryPath) and previousPath:
shutil.copy2(primaryPath, previousPath)
return open(primaryPath, 'w', encoding='utf-8', errors='ignore')
def _getHeadingIndent(self):
return self._log.getCurrentNumHeadings() * " "
def _output(self, logType, message, stream, useColors):
stream.write('\n')
stream.write(self._getHeadingIndent())
if not useColors or logType == LogType.Info:
stream.write(message)
stream.flush()
else:
ColorConsole.set_text_attr(self._getColorAttrs(logType))
stream.write(message)
stream.flush()
ColorConsole.set_text_attr(self._defaultColors)
def _getColorAttrs(self, logType):
if logType == LogType.HeadingStart:
return ColorConsole.FOREGROUND_CYAN | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
if logType == LogType.HeadingEnd:
return ColorConsole.FOREGROUND_BLACK | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
if logType == LogType.Good:
return ColorConsole.FOREGROUND_GREEN | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
if logType == LogType.Warn:
return ColorConsole.FOREGROUND_YELLOW | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
if logType == LogType.Error:
return ColorConsole.FOREGROUND_RED | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
assertThat(logType == LogType.Debug or logType == LogType.Noise)
return ColorConsole.FOREGROUND_BLACK | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
| mit | -1,695,768,849,465,963,800 | 30.430769 | 103 | 0.638767 | false |
51reboot/actual_09_homework | 10/jinderui/cmdb/user/dbutils.py | 1 | 1788 | # encoding: utf-8
import os,sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import gconf
import MySQLdb
class MySQLConnection(object):
"""docstring for MySQLConnection"""
def __init__(self, host,port,user,passwd,db,charset='utf8'):
self.__host = host
self.__port = port
self.__user = user
self.__passwd = passwd
self.__db = db
self.__charset = charset
self.__conn = None
self.__cur = None
self.__connect()
def __connect(self):
try:
self.__conn = MySQLdb.connect(host=self.__host,port=self.__port, user=self.__user, \
passwd = self.__passwd,db = self.__db,charset=self.__charset)
self.__cur = self.__conn.cursor()
except BaseException as e:
print e
def commit(self):
if self.__conn:
self.__conn.commit()
def execute(self,sql,args=()):
_cnt = 0
if self.__cur:
_cnt = self.__cur.execute(sql,args)
return _cnt
def fetch(self,sql,args=()):
_cnt = 0
_rt_list = []
_cnt = self.execute(sql,args)
if self.__cur:
_rt_list = self.__cur.fetchall()
return _cnt, _rt_list
def close(self):
self.commit()
if self.__cur:
self.__cur.close()
self.__cur = None
if self.__conn:
self.__conn.close()
self.__conn =None
@classmethod
def execute_sql(self,sql,args=(),fetch=True):
_count =0
_rt_list =[]
_conn = MySQLConnection(host=gconf.MYSQL_HOST,port=gconf.MYSQL_PORT, \
db=gconf.MYSQL_DB,user=gconf.MYSQL_USER, passwd=gconf.MYSQL_PASSWORD,charset=gconf.MYSQL_CHARSET)
if fetch:
_count,_rt_list = _conn.fetch(sql,args)
else:
_count = _conn.execute(sql,args)
_conn.close()
return _count,_rt_list
if __name__ == '__main__':
	print MySQLConnection.execute_sql('select * from user')
| mit | 2,713,146,756,037,389,300 | 21.64557 | 101 | 0.645973 | false
heracek/django-nonrel | tests/regressiontests/i18n/commands/extraction.py | 1 | 6442 | import os
import re
import shutil
from django.test import TestCase
from django.core import management
LOCALE='de'
class ExtractorTests(TestCase):
PO_FILE='locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(__file__))
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def assertMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
return self.assert_(re.search('^msgid %s' % msgid, s, re.MULTILINE))
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
return self.assert_(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assert_('#. Translators: This comment should be extracted' in po_contents)
self.assert_('This comment should not be extracted' not in po_contents)
# Comments in templates
self.assert_('#. Translators: Django template comment for translators' in po_contents)
self.assert_('#. Translators: Django comment block for translators' in po_contents)
def test_templatize(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
self.assertMsgId('I think that 100%% is more that 50%% of %\(obj\)s.', po_contents)
def test_extraction_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./templates/template_with_error.txt', './templates/template_with_error.html')
self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=LOCALE, verbosity=0)
try: # TODO: Simplify this try/try block when we drop support for Python 2.4
try:
management.call_command('makemessages', locale=LOCALE, verbosity=0)
except SyntaxError, e:
self.assertEqual(str(e), 'Translation blocks must not include other block tags: blocktrans (file templates/template_with_error.html, line 3)')
finally:
os.remove('./templates/template_with_error.html')
os.remove('./templates/template_with_error.html.py') # Waiting for #8536 to be fixed
class JavascriptExtractorTests(ExtractorTests):
PO_FILE='locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
management.call_command('makemessages', domain='djangojs', locale=LOCALE, verbosity=0)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('This one as well.', po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_option(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, ignore_patterns=['ignore_dir/*'])
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(__file__))
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assert_(os.path.islink(self.symlinked_dir))
else:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, symlinks=True)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should be included.', po_contents)
self.assert_('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assert_('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, no_wrap=True)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, no_wrap=False)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
| bsd-3-clause | -375,927,002,498,649,100 | 40.831169 | 174 | 0.646228 | false |
Itxaka/st2 | st2api/st2api/controllers/v1/executionviews.py | 1 | 3355 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from pecan.rest import RestController
import six
from st2common import log as logging
from st2common.models.api.base import jsexpose
from st2common.persistence.execution import ActionExecution
LOG = logging.getLogger(__name__)
# List of supported filters and relation between filter name and execution property it represents.
# The same list is used both in ActionExecutionController to map filter names to properties and
# in FiltersController below to generate a list of unique values for each filter for UI so user
# could pick a filter from a drop down.
# If filter is unique for every execution or repeats very rarely (ex. execution id or parent
# reference) it should be also added to IGNORE_FILTERS to avoid bloating FiltersController
# response. Failure to do so will eventually result in Chrome hanging out while opening History
# tab of st2web.
SUPPORTED_FILTERS = {
'action': 'action.ref',
'status': 'status',
'liveaction': 'liveaction.id',
'parent': 'parent',
'rule': 'rule.name',
'runner': 'runner.name',
'timestamp': 'start_timestamp',
'trigger': 'trigger.name',
'trigger_type': 'trigger_type.name',
'trigger_instance': 'trigger_instance.id',
'user': 'liveaction.context.user'
}
# List of filters that are too broad to distinct by them and are very likely to represent 1 to 1
# relation between filter and particular history record.
IGNORE_FILTERS = ['parent', 'timestamp', 'liveaction', 'trigger_instance']
class FiltersController(RestController):
@jsexpose()
def get_all(self):
"""
List all distinct filters.
Handles requests:
GET /executions/views/filters
"""
filters = {}
for name, field in six.iteritems(SUPPORTED_FILTERS):
if name not in IGNORE_FILTERS:
if isinstance(field, six.string_types):
query = '$' + field
else:
dot_notation = list(chain.from_iterable(
('$' + item, '.') for item in field
))
dot_notation.pop(-1)
query = {'$concat': dot_notation}
aggregate = ActionExecution.aggregate([
{'$match': {'parent': None}},
{'$group': {'_id': query}}
])
filters[name] = [res['_id'] for res in aggregate['result'] if res['_id']]
return filters
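# Illustrative note (added; not part of the original file): for a simple
# string mapping such as SUPPORTED_FILTERS['action'] == 'action.ref', the
# aggregation above boils down to:
#
#   [{'$match': {'parent': None}},
#    {'$group': {'_id': '$action.ref'}}]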
class ExecutionViewsController(RestController):
filters = FiltersController()
| apache-2.0 | -4,095,253,901,279,048,700 | 38.011628 | 98 | 0.66617 | false |
JDRomano2/VenomKB | venomkb/archive/scripts/add_go_data.py | 1 | 1105 | import json
from tqdm import tqdm
from venomkb_builder import VenomKB
VKB = VenomKB()
VKB.load_database()
go_annotations_out = {}
for x in tqdm(VKB.proteins):
try:
toxprot = VKB.get_record_from_toxprot(x.venomkb_id, 'dbReference', json=False)
except:
continue
go_annotations = [y for y in toxprot if ('type', 'GO') in y.items()]
this_protein = []
for go in go_annotations:
current = {}
go_id = [z[1] for z in go.items() if z[0] == 'id'][0]
for prop in go:
dict_form = dict(prop.items())
current[dict_form['type']] = dict_form['value']
current['id'] = go_id
# append to temporary list of structured go_annotations
this_protein.append(current)
# push to global list of go_annotations
go_annotations_out[x.venomkb_id] = this_protein
'''
for vkbid, annot_list in tqdm(go_annotations_out.iteritems()):
VKB.add_to_existing(vkbid=vkbid,
new_key='go_annotations',
new_value=annot_list,
replace_if_exist=True)
'''
| gpl-2.0 | 178,348,791,076,406,720 | 27.333333 | 86 | 0.586425 | false |
aecepoglu/twitchy-streamer-python | test/steps/all.py | 1 | 4759 | from behave import *
from unittest.mock import patch, mock_open, MagicMock
from src import arg_parser as myArgParser, server_utils as myServerUtils, errors as myErrors
import io
import requests
import requests_mock
def setup_debug_on_error(userdata):
global BEHAVE_DEBUG_ON_ERROR
BEHAVE_DEBUG_ON_ERROR = userdata.getbool("BEHAVE_DEBUG_ON_ERROR")
def before_all(context):
setup_debug_on_error(context.config.userdata)
def after_step(context, step):
	# the stray `raise step.exc_traceback` made this hook unreachable;
	# gate the debugger on the flag set in setup_debug_on_error instead
	if BEHAVE_DEBUG_ON_ERROR and step.status == "failed":
		import ipdb
		ipdb.post_mortem(step.exc_traceback)
def before_scenario(context):
pass
@given('a file at "{filepath}"')
def step_impl(context, filepath):
previousOpenMock = None
previousIsfileMock = None
content = context.text
if "openMock" in context:
previousOpenMock = context.openMock
if "isfileMock" in context:
previousIsfileMock = context.isfileMock
def my_open(filename, openOpt="r"):
assert(openOpt == "r")
if (filename == filepath):
return io.StringIO(content)
elif previousOpenMock:
return previousOpenMock(filename, openOpt)
else:
raise FileNotFoundError(filename)
def my_isfile(x):
if (x == filepath):
return True
elif previousIsfileMock:
return previousIsfileMock(x)
else:
return False
context.openMock = my_open
context.isfileMock = my_isfile
@given('a directory at "{path}" exists')
def step_impl(context, path):
previousMock = None
if "isdirMock" in context:
previousMock = context.isdirMock
def my_isdir(x):
if (x == path):
return True
elif previousMock:
return previousMock(x)
else:
return False
context.isdirMock = my_isdir
@given('program arguments are "{args}"')
def step_impl(context, args):
context.cmdArgs = args.split()
@when('it parses arguments')
def step_impl(context):
if "openMock" not in context:
		context.openMock = mock_open()
if "isfileMock" not in context:
context.isfileMock = MagicMock(return_value = False)
if "isdirMock" not in context:
context.isdirMock = MagicMock(return_value = False)
with patch("builtins.open", context.openMock):
with patch("os.path.isfile", context.isfileMock):
with patch("os.path.isdir", context.isdirMock):
try:
context.parsedArgs = myArgParser.parse(context.cmdArgs)
context.raisedException = False
except myErrors.MyError as err:
context.raisedException = err
@then('config has "{pattern}" in "{targetList}" list')
def step_impl(context, pattern, targetList):
assert(targetList in context.parsedArgs)
assert(isinstance(context.parsedArgs[targetList], list))
assert(pattern in context.parsedArgs[targetList])
@then('config has "{value}" at "{target}"')
def step_impl(context, value, target):
assert(target in context.parsedArgs)
assert(context.parsedArgs[target] == value)
@then('config has "{target}"')
def step_impl(context, target):
assert(target in context.parsedArgs)
@when('I start the program')
def step_impl(context):
raise NotImplementedError('STEP: When I start the program')
@given('I entered no arguments')
def step_impl(context):
context.cmdArgs = []
pass
@then('it should fail')
def step_impl(context):
assert(context.raisedException)
@then('it should prompt a link to download one')
def step_impl(context):
assert("can find a sample config file at" in str(context.raisedException))
@then('it should say configs must contain publishLink')
def step_impl(context):
assert("publishLink" in str(context.raisedException))
#TODO may be improve this a bit
@given(u'a server at "{host}" responds to "{method}" "{path}" with "{responseText}"')
def step_impl(context, host, method, path, responseText):
_addr = host + path
_responseText = responseText
_method = method
previousRegistrar = None
if "registerRequestsMock" in context:
previousRegistrar = context.registerRequestsMock
def fun(mocker):
mocker.register_uri(_method, _addr, text = _responseText)
if previousRegistrar:
			previousRegistrar(mocker)
context.registerRequestsMock = fun
@given(u'my config has "{value}" as "{key}"')
def step_impl(context, value, key):
if "myConfig" not in context:
context.myConfig = {}
context.myConfig[key] = value
@when(u'it checks for version')
def step_impl(context):
try:
with requests_mock.Mocker() as m:
context.registerRequestsMock(m)
myServerUtils.check(context.myConfig)
context.raisedException = False
except myErrors.MyError as err:
context.raisedException = err
@then(u'it should succeed')
def step_impl(context):
assert(not context.raisedException)
@then(u'it should notify me about the newer version')
def step_impl(context):
#TODO I need to verify this somehow
pass
@then(u'give me version incompatibility error')
def step_impl(context):
assert("no longer supported" in str(context.raisedException))
| mit | -4,857,236,144,585,987,000 | 26.039773 | 92 | 0.739021 | false |
minhnd/youtube-subtitle-downloader | youtubesub.py | 1 | 5521 | # -*- coding: utf-8 -*-
"""
Youtube Subtitle Downloader downloads subtitles from Youtube videos
(if those are present) and convert them to SRT format.
Usage: youtubesub.py [-h] [-l] [--language LANGUAGE] [--filename FILENAME]
[--filetype {srt,xml}]
url
positional arguments:
url URL of the Youtube video
optional arguments:
-h, --help show this help message and exit
-l, --list list all available languages
--language LANGUAGE the ISO language code
--filename FILENAME specify the name of subtitle
--filetype {srt,xml} specify the output type of subtitle
Example:
python youtubesub.py --filename subtitle --language en http://www.youtube.com/watch?v=5MgBikgcWnY
:copyright: (c) 2014 by Nguyen Dang Minh (www.minhnd.com)
:license: BSD, see LICENSE for more details.
"""
import urllib2
import urlparse
import argparse
import sys
import xml.etree.ElementTree as ET
class YoutubeSubDownloader():
video_id = None
subtitle = None
languages = {}
def __init__(self, url=None):
self.video_id = self.extractVideoID(url)
self.languages = self.getAvailableLanguages()
if self.languages == {}:
print "There's no subtitle"
sys.exit()
def extractVideoID(self, url=None):
"""
Examples:
- http://youtu.be/5MgBikgcWnY
- http://www.youtube.com/watch?v=5MgBikgcWnY&feature=feed
- http://www.youtube.com/embed/5MgBikgcWnY
- http://www.youtube.com/v/5MgBikgcWnY?version=3&hl=en_US
"""
url_data = urlparse.urlparse(url)
if url_data.hostname == 'youtu.be':
return url_data.path[1:]
if url_data.hostname in ('www.youtube.com', 'youtube.com'):
if url_data.path == '/watch':
query = urlparse.parse_qs(url_data.query)
return query['v'][0]
if url_data.path[:7] == '/embed/':
return url_data.path.split('/')[2]
if url_data.path[:3] == '/v/':
return url_data.path.split('/')[2]
return None
def download(self, language, filename, filetype):
"""Download subtitle of the selected language"""
if language not in self.languages.keys():
print "Theres's no subtitle in this language"
sys.exit()
url = "http://www.youtube.com/api/timedtext?v={0}&lang={1}".format(self.video_id, language)
self.subtitle = urllib2.urlopen(url)
if filetype == "srt":
self.writeSRTFile(filename)
else:
self.writeXMLFile(filename)
def getAvailableLanguages(self):
"""Get all available languages of subtitle"""
url = "http://www.youtube.com/api/timedtext?v=%s&type=list" % self.video_id
xml = urllib2.urlopen(url)
tree = ET.parse(xml)
root = tree.getroot()
languages = {}
for child in root:
languages[child.attrib["lang_code"]] = child.attrib["lang_translated"]
return languages
def list(self):
"""List all available languages of subtitle"""
for key, value in self.languages.iteritems():
print key, value
def writeXMLFile(self, filename=None):
with open(filename + ".xml", 'w') as f:
for line in self.subtitle:
f.write(line)
def writeSRTFile(self, filename=None):
tree = ET.parse(self.subtitle)
root = tree.getroot()
with open(filename + ".srt", 'w') as f:
line = 1
for child in root:
f.write(self.printSRTLine(line, child.attrib["start"], child.attrib["dur"], child.text.encode('utf-8')))
line += 1
def formatSRTTime(self, secTime):
"""Convert a time in seconds (in Google's subtitle) to SRT time format"""
sec, micro = str(secTime).split('.')
m, s = divmod(int(sec), 60)
h, m = divmod(m, 60)
return "{:02}:{:02}:{:02},{}".format(h,m,s,micro)
def printSRTLine(self, line, start, duration, text):
"""Print a subtitle in SRT format"""
end = self.formatSRTTime(float(start) + float(duration))
start = self.formatSRTTime(start)
text = self.convertHTML(text)
return "{}\n{} --> {}\n{}\n\n".format(line, start, end, text)
def convertHTML(self, text):
"""A few HTML encodings replacements.
' to '
"""
return text.replace(''', "'")
def main():
try:
parser = argparse.ArgumentParser(description="Youtube Subtitle Downloader")
parser.add_argument("url", help="URL of the Youtube video")
parser.add_argument("-l", "--list", action="store_true", help="list all available languages")
parser.add_argument("--language", default="en", help="the ISO language code")
parser.add_argument("--filename", default="subtitle", help="specify the name of subtitle")
parser.add_argument("--filetype", default="srt", choices=["srt", "xml"], help="specify the output type of subtitle")
args = parser.parse_args()
downloader = YoutubeSubDownloader(args.url)
if args.list:
print "Available languages:"
downloader.list()
downloader.download(args.language, args.filename, args.filetype)
except Exception as e:
print e
if __name__ == '__main__':
main()
| bsd-2-clause | 5,863,344,055,062,149,000 | 35.806667 | 124 | 0.58522 | false |
scollis/iris | lib/iris/tests/unit/plot/test_points.py | 1 | 1395 | # (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.points` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.unit.plot import TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.points(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.points(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
if __name__ == "__main__":
tests.main()
| gpl-3.0 | 993,418,146,650,446,000 | 33.875 | 74 | 0.729749 | false |
thomasorb/orb | orb/utils/io.py | 1 | 33933 | #!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <[email protected]>
# File: io.py
## Copyright (c) 2010-2020 Thomas Martin <[email protected]>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import numpy as np
import time
import warnings
import astropy.io.fits as pyfits
from astropy.io.fits.verify import VerifyWarning, VerifyError, AstropyUserWarning
from astropy.wcs import FITSFixedWarning
import astropy.io.votable
import pandas as pd
import orb.cutils
import h5py
import datetime
import orb.utils.validate
import orb.utils.image
def open_file(file_name, mode='r'):
"""Open a file in write mode (by default) and return a file
object.
Create the file if it doesn't exist (only in write mode).
:param file_name: Path to the file, can be either
relative or absolute.
:param mode: (Optional) Can be 'w' for write mode, 'r' for
read mode and 'a' for append mode.
"""
if mode not in ['w','r','a']:
raise Exception("mode option must be 'w', 'r' or 'a'")
if mode in ['w','a']:
# create folder if it does not exist
dirname = os.path.dirname(file_name)
if dirname != '':
if not os.path.exists(dirname):
os.makedirs(dirname)
return open(file_name, mode)
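# Usage sketch (illustrative; the path is hypothetical): missing parent
# directories are created before the file object is returned.
#
#   with open_file('out/logs/run.txt', mode='w') as f:
#       f.write('hello')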
def write_fits(fits_path, fits_data, fits_header=None,
silent=False, overwrite=True, mask=None,
replace=False, record_stats=False, mask_path=None):
"""Write data in FITS format. If the file doesn't exist create
it with its directories.
If the file already exists add a number to its name before the
extension (unless 'overwrite' option is set to True).
:param fits_path: Path to the file, can be either
relative or absolute.
:param fits_data: Data to be written in the file.
:param fits_header: (Optional) Optional keywords to update or
create. It can be a pyfits.Header() instance or a list of
tuples [(KEYWORD_1, VALUE_1, COMMENT_1), (KEYWORD_2,
VALUE_2, COMMENT_2), ...]. Standard keywords like SIMPLE,
BITPIX, NAXIS, EXTEND does not have to be passed.
:param silent: (Optional) If True turn this function won't
display any message (default False)
:param overwrite: (Optional) If True overwrite the output file
if it exists (default True).
:param mask: (Optional) It not None must be an array with the
same size as the given data but filled with ones and
zeros. Bad values (NaN or Inf) are converted to 1 and the
array is converted to 8 bit unsigned integers (uint8). This
array will be written to the disk with the same path
terminated by '_mask'. The header of the mask FITS file will
be the same as the original data (default None).
:param replace: (Optional) If True and if the file already
exist, new data replace old data in the existing file. NaN
values do not replace old values. Other values replace old
values. New array MUST have the same size as the existing
array. Note that if replace is True, overwrite is
automatically set to True.
:param record_stats: (Optional) If True, record mean and
median of data. Useful if they often have to be computed
(default False).
:param mask_path: (Optional) Path to the corresponding mask image.
.. note:: float64 data is converted to float32 data to avoid
too big files with unnecessary precision
.. note:: Please refer to
http://www.stsci.edu/institute/software_hardware/pyfits/ for
more information on PyFITS module and
http://fits.gsfc.nasa.gov/ for more information on FITS
files.
"""
SECURED_KEYS = ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1',
'NAXIS2', 'NAXIS3', 'EXTEND', 'INHERIT',
'BZERO', 'BSCALE']
if not isinstance(fits_data, np.ndarray):
raise TypeError('Data type must be numpy.ndarray')
start_time = time.time()
# change the extension if necessary
if os.path.splitext(fits_path)[1] != '.fits':
fits_path = os.path.splitext(fits_path)[0] + '.fits'
if mask is not None:
if np.shape(mask) != np.shape(fits_data):
raise ValueError('Mask must have the same shape as data')
if replace: overwrite=True
if overwrite:
warnings.filterwarnings(
'ignore', message='Overwriting existing file.*',
module='astropy.io.*')
if replace and os.path.exists(fits_path):
old_data = read_fits(fits_path)
if old_data.shape == fits_data.shape:
fits_data[np.isnan(fits_data)] = old_data[np.isnan(fits_data)]
else:
raise Exception("New data shape %s and old data shape %s are not the same. Do not set the option 'replace' to True in this case"%(str(fits_data.shape), str(old_data.shape)))
# float64/128 data conversion to float32 to avoid too big files
# with unnecessary precision
if fits_data.dtype == np.float64 or fits_data.dtype == np.float128:
fits_data = fits_data.astype(np.float32)
# complex data cannot be written in fits
if np.iscomplexobj(fits_data):
fits_data = fits_data.real.astype(np.float32)
logging.warning('Complex data cast to float32 (the FITS format does not support complex data)')
base_fits_path = fits_path
dirname = os.path.dirname(fits_path)
if (dirname != []) and (dirname != ''):
if not os.path.exists(dirname):
os.makedirs(dirname)
index=0
file_written = False
while not file_written:
if ((not (os.path.exists(fits_path))) or overwrite):
if len(fits_data.shape) > 1:
hdu = pyfits.PrimaryHDU(fits_data.transpose())
elif len(fits_data.shape) == 1:
hdu = pyfits.PrimaryHDU(fits_data[np.newaxis, :])
else: # 1 number only
hdu = pyfits.PrimaryHDU(np.array([fits_data]))
if mask is not None:
# mask conversion to only zeros or ones
mask = mask.astype(float)
mask[np.nonzero(np.isnan(mask))] = 1.
mask[np.nonzero(np.isinf(mask))] = 1.
mask[np.nonzero(mask)] = 1.
mask = mask.astype(np.uint8) # UINT8 is the
# smallest allowed
# type
hdu_mask = pyfits.PrimaryHDU(mask.transpose())
# add header optional keywords
if fits_header is not None:
## remove keys of the passed header which corresponds
## to the description of the data set
for ikey in SECURED_KEYS:
if ikey in fits_header: fits_header.pop(ikey)
hdu.header.extend(fits_header, strip=False,
update=True, end=True)
# Remove 3rd axis related keywords if there is no
# 3rd axis
if len(fits_data.shape) <= 2:
for ikey in range(len(hdu.header)):
if isinstance(hdu.header[ikey], str):
if ('Wavelength axis' in hdu.header[ikey]):
del hdu.header[ikey]
del hdu.header[ikey]
break
if 'CTYPE3' in hdu.header:
del hdu.header['CTYPE3']
if 'CRVAL3' in hdu.header:
del hdu.header['CRVAL3']
if 'CRPIX3' in hdu.header:
del hdu.header['CRPIX3']
if 'CDELT3' in hdu.header:
del hdu.header['CDELT3']
if 'CROTA3' in hdu.header:
del hdu.header['CROTA3']
if 'CUNIT3' in hdu.header:
del hdu.header['CUNIT3']
# add median and mean of the image in the header
# data is nan filtered before
if record_stats:
fdata = fits_data[np.nonzero(~np.isnan(fits_data))]
if np.size(fdata) > 0:
data_mean = np.nanmean(fdata)
data_median = np.nanmedian(fdata)
else:
data_mean = np.nan
data_median = np.nan
hdu.header.set('MEAN', str(data_mean),
'Mean of data (NaNs filtered)',
after=5)
hdu.header.set('MEDIAN', str(data_median),
'Median of data (NaNs filtered)',
after=5)
# add some basic keywords in the header
date = time.strftime("%Y-%m-%d", time.localtime(time.time()))
hdu.header.set('MASK', 'False', '', after=5)
hdu.header.set('DATE', date, 'Creation date', after=5)
hdu.header.set('PROGRAM', "ORB",
'Thomas Martin: [email protected]',
after=5)
# write FITS file
hdu.writeto(fits_path, overwrite=overwrite)
if mask is not None:
hdu_mask.header = hdu.header
hdu_mask.header.set('MASK', 'True', '', after=6)
if mask_path is None:
mask_path = os.path.splitext(fits_path)[0] + '_mask.fits'
hdu_mask.writeto(mask_path, overwrite=overwrite)
if not (silent):
logging.info("Data written as {} in {:.2f} s ".format(
fits_path, time.time() - start_time))
return fits_path
else :
fits_path = (os.path.splitext(base_fits_path)[0] +
"_" + str(index) +
os.path.splitext(base_fits_path)[1])
index += 1
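# Usage sketch (illustrative; the path, array and header keyword below are
# hypothetical, not taken from the original module):
#
#   data = np.ones((64, 64), dtype=np.float32)
#   write_fits('/tmp/example.fits', data,
#              fits_header=[('OBJECT', 'M31', 'target name')],
#              overwrite=True)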
def read_fits(fits_path, no_error=False, nan_filter=False,
return_header=False, return_hdu_only=False,
return_mask=False, silent=False, delete_after=False,
data_index=None, image_mode='classic', chip_index=None,
binning=None, fix_header=True, dtype=float,
mask_path=None):
"""Read a FITS data file and returns its data.
:param fits_path: Path to the file, can be either
relative or absolute.
:param no_error: (Optional) If True this function will only
display a warning message if the file does not exist (so it
does not raise an exception) (default False)
:param nan_filter: (Optional) If True replace NaN by zeros
(default False)
:param return_header: (Optional) If True return a tuple (data,
header) (default False).
:param return_hdu_only: (Optional) If True return FITS header
data unit only. No data will be returned (default False).
:param return_mask: (Optional) If True return only the mask
corresponding to the data file (default False).
:param silent: (Optional) If True no message is displayed
except if an error is raised (default False).
:param delete_after: (Optional) If True delete file after
reading (default False).
:param data_index: (Optional) Index of data in the header data
unit (Default None).
:param image_mode: (Optional) Can be 'sitelle', 'spiomm' or
'classic'. In 'sitelle' mode, the parameter
chip_index must also be set to 1 or 2. In this mode only
one of the two SITELLE quadrants is returned. In 'classic' mode
the whole frame is returned (default 'classic').
:param chip_index: (Optional) Index of the chip of the
SITELLE image. Used only if image_mode is set to 'sitelle'
In this case, must be 1 or 2. Else must be None (default
None).
:param binning: (Optional) If not None, returned data is
binned by this amount (must be an integer >= 1)
:param fix_header: (Optional) If True, fits header is
fixed to avoid errors due to header inconsistencies
(e.g. WCS errors) (default True).
:param dtype: (Optional) Data is converted to
the given dtype (e.g. np.float32, default float).
:param mask_path: (Optional) Path to the corresponding mask image.
.. note:: Please refer to
http://www.stsci.edu/institute/software_hardware/pyfits/ for
more information on PyFITS module. And
http://fits.gsfc.nasa.gov/ for more information on FITS
files.
"""
# avoid bugs with FITS files whose data is not in the first HDU
fits_path = ((fits_path.splitlines())[0]).strip()
if return_mask:
if mask_path is None:
mask_path = os.path.splitext(fits_path)[0] + '_mask.fits'
fits_path = mask_path
try:
warnings.filterwarnings('ignore', module='astropy')
warnings.filterwarnings('ignore', category=ResourceWarning)
hdulist = pyfits.open(fits_path)
if data_index is None:
data_index = get_hdu_data_index(hdulist)
fits_header = hdulist[data_index].header
except Exception as e:
if not no_error:
raise IOError(
"File '%s' could not be opened: {}, {}".format(fits_path, e))
else:
if not silent:
logging.warning(
"File '%s' could not be opened {}, {}".format(fits_path, e))
return None
# Correct header
if fix_header:
if fits_header['NAXIS'] == 2:
if 'CTYPE3' in fits_header: del fits_header['CTYPE3']
if 'CRVAL3' in fits_header: del fits_header['CRVAL3']
if 'CUNIT3' in fits_header: del fits_header['CUNIT3']
if 'CRPIX3' in fits_header: del fits_header['CRPIX3']
if 'CROTA3' in fits_header: del fits_header['CROTA3']
if return_hdu_only:
return hdulist[data_index]
else:
if image_mode == 'classic':
fits_data = np.array(
hdulist[data_index].data.transpose()).astype(dtype)
elif image_mode == 'sitelle':
fits_data = read_sitelle_chip(hdulist[data_index], chip_index)
elif image_mode == 'spiomm':
fits_data, fits_header = read_spiomm_data(
hdulist, fits_path)
else:
raise ValueError("Image_mode must be set to 'sitelle', 'spiomm' or 'classic'")
hdulist.close()
if binning is not None:
fits_data = orb.utils.image.bin_image(fits_data, binning)
if (nan_filter):
fits_data = np.nan_to_num(fits_data)
if delete_after:
try:
os.remove(fits_path)
except:
logging.warning("The file '%s' could not be deleted"%fits_path)
if return_header:
return np.squeeze(fits_data), fits_header
else:
return np.squeeze(fits_data)
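# Usage sketch (illustrative path): read the data written above together
# with its header.
#
#   data, hdr = read_fits('/tmp/example.fits', return_header=True)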
def get_hdu_data_index(hdul):
"""Return the index of the first header data unit (HDU) containing data.
:param hdul: A pyfits.HDU instance
"""
hdu_data_index = 0
while (hdul[hdu_data_index].data is None):
hdu_data_index += 1
if hdu_data_index >= len(hdul):
raise Exception('No data recorded in FITS file')
return hdu_data_index
def read_sitelle_chip(hdu, chip_index, substract_bias=True):
"""Return chip data of a SITELLE FITS image.
:param hdu: pyfits.HDU Instance of the SITELLE image
:param chip_index: Index of the chip to read. Must be 1 or 2.
:param substract_bias: If True bias is automatically
subtracted by using the overscan area (default True).
"""
def get_slice(key, index):
key = '{}{}'.format(key, index)
if key not in hdu.header: raise Exception(
'Bad SITELLE image header')
chip_section = hdu.header[key]
return get_sitelle_slice(chip_section)
def get_data(key, index, frame):
xslice, yslice = get_slice(key, index)
return np.copy(frame[yslice, xslice]).transpose()
if int(chip_index) not in (1,2): raise Exception(
'Chip index must be 1 or 2')
frame = hdu.data.astype(np.float)
# get data without bias substraction
if not substract_bias:
return get_data('DSEC', chip_index, frame)
if chip_index == 1:
amps = ['A', 'B', 'C', 'D']
elif chip_index == 2:
amps = ['E', 'F', 'G', 'H']
xchip, ychip = get_slice('DSEC', chip_index)
data = np.empty((xchip.stop - xchip.start, ychip.stop - ychip.start),
dtype=float)
# removing bias
for iamp in amps:
xamp, yamp = get_slice('DSEC', iamp)
amp_data = get_data('DSEC', iamp, frame)
bias_data = get_data('BSEC', iamp, frame)
overscan_size = int(bias_data.shape[0]/2)
if iamp in ['A', 'C', 'E', 'G']:
bias_data = bias_data[-overscan_size:,:]
else:
bias_data = bias_data[:overscan_size,:]
bias_data = np.mean(bias_data, axis=0)
amp_data = amp_data - bias_data
data[xamp.start - xchip.start: xamp.stop - xchip.start,
yamp.start - ychip.start: yamp.stop - ychip.start] = amp_data
return data
def get_sitelle_slice(slice_str):
"""
Strip a string containing SITELLE like slice coordinates.
:param slice_str: Slice string.
"""
if "'" in slice_str:
slice_str = slice_str[1:-1]
section = slice_str[1:-1].split(',')
x_min = int(section[0].split(':')[0]) - 1
x_max = int(section[0].split(':')[1])
y_min = int(section[1].split(':')[0]) - 1
y_max = int(section[1].split(':')[1])
return slice(x_min,x_max,1), slice(y_min,y_max,1)
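# Example (the section string below is a typical SITELLE-style value, not
# taken from an actual header):
#
#   get_sitelle_slice('[1:2048,1:4608]')
#   # -> (slice(0, 2048, 1), slice(0, 4608, 1))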
def read_spiomm_data(hdu, image_path, substract_bias=True):
"""Return data of an SpIOMM FITS image.
:param hdu: pyfits.HDU Instance of the SpIOMM image
:param image_path: Image path
:param substract_bias: If True bias is automatically
subtracted by using the associated bias frame as an
overscan frame. Mean bias level is thus computed along the y
axis of the bias frame (default True).
"""
CENTER_SIZE_COEFF = 0.1
data_index = get_hdu_data_index(hdu)
frame = np.array(hdu[data_index].data.transpose()).astype(np.float)
hdr = hdu[data_index].header
# check presence of a bias
bias_path = os.path.splitext(image_path)[0] + '_bias.fits'
if os.path.exists(bias_path):
bias_frame = read_fits(bias_path)
if substract_bias:
## create overscan line
overscan = orb.cutils.meansigcut2d(bias_frame, axis=1)
frame = (frame.T - overscan.T).T
x_min = int(bias_frame.shape[0]/2.
- CENTER_SIZE_COEFF * bias_frame.shape[0])
x_max = int(bias_frame.shape[0]/2.
+ CENTER_SIZE_COEFF * bias_frame.shape[0] + 1)
y_min = int(bias_frame.shape[1]/2.
- CENTER_SIZE_COEFF * bias_frame.shape[1])
y_max = int(bias_frame.shape[1]/2.
+ CENTER_SIZE_COEFF * bias_frame.shape[1] + 1)
bias_level = np.nanmedian(bias_frame[x_min:x_max, y_min:y_max])
if not np.isnan(bias_level):
hdr['BIAS-LVL'] = (
bias_level,
'Bias level (moment, at the center of the frame)')
return frame, hdr
def open_hdf5(file_path, mode):
"""Return a :py:class:`h5py.File` instance with some
information.
:param file_path: Path to the hdf5 file.
:param mode: Opening mode. Can be 'r', 'r+', 'w', 'w-', 'x',
'a'.
.. note:: Please refer to http://www.h5py.org/.
"""
if mode in ['w', 'a', 'w-', 'x']:
# create folder if it does not exist
dirname = os.path.dirname(file_path)
if dirname != '':
if not os.path.exists(dirname):
os.makedirs(dirname)
f = h5py.File(file_path, mode)
if mode in ['w', 'a', 'w-', 'x', 'r+']:
f.attrs['program'] = 'Created/modified with ORB'
f.attrs['date'] = str(datetime.datetime.now())
return f
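# Usage sketch (illustrative filename):
#
#   with open_hdf5('/tmp/store.hdf5', 'w') as f:
#       f.create_dataset('x', data=np.arange(10))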
def write_hdf5(file_path, data, header=None,
silent=False, overwrite=True, max_hdu_check=True,
compress=False):
"""
Write data in HDF5 format.
A header can be added to the data. This method is useful to
handle an HDF5 data file like a FITS file. It implements most
of the functionality of the method
:py:meth:`core.Tools.write_fits`.
.. note:: The output HDF5 file can contain mutiple data header
units (HDU). Each HDU is in a specific group named 'hdu*', *
being the index of the HDU. The first HDU is named
HDU0. Each HDU contains one data group (HDU*/data) which
contains a numpy.ndarray and one header group
(HDU*/header). Each subgroup of a header group is a keyword
and its associated value, comment and type.
:param file_path: Path to the HDF5 file to create
:param data: A numpy array (numpy.ndarray instance) of numeric
values. If a list of arrays is given, each array will be
placed in a specific HDU. The header keyword must also be
set to a list of headers of the same length.
:param header: (Optional) Optional keywords to update or
create. It can be a pyfits.Header() instance or a list of
tuples [(KEYWORD_1, VALUE_1, COMMENT_1), (KEYWORD_2,
VALUE_2, COMMENT_2), ...]. Standard keywords like SIMPLE,
BITPIX, NAXIS, EXTEND does not have to be passed (default
None). It can also be a list of headers if a list of arrays
has been passed to the option 'data'.
:param max_hdu_check: (Optional): When True, if the input data
is a list (interpreted as a list of data units), check that
its length is not too long to make sure that the input list
is not a single data array that has not been converted to a
numpy.ndarray format. If the number of HDU to create is
indeed very long this can be set to False (default True).
:param silent: (Optional) If True turn this function won't
display any message (default False)
:param overwrite: (Optional) If True overwrite the output file
if it exists (default True).
:param compress: (Optional) If True data is compressed using
the SZIP library (see
https://www.hdfgroup.org/doc_resource/SZIP/). SZIP library
must be installed (default False).
.. note:: Please refer to http://www.h5py.org/.
"""
MAX_HDUS = 3
start_time = time.time()
# change the extension if necessary
if os.path.splitext(file_path)[1] != '.hdf5':
file_path = os.path.splitext(file_path)[0] + '.hdf5'
# Check if data is a list of arrays.
if not isinstance(data, list):
data = [data]
if max_hdu_check and len(data) > MAX_HDUS:
raise Exception('Data list length is > {}. As a list is interpreted as a list of data units, make sure to pass a numpy.ndarray instance instead of a list.'.format(MAX_HDUS))
# Check header format
if header is not None:
if isinstance(header, pyfits.Header):
header = [header]
elif isinstance(header, list):
if (isinstance(header[0], list)
or isinstance(header[0], tuple)):
header_seems_ok = False
if (isinstance(header[0][0], list)
or isinstance(header[0][0], tuple)):
# we have a list of headers
if len(header) == len(data):
header_seems_ok = True
elif isinstance(header[0][0], str):
# we only have one header
if len(header[0]) > 2:
header = [header]
header_seems_ok = True
if not header_seems_ok:
raise Exception('Badly formatted header')
elif not isinstance(header[0], pyfits.Header):
raise Exception('Header must be a pyfits.Header instance or a list')
else:
raise Exception('Header must be a pyfits.Header instance or a list')
if len(header) != len(data):
raise Exception('The number of headers must be the same as the number of data units.')
# change path if file exists and must not be overwritten
new_file_path = str(file_path)
if not overwrite and os.path.exists(new_file_path):
index = 0
while os.path.exists(new_file_path):
new_file_path = (os.path.splitext(file_path)[0] +
"_" + str(index) +
os.path.splitext(file_path)[1])
index += 1
# open file
with open_hdf5(new_file_path, 'w') as f:
## add data + header
for i in range(len(data)):
idata = data[i]
# Check if data has a valid format.
if not isinstance(idata, np.ndarray):
try:
idata = np.array(idata, dtype=float)
except Exception as e:
raise Exception('Data to write must be convertible to a numpy array of numeric values: {}'.format(e))
# convert data to float32
if idata.dtype == np.float64:
idata = idata.astype(np.float32)
# hdu name
hdu_group_name = 'hdu{}'.format(i)
if compress:
f.create_dataset(
hdu_group_name + '/data', data=idata,
compression='lzf', compression_opts=None)
#compression='szip', compression_opts=('nn', 32))
#compression='gzip', compression_opts=9)
else:
f.create_dataset(
hdu_group_name + '/data', data=idata)
# add header
if header is not None:
iheader = header[i]
if not isinstance(iheader, pyfits.Header):
iheader = pyfits.Header(iheader)
f[hdu_group_name + '/header'] = header_fits2hdf5(
iheader)
logging.info('Data written as {} in {:.2f} s'.format(
new_file_path, time.time() - start_time))
return new_file_path
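# Usage sketch (illustrative; path, data and header keyword are
# hypothetical):
#
#   write_hdf5('/tmp/example.hdf5', np.zeros((8, 8)),
#              header=[('OBJECT', 'M31', 'target name')],
#              overwrite=True)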
castables = [int, float, bool, str,
np.int64, np.float64, int, np.float128, np.bool_]
def cast(a, t_str):
if isinstance(t_str, bytes):
t_str = t_str.decode()
if 'type' in t_str: t_str = t_str.replace('type', 'class')
if 'long' in t_str: t_str = t_str.replace('long', 'int')
for _t in castables:
if t_str == repr(_t):
return _t(a)
raise Exception('Bad type string {} should be in {}'.format(t_str, [repr(_t) for _t in castables]))
def dict2array(data):
"""Convert a dictionary to an array that can be written in an hdf5 file
:param data: Must be a dict instance
"""
if not isinstance(data, dict): raise TypeError('data must be a dict')
arr = list()
for key in data:
if type(data[key]) in castables:
_tstr = str(type(data[key]))
arr.append(np.array(
(key, data[key], _tstr)))
else:
logging.debug('{} of type {} not passed to array'.format(key, type(data[key])))
return np.array(arr)
def array2dict(data):
"""Convert an array read from an hdf5 file to a dict.
:param data: array of params returned by dict2array
"""
_dict = dict()
for i in range(len(data)):
_dict[data[i][0]] = cast(data[i][1], data[i][2])
return _dict
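# Round-trip sketch (illustrative dict): only values whose type appears in
# `castables` survive the conversion.
#
#   params = {'step': 2, 'name': 'obs1', 'calibrated': True}
#   assert array2dict(dict2array(params)) == params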
def dict2header(params):
"""convert a dict to a pyfits.Header() instance
.. warning:: this is a destructive process, illegal values are
removed from the header.
:param params: a dict instance
"""
# filter illegal header values
cards = list()
for iparam in params:
val = params[iparam]
val_ok = False
for itype in castables:
if isinstance(val, itype):
val_ok = True
if val_ok:
if isinstance(val, bool):
val = int(val)
card = pyfits.Card(
keyword=iparam,
value=val,
comment=None)
try:
card.verify(option='exception')
cards.append(card)
except (VerifyError, ValueError, TypeError):
pass
warnings.simplefilter('ignore', category=VerifyWarning)
warnings.simplefilter('ignore', category=AstropyUserWarning)
warnings.simplefilter('ignore', category=FITSFixedWarning)
header = pyfits.Header(cards)
return header
def header_fits2hdf5(fits_header):
"""convert a pyfits.Header() instance to a header for an hdf5 file
:param fits_header: Header of the FITS file
"""
hdf5_header = list()
for ikey in range(len(fits_header)):
_tstr = str(type(fits_header[ikey]))
ival = np.array(
(list(fits_header.keys())[ikey], str(fits_header[ikey]),
fits_header.comments[ikey], _tstr))
hdf5_header.append(ival)
return np.array(hdf5_header, dtype='S300')
def header_hdf52fits(hdf5_header):
"""convert an hdf5 header to a pyfits.Header() instance.
:param hdf5_header: Header of the HDF5 file
"""
fits_header = pyfits.Header()
for i in range(hdf5_header.shape[0]):
ival = hdf5_header[i,:]
ival = [iival.decode() for iival in ival]
if ival[3] != 'comment':
fits_header[ival[0]] = cast(ival[1], ival[3]), str(ival[2])
else:
fits_header['comment'] = ival[1]
return fits_header
def read_hdf5(file_path, return_header=False, dtype=float):
"""Read an HDF5 data file created with
:py:meth:`core.Tools.write_hdf5`.
:param file_path: Path to the file, can be either
relative or absolute.
:param return_header: (Optional) If True return a tuple (data,
header) (default False).
:param dtype: (Optional) Data is converted to the given type
(e.g. np.float32, default float).
.. note:: Please refer to http://www.h5py.org/."""
with open_hdf5(file_path, 'r') as f:
data = list()
header = list()
for hdu_name in f:
data.append(f[hdu_name + '/data'][:].astype(dtype))
if return_header:
if hdu_name + '/header' in f:
# extract header
header.append(
header_hdf52fits(f[hdu_name + '/header'][:]))
else: header.append(None)
if len(data) == 1:
if return_header:
return data[0], header[0]
else:
return data[0]
else:
if return_header:
return data, header
else:
return data
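# Usage sketch (illustrative path): reads back what write_hdf5 stored,
# returning (data, header) when return_header is True.
#
#   data, hdr = read_hdf5('/tmp/example.hdf5', return_header=True)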
def cast2hdf5(val):
if val is None:
return 'None'
elif isinstance(val, np.float128):
return val.astype(np.float64)
#elif isinstance(val, int):
# return str(val)
elif isinstance(val, np.ndarray):
if val.dtype == np.float128:
return val.astype(np.float64)
return val
def get_storing_dtype(arr):
if not isinstance(arr, np.ndarray):
raise TypeError('arr must be a numpy.ndarray instance')
if arr.dtype == np.float64:
return np.float32
if arr.dtype == np.complex128:
return np.complex64
else: return arr.dtype
def cast_storing_dtype(arr):
if not isinstance(arr, np.ndarray):
raise TypeError('arr must be a numpy.ndarray instance')
return arr.astype(get_storing_dtype(arr))
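# Example (illustrative): arrays are downcast to a space-saving dtype
# before storage, e.g.
#
#   cast_storing_dtype(np.zeros(4, dtype=np.float64)).dtype     # float32
#   cast_storing_dtype(np.zeros(4, dtype=np.complex128)).dtype  # complex64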
def save_dflist(dflist, path):
"""Save a list of dataframes
:param dflist: list of pandas dataframes
:param path: path to the output file
"""
if os.path.exists(path):
os.remove(path)
with open_hdf5(path, 'w') as f:
f.attrs['len'] = len(dflist)
for idf in range(len(dflist)):
if dflist[idf] is not None:
dflist[idf].to_hdf(path, 'df{:06d}'.format(idf),
format='table', mode='a')
def load_dflist(path):
"""Save a list of dataframes
:param path: path to the output file
"""
with open_hdf5(path, 'r') as f:
_len = f.attrs['len']
dflist = list()
for i in range(_len):
try:
idf = pd.read_hdf(path, key='df{:06d}'.format(i))
dflist.append(idf)
except KeyError:
dflist.append(None)
return dflist
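# Round-trip sketch (illustrative path and frames; None entries are
# preserved):
#
#   dfs = [pd.DataFrame({'x': [1, 2]}), None]
#   save_dflist(dfs, '/tmp/dflist.hdf5')
#   dfs2 = load_dflist('/tmp/dflist.hdf5')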
def read_votable(votable_file):
"""read a votable and transfer it as as pandas dataframe.
taken from https://gist.github.com/icshih/52ca49eb218a2d5b660ee4a653301b2b
"""
votable = astropy.io.votable.parse(votable_file)
table = votable.get_first_table().to_table(use_names_over_ids=True)
return table.to_pandas()
def save_starlist(path, starlist):
"""Save a star list as a two columnfile X, Y readable by ds9
"""
orb.utils.validate.is_2darray(starlist, object_name='starlist')
if starlist.shape[1] != 2:
raise TypeError('starlist must be of shape (n,2)')
with open_file(path, 'w') as f:
for i in range(starlist.shape[0]):
f.write('{} {}\n'.format(starlist[i,0], starlist[i,1]))
f.flush()
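# Usage sketch (illustrative path and coordinates):
#
#   save_starlist('/tmp/stars.reg', np.array([[10.5, 20.1], [30.2, 40.9]]))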
| gpl-3.0 | 8,377,938,406,832,901,000 | 33.946447 | 185 | 0.581646 | false |
quarckster/cfme_tests | cfme/test_framework/appliance_police.py | 1 | 2826 | import attr
import pytest
import requests
from cfme.utils import ports
from cfme.utils.net import net_check
from cfme.utils.wait import TimedOutError
from cfme.utils.conf import rdb
from fixtures.pytest_store import store
from cfme.fixtures.rdb import Rdb
@attr.s
class AppliancePoliceException(Exception):
message = attr.ib()
port = attr.ib()
def __str__(self):
return "{} (port {})".format(self.message, self.port)
@pytest.fixture(autouse=True, scope="function")
def appliance_police():
if not store.slave_manager:
return
try:
port_numbers = {
'ssh': ports.SSH,
'https': store.current_appliance.ui_port,
'postgres': ports.DB}
port_results = {pn: net_check(pp, force=True) for pn, pp in port_numbers.items()}
for port, result in port_results.items():
if port == 'ssh' and store.current_appliance.is_pod:
# ssh is not available for podified appliance
continue
if not result:
raise AppliancePoliceException('Unable to connect', port_numbers[port])
try:
status_code = requests.get(store.current_appliance.url, verify=False,
timeout=120).status_code
except Exception:
raise AppliancePoliceException('Getting status code failed', port_numbers['https'])
if status_code != 200:
raise AppliancePoliceException('Status code was {}, should be 200'.format(
status_code), port_numbers['https'])
return
except AppliancePoliceException as e:
# special handling for known failure conditions
if e.port == 443:
# Lots of rdbs lately where evm seems to have entirely crashed
# and (sadly) the only fix is a rude restart
store.current_appliance.restart_evm_service(rude=True)
try:
store.current_appliance.wait_for_web_ui(900)
store.write_line('EVM was frozen and had to be restarted.', purple=True)
return
except TimedOutError:
pass
e_message = str(e)
except Exception as e:
e_message = str(e)
# Regardles of the exception raised, we didn't return anywhere above
# time to call a human
msg = 'Help! My appliance {} crashed with: {}'.format(store.current_appliance.url, e_message)
store.slave_manager.message(msg)
if 'appliance_police_recipients' in rdb:
rdb_kwargs = {
'subject': 'RDB Breakpoint: Appliance failure',
'recipients': rdb.appliance_police_recipients,
}
else:
rdb_kwargs = {}
Rdb(msg).set_trace(**rdb_kwargs)
store.slave_manager.message('Resuming testing following remote debugging')
| gpl-2.0 | -7,508,210,430,631,497,000 | 33.888889 | 97 | 0.617127 | false |
ErasRasmuson/LA | LogAna/Taxi_LongRides_old.py | 1 | 2578 | # -*- coding: cp1252 -*-
"""
###############################################################################
HEADER: Taxi_LongRides.py
AUTHOR: Esa Heikkinen
DATE: 26.06.2018
DOCUMENT: -
VERSION: "$Id$"
REFERENCES: -
PURPOSE:
CHANGES: "$Log$"
###############################################################################
"""
from logdig_analyze_template import *
# ----------------------------- DATA-DRIVEN PART -----------------------------
VARIABLES = {
"STARTTIME-DATE": "2013-01-01",
"STARTTIME-TIME": "00:00:00",
"STOPTIME-DATE": "2013-01-01",
"STOPTIME-TIME": "01:40:00"
}
START = {
"state": "BEGIN",
"func": "start"
}
ESU["BEGIN"] = {
"esu_mode": "SEARCH_EVENT:First:NextRow",
"log_filename_expr": "TaxiRides_small.csv",
"log_varnames": "isStart=START",
"log_timecol_name": "startTime",
"log_start_time_expr": "<STARTTIME-BEGIN>,0",
"log_stop_time_expr": "<STOPTIME>,0",
"TF_state": "END",
"TF_func": "found_begin",
"TN_state": "STOP",
"TN_func": "exit_normal",
"TE_state": "STOP",
"TE_func": "exit_error",
"GUI_line_num": "0"
}
ESU["END"] = {
"esu_mode": "SEARCH_EVENT:First",
"log_filename_expr": "TaxiRides_small.csv_<SET-RIDEID>",
"log_varnames": "isStart=END",
"log_timecol_name": "startTime",
"log_start_time_expr": "<startTime>,0",
"log_stop_time_expr": "<startTime>,7200",
"TF_state": "BEGIN",
"TF_func": "found_end",
"TN_state": "BEGIN",
"TN_func": "not_found_end",
"TE_state": "STOP",
"TE_func": "exit_error",
"GUI_line_num": "1"
}
STOP = {
"func": ""
}
# ----------------------------- FUNCTION PART -----------------------------
def start():
set_datetime_variable("STARTTIME","STARTTIME-DATE","STARTTIME-TIME")
set_datetime_variable("STOPTIME","STOPTIME-DATE","STOPTIME-TIME")
set_sbk_file("Taxi_LongRides","SET-RIDEID","startTime","endTime")
copy_variable("STARTTIME-BEGIN","STARTTIME")
logfiles_data.read("/home/esa/projects/LA/LogFile/PreProsessed/TaxiRides/TaxiRides_small.csv","startTime")
logfiles_data.transform_operation_keyby("/home/esa/projects/LA/LogFile/PreProsessed/TaxiRides/TaxiRides_small.csv","rideId")
def found_begin():
print("found_begin")
copy_variable("SET-RIDEID","rideId")
copy_variable("STARTTIME-BEGIN","startTime")
def found_end():
print("found_end")
def not_found_end():
print("not_found_end")
copy_variable("STARTTIME-BEGIN","startTime")
print_sbk_file()
def exit_normal():
print("exit_normal")
def exit_error():
print("exit_error")
| gpl-3.0 | 2,315,905,248,688,705,500 | 27.32967 | 125 | 0.556633 | false |
FEniCS/ufl | test/test_apply_function_pullbacks.py | 1 | 12156 | #!/usr/bin/env py.test
# -*- coding: utf-8 -*-
from pytest import raises
from ufl import *
from ufl.algorithms.apply_function_pullbacks import apply_function_pullbacks, apply_single_function_pullbacks
from ufl.algorithms.renumbering import renumber_indices
from ufl.classes import Jacobian, JacobianInverse, JacobianDeterminant, ReferenceValue, CellOrientation
def check_single_function_pullback(g, mappings):
expected = mappings[g]
actual = apply_single_function_pullbacks(g)
rexp = renumber_indices(expected)
ract = renumber_indices(actual)
if not rexp == ract:
print()
print("In check_single_function_pullback:")
print("input:")
print(repr(g))
print("expected:")
print(str(rexp))
print("actual:")
print(str(ract))
print("signatures:")
print((expected**2*dx).signature())
print((actual**2*dx).signature())
print()
assert ract == rexp
def test_apply_single_function_pullbacks_triangle3d():
triangle3d = Cell("triangle", geometric_dimension=3)
cell = triangle3d
domain = as_domain(cell)
UL2 = FiniteElement("DG L2", cell, 1)
U0 = FiniteElement("DG", cell, 0)
U = FiniteElement("CG", cell, 1)
V = VectorElement("CG", cell, 1)
Vd = FiniteElement("RT", cell, 1)
Vc = FiniteElement("N1curl", cell, 1)
T = TensorElement("CG", cell, 1)
S = TensorElement("CG", cell, 1, symmetry=True)
COV2T = FiniteElement("Regge", cell, 0) # (0, 2)-symmetric tensors
CONTRA2T = FiniteElement("HHJ", cell, 0) # (2, 0)-symmetric tensors
Uml2 = UL2*UL2
Um = U*U
Vm = U*V
Vdm = V*Vd
Vcm = Vd*Vc
Tm = Vc*T
Sm = T*S
Vd0 = Vd*U0 # case from failing ffc demo
W = S*T*Vc*Vd*V*U
ul2 = Coefficient(UL2)
u = Coefficient(U)
v = Coefficient(V)
vd = Coefficient(Vd)
vc = Coefficient(Vc)
t = Coefficient(T)
s = Coefficient(S)
cov2t = Coefficient(COV2T)
contra2t = Coefficient(CONTRA2T)
uml2 = Coefficient(Uml2)
um = Coefficient(Um)
vm = Coefficient(Vm)
vdm = Coefficient(Vdm)
vcm = Coefficient(Vcm)
tm = Coefficient(Tm)
sm = Coefficient(Sm)
vd0m = Coefficient(Vd0) # case from failing ffc demo
w = Coefficient(W)
rul2 = ReferenceValue(ul2)
ru = ReferenceValue(u)
rv = ReferenceValue(v)
rvd = ReferenceValue(vd)
rvc = ReferenceValue(vc)
rt = ReferenceValue(t)
rs = ReferenceValue(s)
rcov2t = ReferenceValue(cov2t)
rcontra2t = ReferenceValue(contra2t)
ruml2 = ReferenceValue(uml2)
rum = ReferenceValue(um)
rvm = ReferenceValue(vm)
rvdm = ReferenceValue(vdm)
rvcm = ReferenceValue(vcm)
rtm = ReferenceValue(tm)
rsm = ReferenceValue(sm)
rvd0m = ReferenceValue(vd0m)
rw = ReferenceValue(w)
assert len(w) == 9 + 9 + 3 + 3 + 3 + 1
assert len(rw) == 6 + 9 + 2 + 2 + 3 + 1
assert len(w) == 28
assert len(rw) == 23
assert len(vd0m) == 4
assert len(rvd0m) == 3
# Geometric quantities we need:
J = Jacobian(domain)
detJ = JacobianDeterminant(domain)
Jinv = JacobianInverse(domain)
# o = CellOrientation(domain)
i, j, k, l = indices(4)
# Contravariant H(div) Piola mapping:
M_hdiv = ((1.0/detJ) * J) # Not applying cell orientation here
# Covariant H(curl) Piola mapping: Jinv.T
mappings = {
# Simple elements should get a simple representation
ul2: rul2 / detJ,
u: ru,
v: rv,
vd: as_vector(M_hdiv[i, j]*rvd[j], i),
vc: as_vector(Jinv[j, i]*rvc[j], i),
t: rt,
s: as_tensor([[rs[0], rs[1], rs[2]],
[rs[1], rs[3], rs[4]],
[rs[2], rs[4], rs[5]]]),
cov2t: as_tensor(Jinv[k, i] * rcov2t[k, l] * Jinv[l, j], (i, j)),
contra2t: as_tensor((1.0 / detJ) * (1.0 / detJ)
* J[i, k] * rcontra2t[k, l] * J[j, l], (i, j)),
# Mixed elements become a bit more complicated
uml2: as_vector([ruml2[0] / detJ, ruml2[1] / detJ]),
um: rum,
vm: rvm,
vdm: as_vector([
# V
rvdm[0],
rvdm[1],
rvdm[2],
# Vd
M_hdiv[0, j]*as_vector([rvdm[3], rvdm[4]])[j],
M_hdiv[1, j]*as_vector([rvdm[3], rvdm[4]])[j],
M_hdiv[2, j]*as_vector([rvdm[3], rvdm[4]])[j],
]),
vcm: as_vector([
# Vd
M_hdiv[0, j]*as_vector([rvcm[0], rvcm[1]])[j],
M_hdiv[1, j]*as_vector([rvcm[0], rvcm[1]])[j],
M_hdiv[2, j]*as_vector([rvcm[0], rvcm[1]])[j],
# Vc
Jinv[i, 0]*as_vector([rvcm[2], rvcm[3]])[i],
Jinv[i, 1]*as_vector([rvcm[2], rvcm[3]])[i],
Jinv[i, 2]*as_vector([rvcm[2], rvcm[3]])[i],
]),
tm: as_vector([
# Vc
Jinv[i, 0]*as_vector([rtm[0], rtm[1]])[i],
Jinv[i, 1]*as_vector([rtm[0], rtm[1]])[i],
Jinv[i, 2]*as_vector([rtm[0], rtm[1]])[i],
# T
rtm[2], rtm[3], rtm[4],
rtm[5], rtm[6], rtm[7],
rtm[8], rtm[9], rtm[10],
]),
sm: as_vector([
# T
rsm[0], rsm[1], rsm[2],
rsm[3], rsm[4], rsm[5],
rsm[6], rsm[7], rsm[8],
# S
rsm[9], rsm[10], rsm[11],
rsm[10], rsm[12], rsm[13],
rsm[11], rsm[13], rsm[14],
]),
# Case from failing ffc demo:
vd0m: as_vector([
M_hdiv[0, j]*as_vector([rvd0m[0], rvd0m[1]])[j],
M_hdiv[1, j]*as_vector([rvd0m[0], rvd0m[1]])[j],
M_hdiv[2, j]*as_vector([rvd0m[0], rvd0m[1]])[j],
rvd0m[2]
]),
# This combines it all:
w: as_vector([
# S
rw[0], rw[1], rw[2],
rw[1], rw[3], rw[4],
rw[2], rw[4], rw[5],
# T
rw[6], rw[7], rw[8],
rw[9], rw[10], rw[11],
rw[12], rw[13], rw[14],
# Vc
Jinv[i, 0]*as_vector([rw[15], rw[16]])[i],
Jinv[i, 1]*as_vector([rw[15], rw[16]])[i],
Jinv[i, 2]*as_vector([rw[15], rw[16]])[i],
# Vd
M_hdiv[0, j]*as_vector([rw[17], rw[18]])[j],
M_hdiv[1, j]*as_vector([rw[17], rw[18]])[j],
M_hdiv[2, j]*as_vector([rw[17], rw[18]])[j],
# V
rw[19],
rw[20],
rw[21],
# U
rw[22],
]),
}
# Check functions of various elements outside a mixed context
check_single_function_pullback(ul2, mappings)
check_single_function_pullback(u, mappings)
check_single_function_pullback(v, mappings)
check_single_function_pullback(vd, mappings)
check_single_function_pullback(vc, mappings)
check_single_function_pullback(t, mappings)
check_single_function_pullback(s, mappings)
check_single_function_pullback(cov2t, mappings)
check_single_function_pullback(contra2t, mappings)
# Check functions of various elements inside a mixed context
check_single_function_pullback(uml2, mappings)
check_single_function_pullback(um, mappings)
check_single_function_pullback(vm, mappings)
check_single_function_pullback(vdm, mappings)
check_single_function_pullback(vcm, mappings)
check_single_function_pullback(tm, mappings)
check_single_function_pullback(sm, mappings)
# Check the ridiculous mixed element W combining it all
check_single_function_pullback(w, mappings)
def test_apply_single_function_pullbacks_triangle():
cell = triangle
domain = as_domain(cell)
Ul2 = FiniteElement("DG L2", cell, 1)
U = FiniteElement("CG", cell, 1)
V = VectorElement("CG", cell, 1)
Vd = FiniteElement("RT", cell, 1)
Vc = FiniteElement("N1curl", cell, 1)
T = TensorElement("CG", cell, 1)
S = TensorElement("CG", cell, 1, symmetry=True)
Uml2 = Ul2*Ul2
Um = U*U
Vm = U*V
Vdm = V*Vd
Vcm = Vd*Vc
Tm = Vc*T
Sm = T*S
W = S*T*Vc*Vd*V*U
ul2 = Coefficient(Ul2)
u = Coefficient(U)
v = Coefficient(V)
vd = Coefficient(Vd)
vc = Coefficient(Vc)
t = Coefficient(T)
s = Coefficient(S)
uml2 = Coefficient(Uml2)
um = Coefficient(Um)
vm = Coefficient(Vm)
vdm = Coefficient(Vdm)
vcm = Coefficient(Vcm)
tm = Coefficient(Tm)
sm = Coefficient(Sm)
w = Coefficient(W)
rul2 = ReferenceValue(ul2)
ru = ReferenceValue(u)
rv = ReferenceValue(v)
rvd = ReferenceValue(vd)
rvc = ReferenceValue(vc)
rt = ReferenceValue(t)
rs = ReferenceValue(s)
ruml2 = ReferenceValue(uml2)
rum = ReferenceValue(um)
rvm = ReferenceValue(vm)
rvdm = ReferenceValue(vdm)
rvcm = ReferenceValue(vcm)
rtm = ReferenceValue(tm)
rsm = ReferenceValue(sm)
rw = ReferenceValue(w)
assert len(w) == 4 + 4 + 2 + 2 + 2 + 1
assert len(rw) == 3 + 4 + 2 + 2 + 2 + 1
assert len(w) == 15
assert len(rw) == 14
# Geometric quantities we need:
J = Jacobian(domain)
detJ = JacobianDeterminant(domain)
Jinv = JacobianInverse(domain)
i, j, k, l = indices(4)
# Contravariant H(div) Piola mapping:
M_hdiv = (1.0/detJ) * J
# Covariant H(curl) Piola mapping: Jinv.T
mappings = {
# Simple elements should get a simple representation
ul2: rul2 / detJ,
u: ru,
v: rv,
vd: as_vector(M_hdiv[i, j]*rvd[j], i),
vc: as_vector(Jinv[j, i]*rvc[j], i),
t: rt,
s: as_tensor([[rs[0], rs[1]], [rs[1], rs[2]]]),
# Mixed elements become a bit more complicated
uml2: as_vector([ruml2[0] / detJ, ruml2[1] / detJ]),
um: rum,
vm: rvm,
vdm: as_vector([
# V
rvdm[0],
rvdm[1],
# Vd
M_hdiv[0, j]*as_vector([rvdm[2], rvdm[3]])[j],
M_hdiv[1, j]*as_vector([rvdm[2], rvdm[3]])[j],
]),
vcm: as_vector([
# Vd
M_hdiv[0, j]*as_vector([rvcm[0], rvcm[1]])[j],
M_hdiv[1, j]*as_vector([rvcm[0], rvcm[1]])[j],
# Vc
Jinv[i, 0]*as_vector([rvcm[2], rvcm[3]])[i],
Jinv[i, 1]*as_vector([rvcm[2], rvcm[3]])[i],
]),
tm: as_vector([
# Vc
Jinv[i, 0]*as_vector([rtm[0], rtm[1]])[i],
Jinv[i, 1]*as_vector([rtm[0], rtm[1]])[i],
# T
rtm[2], rtm[3],
rtm[4], rtm[5],
]),
sm: as_vector([
# T
rsm[0], rsm[1],
rsm[2], rsm[3],
# S
rsm[4], rsm[5],
rsm[5], rsm[6],
]),
# This combines it all:
w: as_vector([
# S
rw[0], rw[1],
rw[1], rw[2],
# T
rw[3], rw[4],
rw[5], rw[6],
# Vc
Jinv[i, 0]*as_vector([rw[7], rw[8]])[i],
Jinv[i, 1]*as_vector([rw[7], rw[8]])[i],
# Vd
M_hdiv[0, j]*as_vector([rw[9], rw[10]])[j],
M_hdiv[1, j]*as_vector([rw[9], rw[10]])[j],
# V
rw[11],
rw[12],
# U
rw[13],
]),
}
# Check functions of various elements outside a mixed context
check_single_function_pullback(ul2, mappings)
check_single_function_pullback(u, mappings)
check_single_function_pullback(v, mappings)
check_single_function_pullback(vd, mappings)
check_single_function_pullback(vc, mappings)
check_single_function_pullback(t, mappings)
check_single_function_pullback(s, mappings)
# Check functions of various elements inside a mixed context
check_single_function_pullback(uml2, mappings)
check_single_function_pullback(um, mappings)
check_single_function_pullback(vm, mappings)
check_single_function_pullback(vdm, mappings)
check_single_function_pullback(vcm, mappings)
check_single_function_pullback(tm, mappings)
check_single_function_pullback(sm, mappings)
# Check the ridiculous mixed element W combining it all
check_single_function_pullback(w, mappings)
| lgpl-3.0 | 2,708,946,957,977,316,000 | 29.619647 | 109 | 0.534304 | false |
bubenkoff/Arkestra | vacancies_and_studentships/tests.py | 1 | 22863 | from datetime import datetime, timedelta
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpRequest, QueryDict
from cms.api import create_page
from contacts_and_people.models import Person
from models import Vacancy, Studentship
from lister import (
List, VacanciesAndStudentshipsPluginLister, FilterList
)
from contacts_and_people.models import Entity
class VacanciesTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
self.toothjob = Vacancy(
title = "Pulling teeth",
slug = "pulling-teeth",
date = datetime.now() + timedelta(days=30),
)
def test_generic_attributes(self):
self.toothjob.save()
# the item has no informative content
self.assertEqual(self.toothjob.is_uninformative, True)
# there are no Entities in the database, so this can't be hosted_by anything
self.assertEqual(self.toothjob.hosted_by, None)
# since there are no Entities in the database, default to settings's template
self.assertEqual(self.toothjob.get_template, settings.CMS_TEMPLATES[0][0])
def test_date_related_attributes(self):
self.toothjob.date = datetime(year=2012, month=12, day=12)
self.assertEqual(self.toothjob.get_when, "December 2012")
def test_link_to_more(self):
self.assertEqual(
self.toothjob.auto_page_view_name,
"vacancies-and-studentships"
)
self.toothjob.hosted_by = Entity(slug="slug")
self.assertEqual(
self.toothjob.link_to_more(),
"/vacancies-and-studentships/slug/"
)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class VacanciesItemsViewsTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
# create a vacancy item
self.toothjob = Vacancy(
title = "Pulling teeth",
slug = "pulling-teeth",
date = datetime.now() + timedelta(days=30),
)
self.adminuser = User.objects.create_user('arkestra', '[email protected]', 'arkestra')
self.adminuser.is_staff=True
self.adminuser.save()
# vacancy tests
def test_unpublished_vacancy_404(self):
self.toothjob.save()
# Issue a GET request.
response = self.client.get('/vacancy/pulling-teeth/')
# Check that the response is 404 because it's not published
self.assertEqual(response.status_code, 404)
def test_unpublished_vacancy_200_for_admin(self):
self.toothjob.save()
# log in a staff user
self.client.login(username='arkestra', password='arkestra')
response = self.client.get('/vacancy/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_vacancy_200_for_everyone(self):
self.toothjob.published = True
self.toothjob.save()
# Check that the response is 200 OK.
response = self.client.get('/vacancy/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_vacancy_context(self):
self.toothjob.published = True
self.toothjob.save()
response = self.client.get('/vacancy/pulling-teeth/')
self.assertEqual(response.context['vacancy'], self.toothjob)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class StudentshipsItemsViewsTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
# create a studentship item
self.toothjob = Studentship(
title = "Pulling teeth",
slug = "pulling-teeth",
date = datetime.now() + timedelta(days=30),
)
self.adminuser = User.objects.create_user('arkestra', '[email protected]', 'arkestra')
self.adminuser.is_staff=True
self.adminuser.save()
# studentship tests
def test_unpublished_studentship_404(self):
self.toothjob.save()
# Issue a GET request.
response = self.client.get('/studentship/pulling-teeth/')
# Check that the response is 404 because it's not published
self.assertEqual(response.status_code, 404)
def test_unpublished_studentship_200_for_admin(self):
self.toothjob.save()
# log in a staff user
self.client.login(username='arkestra', password='arkestra')
response = self.client.get('/studentship/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_studentship_200_for_everyone(self):
self.toothjob.published = True
self.toothjob.save()
# Check that the response is 200 OK.
response = self.client.get('/studentship/pulling-teeth/')
self.assertEqual(response.status_code, 200)
def test_published_studentship_context(self):
self.toothjob.published = True
self.toothjob.save()
response = self.client.get('/studentship/pulling-teeth/')
self.assertEqual(response.context['studentship'], self.toothjob)
class ReverseURLsTests(TestCase):
def test_vacancy_reverse_url(self):
self.assertEqual(
reverse("vacancy", kwargs={"slug": "tooth-puller"}),
"/vacancy/tooth-puller/"
)
def test_studentship_reverse_url(self):
self.assertEqual(
reverse("studentship", kwargs={"slug": "tooth-puller"}),
"/studentship/tooth-puller/"
)
def test_archived_vacancies_base_reverse_url(self):
self.assertEqual(
reverse("vacancies-archive"),
"/archived-vacancies/"
)
def test_archived_vacancies_reverse_url(self):
self.assertEqual(
reverse("vacancies-archive", kwargs={"slug": "some-slug"}),
"/archived-vacancies/some-slug/"
)
def test_current_vacancies_base_reverse_url(self):
self.assertEqual(
reverse("vacancies-current"),
"/vacancies/"
)
def test_current_vacancies_reverse_url(self):
self.assertEqual(
reverse("vacancies-current", kwargs={"slug": "some-slug"}),
"/vacancies/some-slug/"
)
def test_archived_studentships_base_reverse_url(self):
self.assertEqual(
reverse("studentships-archive"),
"/archived-studentships/"
)
def test_archived_studentships_reverse_url(self):
self.assertEqual(
reverse("studentships-archive", kwargs={"slug": "some-slug"}),
"/archived-studentships/some-slug/"
)
def test_current_studentships_base_reverse_url(self):
self.assertEqual(
reverse("studentships-current"),
"/studentships/"
)
def test_current_studentships_reverse_url(self):
self.assertEqual(
reverse("studentships-current", kwargs={"slug": "some-slug"}),
"/studentships/some-slug/"
)
def test_base_reverse_url(self):
self.assertEqual(
reverse("vacancies-and-studentships"),
"/vacancies-and-studentships/"
)
def test_reverse_url(self):
self.assertEqual(
reverse("vacancies-and-studentships", kwargs={"slug": "some-slug"}),
"/vacancies-and-studentships/some-slug/"
)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class VacanciesStudentshipsEntityPagesViewsTests(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
home_page = create_page(
"School home page",
"null.html",
"en",
published=True
)
self.school = Entity(
name="School of Medicine",
slug="medicine",
auto_vacancies_page=True,
website=home_page
)
# entity vacancies and studentships URLs - has vacancies and studentships pages
def test_main_url(self):
self.school.save()
response = self.client.get('/vacancies-and-studentships/')
self.assertEqual(response.status_code, 200)
def test_entity_url(self):
self.school.save()
response = self.client.get('/vacancies-and-studentships/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_url(self):
self.school.save()
response = self.client.get('/vacancies-and-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_main_archive_url(self):
self.school.save()
response = self.client.get('/archived-vacancies/')
self.assertEqual(response.status_code, 200)
def test_entity_vacancies_archive_url(self):
self.school.save()
response = self.client.get('/archived-vacancies/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_vacancies_archive_url(self):
self.school.save()
response = self.client.get('/archived-vacancies/xxxx/')
self.assertEqual(response.status_code, 404)
def test_main_archived_studentships_url(self):
self.school.save()
response = self.client.get('/archived-studentships/')
self.assertEqual(response.status_code, 200)
def test_entity_archived_studentships_url(self):
self.school.save()
response = self.client.get('/archived-studentships/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_archived_studentships_url(self):
self.school.save()
response = self.client.get('/archived-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_main_all_current_studentships_url(self):
self.school.save()
response = self.client.get('/studentships/')
self.assertEqual(response.status_code, 200)
def test_entity_all_current_studentships_url(self):
self.school.save()
response = self.client.get('/studentships/medicine/')
self.assertEqual(response.status_code, 200)
def test_bogus_entity_all_current_studentships_url(self):
self.school.save()
        response = self.client.get('/studentships/xxx/')
self.assertEqual(response.status_code, 404)
# entity vacancies and studentships URLs - no vacancies and studentships pages
def test_no_auto_page_main_url(self):
self.school.auto_vacancies_page = False
self.school.save()
response = self.client.get('/vacancies-and-studentships/')
self.assertEqual(response.status_code, 404)
    def test_no_auto_page_entity_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/vacancies-and-studentships/medicine/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_bogus_entity_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/vacancies-and-studentships/xxxx/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_main_archive_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/archived-vacancies/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_entity_vacancies_archive_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/archived-vacancies/medicine/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_bogus_entity_vacancies_archive_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/archived-vacancies/xxxx/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_main_archived_studentships_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/archived-studentships/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_entity_archived_studentships_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/archived-studentships/medicine/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_bogus_entity_archived_studentships_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/archived-studentships/xxxx/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_main_all_current_studentships_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/studentships/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_entity_all_current_studentships_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/studentships/medicine/')
        self.assertEqual(response.status_code, 404)
    def test_no_auto_page_bogus_entity_all_current_studentships_url(self):
        self.school.auto_vacancies_page = False
        self.school.save()
        response = self.client.get('/studentships/xxx/')
        self.assertEqual(response.status_code, 404)
# entity vacancies and studentships URLs - no entity home page
def test_no_entity_home_page_main_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/vacancies-and-studentships/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_entity_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/vacancies-and-studentships/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_bogus_entity_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/vacancies-and-studentships/xxxx/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_main_archive_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/archived-vacancies/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_entity_vacancies_archive_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/archived-vacancies/medicine/')
self.assertEqual(response.status_code, 404)
def test_no_entity_home_page_bogus_entity_vacancies_archive_url(self):
self.school.website = None
self.school.save()
response = self.client.get('/archived-vacancies/xxxx/')
self.assertEqual(response.status_code, 404)
    def test_no_entity_home_page_main_archived_studentships_url(self):
        self.school.website = None
        self.school.save()
        response = self.client.get('/archived-studentships/')
        self.assertEqual(response.status_code, 404)
    def test_no_entity_home_page_entity_archived_studentships_url(self):
        self.school.website = None
        self.school.save()
        response = self.client.get('/archived-studentships/medicine/')
        self.assertEqual(response.status_code, 404)
    def test_no_entity_home_page_bogus_entity_archived_studentships_url(self):
        self.school.website = None
        self.school.save()
        response = self.client.get('/archived-studentships/xxxx/')
        self.assertEqual(response.status_code, 404)
    def test_no_entity_home_page_main_all_current_studentships_url(self):
        self.school.website = None
        self.school.save()
        response = self.client.get('/studentships/')
        self.assertEqual(response.status_code, 404)
    def test_no_entity_home_page_entity_all_current_studentships_url(self):
        self.school.website = None
        self.school.save()
        response = self.client.get('/studentships/medicine/')
        self.assertEqual(response.status_code, 404)
    def test_no_entity_home_page_bogus_entity_all_current_studentships_url(self):
        self.school.website = None
        self.school.save()
        response = self.client.get('/studentships/xxx/')
        self.assertEqual(response.status_code, 404)
class ListTests(TestCase):
def setUp(self):
self.item1 = Vacancy(
title="closes today, less important",
in_lists=True,
published=True,
date=datetime.now()
)
self.item1.save()
self.item2 = Vacancy(
title="closed 20 days ago, important",
summary="a job for today",
in_lists=True,
published=True,
date=datetime.now()-timedelta(days=20),
importance=3,
slug="item2"
)
self.item2.save()
self.item3 = Vacancy(
title="closes in the future",
in_lists=True,
published=True,
date=datetime.now()+timedelta(days=20),
importance=3,
slug="item3"
)
self.item3.save()
self.itemlist = List()
self.itemlist.model = Vacancy
self.itemlist.items = Vacancy.objects.all()
def test_all_items_order(self):
self.assertEqual(
list(self.itemlist.items),
[self.item2, self.item1, self.item3]
)
def test_reorder_by_importance_date_only(self):
# check the re-ordered items are not changed
self.itemlist.re_order_by_importance()
self.assertEqual(
list(self.itemlist.items),
[self.item2, self.item1, self.item3]
)
def test_reorder_by_importance_date_makes_no_difference(self):
# check that items are re-ordered by importance
self.itemlist.order_by = "importance/date"
self.itemlist.re_order_by_importance()
self.assertEqual(
list(self.itemlist.items),
[self.item2, self.item1, self.item3]
)
    def test_truncate_items(self):
        # check that the item list is truncated to limit_to items
self.itemlist.limit_to = 1
self.itemlist.truncate_items()
self.assertEqual(
list(self.itemlist.items),
[self.item2]
)
def test_set_items_for_person(self):
p = Person()
p.save()
self.item1.please_contact.add(p)
self.itemlist.person = p
self.itemlist.set_items_for_person()
self.assertEqual(
list(self.itemlist.items),
[self.item1]
)
def test_build(self):
self.itemlist.build()
self.assertEqual(list(self.itemlist.items), [self.item1, self.item3])
def test_other_items(self):
school = Entity(name="School of Medicine", short_name="Medicine")
school.save()
self.itemlist.entity = school
self.itemlist.other_item_kinds = ["archived", "open", "main"]
self.itemlist.build()
# "main" other items are always created; the others need tests to
# see if any exist
self.assertEqual(
self.itemlist.other_items(),
[{
'link': '/vacancies-and-studentships/',
'title': u'Medicine vacancies & studentships',
'css_class': 'main',
}]
)
# now we save some items
self.item1.hosted_by = school
self.item2.hosted_by = school
self.item3.hosted_by = school
self.item1.save()
self.item2.save()
self.item3.save()
self.itemlist.build()
self.assertEqual(list(self.itemlist.items), [self.item1, self.item3])
self.assertEqual(list(self.itemlist.archived), [self.item2])
self.assertEqual(
list(self.itemlist.other_items()),
[{
'count': 2,
'link': '/vacancies/',
'title': 'All open vacancies'
},
{
'count': 1,
'link': '/archived-vacancies/',
'title': 'Archived vacancies'
},
{
'link': '/vacancies-and-studentships/',
'title': u'Medicine vacancies & studentships',
'css_class': 'main',
},
]
)
class FilterListTests(TestCase):
def setUp(self):
self.item1 = Vacancy(
title="closes today, less important",
in_lists=True,
published=True,
date=datetime.now()
)
self.item1.save()
self.item2 = Vacancy(
title="closed 20 days ago, important",
summary="a job for today",
in_lists=True,
published=True,
date=datetime.now()-timedelta(days=20),
importance=3,
slug="item2"
)
self.item2.save()
self.item3 = Vacancy(
title="closes in the future",
in_lists=True,
published=True,
date=datetime.now()+timedelta(days=20),
importance=3,
slug="item3"
)
self.item3.save()
self.itemlist = FilterList()
self.itemlist.model = Vacancy
self.itemlist.request = HttpRequest()
def test_filter_on_search_terms_no_terms(self):
query = QueryDict("")
self.itemlist.request.GET = query
self.itemlist.build()
self.assertEqual(
list(self.itemlist.items),
[self.item1, self.item3]
)
def test_filter_on_search_terms_1_match(self):
query = QueryDict("text=today")
self.itemlist.request.GET = query
self.itemlist.build()
self.assertEqual(
list(self.itemlist.items),
[self.item1]
)
class PluginListerTests(TestCase):
def test_other_items(self):
lister = VacanciesAndStudentshipsPluginLister(
entity=Entity(slug="test")
)
self.assertItemsEqual(
lister.other_items(),
[{
'css_class': 'main',
'link': '/vacancies-and-studentships/test/',
'title': 'More '
}]
)
| bsd-2-clause | 8,495,456,747,010,492,000 | 33.073025 | 97 | 0.611906 | false |
1orwell/yrs2013 | fake.py | 1 | 3440 | '''Generate necessary dump files'''
#options
size = 100
regenerate_graph = False
days = 1
force_layout = False
default = str(size)+'.dat'
###
import igraph, pickle, random, os
import math
from collections import OrderedDict
def process(fout):
output = os.path.join('data',fout)
try:
#load graph if previously generated.
g = pickle.load(open('dump.dat'))
print 'Graph loaded from dump.dat'
except IOError:
#generate graph if it does not exist in the directory
print 'Generating graph to dump.dat'
g = igraph.Graph()
g.add_vertices(791)
g.es["weight"] = 1.0
g.delete_vertices([0])
with open('./flu-data/edgeLists/durationCondition/addThenChop/dropoff=0/minimumDuration=1/deltaT=1620/staticWeightedEdgeList_at=1350_min=540_max=2159.txt') as edges:
for edge in edges:
u, v, w = map(int, edge.split())
g[u, v] = 1.0/w
g.delete_vertices(g.vs(_degree_eq = 0))
pickle.dump(g,open('dump.dat','wb'))
print 'Finished'
#take sample of n points
sample = random.sample(range(1,788),790-size)
g.delete_vertices(sample)
print g.summary()
#Fiddle layout
print 'Working out layout'
if force_layout:
#starting everyone at their own location
#coords definition stolen from sim_group_move.py
coords = []
wrap = 10 #positions per row
        col_length = int(math.ceil(size / float(wrap)))  # ceil needs float division in Python 2
for y in range(col_length):
for x in range(wrap):
coords.append((x,y))
print coords
centre = (wrap/2, col_length/2)
else:
l = g.layout_kamada_kawai()
centre = l.centroid()
coords = l.coords
def distance(x, y): return math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
#sort the coords by their position from the centre
order = sorted(enumerate(coords), key = lambda x: distance(x[1], centre))
order = [x[0] for x in order]
#work out mininum global time
mintime = 1000 #must be less than this
for x in order:
if x == 0: continue
with open('./flu-data/moteFiles/node-'+str(x)) as fin:
line = fin.readline()
if line:
t = int(line.split()[-1])
if t < mintime:
mintime = t
completed = []
times = {}
print 'Generating movement file'
for node in order:
if node == 0: continue
times[node] = OrderedDict({0 : node})
node_name = 'node-'+str(node)
f = open('./flu-data/moteFiles/'+node_name, 'r')
for contact in f:
line = map(int, contact.split())
contact_id = line[0]
time = (line[-1] - mintime + 1)
if contact_id in completed:
current_max = 0
current_time = -1
for t, pos in times[contact_id].items():
if current_time < t <= time:
current_max = pos
current_time = t
position = current_max
times[node][time] = position
completed.append(node)
f.close()
print 'Writing movement file'
out = {'coords': coords, 'movement': times}
pickle.dump(out, open(output, 'wb'))
if __name__ == '__main__':
process(default)
| mit | 7,964,937,547,895,710,000 | 26.96748 | 173 | 0.54157 | false |
antiface/audiolazy | audiolazy/lazy_io.py | 1 | 14038 | # -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2014 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Fri Jul 20 2012
# danilo [dot] bellini [at] gmail [dot] com
"""
Audio recording input and playing output module
"""
import threading
import struct
import array
# Audiolazy internal imports
from ._internals import deprecate
from .lazy_stream import Stream
from .lazy_misc import DEFAULT_SAMPLE_RATE, blocks
from .lazy_compat import xrange, xmap
from .lazy_math import inf
from .lazy_core import StrategyDict
__all__ = ["chunks", "RecStream", "AudioIO", "AudioThread"]
# Conversion dict from structs.Struct() format symbols to PyAudio constants
_STRUCT2PYAUDIO = {"f": 1, #pyaudio.paFloat32
"i": 2, #pyaudio.paInt32
"h": 8, #pyaudio.paInt16
"b": 16, #pyaudio.paInt8
"B": 32, #pyaudio.paUInt8
}
chunks = StrategyDict("chunks")
chunks.__class__.size = 2048 # Samples
@chunks.strategy("struct")
def chunks(seq, size=None, dfmt="f", byte_order=None, padval=0.):
"""
Chunk generator based on the struct module (Python standard library).
Low-level data blockenizer for homogeneous data as a generator, to help
writing an iterable into a file.
The dfmt should be one char, chosen from the ones in link:
`<http://docs.python.org/library/struct.html#format-characters>`_
Useful examples (integer are signed, use upper case for unsigned ones):
- "b" for 8 bits (1 byte) integer
- "h" for 16 bits (2 bytes) integer
- "i" for 32 bits (4 bytes) integer
- "f" for 32 bits (4 bytes) float (default)
- "d" for 64 bits (8 bytes) float (double)
Byte order follows native system defaults. Other options are in the site:
`<http://docs.python.org/library/struct.html#struct-alignment>`_
They are:
- "<" means little-endian
- ">" means big-endian
Note
----
Default chunk size can be accessed (and changed) via chunks.size.
"""
if size is None:
size = chunks.size
dfmt = str(size) + dfmt
if byte_order is None:
struct_string = dfmt
else:
struct_string = byte_order + dfmt
s = struct.Struct(struct_string)
for block in blocks(seq, size, padval=padval):
yield s.pack(*block)
@chunks.strategy("array")
def chunks(seq, size=None, dfmt="f", byte_order=None, padval=0.):
"""
Chunk generator based on the array module (Python standard library).
See chunk.struct for more help. This strategy uses array.array (random access
by indexing management) instead of struct.Struct and blocks/deque (circular
queue appending) from the chunks.struct strategy.
Hint
----
Try each one to find the faster one for your machine, and chooses
the default one by assigning ``chunks.default = chunks.strategy_name``.
It'll be the one used by the AudioIO/AudioThread playing mechanism.
Note
----
The ``dfmt`` symbols for arrays might differ from structs' defaults.
"""
if size is None:
size = chunks.size
chunk = array.array(dfmt, xrange(size))
idx = 0
for el in seq:
chunk[idx] = el
idx += 1
if idx == size:
yield chunk.tostring()
idx = 0
if idx != 0:
for idx in xrange(idx, size):
chunk[idx] = padval
yield chunk.tostring()
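# A minimal usage sketch (added illustration; the file name and sample data
# are assumptions, not part of the original module). Calling ``chunks``
# directly uses the default strategy; a specific one can be picked as
# ``chunks.struct(...)`` / ``chunks.array(...)``, or made the default with
# ``chunks.default = chunks.array``:
#
#   with open("out.raw", "wb") as fobj:
#       for raw in chunks([0., .5, -.5] * 3000, size=1024, dfmt="f"):
#           fobj.write(raw)  # the last block is zero-padded via padval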
class RecStream(Stream):
"""
Recording Stream
A common Stream class with a ``stop`` method for input data recording
and a ``recording`` read-only property for status.
"""
def __init__(self, device_manager, file_obj, chunk_size, dfmt):
if chunk_size is None:
chunk_size = chunks.size
s = struct.Struct("{0}{1}".format(chunk_size, dfmt))
def rec():
try:
while self._recording:
for k in s.unpack(file_obj.read(chunk_size)):
yield k
finally:
file_obj.close()
self._recording = False # Loop can be broken by StopIteration
self.device_manager.recording_finished(self)
super(RecStream, self).__init__(rec())
self._recording = True
self.device_manager = device_manager
def stop(self):
""" Finishes the recording stream, so it can raise StopIteration """
self._recording = False
@property
def recording(self):
return self._recording
class AudioIO(object):
"""
Multi-thread stream manager wrapper for PyAudio.
"""
def __init__(self, wait=False, api=None):
"""
Constructor to PyAudio Multi-thread manager audio IO interface.
The "wait" input is a boolean about the behaviour on closing the
instance, if it should or not wait for the streaming audio to finish.
Defaults to False. Only works if the close method is explicitly
called.
"""
import pyaudio
self._pa = pa = pyaudio.PyAudio()
self._threads = []
self.wait = wait # Wait threads to finish at end (constructor parameter)
self._recordings = []
# Lockers
self.halting = threading.Lock() # Only for "close" method
self.lock = threading.Lock() # "_threads" access locking
self.finished = False
# Choosing the PortAudio API (needed to use Jack)
if not (api is None):
api_count = pa.get_host_api_count()
apis_gen = xmap(pa.get_host_api_info_by_index, xrange(api_count))
try:
self.api = next(el for el in apis_gen
if el["name"].lower().startswith(api))
except StopIteration:
raise RuntimeError("API '{}' not found".format(api))
def __del__(self):
"""
Default destructor. Use close method instead, or use the class
instance as the expression of a with block.
"""
self.close()
def __exit__(self, etype, evalue, etraceback):
"""
Closing destructor for use internally in a with-expression.
"""
self.close()
def __enter__(self):
"""
To be used only internally, in the with-expression protocol.
"""
return self
def close(self):
"""
Destructor for this audio interface. Waits the threads to finish their
streams, if desired.
"""
with self.halting: # Avoid simultaneous "close" threads
if not self.finished: # Ignore all "close" calls, but the first,
self.finished = True # and any call to play would raise ThreadError
# Closes all playing AudioThread instances
while True:
with self.lock: # Ensure there's no other thread messing around
try:
thread = self._threads[0] # Needless to say: pop = deadlock
except IndexError: # Empty list
break # No more threads
if not self.wait:
thread.stop()
thread.join()
# Closes all recording RecStream instances
while self._recordings:
recst = self._recordings[-1]
recst.stop()
recst.take(inf) # Ensure it'll be closed
# Finishes
assert not self._pa._streams # No stream should survive
self._pa.terminate()
def terminate(self):
"""
Same as "close".
"""
self.close() # Avoids direct calls to inherited "terminate"
def play(self, audio, **kwargs):
"""
Start another thread playing the given audio sample iterable (e.g. a
list, a generator, a NumPy np.ndarray with samples), and play it.
The arguments are used to customize behaviour of the new thread, as
parameters directly sent to PyAudio's new stream opening method, see
AudioThread.__init__ for more.
"""
with self.lock:
if self.finished:
raise threading.ThreadError("Trying to play an audio stream while "
"halting the AudioIO manager object")
new_thread = AudioThread(self, audio, **kwargs)
self._threads.append(new_thread)
new_thread.start()
return new_thread
def thread_finished(self, thread):
"""
Updates internal status about open threads. Should be called only by
the internal closing mechanism of AudioThread instances.
"""
with self.lock:
self._threads.remove(thread)
def recording_finished(self, recst):
"""
Updates internal status about open recording streams. Should be called
only by the internal closing mechanism of children RecStream instances.
"""
self._recordings.remove(recst)
def record(self, chunk_size = None,
dfmt = "f",
channels = 1,
rate = DEFAULT_SAMPLE_RATE,
**kwargs
):
"""
Records audio from device into a Stream.
Parameters
----------
chunk_size :
Number of samples per chunk (block sent to device).
dfmt :
Format, as in chunks(). Default is "f" (Float32).
channels :
Channels in audio stream (serialized).
rate :
Sample rate (same input used in sHz).
Returns
-------
Endless Stream instance that gather data from the audio input device.
"""
if chunk_size is None:
chunk_size = chunks.size
if hasattr(self, "api"):
kwargs.setdefault("input_device_index", self.api["defaultInputDevice"])
channels = kwargs.pop("nchannels", channels) # Backwards compatibility
input_stream = RecStream(self,
self._pa.open(format=_STRUCT2PYAUDIO[dfmt],
channels=channels,
rate=rate,
frames_per_buffer=chunk_size,
input=True,
**kwargs),
chunk_size,
dfmt
)
self._recordings.append(input_stream)
return input_stream
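# A minimal playback/recording sketch (added illustration; the sine generator
# and the two-second duration are assumptions). ``wait=True`` makes ``close``
# -- called implicitly at the end of the ``with`` block -- wait for playback
# to finish:
#
#   from math import sin, pi
#   rate = DEFAULT_SAMPLE_RATE
#   two_secs = (sin(2 * pi * 440 * t / rate) for t in xrange(2 * rate))
#   with AudioIO(wait=True) as player:
#       player.play(two_secs, rate=rate)
#
# Recording yields an endless RecStream; stop it explicitly when done:
#
#   with AudioIO() as rec:
#       mic = rec.record(rate=rate)
#       first_second = mic.take(rate)  # Stream API from lazy_stream
#       mic.stop()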
class AudioThread(threading.Thread):
"""
Audio output thread.
This class is a wrapper to ease the use of PyAudio using iterables of
numbers (Stream instances, lists, tuples, NumPy 1D arrays, generators) as
audio data streams.
"""
def __init__(self, device_manager, audio,
chunk_size = None,
dfmt = "f",
channels = 1,
rate = DEFAULT_SAMPLE_RATE,
daemon = True, # This shouldn't survive after crashes
**kwargs
):
"""
Sets a new thread to play the given audio.
Parameters
----------
chunk_size :
Number of samples per chunk (block sent to device).
dfmt :
Format, as in chunks(). Default is "f" (Float32).
channels :
Channels in audio stream (serialized).
rate :
Sample rate (same input used in sHz).
daemon :
Boolean telling if thread should be daemon. Default is True.
"""
super(AudioThread, self).__init__()
self.daemon = daemon # threading.Thread property, couldn't be assigned
# before the superclass constructor
# Stores data needed by the run method
self.audio = audio
self.device_manager = device_manager
self.dfmt = dfmt
self.channels = kwargs.pop("nchannels", channels)
self.chunk_size = chunks.size if chunk_size is None else chunk_size
# Lockers
self.lock = threading.Lock() # Avoid control methods simultaneous call
self.go = threading.Event() # Communication between the 2 threads
self.go.set()
self.halting = False # The stop message
# Get the streaming function
import _portaudio # Just to be slightly faster (per chunk!)
self.write_stream = _portaudio.write_stream
if hasattr(device_manager, "api"):
kwargs.setdefault("output_device_index",
device_manager.api["defaultOutputDevice"])
# Open a new audio output stream
self.stream = device_manager._pa.open(format=_STRUCT2PYAUDIO[dfmt],
channels=channels,
rate=rate,
frames_per_buffer=self.chunk_size,
output=True,
**kwargs)
# Backwards compatibility
nchannels = property(deprecate(lambda self: self.channels))
def run(self):
"""
Plays the audio. This method plays the audio, and shouldn't be called
explicitly, let the constructor do so.
"""
# From now on, it's multi-thread. Let the force be with them.
st = self.stream._stream
for chunk in chunks(self.audio,
size=self.chunk_size*self.nchannels,
dfmt=self.dfmt):
#Below is a faster way to call:
# self.stream.write(chunk, self.chunk_size)
self.write_stream(st, chunk, self.chunk_size, False)
if not self.go.is_set():
self.stream.stop_stream()
if self.halting:
break
self.go.wait()
self.stream.start_stream()
# Finished playing! Destructor-like step: let's close the thread
with self.lock:
if self in self.device_manager._threads: # If not already closed
self.stream.close()
self.device_manager.thread_finished(self)
def stop(self):
""" Stops the playing thread and close """
with self.lock:
self.halting = True
self.go.clear()
def pause(self):
""" Pauses the audio. """
with self.lock:
self.go.clear()
def play(self):
""" Resume playing the audio. """
with self.lock:
self.go.set()
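# Control-flow sketch (added illustration, reusing the ``player``/``two_secs``
# names assumed in the AudioIO example above): AudioIO.play returns this
# thread, which can be paused, resumed and stopped from the caller:
#
#   th = player.play(two_secs, rate=rate)
#   th.pause()  # stream stops, thread stays alive
#   th.play()   # resumes playback
#   th.stop()   # finishes the thread and closes its PyAudio stream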
| gpl-3.0 | -8,054,913,346,017,574,000 | 30.195556 | 79 | 0.616826 | false |
Answeror/aip | aip/imfs/cascade.py | 1 | 1705 | from .base import NameMixin
def load_ext(name, bases):
return need_raw(
name,
bases,
lambda base: base.load(name)
)
def thumbnail_ext(name, width, height, bases):
return need_raw(
name,
bases,
lambda base: base.thumbnail(name, width, height)
)
def mtime_ext(name, bases):
return need_raw(
name,
bases,
lambda base: base.mtime(name)
)
def need_raw(name, bases, f):
assert bases
if len(bases) == 1:
return f(bases[0])
try:
data = f(bases[0])
if data is not None:
return data
except:
pass
data = load_ext(name, bases[1:])
if data is not None:
try:
bases[0].save(name, data)
except:
pass
return f(bases[0])
class Cascade(NameMixin):
def __init__(self, *args):
self.bases = args
assert self.bases
def _load(self, name):
return load_ext(name, self.bases)
def _save(self, name, data):
for base in self.bases:
base.save(name, data)
def _thumbnail(self, name, width, height):
return thumbnail_ext(name, width, height, self.bases)
def _has(self, name):
for base in self.bases:
if base.has(name):
return True
return False
def _remove(self, name):
for base in self.bases:
base.remove(name)
def _mtime(self, name):
return mtime_ext(name, self.bases)
def _cache_timeout(self, name):
for base in self.bases:
ret = base.cache_timeout(name)
if ret is not None:
return ret
return None
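# A minimal usage sketch (added illustration; LocalCacheFS and RemoteFS are
# hypothetical backends, not part of this package). Reads try each backend in
# order, and need_raw writes a hit from a later backend back into the earlier
# ones, so a local cache can front a remote store:
#
#   fs = Cascade(LocalCacheFS(), RemoteFS())
#   data = fs.load('image.jpg')  # a remote hit gets saved into the cache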
| mit | -1,043,821,107,190,842,800 | 20.049383 | 61 | 0.537243 | false |
JustRamon/SpeechController | SC.py | 1 | 1113 | #!/usr/bin/env python3
import speech_recognition as sr
import ksr10
import time
arm = ksr10.ksr10_class()
while 1:
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
    try:
        rn = r.recognize_google(audio)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
        continue  # rn would be undefined below without this
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        continue
if rn == "up":
arm.move("elbow","up")
time.sleep(1.5)
arm.stop()
if rn == "down":
arm.move("elbow","down")
time.sleep(1.5)
arm.stop()
if rn == "light":
arm.lights()
if rn == "grip":
with open ("Save.txt", "r") as file_:
oc = file_.read()
if oc == "1":
arm.move("grip","close")
time.sleep(1.6)
arm.stop()
with open ("Save.txt", "w") as file_:
file_.write("0")
elif oc == "0":
arm.move("grip","open")
time.sleep(1.4)
arm.stop()
with open ("Save.txt", "w") as file_:
file_.write("1")
else:
print "Error, file contains: " + oc
if rn == "stop":
break
| gpl-2.0 | -2,197,478,390,076,488,000 | 22.1875 | 97 | 0.607367 | false |
starbt/flea_market | market/migrations/0006_auto_20161206_2033.py | 1 | 1232 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 12:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('market', '0005_auto_20161206_1204'),
]
operations = [
migrations.AddField(
model_name='goods',
name='discount',
field=models.IntegerField(blank=True, default=0),
),
migrations.AddField(
model_name='goods',
name='goods_phone',
field=models.IntegerField(blank=True, default=0),
),
migrations.AddField(
model_name='goods',
name='goods_qq',
field=models.IntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name='goods',
name='picture_url',
field=models.CharField(blank=True, max_length=128),
),
migrations.AlterField(
model_name='userprofile',
name='picture_url',
field=models.CharField(blank=True, default='http://ershou.u.qiniudn.com/Android_1480732854630_186265.jpg?imageView2/5/w/800/h/800', max_length=128),
),
]
| mit | -7,127,938,892,819,470,000 | 29.8 | 160 | 0.571429 | false |
UbiCastTeam/touchwizard | touchwizard/canvas.py | 1 | 16655 | # -*- coding: utf-8 -*
import clutter
import gobject
import easyevent
import logging
import os
import time
from touchwizard.loading import LoadingWidget
logger = logging.getLogger('touchwizard')
class Canvas(clutter.Actor, clutter.Container, easyevent.User):
"""Wizard main actor which manages the user interface and pages.
Listen for event:
- next_page (page_name)
Request for a new page identified by its name passed as content.
The current page becomes in top of the page history.
- previous_page
Request for displaying back the top of the page history. No content
expected. If the history is empty, quit the wizard.
- request_quit
Request for quitting the wizard. Call prepare_quit callback
if it exists and there launch the wizard_quit which should
be handled by the user main script.
Launch the event:
- wizard_quit
Sent after prepare_quit callback to notify the main script that it
can end the process.
"""
__gtype_name__ = 'Canvas'
# infobar_height = 104
# iconbar_height = 200
def __init__(self, first_page):
import touchwizard
clutter.Actor.__init__(self)
easyevent.User.__init__(self)
self.session = touchwizard.Session()
self.background = None
self.last_page_name = None
self.last_page_timestamp = None
self.previous_page_locked = False
self.previous_page_timeout_id = None
if touchwizard.canvas_bg:
if not os.path.exists(touchwizard.canvas_bg):
logger.error('Canvas background %s not found.', touchwizard.canvas_bg)
self.background = clutter.Texture(touchwizard.canvas_bg)
self.background.set_parent(self)
self.infobar = touchwizard.InfoBar()
self.infobar.set_parent(self)
self.iconbar = touchwizard.IconBar()
self.iconbar.set_parent(self)
self.loading = LoadingWidget()
self.loading.set_parent(self)
self.loading.hide()
self.loading_padding = 10
self.home_icon = touchwizard.Icon('shutdown')
self.home_icon.build()
self.previous_icon = touchwizard.IconRef(touchwizard.Icon('previous'))
# self.previous_icon.build()
easyevent.forward_event('icon_previous_actioned', 'previous_page')
self.history = list()
self.first_page = first_page
self.available_pages = dict()
self.current_page = None
self.register_event('next_page', 'previous_page', 'refresh_page', 'clear_history')
self.register_event('request_quit')
gobject.idle_add(self.lookup_pages)
gobject.idle_add(self.display_page, first_page)
def lookup_pages(self):
import touchwizard
origin = ''
path = touchwizard.page_path
if path is None:
if self.first_page is None:
return tuple()
self.available_pages[self.first_page.name] = self.first_page
import sys
origin = sys.modules[self.first_page.__module__].__file__
path = os.path.dirname(os.path.abspath(os.path.expanduser(origin)))
import imp
for f in os.listdir(path):
if f.endswith('.py') and f != os.path.basename(origin):
try:
module = imp.load_source(f[:-3], os.path.join(path, f))
except:
import traceback
logger.error('Cannot import page %s:\n%s', f[:-3], traceback.format_exc())
if not touchwizard.tolerant_to_page_import_error:
import sys
sys.exit(1)
continue
for attr_name in dir(module):
if attr_name.startswith('__'):
continue
attribute = getattr(module, attr_name)
if isinstance(attribute, type) \
and issubclass(attribute, touchwizard.Page) \
and attribute is not touchwizard.Page:
self.available_pages[attribute.name] = attribute
logger.info('%d pages found.', len(self.available_pages))
# print self.available_pages
def display_page(self, page, icons=None):
if isinstance(page, type):
self.current_page = page(self.session)
if self.current_page.reuse:
logger.info('Storing reusable page %s in cache.', self.current_page.name)
self.available_pages[self.current_page.name] = self.current_page
else:
self.current_page = page
logger.info('Reusing already instanciated page %s from cache.', self.current_page.name)
os.environ["TOUCHWIZARD_CURRENT_PAGE"] = self.current_page.name
os.environ.pop("TOUCHWIZARD_REQUESTED_PAGE", None)
if page.need_loading:
self.loading.hide()
self._build_iconbar(icons)
self.current_page.panel.set_parent(self)
self.current_page.panel.lower_bottom()
if hasattr(self.current_page.panel, 'prepare') and callable(self.current_page.panel.prepare):
self.current_page.panel.prepare()
self.current_page.panel.show()
self.previous_page_locked = False
self.last_page_name = page.name
def _build_iconbar(self, icons):
import touchwizard
self.iconbar.clear()
if icons is not None:
# cached icons
previous_icon = icons[0]
next_icon = icons[-1]
icons = icons[1:-1]
else:
# uninstanciated icons
icons = self.current_page.icons
previous_icon = self.current_page.previous
next_icon = self.current_page.next
# Icon "previous"
self.home_icon.unregister_all_events()
if previous_icon is None:
if self.history:
last_page, last_icons = self.history[-1]
previous_icon = last_page.my_icon
if previous_icon is None:
previous_icon = self.previous_icon
else:
self.home_icon.register_events()
previous_icon = self.home_icon
condition = True
if isinstance(previous_icon, touchwizard.IconRef):
if callable(previous_icon.condition):
condition = previous_icon.condition()
else:
condition = previous_icon.condition
previous_icon = previous_icon.get_icon()
if condition:
previous_icon.build()
self.iconbar.set_previous(previous_icon)
# Icon "next"
condition = True
if next_icon is not None:
if isinstance(next_icon, touchwizard.IconRef):
if callable(next_icon.condition):
condition = next_icon.condition()
else:
condition = next_icon.condition
next_icon = next_icon.get_icon()
if condition:
next_icon.build()
self.iconbar.set_next(next_icon)
# Other icons
for icon in icons:
if isinstance(icon, touchwizard.IconRef):
if callable(icon.condition):
condition = icon.condition()
else:
condition = icon.condition
if not condition:
continue
icon = icon.get_icon()
icon.build()
self.iconbar.append(icon)
def evt_next_page(self, event):
if self.last_page_name is None or self.last_page_name != event.content:
gobject.timeout_add(100, self.do_next_page, event, priority=gobject.PRIORITY_HIGH)
self.unregister_event('next_page')
def do_next_page(self, event):
now = time.time()
name = event.content
if not self.last_page_timestamp or (now - self.last_page_timestamp) > 0.5:
logger.info('Page %r requested.', name)
os.environ["TOUCHWIZARD_REQUESTED_PAGE"] = name
self.current_page.panel.hide()
self.current_page.panel.unparent()
icon_states = self.iconbar.get_icon_states()
self.history.append((self.current_page, icon_states))
new_page = self.available_pages[name]
self.iconbar.clear(keep_back=True)
if new_page.need_loading:
self.loading.show()
gobject.idle_add(self.display_page, new_page)
else:
logger.warning('Page %s requested too quickly twice in a row (less than 500ms), not displaying', name)
self.register_event('next_page')
self.last_page_timestamp = now
def evt_previous_page(self, event):
if not self.previous_page_locked:
self.previous_page_locked = True
if self.previous_page_timeout_id is not None:
gobject.source_remove(self.previous_page_timeout_id)
self.previous_page_timeout_id = gobject.timeout_add(300, self.do_previous_page, event, priority=gobject.PRIORITY_HIGH)
def do_previous_page(self, event):
name = None
if event.content:
name = event.content
for page, icons in self.history[::-1]:
try:
previous, icons = self.history.pop()
except IndexError:
# logger.error('Previous page requested but history is empty.')
self.evt_request_quit(event)
return
logger.info('Back to %r page.', previous.name)
os.environ["TOUCHWIZARD_REQUESTED_PAGE"] = previous.name
self.current_page.panel.hide()
gobject.idle_add(self.current_page.panel.unparent)
if previous.need_loading:
self.loading.show()
if not self.current_page.reuse:
gobject.idle_add(self.current_page.panel.destroy)
if name is None or page.name == name:
break
self.current_page = page
gobject.idle_add(self.display_page, previous, icons)
def evt_refresh_page(self, event):
gobject.idle_add(self.do_refresh_page, event)
self.unregister_event('refresh_page')
def do_refresh_page(self, event):
name = self.current_page.name
logger.info('Page %r refresh requested.', name)
self.current_page.panel.hide()
self.current_page.panel.unparent()
gobject.idle_add(self.current_page.panel.destroy)
new_page = self.available_pages[name]
self.iconbar.clear(keep_back=True)
if new_page.need_loading:
self.loading.show()
gobject.idle_add(self.display_page, new_page)
self.register_event('refresh_page')
def evt_clear_history(self, event):
for page, icons in self.history:
gobject.idle_add(page.panel.destroy)
self.history = list()
def evt_request_quit(self, event):
self.evt_request_quit = self.evt_request_quit_fake
logger.info('Quit requested.')
try:
prepare_quit = getattr(self.current_page, "prepare_quit", None)
if prepare_quit:
if not callable(prepare_quit):
prepare_quit = getattr(self.current_page.panel, prepare_quit, None)
if callable(prepare_quit):
logger.info('prepare_quit callback found')
prepare_quit()
except Exception, e:
logger.warning("Failed to call prepare_quit method in page %s: %s", self.current_page, e)
self.launch_event('wizard_quit')
def evt_request_quit_fake(self, event):
logger.error('Quit request rejected.')
def evt_request_session(self, event):
self.launch_event('dispatch_session', self.session)
def evt_update_session(self, event):
self.session.update(event)
self.launch_event('dispatch_session', self.session)
    def do_remove(self, actor):
        logger.debug('Panel "%s" removed.', actor)
def do_get_preferred_width(self, for_height):
import touchwizard
width = float(touchwizard.canvas_width)
return width, width
def do_get_preferred_height(self, for_width):
import touchwizard
height = float(touchwizard.canvas_height)
return height, height
def do_allocate(self, box, flags):
canvas_width = box.x2 - box.x1
canvas_height = box.y2 - box.y1
infobar_height = round(self.infobar.get_preferred_height(canvas_width)[1])
infobar_box = clutter.ActorBox()
infobar_box.x1 = 0
infobar_box.y1 = 0
infobar_box.x2 = canvas_width
infobar_box.y2 = infobar_height
self.infobar.allocate(infobar_box, flags)
iconbar_height = round(self.iconbar.get_preferred_height(canvas_width)[1])
iconbar_box = clutter.ActorBox()
iconbar_box.x1 = 0
iconbar_box.y1 = canvas_height - iconbar_height
iconbar_box.x2 = canvas_width
iconbar_box.y2 = canvas_height
self.iconbar.allocate(iconbar_box, flags)
loading_box = clutter.ActorBox()
loading_box.x1 = self.loading_padding
loading_box.y1 = infobar_height + self.loading_padding
loading_box.x2 = canvas_width - self.loading_padding
loading_box.y2 = canvas_height - iconbar_height - self.loading_padding
self.loading.allocate(loading_box, flags)
panel_box = clutter.ActorBox()
panel_box.x1 = 0
panel_box.y1 = infobar_height
panel_box.x2 = canvas_width
panel_box.y2 = canvas_height - iconbar_height
if self.background is not None:
self.background.allocate(panel_box, flags)
if self.current_page is not None:
self.current_page.panel.allocate(panel_box, flags)
clutter.Actor.do_allocate(self, box, flags)
def do_foreach(self, func, data=None):
children = [self.infobar, self.iconbar, self.loading]
if self.background:
children.append(self.background)
if self.current_page:
children.append(self.current_page.panel)
for child in children:
func(child, data)
def do_paint(self):
if self.background:
self.background.paint()
self.iconbar.paint()
if self.current_page:
self.current_page.panel.paint()
self.infobar.paint()
self.loading.paint()
def do_pick(self, color):
self.do_paint()
def quick_launch(page, width=None, height=None, overlay=None, main_loop_run_cb=None, main_loop_stop_cb=None):
if not logging._handlers:
# Install a default log handler if none set
import sys
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)s %(message)s',
stream=sys.stderr)
logger.info('Initializing touchwizard app.')
import touchwizard
stage = clutter.Stage()
    if width is None and height is None:
width = touchwizard.canvas_width
height = touchwizard.canvas_height
else:
touchwizard.canvas_width = width
touchwizard.canvas_height = height
stage.set_size(width, height)
if page is not None:
stage.set_title(page.title)
canvas = Canvas(page)
stage.add(canvas)
if overlay is not None:
logger.info('Adding overlay %s', overlay)
stage.add(overlay)
overlay.show()
stage.show()
main_loop_name = 'External'
if main_loop_run_cb is None:
main_loop_run_cb = clutter.main
main_loop_name = 'Clutter'
if main_loop_stop_cb is None:
main_loop_stop_cb = clutter.main_quit
def quit(*args):
logger.info('Quitting %s main loop by stage destroy', main_loop_name)
main_loop_stop_cb()
import sys
gobject.timeout_add_seconds(2, sys.exit)
stage.connect('destroy', quit)
class Quitter(easyevent.Listener):
def __init__(self):
easyevent.Listener.__init__(self)
self.register_event('wizard_quit')
def evt_wizard_quit(self, event):
            logger.info('Quitting %s main loop by touchwizard button', main_loop_name)
main_loop_stop_cb()
import sys
gobject.timeout_add_seconds(2, sys.exit)
Quitter()
logger.info('Running %s main loop.', main_loop_name)
main_loop_run_cb()
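# A navigation sketch (added illustration; the panel class and the page name
# 'summary' are made up). Panels request page changes by launching the events
# documented in the Canvas docstring, through the same easyevent API used in
# this module:
#
#   class MyPanel(clutter.Actor, easyevent.User):
#       def __init__(self):
#           clutter.Actor.__init__(self)
#           easyevent.User.__init__(self)
#       def on_validate(self, *args):
#           self.launch_event('next_page', 'summary')  # push current page
#       def on_cancel(self, *args):
#           self.launch_event('previous_page')  # pop history, or quit if empty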
if __name__ == '__main__':
quick_launch(None)
| gpl-3.0 | 398,874,673,450,197,570 | 36.093541 | 130 | 0.593215 | false |
AnthillTech/python-mewa-client | examples/main.py | 1 | 1354 | '''
Created on 27 lip 2014
@author: Krzysztof Langner
'''
from mewa.client import Connection
HOST_URL = "ws://mewa.cc:9001/ws"
# HOST_URL = "ws://localhost:9000/ws"
connection = Connection(HOST_URL)
def onConnected():
connection.getDevices()
connection.sendEvent("serviceA.event2", "78", True)
params = [{"type": "org.fi24.switch", "name": "switch2"}, {"type": "org.fi24.switch", "name": "switch1"}, {"type": "org.fi24.switch", "name": "switch0"}]
connection.sendMessage("device66", "serviceA.level", params)
def onEvent(timestamp, fromDevice, eventId, params):
print("received event %s from %s with params %s" % (eventId, fromDevice, params))
def onMessage(timestamp, fromDevice, msgId, params):
print(timestamp + ": received message %s from %s with params %s" % (timestamp, msgId, fromDevice, params))
def onDevicesEvent(timestamp, devices):
print(timestamp + ": Found devices:")
print(devices)
def onError(reason):
print("Error: " + reason)
def onAck():
print("ACK")
if __name__ == "__main__":
connection.onConnected = onConnected
connection.onEvent = onEvent
connection.onMessage = onMessage
connection.onDevicesEvent = onDevicesEvent
connection.onError = onError
connection.onAck = onAck
connection.connect("admin.test", "python", "l631vxqa", [""])
| bsd-2-clause | 3,212,027,003,129,120,000 | 27.208333 | 157 | 0.669129 | false |
tencia/deeptrackpy | utils.py | 1 | 10122 | import time
import sys
import os
from PIL import Image
import numpy as np
import lasagne as nn
import theano
import theano.tensor as T
import h5py
from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import ShuffledScheme, SequentialScheme
from fuel.streams import DataStream
# runs training loop, expects data in DataH5PyStreamer format
# tr_transform and te_transform must return list or tuple, to allow
# for situations where the functions require 2+ inputs
def train_with_hdf5(data, num_epochs, train_fn, test_fn,
tr_transform = lambda x:x,
te_transform = lambda x:x,
verbose=True, train_shuffle=True,
save_params_to=None,
save_last_params=False,
last_layer=None,
use_tqdm=True,
max_per_epoch=-1):
tr_stream = data.streamer(training=True, shuffled=train_shuffle)
te_stream = data.streamer(training=False, shuffled=False)
ret = []
mve_params = None
mve = None
for epoch in range(num_epochs):
start = time.time()
tr_err, tr_batches = 0,0
iterator = tr_stream.get_epoch_iterator()
if use_tqdm:
from tqdm import tqdm
iterator = tqdm(iterator, total=data.ntrain/data.batch_size)
for imb in iterator:
if imb[0].shape[0] != data.batch_size:
continue
imb = tr_transform(imb)
            if not isinstance(imb, (list, tuple)):
                imb = (imb,)
tr_err += train_fn(*imb)
tr_batches += 1
if max_per_epoch > 0 and tr_batches > max_per_epoch:
break
val_err, val_batches = 0,0
iterator = te_stream.get_epoch_iterator()
if use_tqdm:
iterator = tqdm(iterator, total=data.ntest/data.batch_size)
for imb in iterator:
if imb[0].shape[0] != data.batch_size:
continue
imb = te_transform(imb)
            if not isinstance(imb, (list, tuple)):
                imb = (imb,)
val_err += test_fn(*imb)
val_batches += 1
if max_per_epoch > 0 and val_batches > max_per_epoch:
break
val_err /= (val_batches if val_batches > 0 else 1)
tr_err /= (tr_batches if tr_batches > 0 else 1)
if save_params_to is not None:
if mve is None or val_err < mve:
mve = val_err
mve_params = [np.copy(p) for p in (nn.layers.get_all_param_values(last_layer))]
if verbose:
print('ep {}/{} - tl {:.5f} - vl {:.5f} - t {:.3f}s'.format(
epoch, num_epochs, tr_err, val_err, time.time()-start))
ret.append((tr_err, val_err))
if save_params_to is not None:
if save_last_params:
mve_params = [np.copy(p) for p in (nn.layers.get_all_param_values(last_layer))]
save_params(mve_params, save_params_to)
return ret
# goes from raw image array (usually uint8) to floatX, square=True crops to
# size of the short edge, center=True crops at center, otherwise crop is
# random
def raw_to_floatX(imb, pixel_shift=0.5, square=True, center=False, rng=None):
rng = rng if rng else np.random
w,h = imb.shape[2], imb.shape[3] # image size
x, y = 0,0 # offsets
if square:
if w > h:
if center:
x = (w-h)/2
else:
x = rng.randint(w-h)
w=h
elif h > w:
if center:
y = (h-w)/2
else:
y = rng.randint(h-w)
h=w
return nn.utils.floatX(imb)[:,:,x:x+w,y:y+h]/ 255. - pixel_shift
# creates and hdf5 file from a dataset given a split in the form {'train':(0,n)}, etc
# appears to save in unpredictable order, so order must be verified after creation
def save_hd5py(dataset_dict, destfile, indices_dict_or_numfolds):
indices_dict = indices_dict_or_numfolds
if isinstance(indices_dict, int):
folds = indices_dict
n = max(len(it) for it in dataset_dict.values())
fold_n = n // folds
indices_dict = dict(('fold_{}'.format(i), (i*fold_n, (i+1)*fold_n)) \
for i in range(folds))
print indices_dict
f = h5py.File(destfile, mode='w')
for name, dataset in dataset_dict.iteritems():
dat = f.create_dataset(name, dataset.shape, dtype=str(dataset.dtype))
dat[...] = dataset
split_dict = dict((k, dict((name, v) for name in dataset_dict.iterkeys()))
for k,v in indices_dict.iteritems())
f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
f.flush()
f.close()
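# A minimal call sketch (added illustration; the array shape, dataset name
# and file names are assumptions):
#
#   images = np.zeros((100, 3, 64, 64), dtype=np.uint8)
#   save_hd5py({'images': images}, 'data.hdf5',
#              {'train': (0, 90), 'test': (90, 100)})
#   # or let the function cut 10 equal folds itself:
#   save_hd5py({'images': images}, 'data_folds.hdf5', 10)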
# for organizing an hdf5 file for streaming
class DataH5PyStreamer:
# folds = None if dataset is separated into 'train', 'test'
# folds = (10, 3) for ex if there are 10 total folds and #3 (zero_indexed) is validation set
# folds = (10, -1) if we want to train on every fold (sets val = fold 0)
def __init__(self, h5filename, ntrain=None, ntest=None, batch_size=1, folds=None):
if folds is None:
te_sets = ('test',)
tr_sets = ('train',)
elif folds[1] == -1:
te_sets = ('fold_0',)
tr_sets = tuple(['fold_{}'.format(i) for i in range(folds[0])])
else:
te_sets = ('fold_{}'.format(folds[1]),)
tr_sets = tuple(['fold_{}'.format(i) for i in range(folds[0]) if i != folds[1]])
self.batch_size = batch_size
self.tr_data = H5PYDataset(h5filename, which_sets=tr_sets)
self.te_data = H5PYDataset(h5filename, which_sets=te_sets)
self.ntrain = ntrain if ntrain is not None else self.tr_data.num_examples
self.ntest = ntest if ntest is not None else self.te_data.num_examples
def dataset(self, training=True):
return self.tr_data if training else self.te_data
def streamer(self, training=True, shuffled=False):
n = self.ntrain if training else self.ntest
sch = ShuffledScheme(examples=n, batch_size=self.batch_size) if shuffled else \
SequentialScheme(examples=n, batch_size=self.batch_size)
return DataStream(self.tr_data if training else self.te_data, \
iteration_scheme = sch)
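# Wiring sketch (added illustration): stream the folds file from the
# save_hd5py example into train_with_hdf5. Here train_fn/test_fn are assumed
# to be compiled Theano functions taking one batch tensor each:
#
#   data = DataH5PyStreamer('data_folds.hdf5', batch_size=32, folds=(10, 0))
#   history = train_with_hdf5(data, num_epochs=20,
#                             train_fn=train_fn, test_fn=test_fn,
#                             tr_transform=lambda b: raw_to_floatX(b[0]),
#                             te_transform=lambda b: raw_to_floatX(b[0], center=True))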
# helper function for building vae's
def log_likelihood(tgt, mu, ls):
return T.sum(-(np.float32(0.5 * np.log(2 * np.pi)) + ls)
- 0.5 * T.sqr(tgt - mu) / T.exp(2 * ls))
# from the array used for testing, to the kind used in Image.fromarray(..)
def get_picture_array(X, index, shift=0.5):
ch, w, h = X.shape[1], X.shape[2], X.shape[3]
ret = ((X[index]+shift)*255.).reshape(ch,w,h).transpose(2,1,0).clip(0,255).astype(np.uint8)
if ch == 1:
ret=ret.reshape(h,w)
return ret
# returns an Image with X on top, Xpr on bottom, index as requeseted or random if -1
def get_image_pair(X, Xpr,index=-1,shift=0.5):
mode = 'RGB' if X.shape[1] == 3 else 'L'
index = np.random.randint(X.shape[0]) if index == -1 else index
original_image = Image.fromarray(get_picture_array(X, index,shift=shift),mode=mode)
new_size = (original_image.size[0], original_image.size[1]*2)
new_im = Image.new(mode, new_size)
new_im.paste(original_image, (0,0))
rec_image = Image.fromarray(get_picture_array(Xpr, index,shift=shift),mode=mode)
new_im.paste(rec_image, (0,original_image.size[1]))
return new_im
# gets array (in format used for storage) from an Image
def arr_from_img_storage(im):
w,h=im.size
arr=np.asarray(im.getdata(), dtype=np.uint8)
c = np.product(arr.size) / (w*h)
return arr.reshape(h,w,c).transpose(2,1,0)
# gets array (in format used for testing) from an Image
def arr_from_img(im,shift=0.5):
w,h=im.size
arr=np.asarray(im.getdata(), dtype=theano.config.floatX)
c = np.product(arr.size) / (w*h)
return arr.reshape((h,w,c)).transpose(2,1,0) / 255. - shift
# loads params in npz (if filename is a .npz) or pickle if not
def load_params(model, fn):
if 'npz' in fn:
with np.load(fn) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
nn.layers.set_all_param_values(model, param_values)
else:
with open(fn, 'r') as re:
import pickle
nn.layers.set_all_param_values(model, pickle.load(re))
# saves params in npz (if filename is a .npz) or pickle if not
def save_params(model, fn):
    dirname = os.path.dirname(fn)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    # accept either a lasagne layer or an already-extracted list of values
    if isinstance(model, list):
        param_vals = model
    else:
        param_vals = nn.layers.get_all_param_values(model)
    if 'npz' in fn:
        np.savez(fn, *param_vals)
    else:
        with open(fn, 'w') as wr:
            import pickle
            pickle.dump(param_vals, wr)
# reset shared variable values of accumulators to recover from NaN
def reset_accs(updates, params):
for key in updates:
if not key in params:
v = key.get_value(borrow=True)
key.set_value(np.zeros(v.shape,dtype=v.dtype))
# build loss as in (Kingma, Welling 2014) Autoencoding Variational Bayes
def build_vae_loss(input_var, l_z_mu, l_z_ls, l_x_mu_list, l_x_ls_list, l_x_list, l_x,
deterministic, binary, L):
layer_outputs = nn.layers.get_output([l_z_mu, l_z_ls] + l_x_mu_list + l_x_ls_list
+ l_x_list + [l_x], deterministic=deterministic)
z_mu = layer_outputs[0]
z_ls = layer_outputs[1]
x_mu = [] if binary else layer_outputs[2:2+L]
x_ls = [] if binary else layer_outputs[2+L:2+2*L]
x_list = layer_outputs[2:2+L] if binary else layer_outputs[2+2*L:2+3*L]
x = layer_outputs[-1]
kl_div = 0.5 * T.sum(1 + 2*z_ls - T.sqr(z_mu) - T.exp(2 * z_ls))
if binary:
logpxz = sum(nn.objectives.binary_crossentropy(x, input_var).sum()
for x in x_list) * (-1./L)
prediction = x_list[0] if deterministic else x
else:
logpxz = sum(log_likelihood(input_var.flatten(2), mu, ls)
for mu, ls in zip(x_mu, x_ls))/L
prediction = x_mu[0] if deterministic else T.sum(x_mu, axis=0)/L
loss = -1 * (logpxz + kl_div)
return loss, prediction
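# Note on the formula above: kl_div is the negative closed-form KL divergence
# between the diagonal Gaussian q(z|x) = N(z_mu, sigma**2) and the prior
# N(0, I), with log(sigma**2) = 2 * z_ls:
#   -KL = 0.5 * sum(1 + log(sigma**2) - z_mu**2 - sigma**2)
# which is why the code uses 2*z_ls and T.exp(2*z_ls); the returned loss
# -(logpxz + kl_div) is thus the negative evidence lower bound (ELBO).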
| mit | 4,537,278,992,625,200,000 | 40.314286 | 96 | 0.598992 | false |
rickerc/cinder_audit | cinder/tests/db/test_finish_migration.py | 1 | 2226 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for finish_volume_migration."""
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests import utils as testutils
class FinishVolumeMigrationTestCase(test.TestCase):
"""Test cases for finish_volume_migration."""
def setUp(self):
super(FinishVolumeMigrationTestCase, self).setUp()
def tearDown(self):
super(FinishVolumeMigrationTestCase, self).tearDown()
def test_finish_volume_migration(self):
ctxt = context.RequestContext(user_id='user_id',
project_id='project_id',
is_admin=True)
src_volume = testutils.create_volume(ctxt, host='src',
migration_status='migrating',
status='available')
dest_volume = testutils.create_volume(ctxt, host='dest',
migration_status='target:fake',
status='available')
db.finish_volume_migration(ctxt, src_volume['id'],
dest_volume['id'])
src_volume = db.volume_get(ctxt, src_volume['id'])
expected_name = 'volume-%s' % dest_volume['id']
self.assertEqual(src_volume['_name_id'], dest_volume['id'])
self.assertEqual(src_volume['name'], expected_name)
self.assertEqual(src_volume['host'], 'dest')
self.assertEqual(src_volume['status'], 'available')
self.assertEqual(src_volume['migration_status'], None)
| apache-2.0 | 1,801,730,165,042,376,000 | 41 | 78 | 0.609164 | false |
miguelut/utmbu | mbu/api/scout.py | 1 | 1603 | from django.contrib.admin.views.decorators import staff_member_required
from django.http import JsonResponse
from django.contrib.auth.decorators import permission_required
from rest_framework.decorators import api_view
from mbu.models import Scout, ScoutCourseInstance, ScoutCourseInstanceSerializer, RegistrationStatus
__author__ = 'michael'
@permission_required('mbu.edit_scout_schedule', raise_exception=True)
@api_view(http_method_names=['GET', 'POST'])
def scout_enrollments(request, scout_id):
user = request.user
scout = Scout.objects.get(user=user)
scout_check = Scout.objects.get(pk=scout_id)
assert(scout == scout_check)
enrollments = []
if request.method == 'POST' and _reg_is_open():
for d in request.data:
enrollments.append(ScoutCourseInstance.objects.get(pk=d['id']))
scout.enrollments = enrollments
scout.save()
return JsonResponse({'data': request.data})
else:
for enrollment in scout.enrollments.all():
serializer = ScoutCourseInstanceSerializer(enrollment)
enrollments.append(serializer.data)
result = {'enrollments': enrollments}
return JsonResponse(result)
@staff_member_required
@api_view(http_method_names=['POST'])
def check_in_scouts(request, scout_id):
scout = Scout.objects.get(pk=scout_id)
scout.checked_in = True
scout.save()
result = {"scout": scout_id}
return JsonResponse(result)
def _reg_is_open():
status = RegistrationStatus.objects.first()
if status:
status = status.status
return status == 'OPEN'
| mit | 6,069,730,055,826,966,000 | 32.395833 | 100 | 0.69869 | false |
thbuerg/Heidelberg_2017 | DeeProtein/validate.py | 1 | 1040 | """ Invoke the Model in validation mode and perform a run over the valid set."""
import argparse
import json
from DeeProtein import DeeProtein
import helpers
def main():
with open(FLAGS.config_json) as config_fobj:
config_dict = json.load(config_fobj)
# set the gpu context
    if str(FLAGS.gpu).lower() == 'false':
if config_dict["gpu"] == 'True':
config_dict["gpu"] = "False"
optionhandler = helpers.OptionHandler(config_dict)
model = DeeProtein(optionhandler)
model.validate()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--config_json',
type=str,
required=True,
help='Path to the config.JSON')
parser.add_argument(
'--gpu',
type=str,
default=True,
help='Whether to train in gpu context or not '
'(optional). Defaults to True.')
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print('Error, unrecognized flags:', unparsed)
exit(-1)
main()
| mit | 2,103,418,648,317,539,800 | 26.368421 | 80 | 0.610577 | false |