# trade_remedies_caseworker/cases/views.py (uktrade/trade-remedies-caseworker)
import itertools
import json
import logging
import re
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_chunk_upload_handlers.clam_av import VirusFoundInFileException
from core.base import GroupRequiredMixin
from core.utils import (
deep_index_items_by,
deep_index_items_by_exists,
get,
key_by,
index_users_by_group,
compact_list,
submission_contact,
public_login_url,
parse_notify_template,
parse_api_datetime,
pluck,
to_json,
from_json,
deep_update,
internal_redirect,
is_date,
notify_footer,
notify_contact_email,
)
from django_countries import countries
from django.conf import settings
from cases.submissions import SUBMISSION_TYPE_HELPERS, get_submission_deadline
from cases.utils import decorate_orgs
from core.constants import (
ALL_REGION_ALLOWED_TYPE_IDS,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
SECURITY_GROUPS_TRA,
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUPS_TRA_ADMINS,
SECURITY_GROUP_ORGANISATION_OWNER,
SUBMISSION_TYPE_QUESTIONNAIRE,
SUBMISSION_TYPE_APPLICATION,
SUBMISSION_NOTICE_TYPE_INVITE,
SUBMISSION_NOTICE_TYPE_DEFICIENCY,
SUBMISSION_TYPE_THIRD_PARTY,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_APPLICANT,
CASE_ROLE_PREPARING,
DIRECTION_TRA_TO_PUBLIC,
)
from trade_remedies_client.mixins import TradeRemediesAPIClientMixin
from trade_remedies_client.exceptions import APIException
logger = logging.getLogger(__name__)
org_fields = json.dumps(
{
"Organisation": {
"id": 0,
"has_non_draft_subs": 0,
"gov_body": 0,
"has_roi": 0,
}
}
)
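# `org_fields` (like the other json.dumps'd specs below) appears to be a sparse
# field-selection map passed to the API client via its `fields` argument: a key with
# value 0 requests that field. A minimal sketch of building such a spec, assuming that
# convention (the helper below is illustrative only and not part of this module):
#
#   def fields_spec(model, *names):
#       return json.dumps({model: {name: 0 for name in names}})
#
#   # fields_spec("Organisation", "id", "gov_body")
#   # -> '{"Organisation": {"id": 0, "gov_body": 0}}'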
class CasesView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
template_name = "cases/cases.html"
def get(self, request, *args, **kwargs):
list_mode = request.GET.get("tab", "my")
panel_layout = self.client(self.request.user).get_system_boolean("PRE_RELEASE_PANELS")
fields = {
"Case": {
"id": 0,
"user_case": 0,
"name": 0,
"reference": 0,
"created_at": 0,
"type": {
"name": 0,
"acronym": 0,
"colour": 0,
"reference": 0,
"applicant": {"organisation": 0, "name": 0, "id": 0},
},
"applicant": {
"organisation": {
"name": 0,
"id": 0,
}
},
"stage": {"name": 0},
"case_status": {"next_action": 0, "next_notice_due": 0},
}
}
if list_mode == "archived":
fields = deep_update(
fields,
{
"Case": {
"workflow_state": {
"MEASURE_EXPIRY": 0,
"DETERMINATION_ACTIVE_DATE": 0,
}
}
},
)
cases = self.client(request.user).get_cases(
archived=list_mode == "archived",
all_cases=list_mode == "all",
new_cases=list_mode == "new",
fields=json.dumps(fields),
)
tabs = {
"value": list_mode,
"tabList": [
{"label": "Your cases", "value": "my", "sr_text": "Show your cases"},
{"label": "Open cases", "value": "all", "sr_text": "Show open cases"},
{
"label": "New applications",
"value": "new",
"sr_text": "Show new applications",
},
{
"label": "Archived",
"value": "archived",
"sr_text": "Show archived cases",
},
],
}
template_name = self.template_name if panel_layout else "cases/cases_old.html"
body_class = "full-width kill-footer" if panel_layout else "full-width"
return render(
request,
template_name,
{
"body_classes": body_class,
"cases": cases,
"tabs": tabs,
},
)
class CaseBaseView(
LoginRequiredMixin,
GroupRequiredMixin,
PermissionRequiredMixin,
TemplateView,
TradeRemediesAPIClientMixin,
):
permission_required = []
groups_required = SECURITY_GROUPS_TRA
supress_nav_section = False
def dispatch(self, *args, **kwargs):
if self.request.user.is_authenticated:
self._client = self.client(self.request.user)
self.case_id = kwargs.get("case_id")
return super().dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
self.kwargs = kwargs
self.organisation_id = kwargs.get("organisation_id")
self.request = request
self.user_token = request.user.token
case_fields = json.dumps(
{
"Case": {
"id": 0,
"name": 0,
"initiated_at": 0,
"decision_to_initiate,name": 0,
"reference": 0,
"sequence": 0,
"type": 0,
"archived_at": 0,
"archive_reason": {"name": 0},
"submission_count": 0,
"participant_count": 0,
"stage": {"name": 0},
"case_status": 0,
"organisation": {"id": 0, "name": 0},
}
}
)
self.case = self._client.get_case(self.case_id, fields=case_fields)
self.document_count = self._client.get_case_document_count(self.case_id)
self.start = int(request.GET.get("start", 0))
self.limit = int(request.GET.get("limit", 20))
content_id = self.kwargs.get("nav_section_id")
context = {
"case": self.case,
"case_id": self.case_id,
"document_count": self.document_count,
"content": self._client.get_case_content(self.case_id, content_id=content_id),
"tree": self._client.get_nav_section(self.case_id, selected_content=content_id),
"body_classes": "full-width",
"panel_layout": self._client.get_system_boolean("PRE_RELEASE_PANELS"),
"organisation_id": self.organisation_id,
"submission_group_name": "submission",
"alert": request.GET.get("alert"),
"user": request.user,
}
deep_update(context, self.add_page_data())
if context.get("redirect"):
return redirect(context.get("redirect"))
return render(request, self.template_name, context)
def add_page_data(self):
return {}
def get_documents(self, submission, all_versions=None):
result = self._client.get_submission_documents(
self.case_id, submission.get("id"), all_versions=all_versions
)
all_documents = result.get("documents", [])
deficiency_docs = result.get("deficiency_documents", [])
if all_versions:
# If this submission has an immediate ancestor, get the docs from that to mark status
docs_by_submission = deep_index_items_by(all_documents, "version")
this_version = int(submission.get("version"))
this_sub = docs_by_submission.get(str(this_version))
sub_docs = this_sub[0].get("documents")
# we have a list of the submissions that make up a family - id, version and documents.
if this_version > 1:
parent_sub = docs_by_submission.get(str(this_version - 1))
parent_docs = parent_sub and parent_sub[0].get("documents")
parent_doc_idx = {}
for parent_doc in parent_docs:
doc_type = get(parent_doc, "type/name") + "|" + get(parent_doc, "name")
parent_doc_idx[doc_type] = parent_doc
for document in sub_docs:
document["parent"] = parent_doc_idx.get(
get(document, "type/name") + "|" + get(document, "name")
)
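                # Versioned documents are matched to their counterpart in the previous
                # version by the composite key "type-name|file-name", e.g. a document
                # {"type": {"name": "Questionnaire"}, "name": "response.docx"} keys as
                # "Questionnaire|response.docx", so document["parent"] is the same logical
                # file from version N-1 (or None if it is new in this version).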
else:
sub_docs = all_documents
submission_documents = deep_index_items_by(sub_docs, "type/key")
document_conf_index = deep_index_items_by(
submission_documents.get("respondent", []), "confidential"
)
confidential = document_conf_index.get("true", [])
confidential.sort(key=lambda cf: cf.get("name"))
non_conf = document_conf_index.get("", [])
doc_index = key_by(confidential, "id")
non_conf.sort(key=lambda nc: get(get(doc_index, str(nc.get("parent_id"))), "name"))
return {
"caseworker": submission_documents.get("caseworker", []),
"respondent": submission_documents.get("respondent", []),
"loa": submission_documents.get("loa", []),
"deficiency": deficiency_docs,
"confidential": confidential,
"nonconfidential": non_conf,
}
def has_permission(self):
"""
Override this method to customize the way permissions are checked.
"""
perms = self.get_permission_required()
return not perms or self.request.user.has_perms(perms)
class CaseAdminView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/admin.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
case_users = self._client.get_case_users(self.case["id"])
context = {
"case_enums": case_enums,
"case": self.case,
"users": case_users,
"participants": self._client.get_case_participants(self.case_id),
}
return context
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
case = self._client.get_case(case_id)
update_spec = {}
if action == "initiation_flag_toggle":
if case["initiated_at"]:
update_spec["initiated_at"] = ""
else:
update_spec["initiated_at"] = timezone.now()
elif action == "set_case_stage":
update_spec["ignore_flow"] = request.POST.get("ignore_flow") or "false"
update_spec["stage_id"] = request.POST.get("stage_id")
elif action == "set_name":
update_spec["name"] = request.POST.get("name")
elif action == "set_case_type":
update_spec["stage_id"] = ""
update_spec["type_id"] = request.POST.get("type_id")
elif action == "toggle_archived":
if case.get("archived_at"):
update_spec["archived_at"] = ""
else:
update_spec["archived_at"] = timezone.now()
update_spec["archive_reason_id"] = request.POST.get("archive_reason_id")
elif action == "reset_initiation_decision":
update_spec["reset_initiation_decision"] = True
if update_spec:
response = self._client.update_case(case_id, update_spec)
return redirect(f"/case/{case_id}/admin/")
class CaseMilestoneDatesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/milestone_dates.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums(self.case_id)
case_milestones = self._client.case_milestones(self.case["id"])
existing_keys = [cm["key"] for cm in case_milestones]
context = {
"milestone_types": case_enums.get("milestone_types"),
"available_review_types": case_enums.get("available_review_types"),
"milestones": case_milestones,
"existing_milestones": existing_keys,
}
return context
def post(self, request, case_id, milestone_key=None):
milestone_key = milestone_key or request.POST.get("milestone_key")
date = request.POST.get("date")
response = self._client.set_case_milestone(case_id, milestone_key, date)
return redirect(f"/case/{case_id}/milestones/")
class CaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = []
template_name = "cases/case.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": {
"organisation": {
"id": 0,
"name": 0,
"primary_contact": {
"name": 0,
"email": 0,
"phone": 0,
"address": 0,
"post_code": 0,
"country": {"name": 0},
"has_user": 0,
"user": {"id": 0, "organisation": {"id": 0, "name": 0}},
},
}
},
"parent": {"id": 0, "name": 0, "reference": 0, "type": 0},
"workflow_state": {"LINKED_CASE_CONFIRM": 0},
"initiated_sequence": 0,
}
}
)
def add_page_data(self):
team = self._client.get_case_team_members(self.case_id)
team_by_group = index_users_by_group([member.get("user") for member in team])
group_order = [
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
]
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
return {
"audit": self._client.get_audit(
case_id=self.case_id, start=0, limit=20, milestone=True
),
"case_page": True,
"case": case_extras,
"team_groups": team_by_group,
"group_order": group_order,
"public_base_url": settings.PUBLIC_BASE_URL,
}
def post(self, request, case_id, *args, **kwargs):
self._client.set_case_data(case_id, {"name": request.POST.get("name")})
redirect = request.POST.get("redirect")
if redirect:
return internal_redirect(request.POST.get("redirect"), "/")
else:
return HttpResponse(json.dumps({"result": "ok"}), content_type="application/json")
class PartiesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/parties.html"
def add_page_data(self):
parties = []
roles = self._client.get_case_roles()
all_case_invites = self._client.get_contact_case_invitations(self.case_id)
all_participants = self._client.get_case_participants(self.case_id, fields=org_fields)
case_invites = deep_index_items_by(all_case_invites, "contact/id")
invited = set([])
accepted = set([])
for invite in all_case_invites:
org_id = invite.get("organisation", {}).get("id")
if invite.get("accepted_at"):
# note: accepted and invited are mutually exclusive
accepted.add(org_id)
else:
invited.add(org_id)
for role in roles:
_base = all_participants[role["key"]]
_base["key"] = role["key"]
_base["name"] = role["plural"]
if role["allow_cw_create"]:
_base["add_link"] = f"Add {role['name']}"
parties.append(_base)
return {
"party_types": parties,
"invites": case_invites,
"accepted_orgs": list(accepted),
"invited_orgs": list(invited),
"pre_release_invitations": self._client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"alert": self.request.GET.get("alert"),
}
class CaseTeamView(CaseBaseView):
permission_required = "can_assign_team"
template_name = "cases/team.html"
def add_page_data(self):
all_users = self._client.get_all_users(group_name="caseworker")
users_by_group = index_users_by_group(all_users)
team = self._client.get_case_team_members(self.case_id)
return {
"team": [member.get("user", {}).get("id") for member in team],
"tra_users": all_users,
"grouped_users": users_by_group,
"groups": SECURITY_GROUPS_TRA[1:],
"inactive_user_count": sum(user["active"] is False for user in all_users),
"singleton_groups": [
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_ADMINISTRATOR,
],
}
def post(self, request, case_id, *args, **kwargs):
user_ids = request.POST.getlist("user_id")
response = self._client.assign_case_team(case_id, user_ids)
return redirect(f"/case/{case_id}/")
class SubmissionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submissions.html"
show_global = False
sub_page = ""
def get_tab(self, role, party):
if not role.get("allow_cw_create"):
return role["key"]
return "sampled" if party.get("sampled") else "not_sampled"
def consolidate_submissions(
self, case, participants, submissions_by_party, counts, selected_tab
):
roles = []
single_role_return = None # for awaiting and rejected - only return that specific role
for role in self._client.get_case_roles():
role["participants"] = []
for party in participants.get(role["key"], {}).get("parties", []):
tab = self.get_tab(role, party)
submissions = submissions_by_party.get(party["id"], [])
submissions += submissions_by_party.get("", [])
if submissions:
counts[tab] = counts.get(tab, 0) + len(submissions)
if tab == selected_tab:
party["submissions"] = submissions
role["participants"].append(party)
if not party.get("gov_body"):
role["customer_parties"] = True
sort_key = (
"submissions/0/received_at"
if selected_tab == CASE_ROLE_AWAITING_APPROVAL
else "name"
)
role["participants"].sort(key=lambda pt: get(pt, sort_key) or "")
if role.get("key") == selected_tab:
single_role_return = role
if role.get("allow_cw_create"):
roles.append(role)
return [single_role_return] if single_role_return else roles
def get_name(self, participant):
return participant.get("name")
def flatten_participants(self, source):
participants = []
for role in source:
rec = source[role]
participants = participants + rec["parties"]
participants.sort(key=self.get_name)
return participants
def divide_submissions(self, submissions):
incoming = []
outgoing = []
draft = []
for submission in submissions:
if get(submission, "status/sent"):
outgoing.append(submission)
elif get(submission, "status/default") and get(submission, "type/direction") != 1:
draft.append(submission)
else:
if (
not get(submission, "status/draft")
or get(submission, "type/key") == "application"
): # customer draft should not be seen by investigators
incoming.append(submission)
return {
"incoming": sorted(incoming, key=lambda su: su.get("received_at") or "", reverse=True),
"outgoing": sorted(outgoing, key=lambda su: su.get("sent_at") or "", reverse=True),
"draft": sorted(draft, key=lambda su: su.get("created_at") or "", reverse=True),
}
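    # Example of the grouping produced by divide_submissions (an assumed sketch based on
    # the status flags checked above):
    #   divide_submissions([
    #       {"status": {"sent": True}, "sent_at": "2021-06-01T10:00:00"},
    #       {"status": {"default": True}, "type": {"direction": 2}, "created_at": "..."},
    #   ])
    #   -> {"incoming": [...], "outgoing": [...], "draft": [...]}, each sorted newest first.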
def add_page_data(self):
tab = self.request.GET.get("tab", "sampled").lower()
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
submissions_by_type = deep_index_items_by(all_submissions, "type/name")
# Get submissions that have just been created by customer
# or are still in draft after creation
draft_submissions = deep_index_items_by(all_submissions, "status/default").get("true") or []
# Remove any that are back with the customer following deficiency
draft_first_version_submissions = (
deep_index_items_by(draft_submissions, "version").get("1") or []
)
# Exclude these drafts from our list
non_draft_submissions = [
sub for sub in all_submissions if sub not in draft_first_version_submissions
]
# draft applications are included to allow a heads up view
# to the caseworker before it's submitted
if submissions_by_type.get("application", [{}])[0].get("status", {}).get("default") is True:
submissions_by_type["application"][0]["tra_editable"] = True
non_draft_submissions += submissions_by_type["application"]
submissions_by_party = deep_index_items_by(non_draft_submissions, "organisation/id")
case_enums = self._client.get_all_case_enums()
invites = self._client.get_case_invite_submissions(self.case_id)
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
flat_participants = self.flatten_participants(participants)
counts = {}
if self.sub_page:
self.template_name = f"cases/submissions_{self.sub_page}.html"
tab = self.request.GET.get("tab", "incoming").lower()
elif self._client.get_system_boolean("PRE_NEW_SUBMISSION_PAGE"):
self.template_name = "cases/submissions_new.html"
context = {
"raw_participants": participants,
"submissions": submissions_by_type,
"participants": flat_participants,
"counts": counts,
"all_roles": self.consolidate_submissions(
self.case,
participants=participants,
submissions_by_party=submissions_by_party,
counts=counts,
selected_tab=tab,
),
"submission_types": case_enums["case_worker_allowed_submission_types"],
"invites": invites,
"tab": tab,
"submission_groups": self.divide_submissions(all_submissions),
"all_submissions": all_submissions,
}
# TODO: Temp handling of application vs ex_officio ones
if not submissions_by_type.get("application") and submissions_by_type.get(
"ex officio application"
):
context["submissions"]["application"] = submissions_by_type["ex officio application"]
return context
class SubmissionView(CaseBaseView):
"""
View and modify submissions
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": 0,
"product": 0,
"sources": 0,
}
}
)
def add_page_data_old(self):
alert = self.request.GET.get("alert") # indicates the submission has just been created
documents = []
submission = {}
submission_id = self.kwargs.get("submission_id")
third_party_invite = False
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
submission_type = submission["type"]
third_party_invite = submission_type["name"] == "Invite 3rd party"
self.organisation_id = submission["organisation"]["id"]
created_by_id = get(submission, "created_by/id")
if created_by_id:
full_user = self._client.get_user(created_by_id)
submission["created_by"]["organisation"] = get(full_user, "organisations/0")
submission_context = {}
if SUBMISSION_TYPE_HELPERS.get(submission_type["key"]):
submission_context = SUBMISSION_TYPE_HELPERS[submission_type["key"]](
submission, self.request.user
).get_context()
self.template_name = "cases/submission.html"
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
context = {
"submission": submission,
"template_name": f"{submission_type['key']}",
"documents": self.get_documents(submission=submission, all_versions=True),
"alert": alert,
"case": case_extras,
"third_party_invite": third_party_invite,
**submission_context,
}
if (
not submission
or not submission.get("status")
or submission.get("status", {}).get("default")
):
context["mode"] = "form"
else:
context["mode"] = "view"
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
context["organisation"] = self.organisation
context["organisation_id"] = str(self.organisation["id"])
return context
def get_all_participants(self, case_participants):
all_parties = []
roles = {}
for type_name, role_parties in case_participants.items():
parties = role_parties.get("parties")
if parties:
all_parties.extend(parties)
role = parties[0].get("role")
roles[role.get("key")] = role
return deep_index_items_by(all_parties, "sampled"), roles
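    # Note: deep_index_items_by(..., "sampled") appears to key parties by the string form
    # of the flag (cf. the "true"/"" keys used elsewhere in this module), so `parties` is
    # roughly {"true": [sampled parties], "": [unsampled parties]} and `roles` maps each
    # role key to its full role record.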
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
submission = {}
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
parties, roles = self.get_all_participants(participants)
alert = self.request.GET.get("alert") # indicates the submission has just been created
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
submission_id = self.kwargs.get("submission_id")
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
_default = submission.get("status", {}).get("default")
if not _default or (
_default and submission["type"]["id"] == SUBMISSION_TYPE_APPLICATION
):
page_data = self.add_page_data_old()
return_data.update(page_data)
return return_data
self.organisation_id = submission["organisation"]["id"]
return_data.update(
{
"roles": roles,
"submission": submission,
"status": (submission.get("status") or {}).get("id"),
"alert": alert,
"documents": self.get_documents(submission=submission),
"role": submission.get("organisation_case_role") or {"name": "Public file"},
"participants": participants,
"all_participants": parties,
"json_data": json_data,
"selected_submission_type": submission.get("type", {}).get("key")
or "questionnaire",
}
)
else:
role = self.request.GET.get("for")
sampled = self.request.GET.get("sampled") == "sampled"
full_role = (
self._client.get_case_role(role)
if (role and role != "public")
else {"name": "Public file"}
)
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
# Get all draft submissions of this type
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
draft_submissions = (
deep_index_items_by(all_submissions, "status/default").get("true") or []
)
# draft_submissions_this_role = deep_index_items_by(draft_submissions,
# 'organisation_case_role/key').get('' if role == 'public' else role)
draft_submissions_this_role = deep_index_items_by(
draft_submissions, "organisation_id"
).get("")
return_data.update(
{
"submission": submission,
"submission_type_id": self.kwargs.get("submission_type_id")
or self.request.GET.get("submission_type_id"),
"submission_statuses": case_enums["submission_statuses"],
"statuses_by_type": case_enums["statuses_by_type"],
"selected_submission_type": self.request.GET.get("submission_type")
or "questionnaire",
"organisation_id": self.kwargs.get("organisation_id"),
"draft_submissions": draft_submissions_this_role,
"role": full_role,
}
)
if role == "public":
return_data.update(
{
"submission_types": case_enums["public_submission_types"],
"public": True,
"organisation_id": self.kwargs.get("organisation_id"),
}
)
else:
role_participants = participants.get(role, {}).get("parties", [])
filtered = list(
filter(
lambda party: party
if party.get("sampled") == sampled and not party.get("gov_body")
else None,
role_participants,
)
)
return_data.update(
{
"submission_types": case_enums["case_worker_allowed_submission_types"],
"participants": participants,
"roles": roles,
}
)
self.organisation_id = self.organisation_id or self.request.GET.get("organisation_id")
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
return_data["organisation"] = self.organisation
return_data["organisation_id"] = str(self.organisation["id"])
# add errors from the url
errors = self.request.GET.get("errors")
if errors:
try:
return_data["errors"] = json.loads(errors)
            except Exception:
                pass  # ignore a malformed errors payload passed in the querystring
# Set up template to use
template_name = (
submission["type"]["key"]
if submission
else (role if role == "public" else "questionnaire")
)
return_data.update({"template_name": template_name, "mode": "form"})
return return_data
def post( # noqa: C901
self,
request,
case_id,
submission_id=None,
organisation_id=None,
*args,
**kwargs,
):
"""
Update an existing submission
"""
return_data = {"submission_id": str(submission_id)}
contact_id = request.POST.get("contact_id")
btn_value = request.POST.get("btn-value")
review = request.POST.get("review")
name = request.POST.get("name")
due_at = request.POST.get("due_at")
response_window_yn = request.POST.get("response_window_yn")
time_window = request.POST.get("time_window")
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
file_details_by_id = deep_index_items_by(meta, "file/id")
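        # Each "meta" block is a JSON string describing one upload; an assumed example shape:
        #   {"name": "response.docx", "confidential": true,
        #    "submission_document_type": "respondent", "file": {"id": "<uuid>"}}
        # so file_details is keyed by file name (looked up lower-cased below) and
        # file_details_by_id by the attached case-file id.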
organisation_id = organisation_id or request.POST.get("organisation_id")
send_to = request.POST.get("send_to")
submission = self._client.get_submission(case_id, submission_id)
organisation_id = submission.get("organisation", {}).get("id")
status_id = request.POST.get("submission_status_id")
if submission_id and btn_value == "discard":
delete_submission_response = self._client.delete_submission(
case_id=case_id, submission_id=submission_id
)
return HttpResponse(
json.dumps({"redirect_url": f"/case/{case_id}/submissions/"}),
content_type="application/json",
)
# check if the update is for name or notify contact
if (
submission["name"] != name
or not submission["contact"]
or submission.get("contact", {}).get("id") != contact_id
):
if name is not None and not name:
return_data.update({"errors": '{"name":"You must enter a name"}'})
if due_at and not is_date(due_at):
return_data.update({"errors": '{"due_date":"Invalid date"}'})
if not return_data.get("errors"):
self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
name=name,
contact_id=contact_id, # TODO:not used
due_at=due_at,
time_window=time_window,
description=request.POST.get("description"),
url=request.POST.get("url"),
)
# API `update_submission` returns an incomplete submission
# (no documents) so we re-fetch the submission here.
submission = self._client.get_submission(case_id, submission_id)
return_data.update({"submission": submission})
if submission.get("id"):
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={
"name": "Questionnaire",
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/submission/{submission_id}/?"
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
redirect_url += f"upload_error={e}"
logger.warning(f"File upload aborted: {e}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
if case_files := request.POST.getlist("case_files"):
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={"submission_document_type": details.get("submission_document_type")},
document_id=case_file_id,
)
submission_group_name = get(submission, "type/key")
if btn_value in ["send", "publish", "withdraw"]:
if btn_value in ["publish", "withdraw"]:
result = self._client.set_submission_state(
case_id,
submission_id,
"sent"
if (btn_value == "send" or submission_group_name == "public")
else "",
{"publish": "issue", "withdraw": "un-issue"}[btn_value],
)
result = self._client.update_submission(
case_id=case_id, submission_id=submission_id
)
return_data.update(
{
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/?alert={btn_value}" # noqa: E301, E501
}
)
if btn_value == "sufficient":
# Set the submission to sufficient
result = self._client.set_submission_state(case_id, submission_id, btn_value)
return_data.update({"alert": "Submission approved"})
submission_type = submission["type"]
type_helpers = SUBMISSION_TYPE_HELPERS.get(submission_type["key"])
if type_helpers:
return_data.update(
type_helpers(submission, self.request.user).on_approve() or {}
)
# Update submission document approvals
self.update_submission_status(request.POST, submission)
# set any deficiency-notice parameters
updated = False
deficiency_notice_params = from_json(submission.get("deficiency_notice_params"))
send_to = request.POST.getlist("send_to")
if send_to:
deficiency_notice_params["send_to"] = send_to
updated = True
regex = r"^deficiency_notice_params_"
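        # Form fields prefixed "deficiency_notice_params_" are folded into the submission's
        # deficiency_notice_params JSON blob, e.g. (hypothetical field name)
        #   POST["deficiency_notice_params_product_description"] = "Widgets"
        #   -> deficiency_notice_params["product_description"] = "Widgets"
        # and a value of "__remove" deletes that key instead.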
for param_key in request.POST:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = request.POST[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
if value == "__remove":
if get(deficiency_notice_params, matches[1]):
deficiency_notice_params.pop(matches[1])
else:
deficiency_notice_params[matches[1]] = value
if updated:
update_submission_response = self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
deficiency_notice_params=to_json(deficiency_notice_params),
)
if btn_value == "save-exit":
return_data.update({"redirect_url": f"/case/{case_id}/submissions"})
if deficiency_notice_params:
return_data.update(
{"redirect_url": f"/case/{case_id}/submission/{submission_id}"}
)
return HttpResponse(json.dumps(return_data), content_type="application/json")
def update_submission_status(self, request_params, submission):
"""Update submission document statuses.
For each document in the submission review, examine response to
establish if it was marked sufficient/deficient. Call API to update
submission document status if it has changed.
:param (dict) request_params: request parameters
:param (dict) submission: submission
"""
submission_docs = {doc["id"]: doc for doc in submission.get("documents")}
for doc_id in request_params:
if doc_id in submission_docs:
current_status = submission_docs[doc_id]["sufficient"]
new_status = request_params[doc_id] == "yes"
if current_status != new_status:
self._client.set_submission_document_state(
case_id=submission["case"]["id"],
submission_id=submission.get("id"),
document_id=doc_id,
status="sufficient" if new_status else "deficient",
                        # per-document flags (submission_docs is keyed by document id)
                        block_from_public_file=submission_docs[doc_id].get(
                            "block_from_public_file"
                        ),
                        block_reason=submission_docs[doc_id].get("block_reason"),
)
class SubmissionCreateView(SubmissionView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, *args, **kwargs):
btn_value = request.POST.get("btn-value")
send_to = request.POST.getlist("send_to")
organisation_id = request.POST.get("organisation_id")
submission_data = {
"submission_type": int(
request.POST.get("submission_type_id", SUBMISSION_TYPE_QUESTIONNAIRE)
),
"case_id": str(case_id),
"organisation_id": str(organisation_id) if organisation_id else None,
"contact_id": request.POST.getlist("contact_id"),
"public": request.POST.get("public"),
}
if send_to:
submission_data["deficiency_notice_params"] = to_json(
{"send_to": send_to, "case_role": request.POST.get("role_key")}
)
result = self._client.create_submission(**submission_data)
submission = result.get("submission", {}) if result else {}
return HttpResponse(
json.dumps(
{
"submission_id": submission.get("id"),
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/",
}
),
content_type="application/json",
)
class SubmissionDocumentView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, organisation_id=None, *args, **kwargs):
response = {}
document_list_json = request.POST.get("document_list")
if document_list_json:
document_list = json.loads(document_list_json)
for doc_id, doc_status in document_list.items():
logger.debug(f"update document state {doc_id}")
response = self._client.set_submission_document_state(
case_id=case_id,
submission_id=submission_id,
document_id=doc_id,
status=doc_status["status"],
block_from_public_file=doc_status["block_from_public_file"],
block_reason=doc_status["block_reason"],
)
return HttpResponse(json.dumps(response), content_type="application/json")
def delete(self, request, case_id, submission_id, document_id, *args, **kwargs):
response = self._client.detach_document(
case_id=case_id, submission_id=submission_id, document_id=document_id
)
return HttpResponse(json.dumps(response), content_type="application/json")
class SubmissionStatusView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
status_id = request.POST.get("submission_status_id")
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
deficiency_documents=request.FILES.getlist("deficiency_document"),
issue=request.POST.get("issue"),
)
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return redirect(f"/case/{case_id}/submission/{submission_id}/")
class SubmissionApprovalView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
def add_page_data(self):
submission_id = self.kwargs.get("submission_id")
submission = self._client.get_submission(self.case_id, submission_id)
sub_documents = self._client.get_submission_documents(self.case_id, submission_id)
documents = sub_documents.get("documents", [])
submission.update(sub_documents)
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_options = status_map.get(str(submission_type_id), {}).get("keys", [])
status_context = status_map.get(str(submission_type_id))
submission_documents = self.get_documents(submission=submission)
context = {
"template_name": submission["type"]["key"],
"mode": "approval",
"submission": submission,
"case_enums": case_enums,
"status_context": status_context,
"documents": submission_documents,
}
return context
class SubmissionDeficiencyView(CaseBaseView):
"""
Set the submission into a deficiency status and notify the party about it.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
submission_type = submission["type"]
contact = submission_contact(submission)
contact_name = contact.get("name")
organisation_name = submission.get("organisation", {}).get("name") or (
contact.get("organisation") or {}
).get("name")
notification_template = self._client.get_notification_template(
"NOTIFY_SUBMISSION_DEFICIENCY"
)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = submission["case"]["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_name": submission["case"]["name"],
"case_number": case_number,
"company_name": organisation_name,
"deadline": due_at or "No deadline assigned",
"submission_type": submission.get("type", {}).get("name"),
"login_url": public_login_url(),
"footer": footer,
}
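        # parse_notify_template presumably substitutes these values into the notification
        # body's placeholders (GOV.UK Notify style, e.g. "((full_name))"), so a body of
        # "Dear ((full_name)), case ((case_number)) ..." previews with the values above.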
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/status/notify/",
"form_title": f"Deficiency Notice for {organisation_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as a future example
# 'full_name': {'title': 'Name'},
},
"notification_template": notification_template,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
"parsed_template": parse_notify_template(notification_template["body"], values),
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
notify_keys = [
"full_name",
"case_name",
"case_number",
"company_name",
"deadline",
"submission_type",
"login_url",
]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if request.POST.get("contact_id"):
notify_data["contact_id"] = request.POST["contact_id"]
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_context = status_map.get(str(submission_type_id))
status_id = status_context.get("NO")
error = None
if status_id:
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_DEFICIENCY,
)
# reset the submission id to redirect to the new clone if available
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
}
),
content_type="application/json",
)
# If there's no deficiency state for this submission type, return an error
return HttpResponse(
json.dumps(
{
"error": "No deficiency status for this submission type",
}
),
content_type="application/json",
)
class SubmissionVerifyBaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def get_submission_id(self, case_id=None, organisation_id=None):
submission_id = self.kwargs.get("submission_id")
if not submission_id:
# If this is called from the party page - there is no submission id
# so find from the org/case
submissions = self._client.get_submissions_public(
organisation_id=organisation_id,
case_id=case_id,
fields=json.dumps({"id": 0, "type": {"key": 0}}),
)
for submission in submissions:
if get(submission, "type/key") in ["interest", "application"]:
submission_id = submission.get("id")
break # we only want one reg-of-interest submission
return submission_id
def update_submission_json(self, case_id, submission, params):
regex = r"^deficiency_notice_params_"
deficiency_notice_params = submission.get("deficiency_notice_params") or {}
updated = False
response = None
for param_key in params:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = params[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
deficiency_notice_params[matches[1]] = value
if updated:
response = self._client.update_submission(
case_id=case_id,
submission_id=get(submission, "id"),
deficiency_notice_params=to_json(deficiency_notice_params),
)
return response
class SubmissionVerifyViewTasks(SubmissionVerifyBaseView):
"""
Used to verify user and orgs admission to a case
"""
template_name = "cases/verify/submission_verify_tasks.html"
submission_fields = json.dumps(
{
"Submission": {
"id": 0,
"deficiency_notice_params": 0,
"organisation": {
"id": 0,
"name": 0,
},
"contact": {
"name": 0,
"email": 0,
"user": {
"name": 0,
"email": 0,
"id": 0,
"organisation": {
"organisation": {
"id": 0,
"name": 0,
}
},
},
"organisation": {
"id": 0,
"name": 0,
},
},
"case": 0,
"type": 0,
"created_by": 0,
"organisation_case_role_outer": 0,
}
}
)
def get(self, request, case_id, organisation_id, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
if not submission_id:
return HttpResponse(
json.dumps(
{
"error": "You cannot verify this organisation "
"as they have not yet registered interest in this case.",
}
),
content_type="application/json",
)
submission = self._client.get_submission(
self.case_id, submission_id, fields=self.submission_fields
)
json_data = submission.get("deficiency_notice_params") or {}
organisation = submission.get("organisation")
caserole = self._client.get_organisation_case_role(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
org_matches = self._client.get_organisation_matches(organisation_id, with_details="none")
return render(
request,
self.template_name,
{
"submission": submission,
"organisation": organisation,
"caserole": caserole,
"org_matches": org_matches,
"page_data": {
"submission": submission,
"organisation": organisation,
},
},
)
class SubmisisonVerifyEditLoaView(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
documents = self.get_documents(submission)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
org_contacts = self._client.get_organisation_contacts(
organisation_id, case_id, exclude_indirect=True
)
return render(
request,
"cases/verify/loa.html",
{
"auth_contacts": org_contacts,
"organisation": organisation,
"documents": documents,
"LOA": caserole.get("auth_contact"),
"submission": submission,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
self.update_submission_json(case_id, submission, request.POST)
result = self._client.set_organisation_case_role_loa(
case_id,
organisation_id,
pluck(
request.POST,
["LOA_contact_id", "name", "email", "address", "org_name", "phone"],
),
)
return HttpResponse(json.dumps(result))
class SubmisisonVerifyOrganisation(SubmissionVerifyBaseView):
enable_merge = False
def get(self, request, case_id, organisation_id):
test_org_id = request.GET.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(case_id=case_id, organisation_id=test_org_id)
if self.enable_merge:
org_matches = self._client.get_organisation_matches(test_org_id, with_details=True)
else:
org_matches = self._client.get_organisation_matches(test_org_id, with_details=False)
org_matches.sort(
key=lambda m: 1 if m.get("id") == test_org_id else 0
) # put the actual match at the end
matches = decorate_orgs(org_matches, test_org_id, exclude_case_id=case_id)
for match in matches:
if str(match.get("id")) == str(organisation.get("id")):
organisation.update(match)
return render(
request,
"cases/verify/merge_org.html" if self.enable_merge else "cases/verify/verify_org.html",
{
"case_id": self.case_id,
"organisation": organisation,
"match_list": matches,
"representing": test_org_id != organisation_id,
"json_data": submission.get("deficiency_notice_params"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
test_org_id = request.POST.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
verify = request.POST.get("deficiency_notice_params_org_verify")
if verify == "verified":
self._client.verify_caserole(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
elif verify == "rejected":
result = self._client.reject_organisation(case_id, organisation_id)
result = self.update_submission_json(case_id, submission, request.POST)
return HttpResponse(json.dumps({"result": True}))
class SubmissionVerifyAccept(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
roles = self._client.get_case_roles(
exclude=[
CASE_ROLE_APPLICANT,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_PREPARING,
]
)
return render(
request,
"cases/verify/accept.html",
{
"submission": submission,
"organisation": organisation,
"roles": roles,
"caserole": caserole,
"role_name": get(caserole, "role/name"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
role_key = request.POST.get("role_key")
result = {}
result = self._client.set_organisation_case_role(
case_id, organisation_id, role_key, pluck(request.POST, ["approve"])
)
return HttpResponse(json.dumps(result))
class SubmissionVerifyNotify(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
role_name = get(caserole, "role/name")
action = (
"reject" if get(caserole, "role/key") == "rejected" else "accept"
) # Todo: get this from the right place
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
case = self._client.get_case(case_id)
contact = submission_contact(submission)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
notify_key = (
"NOTIFY_INTERESTED_PARTY_REQUEST_PERMITTED"
if action == "accept"
else "NOTIFY_INTERESTED_PARTY_REQUEST_DENIED"
)
try:
notification_template = self._client.get_notification_template(notify_key)
values = self._client.create_notify_context(
{
"full_name": contact.get("name"),
"case_name": case.get("name"),
"case_number": case.get("reference"),
"company_name": organisation["name"],
"login_url": public_login_url(),
"role": role_name,
}
)
parsed_template = parse_notify_template(notification_template["body"], values)
        except Exception:
            parsed_template = ""  # fall back to an empty preview on render failure
# contacts for the notification contact selector
contacts = organisation.get("contacts", [])
user = self._client.get_user(get(submission, "created_by/id"))
contacts.append(user.get("contact"))
return render(
request,
"cases/verify/notify.html",
{
"parsed_template": parsed_template,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
self._client.approve_submission(submission_id=submission_id)
return HttpResponse(json.dumps({"result": True}))
class SubmissionNotifyView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
contact = None
contact_name = None
send_to = json_data.get("send_to")
if not send_to:
contact = submission_contact(submission)
contact_name = contact and contact.get("name")
submission_type = submission["type"]
notify_sys_param_name = submission_type.get("notify_template") or "NOTIFY_QUESTIONNAIRE"
notification_template = self._client.get_notification_template(notify_sys_param_name)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_number": case_number,
"case_name": case["name"],
"investigation_type": case["type"]["name"],
"country": case["sources"][0]["country"] if case["sources"] else "N/A",
"company_name": submission["organisation"].get("name"),
"deadline": due_at or "No deadline assigned",
"login_url": public_login_url(),
"description": submission.get("description"),
"submission_request_name": submission.get("name"),
"notice_type": submission.get("type", {}).get("name"),
"notice_url": submission["url"],
"notice_of_initiation_url": submission["url"],
"footer": footer,
}
template_list = []
if send_to:
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
if contact:
local_values = {
"full_name": contact.get("name"),
"email": contact.get("email"),
"company_name": participant.get("name"),
}
values.update(local_values)
template_list.append(
{
"values": local_values,
"preview": parse_notify_template(
notification_template["body"], values
),
}
)
else:
            # mirror the dict structure used in the multi-recipient branch above
            template_list.append(
                {
                    "values": {"full_name": contact_name, "email": contact.get("email")},
                    "preview": parse_notify_template(notification_template["body"], values),
                }
            )
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/notify/",
"form_title": f"Invite {contact_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as an example
                # 'full_name': {'title': 'Name', 'disabled': True},
},
"notification_template": notification_template,
"templates": template_list,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
notify_keys = ["full_name", "product", "submission_request_name", "description"]
notify_data = {key: request.POST.get(key) for key in notify_keys if key in request.POST}
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
notify_data["deadline"] = due_at or "No deadline assigned"
if request.POST.get("multiple"):
return self.post_multiple(request, case_id, submission, context=notify_data)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"error": None,
}
),
content_type="application/json",
)
def post_multiple(self, request, case_id, submission, context=None):
"""
        Called to handle a notify post to multiple recipients.
We must clone the submission for each target and send the notification
"""
case = self._client.get_case(case_id)
json_data = from_json(submission.get("deficiency_notice_params"))
send_to = json_data.get("send_to")
# We need to know which is the last party in the list
# so we can modify the existing sub rather than clone it.
party_counter = len(send_to)
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
party_counter -= 1
if contact: # don't try to send if there is no contact
data = {
"case_id": case_id,
"submission_id": submission["id"],
"organisation_id": participant.get("id"),
"contact_id": contact.get("id"),
}
if party_counter:
cloned_submission = self._client.clone_submission(**data)
else:
cloned_submission = self._client.update_submission(**data).get(
"submission"
)
context["full_name"] = contact.get("full_name")
self._client.submission_notify(
case_id=case_id,
organisation_id=participant.get("id"),
submission_id=cloned_submission["id"],
values=context or {},
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"alert": f'Sent {len(send_to)} request{"" if len(send_to) < 2 else "s"}',
"redirect_url": f'/case/{case_id}/submission/{submission.get("id")}/'
if len(send_to) < 2
else f"/case/{case_id}/submissions/",
"error": None,
}
),
content_type="application/json",
)
class OrganisationDetailsView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, organisation_id, *args, **kwargs):
client = self.client(request.user)
item = request.GET.get("item")
template = request.GET.get("template")
result = {}
case_submissions = client.get_submissions(case_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
org_id = str(organisation_id)
third_party_contacts = []
if item == "contacts":
contacts = client.get_organisation_contacts(org_id, case_id)
for contact in contacts:
case = get(contact, "cases/" + str(case_id)) or {}
contact["primary"] = case.get("primary")
all_case_invites = client.get_contact_case_invitations(case_id)
if org_id in idx_submissions:
org_submission_idx = deep_index_items_by(idx_submissions[org_id], "id")
third_party_contacts = self.get_third_party_contacts(
org_id, org_submission_idx, all_case_invites
)
# `contacts` may also contain on-boarded third-party contacts that
# have a user, so we need to prune these out.
third_party_contact_ids = set([i["id"] for i in third_party_contacts])
contacts = [
i
for i in itertools.filterfalse(
lambda x: x["id"] in third_party_contact_ids, contacts
)
]
result = {
"contacts": contacts,
"pre_release_invitations": client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"invites": deep_index_items_by(all_case_invites, "contact/id"),
"third_party_contacts": third_party_contacts,
"case_role_id": request.GET.get("caserole"),
}
elif item == "submissions":
result["submissions"] = idx_submissions.get(org_id, [])
elif item == "details":
result["party"] = client.get_organisation(organisation_id=organisation_id)
if template:
deep_update(
result,
{
"case_id": case_id,
"case": {"id": case_id},
"organisation": {"id": org_id},
},
)
return render(request, template, result)
return HttpResponse(json.dumps({"result": result}), content_type="application/json")
@staticmethod
def get_third_party_contacts(organisation_id, submissions, invites):
"""Get third party contacts.
Given an organisation, its submissions and all invitations for a case,
build a list of third party invite contacts. We include the invite submissions
yet to be approved but flag the contact with `submission_sufficient`
:param (str) organisation_id: Organisation ID.
:param (dict) submissions: The organisation's submissions keyed on id.
:param (list) invites: All invites for a case.
:returns (list): Contacts arising from 3rd party invite submissions.
"""
third_party_contacts = []
for invite in invites:
if invite["submission"]:
submission_id = invite["submission"]["id"]
full_submission = submissions.get(submission_id)
if not full_submission:
# Submission not at this org
continue
if full_submission[0]["type"]["id"] != SUBMISSION_TYPE_THIRD_PARTY:
# Not a third party submission
continue
inviting_organisation = full_submission[0]["organisation"]["id"]
if inviting_organisation == organisation_id:
submission_sufficient = full_submission[0]["status"]["sufficient"]
invite["contact"]["is_third_party"] = True
invite["contact"]["submission_id"] = submission_id
invite["contact"]["submission_sufficient"] = submission_sufficient
invite["contact"]["invited"] = invite["email_sent"]
third_party_contacts.append(invite["contact"])
return third_party_contacts
class CaseOrganisationView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "organisations/organisation_in_case.html"
def add_page_data(self):
organisation = self._client.get_organisation(organisation_id=self.organisation_id)
caserole = None
case_submissions = self._client.get_submissions_public(self.case_id, self.organisation_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
submissions = idx_submissions.get(str(self.organisation_id), [])
roi_app_submission = next(
filter(lambda x: get(x, "type/key") in ["interest", "application"], submissions),
None,
)
cases = self._client.organisation_cases(self.organisation_id)
user_cases = self._client.organisation_user_cases(self.organisation_id)
cases_idx = deep_index_items_by_exists(cases, "archived_at")
for case in cases:
if get(case, "id") == str(self.case_id):
caserole = case
invites = self._client.get_contact_case_invitations(
self.case_id,
)
return {
"case": self.case,
"invites": invites,
"party": organisation,
"organisation": organisation,
"cases_idx": cases_idx,
"submissions": submissions,
"user_cases": user_cases,
"roi_app_submission": roi_app_submission,
"caserole": caserole,
}
class OrganisationMatchView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/organisation_dedupe.html"
def add_page_data(self):
organisation = self._client.get_organisation(
organisation_id=self.organisation_id, case_id=self.case_id
)
org_matches = self._client.get_organisation_matches(self.organisation_id)
org_matches = decorate_orgs(org_matches, self.organisation_id)
return {
"case": self.case,
"organisation": organisation,
"org_matches": org_matches,
}
class FilesView(CaseBaseView):
"""
View all case documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/files.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "respondent")
sort = self.request.GET.get("sort")
direction = self.request.GET.get("dir", "asc")
submission_id = self.request.GET.get("submission_id")
collapse_identical = self.request.GET.get("collapse_identical", "false") in (
"true",
"1",
"Y",
)
tabs = {
"tabList": [
{"label": "Respondent", "value": "respondent"},
{"label": "Investigator", "value": "investigator"},
],
"value": tab,
}
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
case_files = self._client.get_case_documents(
case_id=self.case_id,
source=tab,
submission_id=submission_id,
order_by=sort,
order_dir=direction,
)
submission = None
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
return {
"tabs": tabs,
"tab": tab,
"case_enums": case_enums,
"file_list": case_files,
"sort": sort,
"dir": direction,
"collapse_identical": collapse_identical,
"submission": submission,
"pre_document_search": self._client.get_system_boolean("PRE_DOCUMENT_SEARCH"),
}
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
name = request.POST.get("name")
confirm = request.POST.get("confirm") == "true"
tab = request.POST.get("tab", "respondent")
document_ids = request.POST.getlist("document_id")
if document_ids:
if action == "issue" and confirm:
submission_type_id = request.POST.get("submission_type_id")
response = self._client.issue_documents_to_case(
case_id=case_id,
name=name,
document_ids=document_ids,
submission_type_id=submission_type_id,
)
elif action == "confidential":
response = self._client.toggle_documents_confidentiality(
case_id=case_id, document_ids=document_ids
)
return redirect(f"/case/{case_id}/files/?tab={tab}")
class FileBrowseView(View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, *args, **kwargs):
_client = self.client(request.user)
case_files = _client.get_case_documents(case_id=case_id, source="investigator")
# Add application bundle documents
case_files.extend(_client.get_system_documents())
return HttpResponse(json.dumps(case_files), content_type="application/json")
class WorkflowEditor(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("workflow_editor",)
template_name = "cases/workflow_editor.html"
def add_page_data(self):
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
}
def post(self, request, case_id, *args, **kwargs):
workflow = request.POST.get("workflow")
self._client.save_case_workflow(case_id, workflow)
return HttpResponse(json.dumps({"saved": 1}), content_type="application/json")
class ActionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/actions.html"
def add_page_data(self):
permissions = {}
for permission_key in self.request.user.permissions:
permissions[permission_key] = 1
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
"permissions": permissions,
}
class StateView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def post(self, request, case_id, state_key=None, *args, **kwargs):
value = request.POST.get(state_key)
state_map = self._client.set_case_workflow_state(case_id, [state_key], {state_key: value})
return HttpResponse(
json.dumps({"workflow_state": state_map}), content_type="application/json"
)
class ActionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def get_state_from_children(self, item):
any_mode = item.get("required") # this is a bodge and the logic is reverse
state = None
completed = False if any_mode else True
for child in item.get("children", []):
value = self.get_value(child.get("key"))
if value:
state = state or "in-progress"
if any_mode:
if value == "complete":
completed = True
else:
if value != "complete":
completed = False
return "complete" if state and completed else state
state_map = {}
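    # The workflow state map returned by the API is assumed to wrap each value in a
    # list, e.g. {"SOME_NODE_KEY": ["complete"]}, which is why get_value()/set_value()
    # read and write element [0]. This class-level default is replaced per request
    # in post().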
def get_value(self, key):
return (self.state_map.get(key) or [""])[0]
def set_value(self, key, value):
arr = self.state_map.get(key) or [""]
arr[0] = value
self.state_map[key] = arr
def post(self, request, case_id, action_id=None, *args, **kwargs): # noqa: C901
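        # Walk the tasks under the posted action, collect any changed values, work out
        # the action's own state ("complete" or "in-progress"), then propagate that
        # state up through its ancestors before saving all changes in one API call.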
values = {}
node_keys = []
action_key = request.POST.get("action-key")
btn_action = request.POST.get("btn_action")
complete = True
error = False
state = ""
wf = self._client.get_case_workflow(case_id)
workflow = wf.get("workflow")
self.state_map = wf.get("state")
index = key_by(workflow["root"], "key", "children")
action = index.get(action_key.lower(), {})
for task in action.get("children", []):
response_type = task.get("response_type", {}).get("name", "")
if response_type.lower() not in (
"notesection",
"timer",
"label",
): # notes don't count as in-progress
task_key = task.get("key")
old_val = self.get_value(task_key)
new_val = request.POST.get(task_key)
if old_val != new_val:
values[task_key] = new_val
node_keys.append(task_key)
if not new_val:
if task.get("required"):
complete = False
else:
if new_val != "na":
state = "in-progress"
if complete:
state = "complete"
if (self.get_value(action_key) or "") != state:
values[action_key] = state
node_keys.append(action_key)
self.set_value(action_key, state)
        # ripple the state change up through the parent nodes
loc_action = action
while loc_action.get("parent_key"):
loc_action = index.get(loc_action.get("parent_key"))
loc_key = loc_action.get("key")
loc_state = self.get_state_from_children(loc_action)
if (self.get_value(loc_key) or "") != loc_state:
values[loc_key] = loc_state
node_keys.append(loc_key)
self.set_value(loc_key, loc_state)
        if values:
self.state_map = self._client.set_case_workflow_state(case_id, node_keys, values)
if error:
action_id = action.get("id")
return redirect(f"/case/{case_id}/action/{action_id}")
else:
return HttpResponse(
json.dumps({"workflow_state": self.state_map}),
content_type="application/json",
)
class NavSectionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/nav_section.html"
def post(self, request, case_id, *args, **kwargs):
content_id = kwargs.get("nav_section_id")
response = self._client.set_case_content(
case_id, content_id=content_id, content=request.POST
)
content_id = response.get("id")
return redirect(f"/case/{case_id}/section/{content_id}")
def add_page_data(self):
return {}
class AuditView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/audit.html"
def add_page_data(self):
milestone = self.request.GET.get("milestone", "true") == "true"
limit = int(self.request.GET.get("limit", self.limit))
audit_data = self._client.get_audit(
case_id=self.case_id, start=self.start, limit=limit, milestone=milestone
)
url = reverse("case_audit", kwargs={"case_id": self.case_id})
prev_url = next_url = None
prev_page = max(0, self.start - limit)
milestone_flag = f"milestone={milestone}".lower()
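        # A full page suggests more results may follow, so advance `start` and expose
        # a "next" link; otherwise leave `start` at the end of the current page.
        # A "previous" link is added when there is an earlier page to return to.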
if len(audit_data) >= limit:
next_page = max(0, self.start + limit)
next_url = f"{url}?{milestone_flag}&start={next_page}"
if next_page > limit:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
self.start = next_page
else:
self.start = prev_page + len(audit_data)
if prev_page:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
return {
"milestone": milestone,
"events": audit_data,
"next_url": next_url,
"prev_url": prev_url,
}
class CaseAuditExport(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(self, request, case_id, *args, **kwargs):
file = self.client(request.user).get_audit_export(case_id)
        response = HttpResponse(
            file,
            content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        )
response["Content-Disposition"] = "attachment; filename=trade_remedies_export.xlsx"
return response
class NoteView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(
self,
request,
case_id,
content_type=None,
model_id=None,
model_key=None,
*args,
**kwargs,
):
notes = self.client(request.user).get_notes(
case_id, content_type, model_id, model_key=model_key
)
return HttpResponse(json.dumps(notes), content_type="application/json")
def post(self, request, case_id, note_id=None, *args, **kwargs): # noqa: C901
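        # Expected POST fields (inferred from usage below): `content`, `model_id`,
        # `model_key` and `content_type` for the note itself; `delete_list`,
        # `set_confidential` and `set_non-confidential` for existing documents;
        # `files` with matching `file-meta` entries for uploads; optional `redirect`.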
entity_id = request.POST.get("model_id")
model_key = request.POST.get("model_key")
content_type = request.POST.get("content_type")
client = self.client(request.user)
content = request.POST.get("content")
if note_id is None:
result = client.create_note(
case_id=case_id,
content_type=content_type,
model_id=entity_id,
model_key=model_key,
note_text=content,
)
note_id = result.get("id")
else:
delete_list = request.POST.getlist("delete_list")
if delete_list:
for document_id in delete_list:
deleted = client.delete_note_document(case_id, note_id, document_id)
conf_list = request.POST.getlist("set_confidential")
if conf_list:
for document_id in conf_list:
result = client.update_note_document(
case_id, note_id, document_id, "confidential"
)
nonconf_list = request.POST.getlist("set_non-confidential")
if nonconf_list:
for document_id in nonconf_list:
result = client.update_note_document(
case_id, note_id, document_id, "non-confidential"
)
result = client.update_note(case_id, note_id, content)
file_meta = request.POST.getlist("file-meta")
files = request.FILES.getlist("files")
for idx, _file in enumerate(files):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
except VirusFoundInFileException:
# Display a fake doc in the widget until
# a poll for success clears it
msg = "File upload aborted: malware detected in file!"
document = {
"name": msg,
"safe": False,
}
result["documents"].append(document)
else:
document = {
"document_name": _file.original_name,
"name": _file.name,
"size": _file.file_size,
}
result = client.add_note_document(
case_id=case_id,
note_id=note_id,
document=json.dumps(document),
confidentiality=file_meta[idx],
)
redirect_url = request.POST.get("redirect")
if redirect_url:
return internal_redirect(redirect_url, "/")
else:
# Return note json to be rendered at the client
return HttpResponse(json.dumps(result), content_type="application/json")
class PublicFileView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/public_file.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "all")
tabs = {
"tabList": [
{"label": "All", "value": "all"},
{"label": "Notices", "value": "tra"},
{"label": "Business", "value": "business"},
{"label": "Withdrawn", "value": "withdrawn"},
],
"value": tab,
}
        case_submissions = self._client.get_submissions(self.case_id, show_global=True)
        by_published = deep_index_items_by_exists(case_submissions, "issued_at")
        submissions = []
        if tab == "all":
            submissions = by_published.get("true")
        elif tab == "tra":
            submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("true")
        elif tab == "business":
            submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("")
        elif tab == "withdrawn":
            submissions = deep_index_items_by(by_published.get("false"), "is_tra").get("true")
return {
"tabs": tabs,
"submissions": submissions,
"public_base_url": settings.PUBLIC_BASE_URL,
}
class CaseFormView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_form.html"
def get_context(self, client, case_id=None):
if case_id:
case = client.get_case(case_id)
else:
case = {
"new": True,
"id": "",
"organisation": {"id": ""},
"type": {"id": "1"},
}
enums = client.get_all_case_enums()
gov_bodies = client.get_organisations(gov_body=True)
country_dict = {}
for country in countries:
country_dict[country[0]] = country[1]
context = {
"body_classes": "full-width",
"case": case,
"organisations": gov_bodies,
"country_dict": country_dict,
"organisation_name": case.get("organisation", {}).get("name") or "Secretary of State",
"contact_country": "GB",
"submission": {"type": {"id": 4}},
"tra_team_names": [
settings.ORGANISATION_NAME,
settings.ORGANISATION_INITIALISM + " Team 1",
settings.ORGANISATION_INITIALISM + " Team 2",
settings.ORGANISATION_INITIALISM + " Team 3",
],
}
context.update(enums)
return context
def get(self, request, case_id=None, *args, **kwargs):
client = self.client(request.user)
context = self.get_context(client, case_id)
return render(request, self.template_name, context)
def post(self, request, case_id=None, *args, **kwargs):
post_data = {
"id": case_id,
}
non_required_fields = [
"submission_status_id",
"case_name",
"organisation_name",
"organisation_id",
# 'organisation_address', 'organisation_post_code', 'companies_house_id',
# 'contact_name', 'contact_email', 'contact_phone', 'contact_address',
# 'contact_country',
]
error_lookup = {
"case_type_id": "Case type",
"product_name": "Product name",
"submission_type_id": "Submission type",
"sector_id": "Product sector",
"product_description": "Product description",
"export_country_code": "Export country",
"hs_code": "Product code",
}
required_fields = list(error_lookup.keys())
list_fields = ["export_country_code", "hs_code"]
case_fields = required_fields + non_required_fields
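        # `error_lookup` doubles as the required-field list; multi-value fields in
        # `list_fields` are read with getlist(), everything else with get().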
errors = {}
client = self.client(request.user)
if request.POST.get("case_type_id") in ALL_REGION_ALLOWED_TYPE_IDS:
required_fields.remove("export_country_code")
for field in case_fields:
post_data[field] = (
compact_list(request.POST.getlist(field))
if field in list_fields
else request.POST.get(field)
)
for field in required_fields:
if field in error_lookup and not post_data.get(field):
fieldname = error_lookup.get(field)
errors[field] = f"{fieldname} is required"
        for code in post_data.get("hs_code") or []:
            if len(str(code)) not in (6, 7, 8, 9, 10):  # temporary validation
                errors["hs_code"] = "HS codes should be between 6 and 10 digits"
if not errors:
post_data["ex_oficio"] = True
result = client.submit_full_case_data(post_data)
return redirect("/cases/")
else:
context = self.get_context(client, case_id)
context["errors"] = errors
context.update(post_data)
return render(request, self.template_name, context)
class InviteContactView(CaseBaseView):
"""
Invite a contact to the case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/invite.html"
raise_exception = True
def get_organisation_admin_user_contact(self, organisation_id):
contact = None
organisation = self._client.get_organisation(organisation_id)
admin_user = [
user
for user in organisation.get("users", [])
if user.get("security_group") == SECURITY_GROUP_ORGANISATION_OWNER
]
if admin_user:
user = self._client.get_user(admin_user[0]["user_id"])
contact = user.get("contact")
contact["organisation"] = organisation
return contact
def add_page_data(self):
contact = None
organisation = None
if self.kwargs.get("organisation_id"):
organisation = self._client.get_organisation(self.kwargs.get("organisation_id"))
if self.kwargs.get("contact_id"):
contact = self._client.get_contact(self.kwargs["contact_id"])
form_url = f"/case/{self.case['id']}/invite/{self.kwargs['contact_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if organisation:
form_url = f"{form_url}for/{organisation['id']}/"
elif self.kwargs.get("organisation_id"):
contact = self.get_organisation_admin_user_contact(self.kwargs["organisation_id"])
form_url = f"/case/{self.case['id']}/invite/organisation/{self.kwargs['organisation_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if not organisation:
organisation = contact["organisation"]
notification_template = self._client.get_notification_template(
"NOTIFY_INFORM_INTERESTED_PARTIES"
)
deep_update(
self.case,
self._client.get_case(
self.case_id,
fields=json.dumps(
{
"Case": {
"latest_notice_of_initiation_url": 0,
"registration_deadline": 0,
"product": 0,
}
}
),
),
)
case_number = self.case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact["name"],
"product": get(self.case, "product/name"),
"case_number": case_number,
"case_name": self.case["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"company_name": organisation["name"],
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"guidance_url": self._client.get_system_parameters("LINK_HELP_BOX_GUIDANCE")["value"],
"email": email,
"login_url": f"{settings.PUBLIC_BASE_URL}",
}
context = {
"form_url": form_url,
"editable_fields": ["full_name", "product"],
"case": self.case,
"contact": contact,
"case_role_id": self.kwargs["case_role_id"],
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
"organisation": organisation,
"organisation_id": self.kwargs.get("organisation_id"),
}
return context
def post(
self,
request,
contact_id=None,
case_id=None,
case_role_id=None,
organisation_id=None,
*args,
**kwargs,
):
notify_keys = ["full_name", "product"]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if organisation_id and contact_id:
notify_data["organisation_id"] = organisation_id
elif organisation_id and not contact_id:
contact = self.get_organisation_admin_user_contact(organisation_id)
contact_id = contact["id"]
response = self._client.invite_contact(case_id, contact_id, case_role_id, notify_data)
return HttpResponse(json.dumps(response), content_type="application/json")
class IssueFilesFormView(CaseBaseView):
"""
Issue files to case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "widgets/issue_files_form.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
return {
"case_enums": case_enums,
"case": self.case,
}
class CaseBundlesView(CaseBaseView):
"""
Assign documents to the case directly (not via submissions)
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundles.html"
def add_page_data(self):
list_mode = self.request.GET.get("tab", "live")
tabs = {
"value": list_mode,
"tabList": [
{"label": "Live", "value": "live", "sr_text": "Show live bundles"},
{"label": "Draft", "value": "draft", "sr_text": "Show draft bundles"},
],
}
case_bundles = self._client.get_case_submission_bundles(
case_id=self.case["id"],
status=list_mode.upper(),
)
return {
"bundles": case_bundles,
"error": self.kwargs.get("error"),
"tabs": tabs,
"status": list_mode,
}
@method_decorator(csrf_exempt, name="dispatch")
class CaseBundleView(CaseBaseView):
"""
View and edit a specific bundle full of documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundle_builder.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
bundle = None
bundle_id = self.kwargs.get("bundle_id")
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
if bundle_id:
bundle = self._client.get_case_submission_bundles(
case_id=self.case["id"], bundle_id=self.kwargs.get("bundle_id")
)
return_data.update(
{
"bundle": bundle,
"submission_types": case_enums["submission_types"],
}
)
return return_data
def post(self, request, case_id, bundle_id=None, *args, **kwargs): # noqa: C901
name = request.POST.get("name")
data = pluck(request.POST, ["name", "description"])
btn_value = request.POST.get("btn-value")
if btn_value == "send":
data["status"] = "LIVE"
# Upload documents
if bundle_id:
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
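            # Each `meta` entry is assumed to be a JSON block describing one upload or
            # attached case file, e.g. (illustrative values only):
            #   {"name": "evidence.docx", "confidential": true,
            #    "submission_document_type": "<type-key>", "file": {"id": "<document-id>"}}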
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
                except (VirusFoundInFileException, APIException) as e:
                    redirect_url = f"/case/{case_id}/bundle/{bundle_id}/?"
                    msg = "File upload aborted: "
                    if isinstance(e, VirusFoundInFileException):
                        msg += "malware detected in file!"
                        redirect_url += "virus=true"
                    else:
                        msg += f"{e}"
                        redirect_url += f"upload_error={msg}"
                    logger.warning(msg)
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
# Attach existing documents to this bundle
if case_files := request.POST.getlist("case_files"):
file_details_by_id = deep_index_items_by(meta, "file/id")
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"submission_document_type": details.get("submission_document_type"),
},
document_id=case_file_id,
)
else:
data = pluck(request.POST, ["name", "submission_type_id"])
data["case_id"] = case_id
# Anything else to send?
response = None
if data:
response = self._client.set_case_submission_bundle(bundle_id=bundle_id, data=data)
ret = {"result": "ok", "status": data.get("status")}
response_id = response and response.get("id")
if response_id:
ret["redirect_url"] = f"/case/{case_id}/bundle/{response_id}/"
return HttpResponse(json.dumps(ret), content_type="application/json")
def delete(self, request, case_id, document_id, *args, **kwargs):
response = self._client.delete_case_submission_bundle(case_id, document_id)
return redirect(f"/case/{case_id}/documents/")
class SubmissionInviteNotifyView(CaseBaseView):
"""
    Notify an invitee about an invitation to a case.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
template_name = "cases/invite.html"
def add_page_data(self):
"""Add page data.
CaseBaseView override.
"""
case_id = self.kwargs.get("case_id")
submission_id = self.kwargs.get("submission_id")
contact_id = self.kwargs.get("contact_id")
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
inviting_organisation = submission["organisation"]
invited_contact = self._client.get_contact(contact_id)
inviting_contact = submission.get("contact") or {}
notification_template = self._client.get_notification_template("NOTIFY_THIRD_PARTY_INVITE")
form_url = f"/case/{case_id}/submission/{submission_id}/invite/{contact_id}/notify/"
# Attempt to infer the invite URL
login_url = f"{settings.PUBLIC_BASE_URL}"
invites = self._client.get_invitations(case_id, submission_id)
for i in invites:
if i["contact"]["id"] == str(contact_id):
invite = self._client.get_invite_details(i["id"])
code = invite.get("code")
login_url = f"{login_url}/invitation/{code}/{case_id}/"
break
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": invited_contact["name"],
"case_name": case["name"],
"invited_by_organisation": inviting_organisation["name"],
"invited_by_name": inviting_contact["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"login_url": login_url,
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"email": email,
}
context = {
"form_url": form_url,
"notification_template": notification_template,
"submission": submission,
"case": case,
"contact": invited_contact,
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
}
return context
def post(self, request, case_id, submission_id, contact_id, *args, **kwargs):
notify_data = {
"case_id": case_id,
"submission_id": submission_id,
"contact_id": contact_id,
}
response = self._client.action_third_party_invite(
case_id=case_id,
submission_id=submission_id,
contact_id=contact_id,
params=notify_data,
)
return HttpResponse(json.dumps(response), content_type="application/json")
class UpdateParentView(CaseBaseView):
template_name = "cases/update_parent.html"
linked_case_confirm_key = "LINKED_CASE_CONFIRM"
cases_fields = json.dumps(
{
"Case": {
"name": 0,
"id": 0,
"reference": 0,
}
}
)
case_fields = json.dumps(
{"Case": {"parent": {"id": 0}, "workflow_state": {linked_case_confirm_key: 0}}}
)
def add_page_data(self):
cases = self._client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
case = self._client.get_case(self.case_id, fields=self.case_fields)
return {"case": case, "cases": cases}
def post(self, request, case_id, *args, **kwargs):
link_confirm = request.POST.get("link_confirm")
parent_id = request.POST.get("parent_id")
_client = self.client(request.user)
case = _client.get_case(case_id, fields=self.case_fields)
if get(case, "parent/id") != parent_id:
_client.set_case_data(case_id, {"parent_id": parent_id})
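        # Workflow state values are assumed to come back list-wrapped,
        # e.g. {"LINKED_CASE_CONFIRM": ["<value>"]}, hence the `[0]` below.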
if (get(case, f"workflow_state/{self.linked_case_confirm_key}") or [0])[0] != link_confirm:
_client.set_case_workflow_state(
case_id, values={f"{self.linked_case_confirm_key}": link_confirm}
)
return HttpResponse(
json.dumps({"parent_id": parent_id, "link_confirm": link_confirm}),
content_type="application/json",
)
class NoticesView(
LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin
):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notices.html"
def get(self, request):
client = self.client(request.user)
notices = client.get_notices()
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notices": notices,
},
)
class NoticeView(LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notice.html"
cases_fields = json.dumps({"Case": {"name": 0, "id": 0, "reference": 0}})
def get(self, request, notice_id=None):
client = self.client(request.user)
enums = client.get_all_case_enums()
case_types = enums.get("case_types", [])
cases = client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
notice = {}
if notice_id:
notice = client.get_notice(notice_id)
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notice": notice,
"cases": cases,
"case_types": case_types,
},
)
def post(self, request, notice_id=None):
client = self.client(request.user)
notice = client.create_update_notice(
name=request.POST.get("name"),
reference=request.POST.get("reference"),
terminated_at=request.POST.get("terminated_at"),
published_at=request.POST.get("published_at"),
case_type=request.POST.get("case_type_id"),
review_case=request.POST.get("review_case_id"),
notice_id=notice_id,
)
return redirect("/cases/notices/")
class DocumentSearchView(CaseBaseView):
template_name = "documents/documents.html"
def add_page_data(self):
query = self.request.GET.get("query")
conf_status = self.request.GET.get("confidential_status")
user_type = self.request.GET.get("user_type")
response = self._client.search_documents(
case_id=self.case_id,
query=query,
confidential_status=conf_status,
user_type=user_type,
)
return {
"body_classes": "full-width",
"documents": response.pop("results", []),
"query": query,
"conf_status": conf_status,
**response,
}
class CaseTeamJsonView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, **kwargs):
team = self.client(request.user).get_case_team_members(case_id)
return HttpResponse(json.dumps(team), content_type="application/json")
| import itertools
import json
import logging
import re
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_chunk_upload_handlers.clam_av import VirusFoundInFileException
from core.base import GroupRequiredMixin
from core.utils import (
deep_index_items_by,
deep_index_items_by_exists,
get,
key_by,
index_users_by_group,
compact_list,
submission_contact,
public_login_url,
parse_notify_template,
parse_api_datetime,
pluck,
to_json,
from_json,
deep_update,
internal_redirect,
is_date,
notify_footer,
notify_contact_email,
)
from django_countries import countries
from django.conf import settings
from cases.submissions import SUBMISSION_TYPE_HELPERS, get_submission_deadline
from cases.utils import decorate_orgs
from core.constants import (
ALL_REGION_ALLOWED_TYPE_IDS,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
SECURITY_GROUPS_TRA,
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUPS_TRA_ADMINS,
SECURITY_GROUP_ORGANISATION_OWNER,
SUBMISSION_TYPE_QUESTIONNAIRE,
SUBMISSION_TYPE_APPLICATION,
SUBMISSION_NOTICE_TYPE_INVITE,
SUBMISSION_NOTICE_TYPE_DEFICIENCY,
SUBMISSION_TYPE_THIRD_PARTY,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_APPLICANT,
CASE_ROLE_PREPARING,
DIRECTION_TRA_TO_PUBLIC,
)
from trade_remedies_client.mixins import TradeRemediesAPIClientMixin
from trade_remedies_client.exceptions import APIException
logger = logging.getLogger(__name__)
org_fields = json.dumps(
{
"Organisation": {
"id": 0,
"has_non_draft_subs": 0,
"gov_body": 0,
"has_roi": 0,
}
}
)
class CasesView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
template_name = "cases/cases.html"
def get(self, request, *args, **kwargs):
list_mode = request.GET.get("tab", "my")
panel_layout = self.client(self.request.user).get_system_boolean("PRE_RELEASE_PANELS")
fields = {
"Case": {
"id": 0,
"user_case": 0,
"name": 0,
"reference": 0,
"created_at": 0,
"type": {
"name": 0,
"acronym": 0,
"colour": 0,
"reference": 0,
"applicant": {"organisation": 0, "name": 0, "id": 0},
},
"applicant": {
"organisation": {
"name": 0,
"id": 0,
}
},
"stage": {"name": 0},
"case_status": {"next_action": 0, "next_notice_due": 0},
}
}
if list_mode == "archived":
fields = deep_update(
fields,
{
"Case": {
"workflow_state": {
"MEASURE_EXPIRY": 0,
"DETERMINATION_ACTIVE_DATE": 0,
}
}
},
)
cases = self.client(request.user).get_cases(
archived=list_mode == "archived",
all_cases=list_mode == "all",
new_cases=list_mode == "new",
fields=json.dumps(fields),
)
tabs = {
"value": list_mode,
"tabList": [
{"label": "Your cases", "value": "my", "sr_text": "Show your cases"},
{"label": "Open cases", "value": "all", "sr_text": "Show open cases"},
{
"label": "New applications",
"value": "new",
"sr_text": "Show new applications",
},
{
"label": "Archived",
"value": "archived",
"sr_text": "Show archived cases",
},
],
}
template_name = self.template_name if panel_layout else "cases/cases_old.html"
body_class = "full-width kill-footer" if panel_layout else "full-width"
return render(
request,
template_name,
{
"body_classes": body_class,
"cases": cases,
"tabs": tabs,
},
)
class CaseBaseView(
LoginRequiredMixin,
GroupRequiredMixin,
PermissionRequiredMixin,
TemplateView,
TradeRemediesAPIClientMixin,
):
permission_required = []
groups_required = SECURITY_GROUPS_TRA
supress_nav_section = False
def dispatch(self, *args, **kwargs):
if self.request.user.is_authenticated:
self._client = self.client(self.request.user)
self.case_id = kwargs.get("case_id")
return super().dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
self.kwargs = kwargs
self.organisation_id = kwargs.get("organisation_id")
self.request = request
self.user_token = request.user.token
case_fields = json.dumps(
{
"Case": {
"id": 0,
"name": 0,
"initiated_at": 0,
"decision_to_initiate,name": 0,
"reference": 0,
"sequence": 0,
"type": 0,
"archived_at": 0,
"archive_reason": {"name": 0},
"submission_count": 0,
"participant_count": 0,
"stage": {"name": 0},
"case_status": 0,
"organisation": {"id": 0, "name": 0},
}
}
)
self.case = self._client.get_case(self.case_id, fields=case_fields)
self.document_count = self._client.get_case_document_count(self.case_id)
self.start = int(request.GET.get("start", 0))
self.limit = int(request.GET.get("limit", 20))
content_id = self.kwargs.get("nav_section_id")
context = {
"case": self.case,
"case_id": self.case_id,
"document_count": self.document_count,
"content": self._client.get_case_content(self.case_id, content_id=content_id),
"tree": self._client.get_nav_section(self.case_id, selected_content=content_id),
"body_classes": "full-width",
"panel_layout": self._client.get_system_boolean("PRE_RELEASE_PANELS"),
"organisation_id": self.organisation_id,
"submission_group_name": "submission",
"alert": request.GET.get("alert"),
"user": request.user,
}
deep_update(context, self.add_page_data())
if context.get("redirect"):
return redirect(context.get("redirect"))
return render(request, self.template_name, context)
def add_page_data(self):
return {}
def get_documents(self, submission, all_versions=None):
result = self._client.get_submission_documents(
self.case_id, submission.get("id"), all_versions=all_versions
)
all_documents = result.get("documents", [])
deficiency_docs = result.get("deficiency_documents", [])
if all_versions:
# If this submission has an immediate ancestor, get the docs from that to mark status
docs_by_submission = deep_index_items_by(all_documents, "version")
this_version = int(submission.get("version"))
this_sub = docs_by_submission.get(str(this_version))
sub_docs = this_sub[0].get("documents")
# we have a list of the submissions that make up a family - id, version and documents.
if this_version > 1:
parent_sub = docs_by_submission.get(str(this_version - 1))
parent_docs = parent_sub and parent_sub[0].get("documents")
parent_doc_idx = {}
for parent_doc in parent_docs:
doc_type = get(parent_doc, "type/name") + "|" + get(parent_doc, "name")
parent_doc_idx[doc_type] = parent_doc
for document in sub_docs:
document["parent"] = parent_doc_idx.get(
get(document, "type/name") + "|" + get(document, "name")
)
else:
sub_docs = all_documents
submission_documents = deep_index_items_by(sub_docs, "type/key")
document_conf_index = deep_index_items_by(
submission_documents.get("respondent", []), "confidential"
)
confidential = document_conf_index.get("true", [])
confidential.sort(key=lambda cf: cf.get("name"))
non_conf = document_conf_index.get("", [])
doc_index = key_by(confidential, "id")
non_conf.sort(key=lambda nc: get(get(doc_index, str(nc.get("parent_id"))), "name"))
return {
"caseworker": submission_documents.get("caseworker", []),
"respondent": submission_documents.get("respondent", []),
"loa": submission_documents.get("loa", []),
"deficiency": deficiency_docs,
"confidential": confidential,
"nonconfidential": non_conf,
}
def has_permission(self):
"""
Override this method to customize the way permissions are checked.
"""
perms = self.get_permission_required()
return not perms or self.request.user.has_perms(perms)
class CaseAdminView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/admin.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
case_users = self._client.get_case_users(self.case["id"])
context = {
"case_enums": case_enums,
"case": self.case,
"users": case_users,
"participants": self._client.get_case_participants(self.case_id),
}
return context
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
case = self._client.get_case(case_id)
update_spec = {}
if action == "initiation_flag_toggle":
if case["initiated_at"]:
update_spec["initiated_at"] = ""
else:
update_spec["initiated_at"] = timezone.now()
elif action == "set_case_stage":
update_spec["ignore_flow"] = request.POST.get("ignore_flow") or "false"
update_spec["stage_id"] = request.POST.get("stage_id")
elif action == "set_name":
update_spec["name"] = request.POST.get("name")
elif action == "set_case_type":
update_spec["stage_id"] = ""
update_spec["type_id"] = request.POST.get("type_id")
elif action == "toggle_archived":
if case.get("archived_at"):
update_spec["archived_at"] = ""
else:
update_spec["archived_at"] = timezone.now()
update_spec["archive_reason_id"] = request.POST.get("archive_reason_id")
elif action == "reset_initiation_decision":
update_spec["reset_initiation_decision"] = True
if update_spec:
response = self._client.update_case(case_id, update_spec)
return redirect(f"/case/{case_id}/admin/")
class CaseMilestoneDatesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("case_admin",)
template_name = "cases/milestone_dates.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums(self.case_id)
case_milestones = self._client.case_milestones(self.case["id"])
existing_keys = [cm["key"] for cm in case_milestones]
context = {
"milestone_types": case_enums.get("milestone_types"),
"available_review_types": case_enums.get("available_review_types"),
"milestones": case_milestones,
"existing_milestones": existing_keys,
}
return context
def post(self, request, case_id, milestone_key=None):
milestone_key = milestone_key or request.POST.get("milestone_key")
date = request.POST.get("date")
response = self._client.set_case_milestone(case_id, milestone_key, date)
return redirect(f"/case/{case_id}/milestones/")
class CaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = []
template_name = "cases/case.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": {
"organisation": {
"id": 0,
"name": 0,
"primary_contact": {
"name": 0,
"email": 0,
"phone": 0,
"address": 0,
"post_code": 0,
"country": {"name": 0},
"has_user": 0,
"user": {"id": 0, "organisation": {"id": 0, "name": 0}},
},
}
},
"parent": {"id": 0, "name": 0, "reference": 0, "type": 0},
"workflow_state": {"LINKED_CASE_CONFIRM": 0},
"initiated_sequence": 0,
}
}
)
def add_page_data(self):
team = self._client.get_case_team_members(self.case_id)
team_by_group = index_users_by_group([member.get("user") for member in team])
group_order = [
SECURITY_GROUP_TRA_ADMINISTRATOR,
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_LEAD_INVESTIGATOR,
]
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
return {
"audit": self._client.get_audit(
case_id=self.case_id, start=0, limit=20, milestone=True
),
"case_page": True,
"case": case_extras,
"team_groups": team_by_group,
"group_order": group_order,
"public_base_url": settings.PUBLIC_BASE_URL,
}
def post(self, request, case_id, *args, **kwargs):
self._client.set_case_data(case_id, {"name": request.POST.get("name")})
redirect = request.POST.get("redirect")
if redirect:
return internal_redirect(request.POST.get("redirect"), "/")
else:
return HttpResponse(json.dumps({"result": "ok"}), content_type="application/json")
class PartiesView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/parties.html"
def add_page_data(self):
parties = []
roles = self._client.get_case_roles()
all_case_invites = self._client.get_contact_case_invitations(self.case_id)
all_participants = self._client.get_case_participants(self.case_id, fields=org_fields)
case_invites = deep_index_items_by(all_case_invites, "contact/id")
invited = set([])
accepted = set([])
for invite in all_case_invites:
org_id = invite.get("organisation", {}).get("id")
if invite.get("accepted_at"):
# note: accepted and invited are mutually exclusive
accepted.add(org_id)
else:
invited.add(org_id)
for role in roles:
_base = all_participants[role["key"]]
_base["key"] = role["key"]
_base["name"] = role["plural"]
if role["allow_cw_create"]:
_base["add_link"] = f"Add {role['name']}"
parties.append(_base)
return {
"party_types": parties,
"invites": case_invites,
"accepted_orgs": list(accepted),
"invited_orgs": list(invited),
"pre_release_invitations": self._client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"alert": self.request.GET.get("alert"),
}
class CaseTeamView(CaseBaseView):
permission_required = "can_assign_team"
template_name = "cases/team.html"
def add_page_data(self):
all_users = self._client.get_all_users(group_name="caseworker")
users_by_group = index_users_by_group(all_users)
team = self._client.get_case_team_members(self.case_id)
return {
"team": [member.get("user", {}).get("id") for member in team],
"tra_users": all_users,
"grouped_users": users_by_group,
"groups": SECURITY_GROUPS_TRA[1:],
"inactive_user_count": sum(user["active"] is False for user in all_users),
"singleton_groups": [
SECURITY_GROUP_TRA_HEAD_OF_INVESTIGATION,
SECURITY_GROUP_TRA_ADMINISTRATOR,
],
}
def post(self, request, case_id, *args, **kwargs):
user_ids = request.POST.getlist("user_id")
response = self._client.assign_case_team(case_id, user_ids)
return redirect(f"/case/{case_id}/")
class SubmissionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submissions.html"
show_global = False
sub_page = ""
def get_tab(self, role, party):
if not role.get("allow_cw_create"):
return role["key"]
return "sampled" if party.get("sampled") else "not_sampled"
def consolidate_submissions(
self, case, participants, submissions_by_party, counts, selected_tab
):
roles = []
single_role_return = None # for awaiting and rejected - only return that specific role
for role in self._client.get_case_roles():
role["participants"] = []
for party in participants.get(role["key"], {}).get("parties", []):
tab = self.get_tab(role, party)
submissions = submissions_by_party.get(party["id"], [])
submissions += submissions_by_party.get("", [])
if submissions:
counts[tab] = counts.get(tab, 0) + len(submissions)
if tab == selected_tab:
party["submissions"] = submissions
role["participants"].append(party)
if not party.get("gov_body"):
role["customer_parties"] = True
sort_key = (
"submissions/0/received_at"
if selected_tab == CASE_ROLE_AWAITING_APPROVAL
else "name"
)
role["participants"].sort(key=lambda pt: get(pt, sort_key) or "")
if role.get("key") == selected_tab:
single_role_return = role
if role.get("allow_cw_create"):
roles.append(role)
return [single_role_return] if single_role_return else roles
def get_name(self, participant):
return participant.get("name")
def flatten_participants(self, source):
participants = []
for role in source:
rec = source[role]
participants = participants + rec["parties"]
participants.sort(key=self.get_name)
return participants
def divide_submissions(self, submissions):
incoming = []
outgoing = []
draft = []
for submission in submissions:
if get(submission, "status/sent"):
outgoing.append(submission)
elif get(submission, "status/default") and get(submission, "type/direction") != 1:
draft.append(submission)
else:
if (
not get(submission, "status/draft")
or get(submission, "type/key") == "application"
): # customer draft should not be seen by investigators
incoming.append(submission)
return {
"incoming": sorted(incoming, key=lambda su: su.get("received_at") or "", reverse=True),
"outgoing": sorted(outgoing, key=lambda su: su.get("sent_at") or "", reverse=True),
"draft": sorted(draft, key=lambda su: su.get("created_at") or "", reverse=True),
}
def add_page_data(self):
tab = self.request.GET.get("tab", "sampled").lower()
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
submissions_by_type = deep_index_items_by(all_submissions, "type/name")
# Get submissions that have just been created by customer
# or are still in draft after creation
draft_submissions = deep_index_items_by(all_submissions, "status/default").get("true") or []
# Remove any that are back with the customer following deficiency
draft_first_version_submissions = (
deep_index_items_by(draft_submissions, "version").get("1") or []
)
# Exclude these drafts from our list
non_draft_submissions = [
sub for sub in all_submissions if sub not in draft_first_version_submissions
]
# draft applications are included to allow a heads up view
# to the caseworker before it's submitted
if submissions_by_type.get("application", [{}])[0].get("status", {}).get("default") is True:
submissions_by_type["application"][0]["tra_editable"] = True
non_draft_submissions += submissions_by_type["application"]
submissions_by_party = deep_index_items_by(non_draft_submissions, "organisation/id")
case_enums = self._client.get_all_case_enums()
invites = self._client.get_case_invite_submissions(self.case_id)
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
flat_participants = self.flatten_participants(participants)
counts = {}
if self.sub_page:
self.template_name = f"cases/submissions_{self.sub_page}.html"
tab = self.request.GET.get("tab", "incoming").lower()
elif self._client.get_system_boolean("PRE_NEW_SUBMISSION_PAGE"):
self.template_name = "cases/submissions_new.html"
context = {
"raw_participants": participants,
"submissions": submissions_by_type,
"participants": flat_participants,
"counts": counts,
"all_roles": self.consolidate_submissions(
self.case,
participants=participants,
submissions_by_party=submissions_by_party,
counts=counts,
selected_tab=tab,
),
"submission_types": case_enums["case_worker_allowed_submission_types"],
"invites": invites,
"tab": tab,
"submission_groups": self.divide_submissions(all_submissions),
"all_submissions": all_submissions,
}
# TODO: Temp handling of application vs ex_officio ones
if not submissions_by_type.get("application") and submissions_by_type.get(
"ex officio application"
):
context["submissions"]["application"] = submissions_by_type["ex officio application"]
return context
class SubmissionView(CaseBaseView):
"""
View and modify submissions
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
extra_case_fields = json.dumps(
{
"Case": {
"applicant": 0,
"product": 0,
"sources": 0,
}
}
)
def add_page_data_old(self):
alert = self.request.GET.get("alert") # indicates the submission has just been created
documents = []
submission = {}
submission_id = self.kwargs.get("submission_id")
third_party_invite = False
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
submission_type = submission["type"]
third_party_invite = submission_type["name"] == "Invite 3rd party"
self.organisation_id = submission["organisation"]["id"]
created_by_id = get(submission, "created_by/id")
if created_by_id:
full_user = self._client.get_user(created_by_id)
submission["created_by"]["organisation"] = get(full_user, "organisations/0")
submission_context = {}
if SUBMISSION_TYPE_HELPERS.get(submission_type["key"]):
submission_context = SUBMISSION_TYPE_HELPERS[submission_type["key"]](
submission, self.request.user
).get_context()
self.template_name = "cases/submission.html"
case_extras = self._client.get_case(self.case_id, fields=self.extra_case_fields)
context = {
"submission": submission,
"template_name": f"{submission_type['key']}",
"documents": self.get_documents(submission=submission, all_versions=True),
"alert": alert,
"case": case_extras,
"third_party_invite": third_party_invite,
**submission_context,
}
if (
not submission
or not submission.get("status")
or submission.get("status", {}).get("default")
):
context["mode"] = "form"
else:
context["mode"] = "view"
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
context["organisation"] = self.organisation
context["organisation_id"] = str(self.organisation["id"])
return context
def get_all_participants(self, case_participants):
all_parties = []
roles = {}
for type_name, role_parties in case_participants.items():
parties = role_parties.get("parties")
if parties:
all_parties.extend(parties)
role = parties[0].get("role")
roles[role.get("key")] = role
return deep_index_items_by(all_parties, "sampled"), roles
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
submission = {}
participants = self._client.get_case_participants(self.case_id, fields=org_fields)
parties, roles = self.get_all_participants(participants)
alert = self.request.GET.get("alert") # indicates the submission has just been created
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
submission_id = self.kwargs.get("submission_id")
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
_default = submission.get("status", {}).get("default")
if not _default or (
_default and submission["type"]["id"] == SUBMISSION_TYPE_APPLICATION
):
page_data = self.add_page_data_old()
return_data.update(page_data)
return return_data
self.organisation_id = submission["organisation"]["id"]
return_data.update(
{
"roles": roles,
"submission": submission,
"status": (submission.get("status") or {}).get("id"),
"alert": alert,
"documents": self.get_documents(submission=submission),
"role": submission.get("organisation_case_role") or {"name": "Public file"},
"participants": participants,
"all_participants": parties,
"json_data": json_data,
"selected_submission_type": submission.get("type", {}).get("key")
or "questionnaire",
}
)
else:
role = self.request.GET.get("for")
sampled = self.request.GET.get("sampled") == "sampled"
full_role = (
self._client.get_case_role(role)
if (role and role != "public")
else {"name": "Public file"}
)
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
# Get all draft submissions of this type
all_submissions = self._client.get_submissions(self.case_id, show_global=True)
draft_submissions = (
deep_index_items_by(all_submissions, "status/default").get("true") or []
)
# draft_submissions_this_role = deep_index_items_by(draft_submissions,
# 'organisation_case_role/key').get('' if role == 'public' else role)
draft_submissions_this_role = deep_index_items_by(
draft_submissions, "organisation_id"
).get("")
return_data.update(
{
"submission": submission,
"submission_type_id": self.kwargs.get("submission_type_id")
or self.request.GET.get("submission_type_id"),
"submission_statuses": case_enums["submission_statuses"],
"statuses_by_type": case_enums["statuses_by_type"],
"selected_submission_type": self.request.GET.get("submission_type")
or "questionnaire",
"organisation_id": self.kwargs.get("organisation_id"),
"draft_submissions": draft_submissions_this_role,
"role": full_role,
}
)
if role == "public":
return_data.update(
{
"submission_types": case_enums["public_submission_types"],
"public": True,
"organisation_id": self.kwargs.get("organisation_id"),
}
)
else:
role_participants = participants.get(role, {}).get("parties", [])
filtered = list(
filter(
lambda party: party
if party.get("sampled") == sampled and not party.get("gov_body")
else None,
role_participants,
)
)
return_data.update(
{
"submission_types": case_enums["case_worker_allowed_submission_types"],
"participants": participants,
"roles": roles,
}
)
self.organisation_id = self.organisation_id or self.request.GET.get("organisation_id")
if self.organisation_id:
self.organisation = self._client.get_organisation(self.organisation_id)
return_data["organisation"] = self.organisation
return_data["organisation_id"] = str(self.organisation["id"])
# add errors from the url
errors = self.request.GET.get("errors")
if errors:
try:
return_data["errors"] = json.loads(errors)
except Exception as ex:
pass
# Set up template to use
template_name = (
submission["type"]["key"]
if submission
else (role if role == "public" else "questionnaire")
)
return_data.update({"template_name": template_name, "mode": "form"})
return return_data
def post( # noqa: C901
self,
request,
case_id,
submission_id=None,
organisation_id=None,
*args,
**kwargs,
):
"""
Update an existing submission
"""
return_data = {"submission_id": str(submission_id)}
contact_id = request.POST.get("contact_id")
btn_value = request.POST.get("btn-value")
review = request.POST.get("review")
name = request.POST.get("name")
due_at = request.POST.get("due_at")
response_window_yn = request.POST.get("response_window_yn")
time_window = request.POST.get("time_window")
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
file_details_by_id = deep_index_items_by(meta, "file/id")
organisation_id = organisation_id or request.POST.get("organisation_id")
send_to = request.POST.get("send_to")
submission = self._client.get_submission(case_id, submission_id)
organisation_id = submission.get("organisation", {}).get("id")
status_id = request.POST.get("submission_status_id")
if submission_id and btn_value == "discard":
delete_submission_response = self._client.delete_submission(
case_id=case_id, submission_id=submission_id
)
return HttpResponse(
json.dumps({"redirect_url": f"/case/{case_id}/submissions/"}),
content_type="application/json",
)
# check if the update is for name or notify contact
if (
submission["name"] != name
or not submission["contact"]
or submission.get("contact", {}).get("id") != contact_id
):
if name is not None and not name:
return_data.update({"errors": '{"name":"You must enter a name"}'})
if due_at and not is_date(due_at):
return_data.update({"errors": '{"due_date":"Invalid date"}'})
if not return_data.get("errors"):
self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
name=name,
contact_id=contact_id, # TODO:not used
due_at=due_at,
time_window=time_window,
description=request.POST.get("description"),
url=request.POST.get("url"),
)
# API `update_submission` returns an incomplete submission
# (no documents) so we re-fetch the submission here.
submission = self._client.get_submission(case_id, submission_id)
return_data.update({"submission": submission})
if submission.get("id"):
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={
"name": "Questionnaire",
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/submission/{submission_id}/?"
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
redirect_url += f"upload_error={e}"
logger.warning(f"File upload aborted: {e}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
if case_files := request.POST.getlist("case_files"):
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
submission_id=submission_id,
organisation_id=str(organisation_id),
data={"submission_document_type": details.get("submission_document_type")},
document_id=case_file_id,
)
submission_group_name = get(submission, "type/key")
if btn_value in ["send", "publish", "withdraw"]:
if btn_value in ["publish", "withdraw"]:
result = self._client.set_submission_state(
case_id,
submission_id,
"sent"
if (btn_value == "send" or submission_group_name == "public")
else "",
{"publish": "issue", "withdraw": "un-issue"}[btn_value],
)
result = self._client.update_submission(
case_id=case_id, submission_id=submission_id
)
return_data.update(
{
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/?alert={btn_value}" # noqa: E301, E501
}
)
if btn_value == "sufficient":
# Set the submission to sufficient
result = self._client.set_submission_state(case_id, submission_id, btn_value)
return_data.update({"alert": "Submission approved"})
submission_type = submission["type"]
type_helpers = SUBMISSION_TYPE_HELPERS.get(submission_type["key"])
if type_helpers:
return_data.update(
type_helpers(submission, self.request.user).on_approve() or {}
)
# Update submission document approvals
self.update_submission_status(request.POST, submission)
# set any deficiency-notice parameters
updated = False
deficiency_notice_params = from_json(submission.get("deficiency_notice_params"))
send_to = request.POST.getlist("send_to")
if send_to:
deficiency_notice_params["send_to"] = send_to
updated = True
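        # Form fields prefixed with "deficiency_notice_params_" are merged into the
        # submission's stored JSON; a value of "__remove" deletes the key instead.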
regex = r"^deficiency_notice_params_"
for param_key in request.POST:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = request.POST[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
if value == "__remove":
if get(deficiency_notice_params, matches[1]):
deficiency_notice_params.pop(matches[1])
else:
deficiency_notice_params[matches[1]] = value
if updated:
update_submission_response = self._client.update_submission(
case_id=case_id,
submission_id=submission_id,
deficiency_notice_params=to_json(deficiency_notice_params),
)
if btn_value == "save-exit":
return_data.update({"redirect_url": f"/case/{case_id}/submissions"})
if deficiency_notice_params:
return_data.update(
{"redirect_url": f"/case/{case_id}/submission/{submission_id}"}
)
return HttpResponse(json.dumps(return_data), content_type="application/json")
def update_submission_status(self, request_params, submission):
"""Update submission document statuses.
For each document in the submission review, examine response to
establish if it was marked sufficient/deficient. Call API to update
submission document status if it has changed.
:param (dict) request_params: request parameters
:param (dict) submission: submission
"""
        submission_docs = {doc["id"]: doc for doc in submission.get("documents") or []}
for doc_id in request_params:
if doc_id in submission_docs:
current_status = submission_docs[doc_id]["sufficient"]
new_status = request_params[doc_id] == "yes"
if current_status != new_status:
self._client.set_submission_document_state(
case_id=submission["case"]["id"],
submission_id=submission.get("id"),
document_id=doc_id,
status="sufficient" if new_status else "deficient",
                        block_from_public_file=submission_docs[doc_id].get(
                            "block_from_public_file"
                        ),
                        block_reason=submission_docs[doc_id].get("block_reason"),
)
class SubmissionCreateView(SubmissionView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, *args, **kwargs):
btn_value = request.POST.get("btn-value")
send_to = request.POST.getlist("send_to")
organisation_id = request.POST.get("organisation_id")
submission_data = {
"submission_type": int(
request.POST.get("submission_type_id", SUBMISSION_TYPE_QUESTIONNAIRE)
),
"case_id": str(case_id),
"organisation_id": str(organisation_id) if organisation_id else None,
"contact_id": request.POST.getlist("contact_id"),
"public": request.POST.get("public"),
}
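        # For bulk sends, record the target parties and their case role in the
        # submission's deficiency_notice_params so they can be notified later.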
if send_to:
submission_data["deficiency_notice_params"] = to_json(
{"send_to": send_to, "case_role": request.POST.get("role_key")}
)
result = self._client.create_submission(**submission_data)
submission = result.get("submission", {}) if result else {}
return HttpResponse(
json.dumps(
{
"submission_id": submission.get("id"),
"redirect_url": f"/case/{case_id}/submission/{submission['id']}/",
}
),
content_type="application/json",
)
class SubmissionDocumentView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, organisation_id=None, *args, **kwargs):
response = {}
document_list_json = request.POST.get("document_list")
if document_list_json:
document_list = json.loads(document_list_json)
for doc_id, doc_status in document_list.items():
logger.debug(f"update document state {doc_id}")
response = self._client.set_submission_document_state(
case_id=case_id,
submission_id=submission_id,
document_id=doc_id,
status=doc_status["status"],
block_from_public_file=doc_status["block_from_public_file"],
block_reason=doc_status["block_reason"],
)
return HttpResponse(json.dumps(response), content_type="application/json")
def delete(self, request, case_id, submission_id, document_id, *args, **kwargs):
response = self._client.detach_document(
case_id=case_id, submission_id=submission_id, document_id=document_id
)
return HttpResponse(json.dumps(response), content_type="application/json")
class SubmissionStatusView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
status_id = request.POST.get("submission_status_id")
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
deficiency_documents=request.FILES.getlist("deficiency_document"),
issue=request.POST.get("issue"),
)
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return redirect(f"/case/{case_id}/submission/{submission_id}/")
class SubmissionApprovalView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/submission.html"
def add_page_data(self):
submission_id = self.kwargs.get("submission_id")
submission = self._client.get_submission(self.case_id, submission_id)
sub_documents = self._client.get_submission_documents(self.case_id, submission_id)
documents = sub_documents.get("documents", [])
submission.update(sub_documents)
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_options = status_map.get(str(submission_type_id), {}).get("keys", [])
status_context = status_map.get(str(submission_type_id))
submission_documents = self.get_documents(submission=submission)
context = {
"template_name": submission["type"]["key"],
"mode": "approval",
"submission": submission,
"case_enums": case_enums,
"status_context": status_context,
"documents": submission_documents,
}
return context
class SubmissionDeficiencyView(CaseBaseView):
"""
Set the submission into a deficiency status and notify the party about it.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
submission_type = submission["type"]
contact = submission_contact(submission)
contact_name = contact.get("name")
organisation_name = submission.get("organisation", {}).get("name") or (
contact.get("organisation") or {}
).get("name")
notification_template = self._client.get_notification_template(
"NOTIFY_SUBMISSION_DEFICIENCY"
)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = submission["case"]["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_name": submission["case"]["name"],
"case_number": case_number,
"company_name": organisation_name,
"deadline": due_at or "No deadline assigned",
"submission_type": submission.get("type", {}).get("name"),
"login_url": public_login_url(),
"footer": footer,
}
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/status/notify/",
"form_title": f"Deficiency Notice for {organisation_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as a future example
# 'full_name': {'title': 'Name'},
},
"notification_template": notification_template,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
"parsed_template": parse_notify_template(notification_template["body"], values),
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
stage_change_if_sufficient = request.POST.get("stage_change_if_sufficient")
stage_change_if_deficient = request.POST.get("stage_change_if_deficient")
submission = self._client.get_submission(case_id, submission_id)
notify_keys = [
"full_name",
"case_name",
"case_number",
"company_name",
"deadline",
"submission_type",
"login_url",
]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if request.POST.get("contact_id"):
notify_data["contact_id"] = request.POST["contact_id"]
case_enums = self._client.get_all_case_enums()
submission_type_id = submission["type"]["id"]
status_map = case_enums["submission_status_map"]
status_context = status_map.get(str(submission_type_id))
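        # The "NO" key holds the deficient status id for this submission type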
status_id = status_context.get("NO")
error = None
if status_id:
if submission.get("status", {}).get("id") != status_id:
status_response = self._client.set_submission_status(
case_id=case_id,
submission_id=submission_id,
status_id=status_id,
stage_change_if_sufficient=stage_change_if_sufficient,
stage_change_if_deficient=stage_change_if_deficient,
)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_DEFICIENCY,
)
# reset the submission id to redirect to the new clone if available
if status_response.get("submission"):
submission_id = status_response["submission"]["id"]
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
}
),
content_type="application/json",
)
# If there's no deficiency state for this submission type, return an error
return HttpResponse(
json.dumps(
{
"error": "No deficiency status for this submission type",
}
),
content_type="application/json",
)
class SubmissionVerifyBaseView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
def get_submission_id(self, case_id=None, organisation_id=None):
submission_id = self.kwargs.get("submission_id")
if not submission_id:
# If this is called from the party page - there is no submission id
# so find from the org/case
submissions = self._client.get_submissions_public(
organisation_id=organisation_id,
case_id=case_id,
fields=json.dumps({"id": 0, "type": {"key": 0}}),
)
for submission in submissions:
if get(submission, "type/key") in ["interest", "application"]:
submission_id = submission.get("id")
break # we only want one reg-of-interest submission
return submission_id
def update_submission_json(self, case_id, submission, params):
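        """Merge deficiency_notice_params_* form fields into the submission JSON."""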
regex = r"^deficiency_notice_params_"
deficiency_notice_params = submission.get("deficiency_notice_params") or {}
updated = False
response = None
for param_key in params:
matches = re.split(regex, param_key)
if len(matches) > 1:
value = params[param_key]
updated = updated or (deficiency_notice_params.get(matches[1]) != value)
deficiency_notice_params[matches[1]] = value
if updated:
response = self._client.update_submission(
case_id=case_id,
submission_id=get(submission, "id"),
deficiency_notice_params=to_json(deficiency_notice_params),
)
return response
class SubmissionVerifyViewTasks(SubmissionVerifyBaseView):
"""
Used to verify user and orgs admission to a case
"""
template_name = "cases/verify/submission_verify_tasks.html"
submission_fields = json.dumps(
{
"Submission": {
"id": 0,
"deficiency_notice_params": 0,
"organisation": {
"id": 0,
"name": 0,
},
"contact": {
"name": 0,
"email": 0,
"user": {
"name": 0,
"email": 0,
"id": 0,
"organisation": {
"organisation": {
"id": 0,
"name": 0,
}
},
},
"organisation": {
"id": 0,
"name": 0,
},
},
"case": 0,
"type": 0,
"created_by": 0,
"organisation_case_role_outer": 0,
}
}
)
def get(self, request, case_id, organisation_id, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
if not submission_id:
return HttpResponse(
json.dumps(
{
"error": "You cannot verify this organisation "
"as they have not yet registered interest in this case.",
}
),
content_type="application/json",
)
submission = self._client.get_submission(
self.case_id, submission_id, fields=self.submission_fields
)
json_data = submission.get("deficiency_notice_params") or {}
organisation = submission.get("organisation")
caserole = self._client.get_organisation_case_role(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
org_matches = self._client.get_organisation_matches(organisation_id, with_details="none")
return render(
request,
self.template_name,
{
"submission": submission,
"organisation": organisation,
"caserole": caserole,
"org_matches": org_matches,
"page_data": {
"submission": submission,
"organisation": organisation,
},
},
)
class SubmisisonVerifyEditLoaView(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
documents = self.get_documents(submission)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
org_contacts = self._client.get_organisation_contacts(
organisation_id, case_id, exclude_indirect=True
)
return render(
request,
"cases/verify/loa.html",
{
"auth_contacts": org_contacts,
"organisation": organisation,
"documents": documents,
"LOA": caserole.get("auth_contact"),
"submission": submission,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
self.update_submission_json(case_id, submission, request.POST)
result = self._client.set_organisation_case_role_loa(
case_id,
organisation_id,
pluck(
request.POST,
["LOA_contact_id", "name", "email", "address", "org_name", "phone"],
),
)
return HttpResponse(json.dumps(result))
class SubmisisonVerifyOrganisation(SubmissionVerifyBaseView):
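    """Verify, reject or (optionally) merge the organisation behind a submission."""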
enable_merge = False
def get(self, request, case_id, organisation_id):
test_org_id = request.GET.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(case_id=case_id, organisation_id=test_org_id)
        org_matches = self._client.get_organisation_matches(
            test_org_id, with_details=self.enable_merge
        )
org_matches.sort(
key=lambda m: 1 if m.get("id") == test_org_id else 0
) # put the actual match at the end
matches = decorate_orgs(org_matches, test_org_id, exclude_case_id=case_id)
for match in matches:
if str(match.get("id")) == str(organisation.get("id")):
organisation.update(match)
return render(
request,
"cases/verify/merge_org.html" if self.enable_merge else "cases/verify/verify_org.html",
{
"case_id": self.case_id,
"organisation": organisation,
"match_list": matches,
"representing": test_org_id != organisation_id,
"json_data": submission.get("deficiency_notice_params"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
test_org_id = request.POST.get("org_id") or organisation_id
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
verify = request.POST.get("deficiency_notice_params_org_verify")
if verify == "verified":
self._client.verify_caserole(
case_id=case_id, organisation_id=get(submission, "organisation/id")
)
elif verify == "rejected":
result = self._client.reject_organisation(case_id, organisation_id)
result = self.update_submission_json(case_id, submission, request.POST)
return HttpResponse(json.dumps({"result": True}))
class SubmissionVerifyAccept(SubmissionVerifyBaseView):
def get(self, request, case_id, organisation_id):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
roles = self._client.get_case_roles(
exclude=[
CASE_ROLE_APPLICANT,
CASE_ROLE_AWAITING_APPROVAL,
CASE_ROLE_REJECTED,
CASE_ROLE_PREPARING,
]
)
return render(
request,
"cases/verify/accept.html",
{
"submission": submission,
"organisation": organisation,
"roles": roles,
"caserole": caserole,
"role_name": get(caserole, "role/name"),
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
role_key = request.POST.get("role_key")
result = {}
result = self._client.set_organisation_case_role(
case_id, organisation_id, role_key, pluck(request.POST, ["approve"])
)
return HttpResponse(json.dumps(result))
class SubmissionVerifyNotify(SubmissionVerifyBaseView):
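    """Preview the accept/reject notification and approve the submission on POST."""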
def get(self, request, case_id, organisation_id):
caserole = self._client.get_organisation_case_role(
case_id=self.case_id, organisation_id=organisation_id
)
role_name = get(caserole, "role/name")
action = (
"reject" if get(caserole, "role/key") == "rejected" else "accept"
) # Todo: get this from the right place
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
submission = self._client.get_submission(case_id, submission_id)
case = self._client.get_case(case_id)
contact = submission_contact(submission)
organisation = self._client.get_organisation(
case_id=case_id, organisation_id=organisation_id
)
notify_key = (
"NOTIFY_INTERESTED_PARTY_REQUEST_PERMITTED"
if action == "accept"
else "NOTIFY_INTERESTED_PARTY_REQUEST_DENIED"
)
try:
notification_template = self._client.get_notification_template(notify_key)
values = self._client.create_notify_context(
{
"full_name": contact.get("name"),
"case_name": case.get("name"),
"case_number": case.get("reference"),
"company_name": organisation["name"],
"login_url": public_login_url(),
"role": role_name,
}
)
parsed_template = parse_notify_template(notification_template["body"], values)
        except Exception as ex:
            logger.warning("Unable to render notification preview: %s", ex)
            parsed_template = ""
# contacts for the notification contact selector
contacts = organisation.get("contacts", [])
user = self._client.get_user(get(submission, "created_by/id"))
contacts.append(user.get("contact"))
return render(
request,
"cases/verify/notify.html",
{
"parsed_template": parsed_template,
},
)
def post(self, request, case_id, organisation_id, *args, **kwargs):
submission_id = self.get_submission_id(case_id=case_id, organisation_id=organisation_id)
self._client.approve_submission(submission_id=submission_id)
return HttpResponse(json.dumps({"result": True}))
class SubmissionNotifyView(CaseBaseView):
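    """Preview and send invite notifications for a submission, to one or many parties."""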
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
def get(self, request, case_id, submission_id, *args, **kwargs):
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
json_data = from_json(submission.get("deficiency_notice_params"))
contact = None
contact_name = None
send_to = json_data.get("send_to")
if not send_to:
contact = submission_contact(submission)
contact_name = contact and contact.get("name")
submission_type = submission["type"]
notify_sys_param_name = submission_type.get("notify_template") or "NOTIFY_QUESTIONNAIRE"
notification_template = self._client.get_notification_template(notify_sys_param_name)
template_name = f"cases/submissions/{submission_type['key']}/notify.html"
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact_name,
"case_number": case_number,
"case_name": case["name"],
"investigation_type": case["type"]["name"],
"country": case["sources"][0]["country"] if case["sources"] else "N/A",
"company_name": submission["organisation"].get("name"),
"deadline": due_at or "No deadline assigned",
"login_url": public_login_url(),
"description": submission.get("description"),
"submission_request_name": submission.get("name"),
"notice_type": submission.get("type", {}).get("name"),
"notice_url": submission["url"],
"notice_of_initiation_url": submission["url"],
"footer": footer,
}
template_list = []
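        # When sending to multiple parties, build a notification preview per recipient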
if send_to:
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
if contact:
local_values = {
"full_name": contact.get("name"),
"email": contact.get("email"),
"company_name": participant.get("name"),
}
values.update(local_values)
template_list.append(
{
"values": local_values,
"preview": parse_notify_template(
notification_template["body"], values
),
}
)
else:
            # A list of previews is expected by the template, so append a single entry
            template_list.append(
                {
                    "values": values,
                    "preview": parse_notify_template(notification_template["body"], values),
                }
            )
context = {
"form_action": f"/case/{case_id}/submission/{submission_id}/notify/",
"form_title": f"Invite {contact_name}",
"cancel_redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"editable_fields": { # leaving one as an example
# 'full_name': {'title': '<NAME>', 'disabled': True},
},
"notification_template": notification_template,
"templates": template_list,
"submission": submission,
"case_id": str(case_id),
"contact": contact,
"values": values,
}
return render(request, template_name, context)
def post(self, request, case_id, submission_id, *args, **kwargs):
submission = self._client.get_submission(case_id, submission_id)
notify_keys = ["full_name", "product", "submission_request_name", "description"]
notify_data = {key: request.POST.get(key) for key in notify_keys if key in request.POST}
due_at = get_submission_deadline(submission, settings.FRIENDLY_DATE_FORMAT)
notify_data["deadline"] = due_at or "No deadline assigned"
if request.POST.get("multiple"):
return self.post_multiple(request, case_id, submission, context=notify_data)
self._client.submission_notify(
case_id=case_id,
organisation_id=submission["organisation"]["id"],
submission_id=submission["id"],
values=notify_data,
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"redirect_url": f"/case/{case_id}/submission/{submission_id}/",
"error": None,
}
),
content_type="application/json",
)
def post_multiple(self, request, case_id, submission, context=None):
"""
        Called to handle a notify post to multiple recipients.
        We must clone the submission for each target and send the notification.
"""
case = self._client.get_case(case_id)
json_data = from_json(submission.get("deficiency_notice_params"))
send_to = json_data.get("send_to")
# We need to know which is the last party in the list
# so we can modify the existing sub rather than clone it.
party_counter = len(send_to)
for case_role, participant_list in (
self._client.get_case_participants(case_id) or {}
).items():
for participant in participant_list.get("parties"):
if participant.get("id") in send_to:
contact = participant.get("primary_contact")
party_counter -= 1
if contact: # don't try to send if there is no contact
data = {
"case_id": case_id,
"submission_id": submission["id"],
"organisation_id": participant.get("id"),
"contact_id": contact.get("id"),
}
if party_counter:
cloned_submission = self._client.clone_submission(**data)
else:
cloned_submission = self._client.update_submission(**data).get(
"submission"
)
context["full_name"] = contact.get("full_name")
self._client.submission_notify(
case_id=case_id,
organisation_id=participant.get("id"),
submission_id=cloned_submission["id"],
values=context or {},
notice_type=SUBMISSION_NOTICE_TYPE_INVITE,
)
return HttpResponse(
json.dumps(
{
"alert": f'Sent {len(send_to)} request{"" if len(send_to) < 2 else "s"}',
"redirect_url": f'/case/{case_id}/submission/{submission.get("id")}/'
if len(send_to) < 2
else f"/case/{case_id}/submissions/",
"error": None,
}
),
content_type="application/json",
)
class OrganisationDetailsView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
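    """Return contact, submission or detail fragments for an organisation in a case."""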
def get(self, request, case_id, organisation_id, *args, **kwargs):
client = self.client(request.user)
item = request.GET.get("item")
template = request.GET.get("template")
result = {}
case_submissions = client.get_submissions(case_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
org_id = str(organisation_id)
third_party_contacts = []
if item == "contacts":
contacts = client.get_organisation_contacts(org_id, case_id)
for contact in contacts:
case = get(contact, "cases/" + str(case_id)) or {}
contact["primary"] = case.get("primary")
all_case_invites = client.get_contact_case_invitations(case_id)
if org_id in idx_submissions:
org_submission_idx = deep_index_items_by(idx_submissions[org_id], "id")
third_party_contacts = self.get_third_party_contacts(
org_id, org_submission_idx, all_case_invites
)
# `contacts` may also contain on-boarded third-party contacts that
# have a user, so we need to prune these out.
third_party_contact_ids = set([i["id"] for i in third_party_contacts])
contacts = [
i
for i in itertools.filterfalse(
lambda x: x["id"] in third_party_contact_ids, contacts
)
]
result = {
"contacts": contacts,
"pre_release_invitations": client.get_system_boolean("PRE_RELEASE_INVITATIONS"),
"invites": deep_index_items_by(all_case_invites, "contact/id"),
"third_party_contacts": third_party_contacts,
"case_role_id": request.GET.get("caserole"),
}
elif item == "submissions":
result["submissions"] = idx_submissions.get(org_id, [])
elif item == "details":
result["party"] = client.get_organisation(organisation_id=organisation_id)
if template:
deep_update(
result,
{
"case_id": case_id,
"case": {"id": case_id},
"organisation": {"id": org_id},
},
)
return render(request, template, result)
return HttpResponse(json.dumps({"result": result}), content_type="application/json")
@staticmethod
def get_third_party_contacts(organisation_id, submissions, invites):
"""Get third party contacts.
Given an organisation, its submissions and all invitations for a case,
build a list of third party invite contacts. We include the invite submissions
yet to be approved but flag the contact with `submission_sufficient`
:param (str) organisation_id: Organisation ID.
:param (dict) submissions: The organisation's submissions keyed on id.
:param (list) invites: All invites for a case.
:returns (list): Contacts arising from 3rd party invite submissions.
"""
third_party_contacts = []
for invite in invites:
if invite["submission"]:
submission_id = invite["submission"]["id"]
full_submission = submissions.get(submission_id)
if not full_submission:
# Submission not at this org
continue
if full_submission[0]["type"]["id"] != SUBMISSION_TYPE_THIRD_PARTY:
# Not a third party submission
continue
inviting_organisation = full_submission[0]["organisation"]["id"]
if inviting_organisation == organisation_id:
submission_sufficient = full_submission[0]["status"]["sufficient"]
invite["contact"]["is_third_party"] = True
invite["contact"]["submission_id"] = submission_id
invite["contact"]["submission_sufficient"] = submission_sufficient
invite["contact"]["invited"] = invite["email_sent"]
third_party_contacts.append(invite["contact"])
return third_party_contacts
class CaseOrganisationView(CaseBaseView):
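    """Overview of an organisation's involvement in a case."""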
groups_required = SECURITY_GROUPS_TRA
template_name = "organisations/organisation_in_case.html"
def add_page_data(self):
organisation = self._client.get_organisation(organisation_id=self.organisation_id)
caserole = None
case_submissions = self._client.get_submissions_public(self.case_id, self.organisation_id)
idx_submissions = deep_index_items_by(case_submissions, "organisation/id")
submissions = idx_submissions.get(str(self.organisation_id), [])
roi_app_submission = next(
filter(lambda x: get(x, "type/key") in ["interest", "application"], submissions),
None,
)
cases = self._client.organisation_cases(self.organisation_id)
user_cases = self._client.organisation_user_cases(self.organisation_id)
cases_idx = deep_index_items_by_exists(cases, "archived_at")
for case in cases:
if get(case, "id") == str(self.case_id):
caserole = case
invites = self._client.get_contact_case_invitations(
self.case_id,
)
return {
"case": self.case,
"invites": invites,
"party": organisation,
"organisation": organisation,
"cases_idx": cases_idx,
"submissions": submissions,
"user_cases": user_cases,
"roi_app_submission": roi_app_submission,
"caserole": caserole,
}
class OrganisationMatchView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/organisation_dedupe.html"
def add_page_data(self):
organisation = self._client.get_organisation(
organisation_id=self.organisation_id, case_id=self.case_id
)
org_matches = self._client.get_organisation_matches(self.organisation_id)
org_matches = decorate_orgs(org_matches, self.organisation_id)
return {
"case": self.case,
"organisation": organisation,
"org_matches": org_matches,
}
class FilesView(CaseBaseView):
"""
View all case documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/files.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "respondent")
sort = self.request.GET.get("sort")
direction = self.request.GET.get("dir", "asc")
submission_id = self.request.GET.get("submission_id")
collapse_identical = self.request.GET.get("collapse_identical", "false") in (
"true",
"1",
"Y",
)
tabs = {
"tabList": [
{"label": "Respondent", "value": "respondent"},
{"label": "Investigator", "value": "investigator"},
],
"value": tab,
}
case_enums = self._client.get_all_case_enums(direction=DIRECTION_TRA_TO_PUBLIC)
case_files = self._client.get_case_documents(
case_id=self.case_id,
source=tab,
submission_id=submission_id,
order_by=sort,
order_dir=direction,
)
submission = None
if submission_id:
submission = self._client.get_submission(self.case_id, submission_id)
return {
"tabs": tabs,
"tab": tab,
"case_enums": case_enums,
"file_list": case_files,
"sort": sort,
"dir": direction,
"collapse_identical": collapse_identical,
"submission": submission,
"pre_document_search": self._client.get_system_boolean("PRE_DOCUMENT_SEARCH"),
}
def post(self, request, case_id, *args, **kwargs):
action = request.POST.get("action")
name = request.POST.get("name")
confirm = request.POST.get("confirm") == "true"
tab = request.POST.get("tab", "respondent")
document_ids = request.POST.getlist("document_id")
if document_ids:
if action == "issue" and confirm:
submission_type_id = request.POST.get("submission_type_id")
response = self._client.issue_documents_to_case(
case_id=case_id,
name=name,
document_ids=document_ids,
submission_type_id=submission_type_id,
)
elif action == "confidential":
response = self._client.toggle_documents_confidentiality(
case_id=case_id, document_ids=document_ids
)
return redirect(f"/case/{case_id}/files/?tab={tab}")
class FileBrowseView(View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, *args, **kwargs):
_client = self.client(request.user)
case_files = _client.get_case_documents(case_id=case_id, source="investigator")
# Add application bundle documents
case_files.extend(_client.get_system_documents())
return HttpResponse(json.dumps(case_files), content_type="application/json")
class WorkflowEditor(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
permission_required = ("workflow_editor",)
template_name = "cases/workflow_editor.html"
def add_page_data(self):
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
}
def post(self, request, case_id, *args, **kwargs):
workflow = request.POST.get("workflow")
self._client.save_case_workflow(case_id, workflow)
return HttpResponse(json.dumps({"saved": 1}), content_type="application/json")
class ActionsView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/actions.html"
def add_page_data(self):
permissions = {}
for permission_key in self.request.user.permissions:
permissions[permission_key] = 1
case_workflow = self._client.get_case_workflow(self.case_id)
return {
"workflow": case_workflow.get("workflow"),
"state": case_workflow.get("state"),
"permissions": permissions,
}
class StateView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def post(self, request, case_id, state_key=None, *args, **kwargs):
value = request.POST.get(state_key)
state_map = self._client.set_case_workflow_state(case_id, [state_key], {state_key: value})
return HttpResponse(
json.dumps({"workflow_state": state_map}), content_type="application/json"
)
class ActionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/action.html"
def get_state_from_children(self, item):
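        """Roll up a parent node's state from its children.

        Returns "in-progress" once any child task has a value, and "complete" when
        the relevant children are complete (any one child if the node is flagged
        required, otherwise all of the children that have values).
        """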
any_mode = item.get("required") # this is a bodge and the logic is reverse
state = None
        completed = not any_mode
for child in item.get("children", []):
value = self.get_value(child.get("key"))
if value:
state = state or "in-progress"
if any_mode:
if value == "complete":
completed = True
else:
if value != "complete":
completed = False
return "complete" if state and completed else state
state_map = {}
def get_value(self, key):
return (self.state_map.get(key) or [""])[0]
def set_value(self, key, value):
arr = self.state_map.get(key) or [""]
arr[0] = value
self.state_map[key] = arr
def post(self, request, case_id, action_id=None, *args, **kwargs): # noqa: C901
values = {}
node_keys = []
action_key = request.POST.get("action-key")
btn_action = request.POST.get("btn_action")
complete = True
error = False
state = ""
wf = self._client.get_case_workflow(case_id)
workflow = wf.get("workflow")
self.state_map = wf.get("state")
index = key_by(workflow["root"], "key", "children")
action = index.get(action_key.lower(), {})
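        # Derive the action's completion state from its child task responses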
for task in action.get("children", []):
response_type = task.get("response_type", {}).get("name", "")
if response_type.lower() not in (
"notesection",
"timer",
"label",
): # notes don't count as in-progress
task_key = task.get("key")
old_val = self.get_value(task_key)
new_val = request.POST.get(task_key)
if old_val != new_val:
values[task_key] = new_val
node_keys.append(task_key)
if not new_val:
if task.get("required"):
complete = False
else:
if new_val != "na":
state = "in-progress"
if complete:
state = "complete"
if (self.get_value(action_key) or "") != state:
values[action_key] = state
node_keys.append(action_key)
self.set_value(action_key, state)
# ripple the state down the tree
loc_action = action
while loc_action.get("parent_key"):
loc_action = index.get(loc_action.get("parent_key"))
loc_key = loc_action.get("key")
loc_state = self.get_state_from_children(loc_action)
if (self.get_value(loc_key) or "") != loc_state:
values[loc_key] = loc_state
node_keys.append(loc_key)
self.set_value(loc_key, loc_state)
if any(values):
self.state_map = self._client.set_case_workflow_state(case_id, node_keys, values)
if error:
action_id = action.get("id")
return redirect(f"/case/{case_id}/action/{action_id}")
else:
return HttpResponse(
json.dumps({"workflow_state": self.state_map}),
content_type="application/json",
)
class NavSectionView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/nav_section.html"
def post(self, request, case_id, *args, **kwargs):
content_id = kwargs.get("nav_section_id")
response = self._client.set_case_content(
case_id, content_id=content_id, content=request.POST
)
content_id = response.get("id")
return redirect(f"/case/{case_id}/section/{content_id}")
def add_page_data(self):
return {}
class AuditView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/audit.html"
def add_page_data(self):
milestone = self.request.GET.get("milestone", "true") == "true"
limit = int(self.request.GET.get("limit", self.limit))
audit_data = self._client.get_audit(
case_id=self.case_id, start=self.start, limit=limit, milestone=milestone
)
url = reverse("case_audit", kwargs={"case_id": self.case_id})
prev_url = next_url = None
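        # Build pagination links; a next page is offered only when a full page was returned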
prev_page = max(0, self.start - limit)
milestone_flag = f"milestone={milestone}".lower()
if len(audit_data) >= limit:
next_page = max(0, self.start + limit)
next_url = f"{url}?{milestone_flag}&start={next_page}"
if next_page > limit:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
self.start = next_page
else:
self.start = prev_page + len(audit_data)
if prev_page:
prev_url = f"{url}?{milestone_flag}&start={prev_page}"
return {
"milestone": milestone,
"events": audit_data,
"next_url": next_url,
"prev_url": prev_url,
}
class CaseAuditExport(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(self, request, case_id, *args, **kwargs):
file = self.client(request.user).get_audit_export(case_id)
response = HttpResponse(file, content_type="application/vnd.ms-excel")
response["Content-Disposition"] = "attachment; filename=trade_remedies_export.xlsx"
return response
class NoteView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
def get(
self,
request,
case_id,
content_type=None,
model_id=None,
model_key=None,
*args,
**kwargs,
):
notes = self.client(request.user).get_notes(
case_id, content_type, model_id, model_key=model_key
)
return HttpResponse(json.dumps(notes), content_type="application/json")
def post(self, request, case_id, note_id=None, *args, **kwargs): # noqa: C901
entity_id = request.POST.get("model_id")
model_key = request.POST.get("model_key")
content_type = request.POST.get("content_type")
client = self.client(request.user)
content = request.POST.get("content")
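        # Create a new note, or update an existing one (content, document deletions
        # and confidentiality changes) before handling any newly uploaded files.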
if note_id is None:
result = client.create_note(
case_id=case_id,
content_type=content_type,
model_id=entity_id,
model_key=model_key,
note_text=content,
)
note_id = result.get("id")
else:
delete_list = request.POST.getlist("delete_list")
if delete_list:
for document_id in delete_list:
deleted = client.delete_note_document(case_id, note_id, document_id)
conf_list = request.POST.getlist("set_confidential")
if conf_list:
for document_id in conf_list:
result = client.update_note_document(
case_id, note_id, document_id, "confidential"
)
nonconf_list = request.POST.getlist("set_non-confidential")
if nonconf_list:
for document_id in nonconf_list:
result = client.update_note_document(
case_id, note_id, document_id, "non-confidential"
)
result = client.update_note(case_id, note_id, content)
file_meta = request.POST.getlist("file-meta")
files = request.FILES.getlist("files")
for idx, _file in enumerate(files):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
except VirusFoundInFileException:
# Display a fake doc in the widget until
# a poll for success clears it
msg = "File upload aborted: malware detected in file!"
document = {
"name": msg,
"safe": False,
}
result["documents"].append(document)
else:
document = {
"document_name": _file.original_name,
"name": _file.name,
"size": _file.file_size,
}
result = client.add_note_document(
case_id=case_id,
note_id=note_id,
document=json.dumps(document),
confidentiality=file_meta[idx],
)
redirect_url = request.POST.get("redirect")
if redirect_url:
return internal_redirect(redirect_url, "/")
else:
# Return note json to be rendered at the client
return HttpResponse(json.dumps(result), content_type="application/json")
class PublicFileView(CaseBaseView):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/public_file.html"
def add_page_data(self):
tab = self.request.GET.get("tab", "all")
tabs = {
"tabList": [
{"label": "All", "value": "all"},
{"label": "Notices", "value": "tra"},
{"label": "Business", "value": "business"},
{"label": "Withdrawn", "value": "withdrawn"},
],
"value": tab,
}
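        # Partition submissions by origin (TRA or business) and publication state
        # so each tab can show the appropriate slice of the public file.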
case_submissions = self._client.get_submissions(self.case_id, show_global=True)
by_tra = deep_index_items_by_exists(case_submissions, "is_tra")
tra_by_published = deep_index_items_by_exists(by_tra.get("true"), "issued_at")
by_published = deep_index_items_by_exists(case_submissions, "issued_at")
if tab == "all":
submissions = by_published.get("true")
if tab == "tra":
submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("true")
if tab == "business":
submissions = deep_index_items_by(by_published.get("true"), "is_tra").get("")
if tab == "withdrawn":
submissions = deep_index_items_by(by_published.get("false"), "is_tra").get("true")
return {
"tabs": tabs,
"submissions": submissions,
"public_base_url": settings.PUBLIC_BASE_URL,
}
class CaseFormView(LoginRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_form.html"
def get_context(self, client, case_id=None):
if case_id:
case = client.get_case(case_id)
else:
case = {
"new": True,
"id": "",
"organisation": {"id": ""},
"type": {"id": "1"},
}
enums = client.get_all_case_enums()
gov_bodies = client.get_organisations(gov_body=True)
        country_dict = dict(countries)
context = {
"body_classes": "full-width",
"case": case,
"organisations": gov_bodies,
"country_dict": country_dict,
"organisation_name": case.get("organisation", {}).get("name") or "Secretary of State",
"contact_country": "GB",
"submission": {"type": {"id": 4}},
"tra_team_names": [
settings.ORGANISATION_NAME,
settings.ORGANISATION_INITIALISM + " Team 1",
settings.ORGANISATION_INITIALISM + " Team 2",
settings.ORGANISATION_INITIALISM + " Team 3",
],
}
context.update(enums)
# context['countries'] = countries[0]
return context
def get(self, request, case_id=None, *args, **kwargs):
client = self.client(request.user)
context = self.get_context(client, case_id)
return render(request, self.template_name, context)
def post(self, request, case_id=None, *args, **kwargs):
post_data = {
"id": case_id,
}
non_required_fields = [
"submission_status_id",
"case_name",
"organisation_name",
"organisation_id",
# 'organisation_address', 'organisation_post_code', 'companies_house_id',
# 'contact_name', 'contact_email', 'contact_phone', 'contact_address',
# 'contact_country',
]
error_lookup = {
"case_type_id": "Case type",
"product_name": "Product name",
"submission_type_id": "Submission type",
"sector_id": "Product sector",
"product_description": "Product description",
"export_country_code": "Export country",
"hs_code": "Product code",
}
required_fields = list(error_lookup.keys())
list_fields = ["export_country_code", "hs_code"]
case_fields = required_fields + non_required_fields
errors = {}
client = self.client(request.user)
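        # Some case types apply to all regions, so an export country is not required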
if request.POST.get("case_type_id") in ALL_REGION_ALLOWED_TYPE_IDS:
required_fields.remove("export_country_code")
for field in case_fields:
post_data[field] = (
compact_list(request.POST.getlist(field))
if field in list_fields
else request.POST.get(field)
)
for field in required_fields:
if field in error_lookup and not post_data.get(field):
fieldname = error_lookup.get(field)
errors[field] = f"{fieldname} is required"
        for code in post_data.get("hs_code") or []:
if len(str(code)) not in (6, 7, 8, 9, 10): # temporary validation
errors["hs_code"] = "HS codes should be between 6 and 10 digits"
if not errors:
post_data["ex_oficio"] = True
result = client.submit_full_case_data(post_data)
return redirect("/cases/")
else:
context = self.get_context(client, case_id)
context["errors"] = errors
context.update(post_data)
return render(request, self.template_name, context)
class InviteContactView(CaseBaseView):
"""
Invite a contact to the case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/invite.html"
raise_exception = True
def get_organisation_admin_user_contact(self, organisation_id):
contact = None
organisation = self._client.get_organisation(organisation_id)
admin_user = [
user
for user in organisation.get("users", [])
if user.get("security_group") == SECURITY_GROUP_ORGANISATION_OWNER
]
if admin_user:
user = self._client.get_user(admin_user[0]["user_id"])
contact = user.get("contact")
contact["organisation"] = organisation
return contact
def add_page_data(self):
contact = None
organisation = None
if self.kwargs.get("organisation_id"):
organisation = self._client.get_organisation(self.kwargs.get("organisation_id"))
if self.kwargs.get("contact_id"):
contact = self._client.get_contact(self.kwargs["contact_id"])
form_url = f"/case/{self.case['id']}/invite/{self.kwargs['contact_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if organisation:
form_url = f"{form_url}for/{organisation['id']}/"
elif self.kwargs.get("organisation_id"):
contact = self.get_organisation_admin_user_contact(self.kwargs["organisation_id"])
form_url = f"/case/{self.case['id']}/invite/organisation/{self.kwargs['organisation_id']}/as/{self.kwargs['case_role_id']}/" # noqa: E501
if not organisation:
organisation = contact["organisation"]
notification_template = self._client.get_notification_template(
"NOTIFY_INFORM_INTERESTED_PARTIES"
)
deep_update(
self.case,
self._client.get_case(
self.case_id,
fields=json.dumps(
{
"Case": {
"latest_notice_of_initiation_url": 0,
"registration_deadline": 0,
"product": 0,
}
}
),
),
)
case_number = self.case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": contact["name"],
"product": get(self.case, "product/name"),
"case_number": case_number,
"case_name": self.case["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"company_name": organisation["name"],
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"guidance_url": self._client.get_system_parameters("LINK_HELP_BOX_GUIDANCE")["value"],
"email": email,
"login_url": f"{settings.PUBLIC_BASE_URL}",
}
context = {
"form_url": form_url,
"editable_fields": ["full_name", "product"],
"case": self.case,
"contact": contact,
"case_role_id": self.kwargs["case_role_id"],
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
"organisation": organisation,
"organisation_id": self.kwargs.get("organisation_id"),
}
return context
def post(
self,
request,
contact_id=None,
case_id=None,
case_role_id=None,
organisation_id=None,
*args,
**kwargs,
):
notify_keys = ["full_name", "product"]
notify_data = {key: request.POST.get(key) for key in notify_keys}
if organisation_id and contact_id:
notify_data["organisation_id"] = organisation_id
elif organisation_id and not contact_id:
contact = self.get_organisation_admin_user_contact(organisation_id)
contact_id = contact["id"]
response = self._client.invite_contact(case_id, contact_id, case_role_id, notify_data)
return HttpResponse(json.dumps(response), content_type="application/json")
class IssueFilesFormView(CaseBaseView):
"""
Issue files to case
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "widgets/issue_files_form.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
return {
"case_enums": case_enums,
"case": self.case,
}
class CaseBundlesView(CaseBaseView):
"""
Assign documents to the case directly (not via submissions)
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundles.html"
def add_page_data(self):
list_mode = self.request.GET.get("tab", "live")
tabs = {
"value": list_mode,
"tabList": [
{"label": "Live", "value": "live", "sr_text": "Show live bundles"},
{"label": "Draft", "value": "draft", "sr_text": "Show draft bundles"},
],
}
case_bundles = self._client.get_case_submission_bundles(
case_id=self.case["id"],
status=list_mode.upper(),
)
return {
"bundles": case_bundles,
"error": self.kwargs.get("error"),
"tabs": tabs,
"status": list_mode,
}
@method_decorator(csrf_exempt, name="dispatch")
class CaseBundleView(CaseBaseView):
"""
View and edit a specific bundle full of documents
"""
groups_required = SECURITY_GROUPS_TRA
template_name = "cases/case_bundle_builder.html"
def add_page_data(self):
case_enums = self._client.get_all_case_enums()
bundle = None
bundle_id = self.kwargs.get("bundle_id")
virus = self.request.GET.get("virus")
upload_error = self.request.GET.get("upload_error")
return_data = {
"virus": virus,
"upload_error": upload_error,
}
if bundle_id:
bundle = self._client.get_case_submission_bundles(
case_id=self.case["id"], bundle_id=self.kwargs.get("bundle_id")
)
return_data.update(
{
"bundle": bundle,
"submission_types": case_enums["submission_types"],
}
)
return return_data
def post(self, request, case_id, bundle_id=None, *args, **kwargs): # noqa: C901
name = request.POST.get("name")
data = pluck(request.POST, ["name", "description"])
btn_value = request.POST.get("btn-value")
if btn_value == "send":
data["status"] = "LIVE"
# Upload documents
if bundle_id:
meta_raw = request.POST.getlist("meta")
meta = [json.loads(block) for block in meta_raw]
file_details = deep_index_items_by(meta, "name")
for _file in request.FILES.getlist("files"):
try:
_file.readline() # Important, will raise VirusFoundInFileException if infected
original_file_name = _file.original_name
details = file_details.get(original_file_name.lower())[0]
confidential = details.get("confidential")
document_type = details.get("submission_document_type")
document = self._client.upload_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"confidential": confidential,
"submission_document_type": document_type,
"document_name": original_file_name,
"file_name": _file.name,
"file_size": _file.file_size,
},
)
except (VirusFoundInFileException, APIException) as e:
redirect_url = f"/case/{case_id}/bundle/{bundle_id}/?"
msg = "File upload aborted: "
if isinstance(e, VirusFoundInFileException):
redirect_url += "virus=true"
else:
msg += f"{e}"
redirect_url += f"upload_error={msg}"
logger.warning(f"{msg}")
return HttpResponse(
json.dumps({"redirect_url": redirect_url}),
content_type="application/json",
)
# Attach existing documents to this bundle
if case_files := request.POST.getlist("case_files"):
file_details_by_id = deep_index_items_by(meta, "file/id")
for case_file_id in case_files:
details = (file_details_by_id.get(case_file_id) or [])[0]
document = self._client.attach_document(
case_id=str(case_id),
data={
"bundle_id": bundle_id,
"submission_document_type": details.get("submission_document_type"),
},
document_id=case_file_id,
)
else:
data = pluck(request.POST, ["name", "submission_type_id"])
data["case_id"] = case_id
# Anything else to send?
response = None
if data:
response = self._client.set_case_submission_bundle(bundle_id=bundle_id, data=data)
ret = {"result": "ok", "status": data.get("status")}
response_id = response and response.get("id")
if response_id:
ret["redirect_url"] = f"/case/{case_id}/bundle/{response_id}/"
return HttpResponse(json.dumps(ret), content_type="application/json")
def delete(self, request, case_id, document_id, *args, **kwargs):
response = self._client.delete_case_submission_bundle(case_id, document_id)
return redirect(f"/case/{case_id}/documents/")
class SubmissionInviteNotifyView(CaseBaseView):
"""
Notify an invitee about an invitation to case.
"""
groups_required = SECURITY_GROUPS_TRA
raise_exception = True
template_name = "cases/invite.html"
def add_page_data(self):
"""Add page data.
CaseBaseView override.
"""
case_id = self.kwargs.get("case_id")
submission_id = self.kwargs.get("submission_id")
contact_id = self.kwargs.get("contact_id")
case = self._client.get_case(case_id)
submission = self._client.get_submission(case_id, submission_id)
inviting_organisation = submission["organisation"]
invited_contact = self._client.get_contact(contact_id)
inviting_contact = submission.get("contact") or {}
notification_template = self._client.get_notification_template("NOTIFY_THIRD_PARTY_INVITE")
form_url = f"/case/{case_id}/submission/{submission_id}/invite/{contact_id}/notify/"
# Attempt to infer the invite URL
login_url = f"{settings.PUBLIC_BASE_URL}"
invites = self._client.get_invitations(case_id, submission_id)
for i in invites:
if i["contact"]["id"] == str(contact_id):
invite = self._client.get_invite_details(i["id"])
code = invite.get("code")
login_url = f"{login_url}/invitation/{code}/{case_id}/"
break
case_number = case["reference"]
email = notify_contact_email(self._client, case_number)
footer = notify_footer(self._client, email)
values = {
"full_name": invited_contact["name"],
"case_name": case["name"],
"invited_by_organisation": inviting_organisation["name"],
"invited_by_name": inviting_contact["name"],
"notice_of_initiation_url": self.case.get("latest_notice_of_initiation_url"),
"login_url": login_url,
"deadline": parse_api_datetime(
get(self.case, "registration_deadline"), settings.FRIENDLY_DATE_FORMAT
),
"footer": footer,
"email": email,
}
context = {
"form_url": form_url,
"notification_template": notification_template,
"submission": submission,
"case": case,
"contact": invited_contact,
"parsed_template": parse_notify_template(notification_template["body"], values),
"values": values,
}
return context
def post(self, request, case_id, submission_id, contact_id, *args, **kwargs):
notify_data = {
"case_id": case_id,
"submission_id": submission_id,
"contact_id": contact_id,
}
response = self._client.action_third_party_invite(
case_id=case_id,
submission_id=submission_id,
contact_id=contact_id,
params=notify_data,
)
return HttpResponse(json.dumps(response), content_type="application/json")
class UpdateParentView(CaseBaseView):
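    """Link a case to a parent case and record the linked-case confirmation flag."""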
template_name = "cases/update_parent.html"
linked_case_confirm_key = "LINKED_CASE_CONFIRM"
cases_fields = json.dumps(
{
"Case": {
"name": 0,
"id": 0,
"reference": 0,
}
}
)
case_fields = json.dumps(
{"Case": {"parent": {"id": 0}, "workflow_state": {linked_case_confirm_key: 0}}}
)
def add_page_data(self):
cases = self._client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
case = self._client.get_case(self.case_id, fields=self.case_fields)
return {"case": case, "cases": cases}
def post(self, request, case_id, *args, **kwargs):
link_confirm = request.POST.get("link_confirm")
parent_id = request.POST.get("parent_id")
_client = self.client(request.user)
case = _client.get_case(case_id, fields=self.case_fields)
if get(case, "parent/id") != parent_id:
_client.set_case_data(case_id, {"parent_id": parent_id})
if (get(case, f"workflow_state/{self.linked_case_confirm_key}") or [0])[0] != link_confirm:
_client.set_case_workflow_state(
case_id, values={f"{self.linked_case_confirm_key}": link_confirm}
)
return HttpResponse(
json.dumps({"parent_id": parent_id, "link_confirm": link_confirm}),
content_type="application/json",
)
class NoticesView(
LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin
):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notices.html"
def get(self, request):
client = self.client(request.user)
notices = client.get_notices()
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notices": notices,
},
)
class NoticeView(LoginRequiredMixin, GroupRequiredMixin, TemplateView, TradeRemediesAPIClientMixin):
groups_required = SECURITY_GROUPS_TRA_ADMINS
template_name = "cases/notice.html"
cases_fields = json.dumps({"Case": {"name": 0, "id": 0, "reference": 0}})
def get(self, request, notice_id=None):
client = self.client(request.user)
enums = client.get_all_case_enums()
case_types = enums.get("case_types", [])
cases = client.get_cases(archived=True, all_cases=False, fields=self.cases_fields)
notice = {}
if notice_id:
notice = client.get_notice(notice_id)
return render(
request,
self.template_name,
{
"body_classes": "full-width",
"notice": notice,
"cases": cases,
"case_types": case_types,
},
)
def post(self, request, notice_id=None):
client = self.client(request.user)
notice = client.create_update_notice(
name=request.POST.get("name"),
reference=request.POST.get("reference"),
terminated_at=request.POST.get("terminated_at"),
published_at=request.POST.get("published_at"),
case_type=request.POST.get("case_type_id"),
review_case=request.POST.get("review_case_id"),
notice_id=notice_id,
)
return redirect("/cases/notices/")
class DocumentSearchView(CaseBaseView):
template_name = "documents/documents.html"
def add_page_data(self):
query = self.request.GET.get("query")
conf_status = self.request.GET.get("confidential_status")
user_type = self.request.GET.get("user_type")
response = self._client.search_documents(
case_id=self.case_id,
query=query,
confidential_status=conf_status,
user_type=user_type,
)
return {
"body_classes": "full-width",
"documents": response.pop("results", []),
"query": query,
"conf_status": conf_status,
**response,
}
class CaseTeamJsonView(LoginRequiredMixin, View, TradeRemediesAPIClientMixin):
def get(self, request, case_id, **kwargs):
team = self.client(request.user).get_case_team_members(case_id)
return HttpResponse(json.dumps(team), content_type="application/json")
| en | 0.849241 | # If this submission has an immediate ancestor, get the docs from that to mark status # we have a list of the submissions that make up a family - id, version and documents. Override this method to customize the way permissions are checked. # note: accepted and invited are mutually exclusive # for awaiting and rejected - only return that specific role # customer draft should not be seen by investigators # Get submissions that have just been created by customer # or are still in draft after creation # Remove any that are back with the customer following deficiency # Exclude these drafts from our list # draft applications are included to allow a heads up view # to the caseworker before it's submitted # TODO: Temp handling of application vs ex_officio ones View and modify submissions # indicates the submission has just been created # indicates the submission has just been created # Get all draft submissions of this type # draft_submissions_this_role = deep_index_items_by(draft_submissions, # 'organisation_case_role/key').get('' if role == 'public' else role) # add errors from the url # Set up template to use # noqa: C901 Update an existing submission # check if the update is for name or notify contact # TODO:not used # API `update_submission` returns an incomplete submission # (no documents) so we re-fetch the submission here. # Important, will raise VirusFoundInFileException if infected # noqa: E301, E501 # Set the submission to sufficient # Update submission document approvals # set any deficiency-notice parameters Update submission document statuses. For each document in the submission review, examine response to establish if it was marked sufficient/deficient. Call API to update submission document status if it has changed. :param (dict) request_params: request parameters :param (dict) submission: submission Set the submission into a deficiency status and notify the party about it. # leaving one as a future example # 'full_name': {'title': 'Name'}, # reset the submission id to redirect to the new clone if available # If there's no deficiency state for this submission type, return an error # If this is called from the party page - there is no submission id # so find from the org/case # we only want one reg-of-interest submission Used to verify user and orgs admission to a case # put the actual match at the end # Todo: get this from the right place # contacts for the notification contact selector # leaving one as an example # 'full_name': {'title': '<NAME>', 'disabled': True}, Called to handle a notify post to multiple recipents. We must clone the submission for each target and send the notification # We need to know which is the last party in the list # so we can modify the existing sub rather than clone it. # don't try to send if there is no contact # `contacts` may also contain on-boarded third-party contacts that # have a user, so we need to prune these out. Get third party contacts. Given an organisation, its submissions and all invitations for a case, build a list of third party invite contacts. We include the invite submissions yet to be approved but flag the contact with `submission_sufficient` :param (str) organisation_id: Organisation ID. :param (dict) submissions: The organisation's submissions keyed on id. :param (list) invites: All invites for a case. :returns (list): Contacts arising from 3rd party invite submissions. 
# Submission not at this org # Not a third party submission View all case documents # Add application bundle documents # this is a bodge and the logic is reverse # noqa: C901 # notes don't count as in-progress # ripple the state down the tree # noqa: C901 # Important, will raise VirusFoundInFileException if infected # Display a fake doc in the widget until # a poll for success clears it # Return note json to be rendered at the client # context['countries'] = countries[0] # 'organisation_address', 'organisation_post_code', 'companies_house_id', # 'contact_name', 'contact_email', 'contact_phone', 'contact_address', # 'contact_country', # temporary validation Invite a contact to the case # noqa: E501 # noqa: E501 Issue files to case Assign documents to the case directly (not via submissions) View and edit a specific bundle full of documents # noqa: C901 # Upload documents # Important, will raise VirusFoundInFileException if infected # Attach existing documents to this bundle # Anything else to send? Notify an invitee about an invitation to case. Add page data. CaseBaseView override. # Attempt to infer the invite URL | 1.433441 | 1 |
mmdet/models/emod_ops/ar_module.py | zhenglab/EMOD | 2 | 10101 | <reponame>zhenglab/EMOD
import torch
from torch import nn
from mmcv.cnn.utils import constant_init, kaiming_init
class SimAttention(nn.Module):
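    # Predicts a spatial attention map with a 1x1 conv and uses it to pool the feature map into a (B, C, 1, 1) context vector.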
def __init__(self, in_channels):
super(SimAttention, self).__init__()
self.conv_attn = nn.Conv2d(in_channels, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
kaiming_init(self.conv_attn, mode='fan_in')
self.conv_attn.inited = True
def forward(self, x):
b, c, h, w = x.size()
x_in = x
x_in = x_in.view(b, c, h * w)
x_in = x_in.unsqueeze(1)
x_attn = self.conv_attn(x)
x_attn = x_attn.view(b, 1, h * w)
x_attn = self.softmax(x_attn)
x_attn = x_attn.unsqueeze(-1)
x_out = torch.matmul(x_in, x_attn)
x_out = x_out.view(b, c, 1, 1)
return x_out
class SimRelation(nn.Module):
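    # Two-layer MLP (channels -> channels * ratio -> channels) over the pooled context; a sigmoid gate is added when used for multiplicative fusion.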
def __init__(self, in_channels, ratio, act=False):
super(SimRelation, self).__init__()
self.planes = int(in_channels * ratio)
self.act = act
self.mlp = nn.Sequential(
nn.Linear(in_features=in_channels, out_features=self.planes),
nn.LayerNorm([self.planes]),
nn.ReLU(inplace=True),
nn.Linear(in_features=self.planes, out_features=in_channels))
constant_init(self.mlp[-1], val=0)
if self.act:
self.activate = nn.Sigmoid()
def forward(self, x):
x_in = x
x_in = x_in.view(x.size(0), -1)
x_out = self.mlp(x_in)
if self.act:
x_out = self.activate(x_out)
x_out = x_out.view(x.size(0), x.size(1), 1, 1)
return x_out
class ARModule(nn.Module):
"""AR Module for EMOD."""
def __init__(self,
in_channels,
ratio,
fusion_type='add'):
super(ARModule, self).__init__()
assert fusion_type in ['add', 'mul'], 'fusion_type should be add or mul.'
self.fusion_type = fusion_type
# attention
self.sim_attention = SimAttention(in_channels)
# relation
if self.fusion_type == 'add':
self.sim_relation = SimRelation(in_channels, ratio, act=False)
else:
self.sim_relation = SimRelation(in_channels, ratio, act=True)
def forward(self, x):
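        # Pool with attention, transform with the relation MLP, then fuse the result back into the input additively or multiplicatively.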
x_attn = self.sim_attention(x)
out = x
if self.fusion_type == 'add':
x_rel = self.sim_relation(x_attn)
out = out + x_rel
else:
x_rel = self.sim_relation(x_attn)
out = out * x_rel
return out
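# Illustrative usage only (not part of the original file); the shapes and hyper-parameters below are assumptions:
#   module = ARModule(in_channels=64, ratio=0.25, fusion_type='add')
#   out = module(torch.randn(2, 64, 32, 32))  # output keeps the input shape (2, 64, 32, 32)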
| en | 0.707952 | AR Module for EMOD. # attention # relation | 2.777401 | 3 |
prayer_times_v2.py | danish09/request_api | 0 | 10102 | import json
import requests
from datetime import datetime
from playsound import playsound
tday = datetime.today().strftime('%Y-%m-%d')
right_now = datetime.today().strftime('%I-%M-%p')
response = requests.get("https://www.londonprayertimes.com/api/times/?format=json&key=0239f686-4423-408e-9a0c-7968a403d197&year=&month=")
data = response.json()
for key, value in data.items():
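    # Times come back as zero-padded "HH:MM" strings, so plain string comparison selects the
    # 03:30-06:00 window, which the author treats as Asr time here.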
if value >= '03:30' and value < '06:00':
print('It is asr time')
        #playsound('/home/danish/Downloads/adan.mp3') | en | 0.465685 | #playsound('/home/danish/Downloads/adan.mp3') | 3.177953 | 3 |
pokemon/pokemon_tests/test_serializers.py | pessman/pokemon_utils | 1 | 10103 | <reponame>pessman/pokemon_utils<gh_stars>1-10
import pytest
from django.test import TestCase
from rest_framework import serializers as drf_serializers
from pokemon import models, serializers
@pytest.mark.django_db
class StatsSerializer(TestCase):
"""
Test Module for StatsSerializer
"""
def setUp(self):
models.Nature.objects.create(
name="Adamant",
positive="attack",
negative="special_attack"
)
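        # Adamant raises Attack and lowers Special Attack; the expected stats below rely on this nature.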
self.valid_base_stats = {
"hit_points": 108,
"attack": 130,
"defense": 95,
"special_attack": 80,
"special_defense": 85,
"speed": 102
}
self.valid_ivs = {
"hit_points": 24,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.invalid_ivs_high = {
"hit_points": 33,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.invalid_ivs_low = {
"hit_points": -1,
"attack": 12,
"defense": 30,
"special_attack": 16,
"special_defense": 23,
"speed": 5
}
self.valid_evs = {
"hit_points": 74,
"attack": 190,
"defense": 91,
"special_attack": 48,
"special_defense": 84,
"speed": 23
}
self.invalid_evs_high_individual = {
"hit_points": 0,
"attack": 300,
"defense": 0,
"special_attack": 0,
"special_defense": 0,
"speed": 0
}
self.invalid_evs_high_total = {
"hit_points": 74,
"attack": 190,
"defense": 91,
"special_attack": 48,
"special_defense": 84,
"speed": 100
}
self.invalid_evs_low_individual = {
"hit_points": 0,
"attack": -10,
"defense": 0,
"special_attack": 0,
"special_defense": 0,
"speed": 0
}
self.valid_level = 78
self.invalid_level_high = 110
self.invalid_level_low = 0
self.valid_nature = "adamant"
self.invalid_nature = "thisisntanature"
def test_stats_serializer(self):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
stats = serializer.get_stats()
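        # Expected values follow the standard stat formulas for level 78 with the IVs/EVs above and an Adamant nature.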
self.assertEqual(stats["hit_points"], 289)
self.assertEqual(stats["attack"], 278)
self.assertEqual(stats["defense"], 193)
self.assertEqual(stats["special_attack"], 135)
self.assertEqual(stats["special_defense"], 171)
self.assertEqual(stats["speed"], 171)
def test_invalid_nature(self):
        with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.invalid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_level_high(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.invalid_level_high,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_level_low(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.valid_ivs,
"level": self.invalid_level_low,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_ivs_low(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.invalid_ivs_low,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_ivs_high(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.valid_evs,
"ivs": self.invalid_ivs_high,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_high_total(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_high_total,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_high_individual(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_high_individual,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
def test_invalid_evs_low_individual(self):
with pytest.raises(drf_serializers.ValidationError):
serializer = serializers.StatsSerializer(data={
"base_stats": self.valid_base_stats,
"evs": self.invalid_evs_low_individual,
"ivs": self.valid_ivs,
"level": self.valid_level,
"nature": self.valid_nature
})
serializer.is_valid(raise_exception=True)
| en | 0.206931 | Test Module for StatsSerializer | 2.416547 | 2 |
sqlpuzzle/_common/argsparser.py | Dundee/python-sqlpuzzle | 8 | 10104 | <reponame>Dundee/python-sqlpuzzle<filename>sqlpuzzle/_common/argsparser.py
from sqlpuzzle.exceptions import InvalidArgumentException
__all__ = ('parse_args',)
# pylint: disable=dangerous-default-value,keyword-arg-before-vararg
def parse_args(options={}, *args, **kwds):
"""
Parser of arguments.
dict options {
        int min_items: Minimum number of items required to fold into one tuple.
            (default: 1)
        int max_items: Number of items in one tuple. The last `max_items-min_items`
            items are set to None by default. (default: 1)
        bool allow_dict: Flag allowing a dictionary as the first (and only)
            argument, or a dictionary passed as **kwds. (default: False)
bool allow_list: Flag allowing list as first (and only one) argument.
(default: False)
}
Examples:
calling with min_items=1, max_items=2, allow_dict=False:
arg1, arg2 => ((arg1, None), (arg2, None))
            (arg1a, arg1b), arg2 => ((arg1a, arg1b), (arg2, None))
arg1=val1 => FAIL
{key1: val1} => FAIL
calling with min_items=2, max_items=3, allow_dict=True:
arg1, arg2 => ((arg1, arg2, None),)
arg1, arg2, arg3 => ((arg1, arg2, arg3),)
(arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),)
arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None))
{key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None))
(arg1a, arg1b), arg2a, arg2b => FAIL
"""
parser_options = ParserOptions(options)
parser_input = ParserInput(args, kwds)
parser = Parser(parser_options, parser_input)
parser.parse()
return parser.output_data
# pylint: disable=too-few-public-methods
class ParserOptions:
def __init__(self, options):
self.min_items = options.get('min_items', 1)
self.max_items = options.get('max_items', 1)
self.allow_dict = options.get('allow_dict', False)
self.allow_list = options.get('allow_list', False)
assert self.min_items <= self.max_items
assert not self.allow_dict or (self.allow_dict and self.max_items > 1)
class ParserInput:
def __init__(self, args, kwds):
self.args = args
self.kwds = kwds
@property
def list(self):
if self.is_list:
return self.args[0]
return []
@property
def dictionary_or_kwds(self):
if self.is_dictionary:
return self.args[0]
if self.is_kwds:
return self.kwds
return {}
@property
def is_list(self):
return len(self.args) == 1 and isinstance(self.args[0], (list, tuple))
@property
def is_dictionary(self):
return len(self.args) == 1 and isinstance(self.args[0], dict)
@property
def is_kwds(self):
return self.kwds != {}
@property
def is_args(self):
return len(self.args) > 0 and not isinstance(self.args[0], (list, tuple))
def count_of_args_is_in_interval(self, min_, max_):
return min_ <= len(self.args) <= max_
class Parser:
def __init__(self, options, input_data):
self.options = options
self.input_data = input_data
self.output_data = []
def parse(self):
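        # Three input shapes are handled below: bare positional args folded into a single
        # tuple, a list/tuple of items, and a dict/**kwds mapping (the last needs allow_dict).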
if (
self.options.min_items > 1
and self.input_data.is_args
and self.input_data.count_of_args_is_in_interval(self.options.min_items, self.options.max_items)
):
self._parse_item(self.input_data.args)
elif self.options.allow_list and self.input_data.is_list:
self._parse_list(self.input_data.list)
elif not self.input_data.is_dictionary and self.input_data.args:
self._parse_list(self.input_data.args)
if self.input_data.is_dictionary or self.input_data.is_kwds:
if not self.options.allow_dict:
raise InvalidArgumentException('Dictionary or kwds is disabled.')
self._parse_dictionary(self.input_data.dictionary_or_kwds)
def _parse_dictionary(self, dictionary):
for item in sorted(dictionary.items()):
self._parse_item(item)
def _parse_list(self, list_):
for item in list_:
if isinstance(item, (list, tuple)):
self._parse_item(item)
elif self.options.min_items == 1:
self._parse_item((item,))
else:
raise InvalidArgumentException('Too few arguments.')
def _parse_item(self, item):
batch = self._create_batch(item)
self.output_data.append(batch)
def _create_batch(self, values):
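        # Validate the tuple length, then pad it with None up to max_items.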
if len(values) > self.options.max_items:
raise InvalidArgumentException('Too many arguments.')
return self._append_nones(tuple(values))
def _append_nones(self, tuple_with_values):
count_of_nones = self.options.max_items - len(tuple_with_values)
tuple_with_nones = (None,) * count_of_nones
return tuple_with_values + tuple_with_nones
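# Illustrative usage (not part of the original module), mirroring the docstring above:
#   parse_args({'min_items': 1, 'max_items': 2}, 'a', ('b', 'c'))
#   -> [('a', None), ('b', 'c')]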
| en | 0.60909 | # pylint: disable=dangerous-default-value,keyword-arg-before-vararg Parser of arguments. dict options { int min_items: Min of required items to fold one tuple. (default: 1) int max_items: Count of items in one tuple. Last `max_items-min_items` items is by default set to None. (default: 1) bool allow_dict: Flag allowing dictionary as first (and only one) argument or dictinary as **kwds. (default: False) bool allow_list: Flag allowing list as first (and only one) argument. (default: False) } Examples: calling with min_items=1, max_items=2, allow_dict=False: arg1, arg2 => ((arg1, None), (arg2, None)) (arg1a, arg1b), arg2 => ((arg1a, arg1b), arg2, None)) arg1=val1 => FAIL {key1: val1} => FAIL calling with min_items=2, max_items=3, allow_dict=True: arg1, arg2 => ((arg1, arg2, None),) arg1, arg2, arg3 => ((arg1, arg2, arg3),) (arg1a, arg1b, arg1c) => ((arg1a, arg1b, arg1c),) arg1=val1, arg2=val2 => ((arg1, val1, None), (arg2, val2, None)) {key1: val1, key2: val2} => ((key1, val1, None), (key2, val2, None)) (arg1a, arg1b), arg2a, arg2b => FAIL # pylint: disable=too-few-public-methods | 2.825986 | 3 |
reviewboard/webapi/tests/test_review_screenshot_comment.py | ParikhKadam/reviewboard | 921 | 10105 | <filename>reviewboard/webapi/tests/test_review_screenshot_comment.py<gh_stars>100-1000
from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
screenshot_comment_item_mimetype,
screenshot_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_screenshot_comment_item_url,
get_review_screenshot_comment_list_url)
class BaseTestCase(BaseWebAPITestCase):
fixtures = ['test_users']
def _create_screenshot_review_with_issue(self, publish=False,
comment_text=None):
"""Sets up a review for a screenshot that includes an open issue.
If `publish` is True, the review is published. The review request is
always published.
Returns the response from posting the comment, the review object, and
the review request object.
"""
if not comment_text:
comment_text = 'Test screenshot comment with an opened issue'
review_request = self.create_review_request(publish=True,
submitter=self.user)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user,
publish=publish)
comment = self.create_screenshot_comment(review, screenshot,
comment_text,
issue_opened=True)
return comment, review, review_request
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource list APIs."""
sample_api_url = 'review-requests/<id>/reviews/<id>/screenshot-comments/'
resource = resources.review_screenshot_comment
def setup_review_request_child_test(self, review_request):
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
return (get_review_screenshot_comment_list_url(review),
screenshot_comment_list_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
if populate_items:
items = [self.create_screenshot_comment(review, screenshot)]
else:
items = []
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_list_mimetype,
items)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_item_mimetype,
{
'screenshot_id': screenshot.pk,
'text': 'Test comment',
'x': 2,
'y': 2,
'w': 10,
'h': 10,
},
[review, screenshot])
def check_post_result(self, user, rsp, review, screenshot):
comment = \
ScreenshotComment.objects.get(pk=rsp['screenshot_comment']['id'])
self.compare_item(rsp['screenshot_comment'], comment)
def test_post_with_issue(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/screenshot-comments/ API
with an issue
"""
comment_text = "Test screenshot comment with an opened issue"
comment, review, review_request = \
self._create_screenshot_review_with_issue(
publish=False, comment_text=comment_text)
rsp = self.api_get(
get_review_screenshot_comment_list_url(review),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('screenshot_comments', rsp)
self.assertEqual(len(rsp['screenshot_comments']), 1)
self.assertEqual(rsp['screenshot_comments'][0]['text'], comment_text)
self.assertTrue(rsp['screenshot_comments'][0]['issue_opened'])
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource item APIs."""
fixtures = ['test_users']
sample_api_url = \
'review-requests/<id>/reviews/<id>/screenshot-comments/<id>/'
resource = resources.review_screenshot_comment
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
def setup_review_request_child_test(self, review_request):
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk),
screenshot_comment_item_mimetype)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
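        # A successful DELETE must leave the comment detached from the review.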
self.assertNotIn(comment, review.screenshot_comments.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self.api_delete(get_review_screenshot_comment_item_url(review, 123),
expected_status=404)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
comment)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
{'text': 'Test comment'},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
comment = ScreenshotComment.objects.get(pk=comment.pk)
self.assertEqual(item_rsp['text_type'], 'plain')
self.assertEqual(item_rsp['text'], 'Test comment')
self.compare_item(item_rsp, comment)
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, removing issue_opened
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_opened': False},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['screenshot_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API
with an issue, before review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
# The issue_status should not be able to be changed while the review is
# unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
def test_put_issue_status_by_issue_creator(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for issue creator
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request so that it's not owned by
# self.user
review_request.submitter = User.objects.get(username='doc')
review_request.save()
# The review/comment (and therefore issue) is still owned by self.user,
# so we should be able to change the issue status.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'dropped')
def test_put_issue_status_by_uninvolved_user(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for an uninvolved user
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request and review so that they're not
# owned by self.user.
new_owner = User.objects.get(username='doc')
review_request.submitter = new_owner
review_request.save()
review.user = new_owner
review.save()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_put_deleted_screenshot_comment_issue_status(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>
API with an issue and a deleted screenshot
"""
comment_text = "Test screenshot comment with an opened issue"
x, y, w, h = (2, 2, 10, 10)
review_request = self.create_review_request(publish=True,
submitter=self.user,
target_people=[self.user])
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot,
comment_text, x, y, w, h,
issue_opened=True)
# First, let's ensure that the user that has created the comment
# cannot alter the issue_status while the review is unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
# Next, let's publish the review, and try altering the issue_status.
# This should be allowed, since the review request was made by the
# current user.
review.public = True
review.save()
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
# Delete the screenshot.
self._delete_screenshot(review_request, screenshot)
review_request.publish(review_request.submitter)
# Try altering the issue_status. This should be allowed.
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'open'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
| en | 0.915727 | Sets up a review for a screenshot that includes an open issue. If `publish` is True, the review is published. The review request is always published. Returns the response from posting the comment, the review object, and the review request object. Testing the ReviewScreenshotCommentResource list APIs. # # HTTP GET tests # # # HTTP POST tests # Testing the POST review-requests/<id>/reviews/<id>/screenshot-comments/ API with an issue Testing the ReviewScreenshotCommentResource item APIs. # # HTTP DELETE tests # Testing the DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API with Does Not Exist error # # HTTP GET tests # # # HTTP PUT tests # Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API with an issue, removing issue_opened Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API with an issue, before review is published # The issue_status should not be able to be changed while the review is # unpublished. # The issue_status should still be "open" Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API with an issue, after review is published Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API permissions for issue creator # Change the owner of the review request so that it's not owned by # self.user # The review/comment (and therefore issue) is still owned by self.user, # so we should be able to change the issue status. Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API permissions for an uninvolved user # Change the owner of the review request and review so that they're not # owned by self.user. Testing the PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API with an issue and a deleted screenshot # First, let's ensure that the user that has created the comment # cannot alter the issue_status while the review is unpublished. # The issue_status should still be "open" # Next, let's publish the review, and try altering the issue_status. # This should be allowed, since the review request was made by the # current user. # Delete the screenshot. # Try altering the issue_status. This should be allowed. | 2.133939 | 2 |
qbapi/app.py | dimddev/qb | 0 | 10106 | <filename>qbapi/app.py
"""
Command line tool
"""
import asyncio
from qbapi.request import create_request
from qbapi.services.clients import Producer, Consumer
async def spider(user_data: tuple) -> None:
"""spider
:param user_data:
:type user_data: tuple
:rtype: None
"""
producer_queue = asyncio.Queue()
consumer_queue = asyncio.Queue()
max_workers = 0
for data in user_data:
await producer_queue.put(await create_request(data))
max_workers += 1
producer_tasks = []
consumer_tasks = []
for _ in range(max_workers):
producer_tasks.append(
asyncio.create_task(
Producer().process(producer_queue, consumer_queue)
)
)
consumer_tasks.append(
asyncio.create_task(
Consumer().process(consumer_queue)
)
)
await producer_queue.join()
await consumer_queue.join()
for i, task in enumerate(producer_tasks):
task.cancel()
consumer_tasks[i].cancel()
await asyncio.gather(*producer_tasks, return_exceptions=True)
await asyncio.gather(*consumer_tasks, return_exceptions=True)
| <filename>qbapi/app.py
"""
Command line tool
"""
import asyncio
from qbapi.request import create_request
from qbapi.services.clients import Producer, Consumer
async def spider(user_data: tuple) -> None:
"""spider
:param user_data:
:type user_data: tuple
:rtype: None
"""
producer_queue = asyncio.Queue()
consumer_queue = asyncio.Queue()
max_workers = 0
for data in user_data:
await producer_queue.put(await create_request(data))
max_workers += 1
producer_tasks = []
consumer_tasks = []
for _ in range(max_workers):
producer_tasks.append(
asyncio.create_task(
Producer().process(producer_queue, consumer_queue)
)
)
consumer_tasks.append(
asyncio.create_task(
Consumer().process(consumer_queue)
)
)
await producer_queue.join()
await consumer_queue.join()
for i, task in enumerate(producer_tasks):
task.cancel()
consumer_tasks[i].cancel()
await asyncio.gather(*producer_tasks, return_exceptions=True)
await asyncio.gather(*consumer_tasks, return_exceptions=True)
| en | 0.510515 | Command line tool spider :param user_data: :type user_data: tuple :rtype: None | 2.793021 | 3 |
test/test_literal.py | hrnciar/rdflib | 0 | 10107 | import unittest
import datetime
import rdflib # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
from rdflib.namespace import XSD
def uformat(s):
return s.replace("u'", "'")
class TestLiteral(unittest.TestCase):
def setUp(self):
pass
def test_repr_apostrophe(self):
a = rdflib.Literal("'")
b = eval(repr(a))
self.assertEqual(a, b)
def test_repr_quote(self):
a = rdflib.Literal('"')
b = eval(repr(a))
self.assertEqual(a, b)
def test_backslash(self):
d = r"""
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foo="http://example.org/foo#">
<rdf:Description>
<foo:bar>a\b</foo:bar>
</rdf:Description>
</rdf:RDF>
"""
g = rdflib.Graph()
g.parse(data=d, format="xml")
a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
def test_literal_from_bool(self):
l = rdflib.Literal(True)
self.assertEqual(l.datatype, rdflib.XSD["boolean"])
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
self.assertRaises(
TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
)
def testFromOtherLiteral(self):
l = Literal(1)
l2 = Literal(l)
self.assertTrue(isinstance(l.value, int))
self.assertTrue(isinstance(l2.value, int))
# change datatype
l = Literal("1")
l2 = Literal(l, datatype=rdflib.XSD.integer)
self.assertTrue(isinstance(l2.value, int))
def testDatatypeGetsAutoURIRefConversion(self):
# drewp disapproves of this behavior, but it should be
# represented in the tests
x = Literal("foo", datatype="http://example.com/")
self.assertTrue(isinstance(x.datatype, URIRef))
x = Literal("foo", datatype=Literal("pennies"))
self.assertEqual(x.datatype, URIRef("pennies"))
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
self.assertEqual(
repr(Literal("foo", lang="en")),
uformat("rdflib.term.Literal(u'foo', lang='en')"),
)
def testOmitsMissingLang(self):
self.assertEqual(
repr(Literal("foo", datatype=URIRef("http://example.com/"))),
uformat(
"rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
),
)
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
x = MyLiteral("foo")
self.assertEqual(repr(x), uformat("MyLiteral('foo')"))
class TestDoubleOutput(unittest.TestCase):
def testNoDanglingPoint(self):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/237"""
vv = Literal("0.88", datatype=_XSD_DOUBLE)
out = vv._literal_n3(use_plain=True)
self.assertTrue(out in ["8.8e-01", "0.88"], out)
class TestParseBoolean(unittest.TestCase):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
def testTrueBoolean(self):
test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
test_value = Literal("1", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
def testFalseBoolean(self):
test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
test_value = Literal("0", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
def testNonFalseBoolean(self):
test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
test_value = Literal("10", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
class TestBindings(unittest.TestCase):
def testBinding(self):
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "<<<%s>>>" % self.v
dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
la = Literal(va, normalize=True)
self.assertEqual(la.value, va)
self.assertEqual(la.datatype, dtA)
la2 = Literal("<<<2>>>", datatype=dtA)
self.assertTrue(isinstance(la2.value, a))
self.assertEqual(la2.value.v, va.v)
class b:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "B%s" % self.v
dtB = rdflib.URIRef("urn:dt:b")
bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
self.assertEqual(lb.value, vb)
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
datatype = rdflib.URIRef("urn:dt:mystring")
# Datatype-specific rule
bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
normal_l = Literal(s)
self.assertEqual(str(normal_l), s)
self.assertEqual(normal_l.toPython(), s)
self.assertEqual(normal_l.datatype, None)
specific_l = Literal("--%s--" % s, datatype=datatype)
self.assertEqual(str(specific_l), lexify(s))
self.assertEqual(specific_l.toPython(), s)
self.assertEqual(specific_l.datatype, datatype)
class TestXsdLiterals(unittest.TestCase):
def test_make_literals(self):
"""
Tests literal construction.
"""
inputs = [
# these literals do not get conerted to python types
("ABCD", XSD.integer, None),
("ABCD", XSD.gYear, None),
("-10000", XSD.gYear, None),
("-1921-00", XSD.gYearMonth, None),
("1921-00", XSD.gMonthDay, None),
("1921-13", XSD.gMonthDay, None),
("-1921-00", XSD.gMonthDay, None),
("10", XSD.gDay, None),
("-1", XSD.gDay, None),
("0000", XSD.gYear, None),
("0000-00-00", XSD.date, None),
("NOT A VALID HEX STRING", XSD.hexBinary, None),
("NOT A VALID BASE64 STRING", XSD.base64Binary, None),
# these literals get converted to python types
("1921-05-01", XSD.date, datetime.date),
("1921-05-01T00:00:00", XSD.dateTime, datetime.datetime),
("1921-05", XSD.gYearMonth, datetime.date),
("0001-01", XSD.gYearMonth, datetime.date),
("0001-12", XSD.gYearMonth, datetime.date),
("2002-01", XSD.gYearMonth, datetime.date),
("9999-01", XSD.gYearMonth, datetime.date),
("9999-12", XSD.gYearMonth, datetime.date),
("1921", XSD.gYear, datetime.date),
("2000", XSD.gYear, datetime.date),
("0001", XSD.gYear, datetime.date),
("9999", XSD.gYear, datetime.date),
("1982", XSD.gYear, datetime.date),
("2002", XSD.gYear, datetime.date),
("1921-05-01T00:00:00+00:30", XSD.dateTime, datetime.datetime),
("1921-05-01T00:00:00-00:30", XSD.dateTime, datetime.datetime),
("abcdef0123", XSD.hexBinary, bytes),
("", XSD.hexBinary, bytes),
("UkRGTGli", XSD.base64Binary, bytes),
("", XSD.base64Binary, bytes),
]
self.check_make_literals(inputs)
@unittest.expectedFailure
def test_make_literals_ki(self):
"""
Known issues with literal construction.
"""
inputs = [
("1921-01Z", XSD.gYearMonth, datetime.date),
("1921Z", XSD.gYear, datetime.date),
("1921-00", XSD.gYearMonth, datetime.date),
("1921-05-01Z", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01T00:00:00Z", XSD.dateTime, datetime.datetime),
]
self.check_make_literals(inputs)
def check_make_literals(self, inputs):
for literal_pair in inputs:
(lexical, type, value_cls) = literal_pair
with self.subTest(f"tesing {literal_pair}"):
literal = Literal(lexical, datatype=type)
if value_cls is not None:
self.assertIsInstance(literal.value, value_cls)
else:
self.assertIsNone(literal.value)
self.assertEqual(lexical, f"{literal}")
if __name__ == "__main__":
unittest.main()
| import unittest
import datetime
import rdflib # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
from rdflib.namespace import XSD
def uformat(s):
return s.replace("u'", "'")
class TestLiteral(unittest.TestCase):
def setUp(self):
pass
def test_repr_apostrophe(self):
a = rdflib.Literal("'")
b = eval(repr(a))
self.assertEqual(a, b)
def test_repr_quote(self):
a = rdflib.Literal('"')
b = eval(repr(a))
self.assertEqual(a, b)
def test_backslash(self):
d = r"""
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foo="http://example.org/foo#">
<rdf:Description>
<foo:bar>a\b</foo:bar>
</rdf:Description>
</rdf:RDF>
"""
g = rdflib.Graph()
g.parse(data=d, format="xml")
a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
def test_literal_from_bool(self):
l = rdflib.Literal(True)
self.assertEqual(l.datatype, rdflib.XSD["boolean"])
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
self.assertRaises(
TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
)
def testFromOtherLiteral(self):
l = Literal(1)
l2 = Literal(l)
self.assertTrue(isinstance(l.value, int))
self.assertTrue(isinstance(l2.value, int))
# change datatype
l = Literal("1")
l2 = Literal(l, datatype=rdflib.XSD.integer)
self.assertTrue(isinstance(l2.value, int))
def testDatatypeGetsAutoURIRefConversion(self):
# drewp disapproves of this behavior, but it should be
# represented in the tests
x = Literal("foo", datatype="http://example.com/")
self.assertTrue(isinstance(x.datatype, URIRef))
x = Literal("foo", datatype=Literal("pennies"))
self.assertEqual(x.datatype, URIRef("pennies"))
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
self.assertEqual(
repr(Literal("foo", lang="en")),
uformat("rdflib.term.Literal(u'foo', lang='en')"),
)
def testOmitsMissingLang(self):
self.assertEqual(
repr(Literal("foo", datatype=URIRef("http://example.com/"))),
uformat(
"rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
),
)
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
x = MyLiteral("foo")
self.assertEqual(repr(x), uformat("MyLiteral('foo')"))
class TestDoubleOutput(unittest.TestCase):
def testNoDanglingPoint(self):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/237"""
vv = Literal("0.88", datatype=_XSD_DOUBLE)
out = vv._literal_n3(use_plain=True)
self.assertTrue(out in ["8.8e-01", "0.88"], out)
class TestParseBoolean(unittest.TestCase):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
def testTrueBoolean(self):
test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
test_value = Literal("1", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
def testFalseBoolean(self):
test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
test_value = Literal("0", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
def testNonFalseBoolean(self):
test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
test_value = Literal("10", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
class TestBindings(unittest.TestCase):
def testBinding(self):
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "<<<%s>>>" % self.v
dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
la = Literal(va, normalize=True)
self.assertEqual(la.value, va)
self.assertEqual(la.datatype, dtA)
la2 = Literal("<<<2>>>", datatype=dtA)
self.assertTrue(isinstance(la2.value, a))
self.assertEqual(la2.value.v, va.v)
class b:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
return "B%s" % self.v
dtB = rdflib.URIRef("urn:dt:b")
bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
self.assertEqual(lb.value, vb)
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
datatype = rdflib.URIRef("urn:dt:mystring")
# Datatype-specific rule
bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
normal_l = Literal(s)
self.assertEqual(str(normal_l), s)
self.assertEqual(normal_l.toPython(), s)
self.assertEqual(normal_l.datatype, None)
specific_l = Literal("--%s--" % s, datatype=datatype)
self.assertEqual(str(specific_l), lexify(s))
self.assertEqual(specific_l.toPython(), s)
self.assertEqual(specific_l.datatype, datatype)
class TestXsdLiterals(unittest.TestCase):
def test_make_literals(self):
"""
Tests literal construction.
"""
inputs = [
# these literals do not get conerted to python types
("ABCD", XSD.integer, None),
("ABCD", XSD.gYear, None),
("-10000", XSD.gYear, None),
("-1921-00", XSD.gYearMonth, None),
("1921-00", XSD.gMonthDay, None),
("1921-13", XSD.gMonthDay, None),
("-1921-00", XSD.gMonthDay, None),
("10", XSD.gDay, None),
("-1", XSD.gDay, None),
("0000", XSD.gYear, None),
("0000-00-00", XSD.date, None),
("NOT A VALID HEX STRING", XSD.hexBinary, None),
("NOT A VALID BASE64 STRING", XSD.base64Binary, None),
# these literals get converted to python types
("1921-05-01", XSD.date, datetime.date),
("1921-05-01T00:00:00", XSD.dateTime, datetime.datetime),
("1921-05", XSD.gYearMonth, datetime.date),
("0001-01", XSD.gYearMonth, datetime.date),
("0001-12", XSD.gYearMonth, datetime.date),
("2002-01", XSD.gYearMonth, datetime.date),
("9999-01", XSD.gYearMonth, datetime.date),
("9999-12", XSD.gYearMonth, datetime.date),
("1921", XSD.gYear, datetime.date),
("2000", XSD.gYear, datetime.date),
("0001", XSD.gYear, datetime.date),
("9999", XSD.gYear, datetime.date),
("1982", XSD.gYear, datetime.date),
("2002", XSD.gYear, datetime.date),
("1921-05-01T00:00:00+00:30", XSD.dateTime, datetime.datetime),
("1921-05-01T00:00:00-00:30", XSD.dateTime, datetime.datetime),
("abcdef0123", XSD.hexBinary, bytes),
("", XSD.hexBinary, bytes),
("UkRGTGli", XSD.base64Binary, bytes),
("", XSD.base64Binary, bytes),
]
self.check_make_literals(inputs)
@unittest.expectedFailure
def test_make_literals_ki(self):
"""
Known issues with literal construction.
"""
inputs = [
("1921-01Z", XSD.gYearMonth, datetime.date),
("1921Z", XSD.gYear, datetime.date),
("1921-00", XSD.gYearMonth, datetime.date),
("1921-05-01Z", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:30", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01+00:00", XSD.date, datetime.date),
("1921-05-01T00:00:00Z", XSD.dateTime, datetime.datetime),
]
self.check_make_literals(inputs)
def check_make_literals(self, inputs):
for literal_pair in inputs:
(lexical, type, value_cls) = literal_pair
with self.subTest(f"tesing {literal_pair}"):
literal = Literal(lexical, datatype=type)
if value_cls is not None:
self.assertIsInstance(literal.value, value_cls)
else:
self.assertIsNone(literal.value)
self.assertEqual(lexical, f"{literal}")
if __name__ == "__main__":
unittest.main()
| en | 0.646166 | # needed for eval(repr(...)) below <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:foo="http://example.org/foo#"> <rdf:Description> <foo:bar>a\b</foo:bar> </rdf:Description> </rdf:RDF> # change datatype # drewp disapproves of this behavior, but it should be # represented in the tests confirms the fix for https://github.com/RDFLib/rdflib/issues/237 confirms the fix for https://github.com/RDFLib/rdflib/issues/913 # Datatype-specific rule Tests literal construction. # these literals do not get conerted to python types # these literals get converted to python types Known issues with literal construction. | 2.821245 | 3 |
src/messages/text/ruling.py | rkulyn/telegram-dutch-taxbot | 2 | 10108 | import telegram
from emoji import emojize
from .base import TextMessageBase
class RulingHelpTextMessage(TextMessageBase):
"""
Ruling help message.
Taken from:
https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements
"""
def get_text(self):
message = emojize(
"<b>30% RULING INCOME REQUIREMENTS</b>\n\n"
"<a href=\"https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements\">Go to Source</a>\n\n"
"<b>2019 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37743 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28690 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2018 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37296 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28350 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2017 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37000 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28125 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2016 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>36889 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28041 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"Type /start to start calculation. \n"
"Type /help get more details. \n\n",
use_aliases=True
)
return message
def get_options(self):
"""
Disable link preview.
Add HTML tags render support.
"""
return {
"disable_web_page_preview": True,
"parse_mode": telegram.ParseMode.HTML,
}
| import telegram
from emoji import emojize
from .base import TextMessageBase
class RulingHelpTextMessage(TextMessageBase):
"""
Ruling help message.
Taken from:
https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements
"""
def get_text(self):
message = emojize(
"<b>30% RULING INCOME REQUIREMENTS</b>\n\n"
"<a href=\"https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements\">Go to Source</a>\n\n"
"<b>2019 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37743 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28690 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2018 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37296 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28350 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2017 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>37000 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28125 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"<b>2016 salary requirements</b>\n\n"
":small_blue_diamond: Minimum taxable salary at 70%: <b>36889 EUR</b> \n"
":small_blue_diamond: Employee with master's degree: <b>28041 EUR</b> \n"
":small_blue_diamond: Scientific researchers: <b>No minimum</b> \n"
":small_blue_diamond: Medical training specialists: <b>No minimum</b> \n\n"
"Type /start to start calculation. \n"
"Type /help get more details. \n\n",
use_aliases=True
)
return message
def get_options(self):
"""
Disable link preview.
Add HTML tags render support.
"""
return {
"disable_web_page_preview": True,
"parse_mode": telegram.ParseMode.HTML,
}
| en | 0.614473 | Ruling help message. Taken from: https://www.iamexpat.nl/expat-info/taxation/30-percent-ruling/requirements Disable link preview. Add HTML tags render support. | 2.89919 | 3 |
extras/20190910/code/dummy_11a/resnet18_unet_softmax_01/train.py | pyaf/severstal-steel-defect-detection | 0 | 10109 | import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
from common import *
from dataset import *
from model import *
def valid_augment(image, mask, infor):
return image, mask, infor
def train_augment(image, mask, infor):
u=np.random.choice(3)
if u==0:
pass
elif u==1:
image, mask = do_random_crop_rescale(image, mask, 1600-(256-224), 224)
elif u==2:
image, mask = do_random_crop_rotate_rescale(image, mask, 1600-(256-224), 224)
if np.random.rand()>0.5:
image = do_random_log_contast(image)
if np.random.rand()>0.5:
image, mask = do_flip_lr(image, mask)
if np.random.rand()>0.5:
image, mask = do_flip_ud(image, mask)
if np.random.rand()>0.5:
image, mask = do_noise(image, mask)
return image, mask, infor
def null_collate(batch):
batch_size = len(batch)
input = []
truth_mask = []
truth_label = []
infor = []
for b in range(batch_size):
input.append(batch[b][0])
#truth_mask.append(batch[b][1])
infor.append(batch[b][2])
mask = batch[b][1]
label = (mask.reshape(4,-1).sum(1)>0).astype(np.int32)
num_class,H,W = mask.shape
mask = mask.transpose(1,2,0)*[1,2,3,4]
mask = mask.reshape(-1,4)
mask = mask.max(-1).reshape(1,H,W)
truth_mask.append(mask)
truth_label.append(label)
input = np.stack(input)
input = image_to_input(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
input = torch.from_numpy(input).float()
truth_mask = np.stack(truth_mask)
truth_mask = torch.from_numpy(truth_mask).long()
truth_label = np.array(truth_label)
truth_label = torch.from_numpy(truth_label).float()
return input, truth_mask, truth_label, infor
#------------------------------------
def do_valid(net, valid_loader, out_dir=None):
#out_dir=None
valid_num = np.zeros(11, np.float32)
valid_loss = np.zeros(11, np.float32)
for t, (input, truth_mask, truth_label, infor) in enumerate(valid_loader):
#if b==5: break
net.eval()
input = input.cuda()
truth_mask = truth_mask.cuda()
truth_label = truth_label.cuda()
with torch.no_grad():
logit = data_parallel(net, input) #net(input)
loss = criterion(logit, truth_mask)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
dn,dp, num_neg,num_pos = metric_dice(logit, truth_mask, threshold=0.5, sum_threshold=100)
#zz=0
#---
batch_size = len(infor)
l = np.array([ loss.item(), tn,*tp, dn,*dp ])
n = np.array([ batch_size, num_neg,*num_pos, num_neg,*num_pos ])
valid_loss += l*n
valid_num += n
# debug-----------------------------
if out_dir is not None:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(0, batch_size, 4):
image_id = infor[b].image_id[:-4]
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
draw_shadow_text(result,'%05d %s.jpg'%(valid_num[0]-batch_size+b, image_id),(5,24),1,[255,255,255],2)
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/valid/%s.png'%(infor[b].image_id[:-4]), result)
cv2.waitKey(1)
pass
# debug-----------------------------
#print(valid_loss)
print('\r %8d /%8d'%(valid_num[0], len(valid_loader.dataset)),end='',flush=True)
pass #-- end of one data loader --
assert(valid_num[0] == len(valid_loader.dataset))
valid_loss = valid_loss/valid_num
return valid_loss
def run_train():
out_dir = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance'
initial_checkpoint = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance/checkpoint/00114000_model.pth'
schduler = NullScheduler(lr=0.001)
batch_size = 8 #8
iter_accum = 4
loss_weight = None#[5,5,2,5] #
train_sampler = FourBalanceClassSampler #RandomSampler
## setup -----------------------------------------------------------------------------
for f in ['checkpoint','train','valid','backup'] : os.makedirs(out_dir +'/'+f, exist_ok=True)
backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.train.%s.zip'%IDENTIFIER)
log = Logger()
log.open(out_dir+'/log.train.txt',mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\n')
## dataset ----------------------------------------
log.write('** dataset setting **\n')
train_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['train_b1_11568.npy',],
augment = train_augment,
)
train_loader = DataLoader(
train_dataset,
sampler = train_sampler(train_dataset),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
valid_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['valid_b1_1000.npy',],
augment = valid_augment,
)
valid_loader = DataLoader(
valid_dataset,
sampler = SequentialSampler(valid_dataset),
batch_size = 4,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
assert(len(train_dataset)>=batch_size)
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
## net ----------------------------------------
log.write('** net setting **\n')
net = Net().cuda()
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
if initial_checkpoint is not None:
state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
##for k in ['logit.weight','logit.bias']: state_dict.pop(k, None) #tramsfer sigmoid feature to softmax network
##net.load_state_dict(state_dict,strict=False)
net.load_state_dict(state_dict,strict=False)
else:
net.load_pretrain(skip=['logit'], is_print=False)
log.write('%s\n'%(type(net)))
log.write('\tloss_weight = %s\n' % str(loss_weight))
log.write('\ttrain_loader.sampler = %s\n' % str(train_loader.sampler))
log.write('\n')
## optimiser ----------------------------------
# if 0: ##freeze
# for p in net.encoder1.parameters(): p.requires_grad = False
# pass
#net.set_mode('train',is_freeze_bn=True)
#-----------------------------------------------
#optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=schduler(0))
#optimizer = torch.optim.RMSprop(net.parameters(), lr =0.0005, alpha = 0.95)
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=schduler(0), momentum=0.9, weight_decay=0.0001)
num_iters = 3000*1000
iter_smooth = 50
iter_log = 500
iter_valid = 1500
iter_save = [0, num_iters-1]\
+ list(range(0, num_iters, 1500))#1*1000
start_iter = 0
start_epoch= 0
rate = 0
if initial_checkpoint is not None:
initial_optimizer = initial_checkpoint.replace('_model.pth','_optimizer.pth')
if os.path.exists(initial_optimizer):
checkpoint = torch.load(initial_optimizer)
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
#optimizer.load_state_dict(checkpoint['optimizer'])
pass
log.write('optimizer\n %s\n'%(optimizer))
log.write('schduler\n %s\n'%(schduler))
log.write('\n')
## start training here! ##############################################
log.write('** start training here! **\n')
log.write(' batch_size=%d, iter_accum=%d\n'%(batch_size,iter_accum))
log.write(' experiment = %s\n' % __file__.split('/')[-2])
log.write(' |-------------------------------- VALID-----------------------------|---------- TRAIN/BATCH ------------------------------\n')
log.write('rate iter epoch | loss hit_neg,pos1,2,3,4 dice_neg,pos1,2,3,4 | loss hit_neg,pos1,2,3,4 | time \n')
log.write('------------------------------------------------------------------------------------------------------------------------------------------------\n')
#0.00000 0.0* 0.0 | 0.690 0.50 [0.00,1.00,0.00,1.00] 0.44 [0.00,0.02,0.00,0.15] | 0.000 0.00 [0.00,0.00,0.00,0.00] | 0 hr 00 min
train_loss = np.zeros(20,np.float32)
valid_loss = np.zeros(20,np.float32)
batch_loss = np.zeros(20,np.float32)
iter = 0
i = 0
start = timer()
while iter<num_iters:
sum_train_loss = np.zeros(20,np.float32)
sum = np.zeros(20,np.float32)
optimizer.zero_grad()
for t, (input, truth_mask, truth_label, infor) in enumerate(train_loader):
batch_size = len(infor)
iter = i + start_iter
epoch = (iter-start_iter)*batch_size/len(train_dataset) + start_epoch
#if 0:
if (iter % iter_valid==0):
valid_loss = do_valid(net, valid_loader, out_dir) #
#pass
if (iter % iter_log==0):
print('\r',end='',flush=True)
asterisk = '*' if iter in iter_save else ' '
log.write('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*train_loss[:6],
time_to_str((timer() - start),'min'))
)
log.write('\n')
#if 0:
if iter in iter_save:
torch.save(net.state_dict(),out_dir +'/checkpoint/%08d_model.pth'%(iter))
torch.save({
#'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, out_dir +'/checkpoint/%08d_optimizer.pth'%(iter))
pass
# learning rate schduler -------------
lr = schduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
# one iteration update -------------
#net.set_mode('train',is_freeze_bn=True)
net.train()
input = input.cuda()
truth_label = truth_label.cuda()
truth_mask = truth_mask.cuda()
logit = data_parallel(net,input) #net(input)
loss = criterion(logit, truth_mask, loss_weight)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
(loss/iter_accum).backward()
if (iter % iter_accum)==0:
optimizer.step()
optimizer.zero_grad()
# print statistics ------------
l = np.array([ loss.item(), tn,*tp ])
n = np.array([ batch_size, num_neg,*num_pos ])
batch_loss[:6] = l
sum_train_loss[:6] += l*n
sum[:6] += n
if iter%iter_smooth == 0:
train_loss = sum_train_loss/(sum+1e-12)
sum_train_loss[...] = 0
sum[...] = 0
print('\r',end='',flush=True)
asterisk = ' '
print('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*batch_loss[:6],
time_to_str((timer() - start),'min'))
, end='',flush=True)
i=i+1
# debug-----------------------------
if 1:
for di in range(3):
if (iter+di)%1000==0:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(batch_size):
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/train/%05d.png'%(di*100+b), result)
cv2.waitKey(1)
pass
pass #-- end of one data loader --
pass #-- end of all iterations --
log.write('\n')
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train()
| import os
os.environ['CUDA_VISIBLE_DEVICES']='0'
from common import *
from dataset import *
from model import *
def valid_augment(image, mask, infor):
return image, mask, infor
def train_augment(image, mask, infor):
u=np.random.choice(3)
if u==0:
pass
elif u==1:
image, mask = do_random_crop_rescale(image, mask, 1600-(256-224), 224)
elif u==2:
image, mask = do_random_crop_rotate_rescale(image, mask, 1600-(256-224), 224)
if np.random.rand()>0.5:
image = do_random_log_contast(image)
if np.random.rand()>0.5:
image, mask = do_flip_lr(image, mask)
if np.random.rand()>0.5:
image, mask = do_flip_ud(image, mask)
if np.random.rand()>0.5:
image, mask = do_noise(image, mask)
return image, mask, infor
def null_collate(batch):
batch_size = len(batch)
input = []
truth_mask = []
truth_label = []
infor = []
for b in range(batch_size):
input.append(batch[b][0])
#truth_mask.append(batch[b][1])
infor.append(batch[b][2])
mask = batch[b][1]
label = (mask.reshape(4,-1).sum(1)>0).astype(np.int32)
num_class,H,W = mask.shape
mask = mask.transpose(1,2,0)*[1,2,3,4]
mask = mask.reshape(-1,4)
mask = mask.max(-1).reshape(1,H,W)
truth_mask.append(mask)
truth_label.append(label)
input = np.stack(input)
input = image_to_input(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
input = torch.from_numpy(input).float()
truth_mask = np.stack(truth_mask)
truth_mask = torch.from_numpy(truth_mask).long()
truth_label = np.array(truth_label)
truth_label = torch.from_numpy(truth_label).float()
return input, truth_mask, truth_label, infor
#------------------------------------
def do_valid(net, valid_loader, out_dir=None):
#out_dir=None
valid_num = np.zeros(11, np.float32)
valid_loss = np.zeros(11, np.float32)
for t, (input, truth_mask, truth_label, infor) in enumerate(valid_loader):
#if b==5: break
net.eval()
input = input.cuda()
truth_mask = truth_mask.cuda()
truth_label = truth_label.cuda()
with torch.no_grad():
logit = data_parallel(net, input) #net(input)
loss = criterion(logit, truth_mask)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
dn,dp, num_neg,num_pos = metric_dice(logit, truth_mask, threshold=0.5, sum_threshold=100)
#zz=0
#---
batch_size = len(infor)
l = np.array([ loss.item(), tn,*tp, dn,*dp ])
n = np.array([ batch_size, num_neg,*num_pos, num_neg,*num_pos ])
valid_loss += l*n
valid_num += n
# debug-----------------------------
if out_dir is not None:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(0, batch_size, 4):
image_id = infor[b].image_id[:-4]
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
draw_shadow_text(result,'%05d %s.jpg'%(valid_num[0]-batch_size+b, image_id),(5,24),1,[255,255,255],2)
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/valid/%s.png'%(infor[b].image_id[:-4]), result)
cv2.waitKey(1)
pass
# debug-----------------------------
#print(valid_loss)
print('\r %8d /%8d'%(valid_num[0], len(valid_loader.dataset)),end='',flush=True)
pass #-- end of one data loader --
assert(valid_num[0] == len(valid_loader.dataset))
valid_loss = valid_loss/valid_num
return valid_loss
def run_train():
out_dir = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance'
initial_checkpoint = \
'/root/share/project/kaggle/2019/steel/result1/resnet18-seg-full-softmax-foldb1-1-4balance/checkpoint/00114000_model.pth'
schduler = NullScheduler(lr=0.001)
batch_size = 8 #8
iter_accum = 4
loss_weight = None#[5,5,2,5] #
train_sampler = FourBalanceClassSampler #RandomSampler
## setup -----------------------------------------------------------------------------
for f in ['checkpoint','train','valid','backup'] : os.makedirs(out_dir +'/'+f, exist_ok=True)
backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.train.%s.zip'%IDENTIFIER)
log = Logger()
log.open(out_dir+'/log.train.txt',mode='a')
log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
log.write('\t%s\n' % COMMON_STRING)
log.write('\n')
log.write('\tSEED = %u\n' % SEED)
log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
log.write('\t__file__ = %s\n' % __file__)
log.write('\tout_dir = %s\n' % out_dir)
log.write('\n')
## dataset ----------------------------------------
log.write('** dataset setting **\n')
train_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['train_b1_11568.npy',],
augment = train_augment,
)
train_loader = DataLoader(
train_dataset,
sampler = train_sampler(train_dataset),
batch_size = batch_size,
drop_last = True,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
valid_dataset = SteelDataset(
mode = 'train',
csv = ['train.csv',],
split = ['valid_b1_1000.npy',],
augment = valid_augment,
)
valid_loader = DataLoader(
valid_dataset,
sampler = SequentialSampler(valid_dataset),
batch_size = 4,
drop_last = False,
num_workers = 8,
pin_memory = True,
collate_fn = null_collate
)
assert(len(train_dataset)>=batch_size)
log.write('batch_size = %d\n'%(batch_size))
log.write('train_dataset : \n%s\n'%(train_dataset))
log.write('valid_dataset : \n%s\n'%(valid_dataset))
log.write('\n')
## net ----------------------------------------
log.write('** net setting **\n')
net = Net().cuda()
log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
if initial_checkpoint is not None:
state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
##for k in ['logit.weight','logit.bias']: state_dict.pop(k, None) #tramsfer sigmoid feature to softmax network
##net.load_state_dict(state_dict,strict=False)
net.load_state_dict(state_dict,strict=False)
else:
net.load_pretrain(skip=['logit'], is_print=False)
log.write('%s\n'%(type(net)))
log.write('\tloss_weight = %s\n' % str(loss_weight))
log.write('\ttrain_loader.sampler = %s\n' % str(train_loader.sampler))
log.write('\n')
## optimiser ----------------------------------
# if 0: ##freeze
# for p in net.encoder1.parameters(): p.requires_grad = False
# pass
#net.set_mode('train',is_freeze_bn=True)
#-----------------------------------------------
#optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=schduler(0))
#optimizer = torch.optim.RMSprop(net.parameters(), lr =0.0005, alpha = 0.95)
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=schduler(0), momentum=0.9, weight_decay=0.0001)
num_iters = 3000*1000
iter_smooth = 50
iter_log = 500
iter_valid = 1500
iter_save = [0, num_iters-1]\
+ list(range(0, num_iters, 1500))#1*1000
start_iter = 0
start_epoch= 0
rate = 0
if initial_checkpoint is not None:
initial_optimizer = initial_checkpoint.replace('_model.pth','_optimizer.pth')
if os.path.exists(initial_optimizer):
checkpoint = torch.load(initial_optimizer)
start_iter = checkpoint['iter' ]
start_epoch = checkpoint['epoch']
#optimizer.load_state_dict(checkpoint['optimizer'])
pass
log.write('optimizer\n %s\n'%(optimizer))
log.write('schduler\n %s\n'%(schduler))
log.write('\n')
## start training here! ##############################################
log.write('** start training here! **\n')
log.write(' batch_size=%d, iter_accum=%d\n'%(batch_size,iter_accum))
log.write(' experiment = %s\n' % __file__.split('/')[-2])
log.write(' |-------------------------------- VALID-----------------------------|---------- TRAIN/BATCH ------------------------------\n')
log.write('rate iter epoch | loss hit_neg,pos1,2,3,4 dice_neg,pos1,2,3,4 | loss hit_neg,pos1,2,3,4 | time \n')
log.write('------------------------------------------------------------------------------------------------------------------------------------------------\n')
#0.00000 0.0* 0.0 | 0.690 0.50 [0.00,1.00,0.00,1.00] 0.44 [0.00,0.02,0.00,0.15] | 0.000 0.00 [0.00,0.00,0.00,0.00] | 0 hr 00 min
train_loss = np.zeros(20,np.float32)
valid_loss = np.zeros(20,np.float32)
batch_loss = np.zeros(20,np.float32)
iter = 0
i = 0
start = timer()
while iter<num_iters:
sum_train_loss = np.zeros(20,np.float32)
sum = np.zeros(20,np.float32)
optimizer.zero_grad()
for t, (input, truth_mask, truth_label, infor) in enumerate(train_loader):
batch_size = len(infor)
iter = i + start_iter
epoch = (iter-start_iter)*batch_size/len(train_dataset) + start_epoch
#if 0:
if (iter % iter_valid==0):
valid_loss = do_valid(net, valid_loader, out_dir) #
#pass
if (iter % iter_log==0):
print('\r',end='',flush=True)
asterisk = '*' if iter in iter_save else ' '
log.write('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*train_loss[:6],
time_to_str((timer() - start),'min'))
)
log.write('\n')
#if 0:
if iter in iter_save:
torch.save(net.state_dict(),out_dir +'/checkpoint/%08d_model.pth'%(iter))
torch.save({
#'optimizer': optimizer.state_dict(),
'iter' : iter,
'epoch' : epoch,
}, out_dir +'/checkpoint/%08d_optimizer.pth'%(iter))
pass
# learning rate schduler -------------
lr = schduler(iter)
if lr<0 : break
adjust_learning_rate(optimizer, lr)
rate = get_learning_rate(optimizer)
# one iteration update -------------
#net.set_mode('train',is_freeze_bn=True)
net.train()
input = input.cuda()
truth_label = truth_label.cuda()
truth_mask = truth_mask.cuda()
logit = data_parallel(net,input) #net(input)
loss = criterion(logit, truth_mask, loss_weight)
tn,tp, num_neg,num_pos = metric_hit(logit, truth_mask)
(loss/iter_accum).backward()
if (iter % iter_accum)==0:
optimizer.step()
optimizer.zero_grad()
# print statistics ------------
l = np.array([ loss.item(), tn,*tp ])
n = np.array([ batch_size, num_neg,*num_pos ])
batch_loss[:6] = l
sum_train_loss[:6] += l*n
sum[:6] += n
if iter%iter_smooth == 0:
train_loss = sum_train_loss/(sum+1e-12)
sum_train_loss[...] = 0
sum[...] = 0
print('\r',end='',flush=True)
asterisk = ' '
print('%0.5f %5.1f%s %5.1f | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %5.3f %4.2f [%4.2f,%4.2f,%4.2f,%4.2f] | %s' % (\
rate, iter/1000, asterisk, epoch,
*valid_loss[:11],
*batch_loss[:6],
time_to_str((timer() - start),'min'))
, end='',flush=True)
i=i+1
# debug-----------------------------
if 1:
for di in range(3):
if (iter+di)%1000==0:
probability = torch.softmax(logit,1)
image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)
probability = one_hot_encode_predict(probability)
truth_mask = one_hot_encode_truth(truth_mask)
probability_mask = probability.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
for b in range(batch_size):
result = draw_predict_result(image[b], truth_mask[b], truth_label[b], probability_mask[b], stack='vertical')
image_show('result',result,resize=1)
cv2.imwrite(out_dir +'/train/%05d.png'%(di*100+b), result)
cv2.waitKey(1)
pass
pass #-- end of one data loader --
pass #-- end of all iterations --
log.write('\n')
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_train()
| en | 0.251306 | #truth_mask.append(batch[b][1]) #------------------------------------ #out_dir=None #if b==5: break #net(input) #zz=0 #--- # debug----------------------------- # debug----------------------------- #print(valid_loss) #-- end of one data loader -- #8 #[5,5,2,5] # #RandomSampler ## setup ----------------------------------------------------------------------------- ## dataset ---------------------------------------- ## net ---------------------------------------- ##for k in ['logit.weight','logit.bias']: state_dict.pop(k, None) #tramsfer sigmoid feature to softmax network ##net.load_state_dict(state_dict,strict=False) ## optimiser ---------------------------------- # if 0: ##freeze # for p in net.encoder1.parameters(): p.requires_grad = False # pass #net.set_mode('train',is_freeze_bn=True) #----------------------------------------------- #optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=schduler(0)) #optimizer = torch.optim.RMSprop(net.parameters(), lr =0.0005, alpha = 0.95) #1*1000 #optimizer.load_state_dict(checkpoint['optimizer']) ## start training here! ############################################## #0.00000 0.0* 0.0 | 0.690 0.50 [0.00,1.00,0.00,1.00] 0.44 [0.00,0.02,0.00,0.15] | 0.000 0.00 [0.00,0.00,0.00,0.00] | 0 hr 00 min #if 0: # #pass #if 0: #'optimizer': optimizer.state_dict(), # learning rate schduler ------------- # one iteration update ------------- #net.set_mode('train',is_freeze_bn=True) #net(input) # print statistics ------------ # debug----------------------------- #-- end of one data loader -- #-- end of all iterations -- # main ################################################################# | 2.501898 | 3 |
city_coord_download.py | Yuchen971/Chinese-city-level-geojson | 0 | 10110 | <gh_stars>0
import requests
import os
def get_json(save_dir, adcode):
# 获取当前地图轮廓
base_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '.json'
full_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '_full.json'
base_r = requests.get(base_url)
if base_r.status_code == 200:
cur_obj_name = base_r.json()['features'][0]['properties']['name']
print(cur_obj_name)
cur_file_dir = os.path.join(save_dir, cur_obj_name)
if not os.path.exists(cur_file_dir):
os.mkdir(cur_file_dir)
base_json_file = os.path.join(cur_file_dir, str(adcode) + '.json')
with open(base_json_file, 'w') as file:
file.write(base_r.text)
# 获取当前地图子地图轮廓
full_r = requests.get(full_url)
if full_r.status_code == 200 and 'cur_obj_name' in vars():
full_json_file = os.path.join(cur_file_dir, str(adcode) + '_full.json')
with open(full_json_file, 'w') as file:
file.write(full_r.text)
for item in full_r.json()['features']:
chadcode = item['properties']['adcode']
if chadcode == adcode:
pass
else:
get_json(cur_file_dir, chadcode)
get_json('/Users/yuchenli/Downloads/city_geojson-master', 100000) | import requests
import os
def get_json(save_dir, adcode):
# 获取当前地图轮廓
base_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '.json'
full_url = 'https://geo.datav.aliyun.com/areas/bound/' + str(adcode) + '_full.json'
base_r = requests.get(base_url)
if base_r.status_code == 200:
cur_obj_name = base_r.json()['features'][0]['properties']['name']
print(cur_obj_name)
cur_file_dir = os.path.join(save_dir, cur_obj_name)
if not os.path.exists(cur_file_dir):
os.mkdir(cur_file_dir)
base_json_file = os.path.join(cur_file_dir, str(adcode) + '.json')
with open(base_json_file, 'w') as file:
file.write(base_r.text)
# 获取当前地图子地图轮廓
full_r = requests.get(full_url)
if full_r.status_code == 200 and 'cur_obj_name' in vars():
full_json_file = os.path.join(cur_file_dir, str(adcode) + '_full.json')
with open(full_json_file, 'w') as file:
file.write(full_r.text)
for item in full_r.json()['features']:
chadcode = item['properties']['adcode']
if chadcode == adcode:
pass
else:
get_json(cur_file_dir, chadcode)
get_json('/Users/yuchenli/Downloads/city_geojson-master', 100000) | zh | 0.666763 | # 获取当前地图轮廓 # 获取当前地图子地图轮廓 | 2.861057 | 3 |
myproject/core/clusterAnalysis.py | xiaoxiansheng19/data_analysis | 0 | 10111 | <filename>myproject/core/clusterAnalysis.py
# from sklearn.cluster import DBSCAN,KMeans
#
#
# def run(data,radius=300):
# res={}
# # 默认参数 epsilon=0.001, min_samples=200
# epsilon = radius / 100000
# # epsilon = 0.003
# min_samples = 100
# db = DBSCAN(eps=epsilon, min_samples=min_samples)
# # eps表示两个向量可以被视作为同一个类的最大的距离
# # min_samples表示一个类中至少要包含的元素数量,如果小于这个数量,那么不构成一个类
# y_pred = db.fit_predict(data)
# # print(y_pred)
# # df_user_info['label'] = y_pred
# n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0) # 获取分簇的数目
# if n_clusters_<1:
# model = KMeans(n_clusters=1, random_state=0)
# model.fit(data)
# centroid = model.cluster_centers_
# res['point']= | <filename>myproject/core/clusterAnalysis.py
# from sklearn.cluster import DBSCAN,KMeans
#
#
# def run(data,radius=300):
# res={}
# # 默认参数 epsilon=0.001, min_samples=200
# epsilon = radius / 100000
# # epsilon = 0.003
# min_samples = 100
# db = DBSCAN(eps=epsilon, min_samples=min_samples)
# # eps表示两个向量可以被视作为同一个类的最大的距离
# # min_samples表示一个类中至少要包含的元素数量,如果小于这个数量,那么不构成一个类
# y_pred = db.fit_predict(data)
# # print(y_pred)
# # df_user_info['label'] = y_pred
# n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0) # 获取分簇的数目
# if n_clusters_<1:
# model = KMeans(n_clusters=1, random_state=0)
# model.fit(data)
# centroid = model.cluster_centers_
# res['point']= | en | 0.21102 | # from sklearn.cluster import DBSCAN,KMeans # # # def run(data,radius=300): # res={} # # 默认参数 epsilon=0.001, min_samples=200 # epsilon = radius / 100000 # # epsilon = 0.003 # min_samples = 100 # db = DBSCAN(eps=epsilon, min_samples=min_samples) # # eps表示两个向量可以被视作为同一个类的最大的距离 # # min_samples表示一个类中至少要包含的元素数量,如果小于这个数量,那么不构成一个类 # y_pred = db.fit_predict(data) # # print(y_pred) # # df_user_info['label'] = y_pred # n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0) # 获取分簇的数目 # if n_clusters_<1: # model = KMeans(n_clusters=1, random_state=0) # model.fit(data) # centroid = model.cluster_centers_ # res['point']= | 3.171622 | 3 |
verticapy/tests/vDataFrame/test_vDF_create.py | sitingren/VerticaPy | 0 | 10112 | # (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, warnings
from verticapy import vDataFrame, drop_table
from verticapy import set_option
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.learn.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop_table(name="public.titanic", cursor=base.cursor)
class TestvDFCreate:
def test_creating_vDF_using_input_relation(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="public.titanic", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_schema(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="titanic", schema="public", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_vcolumns(self, base, titanic_vd):
tvdf = vDataFrame(
input_relation="public.titanic",
usecols=["age", "survived"],
cursor=base.cursor,
)
assert tvdf["survived"].count() == 1234
@pytest.mark.skip(reason="test not implemented")
def test_creating_vDF_using_input_relation_dsn(self):
pass
| # (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest, warnings
from verticapy import vDataFrame, drop_table
from verticapy import set_option
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.learn.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop_table(name="public.titanic", cursor=base.cursor)
class TestvDFCreate:
def test_creating_vDF_using_input_relation(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="public.titanic", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_schema(self, base, titanic_vd):
tvdf = vDataFrame(input_relation="titanic", schema="public", cursor=base.cursor)
assert tvdf["pclass"].count() == 1234
def test_creating_vDF_using_input_relation_vcolumns(self, base, titanic_vd):
tvdf = vDataFrame(
input_relation="public.titanic",
usecols=["age", "survived"],
cursor=base.cursor,
)
assert tvdf["survived"].count() == 1234
@pytest.mark.skip(reason="test not implemented")
def test_creating_vDF_using_input_relation_dsn(self):
pass
| en | 0.863285 | # (c) Copyright [2018-2021] Micro Focus or one of its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.02797 | 2 |
Solutions/beta/beta_is_it_an_isogram.py | citrok25/Codewars-1 | 46 | 10113 | import re
from collections import Counter
def is_isogram(word):
if not isinstance(word, str) or word == '': return False
word = {j for i,j in Counter(
re.sub('[^a-z]', '', word.lower())
).most_common()
}
return len(word) == 1
| import re
from collections import Counter
def is_isogram(word):
if not isinstance(word, str) or word == '': return False
word = {j for i,j in Counter(
re.sub('[^a-z]', '', word.lower())
).most_common()
}
return len(word) == 1
| none | 1 | 3.72191 | 4 |
|
p23_Merge_k_Sorted_Lists.py | bzhou26/leetcode_sol | 0 | 10114 | <reponame>bzhou26/leetcode_sol<filename>p23_Merge_k_Sorted_Lists.py
'''
- Leetcode problem: 23
- Difficulty: Hard
- Brief problem description:
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
pq = []
for l in lists:
if l:
heapq.heappush(pq, (l.val, id(l), l))
newNode = ListNode()
result = newNode
while pq:
minVal, i, minNode = heapq.heappop(pq)
newNode.next = minNode
nextNode = minNode.next
newNode = minNode
if nextNode:
heapq.heappush(pq, (nextNode.val, id(nextNode), nextNode))
return result.next
| '''
- Leetcode problem: 23
- Difficulty: Hard
- Brief problem description:
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
pq = []
for l in lists:
if l:
heapq.heappush(pq, (l.val, id(l), l))
newNode = ListNode()
result = newNode
while pq:
minVal, i, minNode = heapq.heappop(pq)
newNode.next = minNode
nextNode = minNode.next
newNode = minNode
if nextNode:
heapq.heappush(pq, (nextNode.val, id(nextNode), nextNode))
return result.next | en | 0.699731 | - Leetcode problem: 23 - Difficulty: Hard - Brief problem description: Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity. Example: Input: [ 1->4->5, 1->3->4, 2->6 ] Output: 1->1->2->3->4->4->5->6 - Solution Summary: - Used Resources: --- Bo Zhou # Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next | 3.874538 | 4 |
flocx_ui/content/flocx/views.py | whitel/flocx-ui | 0 | 10115 | from django.views import generic
class IndexView(generic.TemplateView):
template_name = 'project/flocx/index.html' | from django.views import generic
class IndexView(generic.TemplateView):
template_name = 'project/flocx/index.html' | none | 1 | 1.249478 | 1 |
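
The record above is a single `TemplateView`; the sketch below shows one conventional way to expose such a view through a URLconf. The import path and route name are assumptions made for illustration and are not taken from the flocx-ui project.

# urls.py - minimal wiring sketch; the module path and route name are assumed.
from django.urls import path

from flocx_ui.content.flocx import views

urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
]
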
|
wificontrol/utils/networkstranslate.py | patrislav1/pywificontrol | 1 | 10116 | # Written by <NAME> and <NAME> <<EMAIL>>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def create_security(proto, key_mgmt, group):
if not proto:
return 'open'
if not key_mgmt:
if "wep" in group:
return 'wep'
else:
return None
else:
if "wpa-psk" in key_mgmt:
if proto == "WPA":
return "wpapsk"
elif proto == "RSN":
return "wpa2psk"
else:
return None
elif "wpa-eap" in key_mgmt:
return 'wpaeap'
else:
return None
def convert_to_wpas_network(network):
return dict(WpasNetworkConverter(network))
def convert_to_wificontrol_network(network, current_network):
wifinetwork = dict(WifiControlNetworkConverter(network))
try:
if wifinetwork['ssid'] == current_network['ssid']:
wifinetwork.update(current_network)
wifinetwork["connected"] = True
except TypeError:
pass
finally:
return wifinetwork
class WpasNetworkConverter(object):
def __init__(self, network_dict):
def rawUtf8(s):
return "{}".format(s.encode('utf-8'))[2:-1]
self.security = network_dict.get('security')
self.name = rawUtf8(network_dict.get('ssid', ''))
self.password = rawUtf8(network_dict.get('password', ''))
self.identity = rawUtf8(network_dict.get('identity', ''))
def __iter__(self):
if (self.security == 'open'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
elif (self.security == 'wep'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
yield "group", "WEP104 WEP40"
yield "wep_key0", "{}".format(self.password)
elif (self.security == 'wpapsk'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpa2psk'):
yield "ssid", "{}".format(self.name)
yield "proto", "RSN"
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpaeap'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-EAP"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "identity", "{}".format(self.identity)
yield "password", <PASSWORD>(self.password)
yield "phase1", "peaplable=0"
else:
yield "ssid", "{}".format(self.name)
yield "psk", "{}".format(self.password)
class WifiControlNetworkConverter(object):
def __init__(self, network_dict):
self.name = network_dict.get('ssid')
self.key_mgmt = network_dict.get('key_mgmt')
self.proto = network_dict.get('proto')
self.group = network_dict.get('group')
def __iter__(self):
if (self.key_mgmt == 'NONE'):
if not self.group:
yield "ssid", self.name
yield "security", "Open"
else:
yield "ssid", self.name
yield "security", "WEP"
elif (self.key_mgmt == 'WPA-PSK'):
if not self.proto:
yield "ssid", self.name
yield "security", "WPA-PSK"
else:
yield "ssid", self.name
yield "security", "WPA2-PSK"
elif (self.key_mgmt == 'WPA-EAP'):
yield "ssid", self.name
yield "security", "WPA-EAP"
else:
yield "ssid", self.name
yield "security", "NONE"
yield "connected", False
if __name__ == '__main__':
network = {'ssid': "MySSID", 'password': "<PASSWORD>", 'security': "wpaeap", 'identity': "<EMAIL>"}
conv = convert_to_wpas_network(network)
reconv = convert_to_wificontrol_network(conv)
print(conv, reconv)
| # Written by <NAME> and <NAME> <<EMAIL>>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def create_security(proto, key_mgmt, group):
if not proto:
return 'open'
if not key_mgmt:
if "wep" in group:
return 'wep'
else:
return None
else:
if "wpa-psk" in key_mgmt:
if proto == "WPA":
return "wpapsk"
elif proto == "RSN":
return "wpa2psk"
else:
return None
elif "wpa-eap" in key_mgmt:
return 'wpaeap'
else:
return None
def convert_to_wpas_network(network):
return dict(WpasNetworkConverter(network))
def convert_to_wificontrol_network(network, current_network):
wifinetwork = dict(WifiControlNetworkConverter(network))
try:
if wifinetwork['ssid'] == current_network['ssid']:
wifinetwork.update(current_network)
wifinetwork["connected"] = True
except TypeError:
pass
finally:
return wifinetwork
class WpasNetworkConverter(object):
def __init__(self, network_dict):
def rawUtf8(s):
return "{}".format(s.encode('utf-8'))[2:-1]
self.security = network_dict.get('security')
self.name = rawUtf8(network_dict.get('ssid', ''))
self.password = rawUtf8(network_dict.get('password', ''))
self.identity = rawUtf8(network_dict.get('identity', ''))
def __iter__(self):
if (self.security == 'open'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
elif (self.security == 'wep'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "NONE"
yield "group", "WEP104 WEP40"
yield "wep_key0", "{}".format(self.password)
elif (self.security == 'wpapsk'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpa2psk'):
yield "ssid", "{}".format(self.name)
yield "proto", "RSN"
yield "key_mgmt", "WPA-PSK"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "psk", "{}".format(self.password)
elif (self.security == 'wpaeap'):
yield "ssid", "{}".format(self.name)
yield "key_mgmt", "WPA-EAP"
yield "pairwise", "CCMP TKIP"
yield "group", "CCMP TKIP"
yield "eap", "TTLS PEAP TLS"
yield "identity", "{}".format(self.identity)
yield "password", <PASSWORD>(self.password)
yield "phase1", "peaplable=0"
else:
yield "ssid", "{}".format(self.name)
yield "psk", "{}".format(self.password)
class WifiControlNetworkConverter(object):
def __init__(self, network_dict):
self.name = network_dict.get('ssid')
self.key_mgmt = network_dict.get('key_mgmt')
self.proto = network_dict.get('proto')
self.group = network_dict.get('group')
def __iter__(self):
if (self.key_mgmt == 'NONE'):
if not self.group:
yield "ssid", self.name
yield "security", "Open"
else:
yield "ssid", self.name
yield "security", "WEP"
elif (self.key_mgmt == 'WPA-PSK'):
if not self.proto:
yield "ssid", self.name
yield "security", "WPA-PSK"
else:
yield "ssid", self.name
yield "security", "WPA2-PSK"
elif (self.key_mgmt == 'WPA-EAP'):
yield "ssid", self.name
yield "security", "WPA-EAP"
else:
yield "ssid", self.name
yield "security", "NONE"
yield "connected", False
if __name__ == '__main__':
network = {'ssid': "MySSID", 'password': "<PASSWORD>", 'security': "wpaeap", 'identity': "<EMAIL>"}
conv = convert_to_wpas_network(network)
    reconv = convert_to_wificontrol_network(conv, None)
print(conv, reconv)
| en | 0.723333 | # Written by <NAME> and <NAME> <<EMAIL>> # # Copyright (c) 2016, Emlid Limited # All rights reserved. # # Redistribution and use in source and binary forms, # with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, # OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1.292395 | 1 |
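
A short sketch exercising the converters from the record above. It assumes the file is importable as `wificontrol.utils.networkstranslate`; the network values are made up, and passing `None` as the current network leans on the function's own `TypeError` guard, which is a guess about intended use rather than documented behaviour.

from wificontrol.utils.networkstranslate import (
    create_security,
    convert_to_wpas_network,
    convert_to_wificontrol_network,
)

# Map wpa_supplicant-style attributes onto the module's security labels.
print(create_security(proto="RSN", key_mgmt="wpa-psk", group="ccmp"))  # wpa2psk
print(create_security(proto=None, key_mgmt=None, group=""))            # open

# Round-trip a wificontrol-style network through the wpa_supplicant shape and back.
network = {"ssid": "HomeAP", "password": "example-passphrase", "security": "wpa2psk"}
wpas = convert_to_wpas_network(network)
print(wpas["key_mgmt"], wpas["proto"])  # WPA-PSK RSN

# None as the current network is swallowed by the TypeError handler inside the function.
print(convert_to_wificontrol_network(wpas, None))
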
src/LaminariaCore.py | MrKelpy/IFXG | 0 | 10117 | <reponame>MrKelpy/IFXG
# -*- coding: utf-8 -*-
"""
This module is distributed as part of the Laminaria Core (Python Version).
Get the Source Code in GitHub:
https://github.com/MrKelpy/LaminariaCore
The LaminariaCore is Open Source and distributed under the
MIT License
"""
# Built-in Imports
import datetime
import random
import asyncio
import os
# Third Party Imports
import screeninfo
from discord.ext import commands
import discord
from fpdf import FPDF
# Local Application Imports
###############################################################################
### DATE & TIME ###
###############################################################################
def twochars(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
def get_formatted_date(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
date_string = f"{twochars(str(date.day))}/{twochars(str(date.month))}/{twochars(str(date.year))} - " \
f"{twochars(str(date.hour))}:{twochars(str(date.minute))}"
if include_seconds:
date_string += f":{twochars(str(date.second))}"
return date_string
def get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
elif formatting == 2:
date_string = f"{twochars(str(now.day))}.{twochars(str(now.month))}.{twochars(str(now.year))}_" \
f"{twochars(str(now.hour))}.{twochars(str(now.minute))}"
else:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
if include_seconds:
date_string += f":{twochars(str(now.second))}"
return date_string
def time_until_midnight():
"""
Get seconds left until midnight
"""
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
timedelta_until_midnight = datetime.datetime.combine(tomorrow, datetime.time.min) - datetime.datetime.now()
return timedelta_until_midnight.seconds
###############################################################################
### GENERAL ###
###############################################################################
def get_absolute_screen_coords(relx, rely):
"""
Returns absolute screen coordinates based off the given relative
coordinates. For instance, in a 1920x720 screen, the x50, y50 input would be
x960, y360.
:param relx: Relative X Coordinate
:param rely: Relative Y Coordinate
:return: Absolute Coordinates
"""
monitor = screeninfo.get_monitors()[0]
x = (relx*monitor.width)/100
y = (rely*monitor.height)/100
return x, y
def get_relative_screen_coords(x, y):
"""
Returns relative screen coordinates based off the given absolute
    coordinates. The relative coordinates are percentage-based values calculated
    relative to the monitor specs and the given coords.
:param x: Absolute X
:param y: Absolute Y
:return:
"""
monitor = screeninfo.get_monitors()[0]
relx = (x*100)/monitor.width
rely = (y*100)/monitor.height
return relx, rely
###############################################################################
### PLACEHOLDERS ###
###############################################################################
async def small_ipsum():
"""
    Returns the small version of the lorem ipsum placeholder
:return:
"""
return "Lorem ipsum dolor sit amet."
async def big_ipsum():
"""
Returns a bigger version of the lorem ipsum text than the small_ipsum function does.
:return:
"""
return "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt " \
"ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco " \
"laboris nisi ut aliquip ex ea commodo consequat."
###############################################################################
### DISCORD.PY ###
###############################################################################
async def hasrole(role: discord.Role, user: discord.Member, add: bool = False):
"""
Checks if a user has a certain role.
:param role: The role to be checked for. -> discord.Role
:param user: The user. -> discord.Member
:param add: If set to True, adds the role to the user, will always return True.
:return: True, if user has the role. False otherwise.
"""
for r in user.roles:
if r == role:
return True
else:
if add is True:
await user.add_roles(role)
return True
return False
async def getrolenamed(role: str, guild: discord.Guild, create: bool = False, exact: bool = True):
"""
Returns a role inside a Guild based on a given name.
:param role: The role to be gathered. -> str
:param guild: The guild to retrieve the role from. -> discord.Guild
    :param create: If set to True, creates the role. (If non-existent!)
:param exact: If set to True, matches the role exactly
:return: discord.Role, None if not found.
"""
for r in guild.roles:
if exact and r.name == role:
return r
elif role in r.name:
return r
else:
if create is True:
colours = [discord.Colour.red(), discord.Colour.dark_teal(), discord.Colour.teal(), discord.Colour.gold(),
discord.Colour.blurple(), discord.Colour.purple(), discord.Colour.green(),
discord.Colour.greyple(),
discord.Colour.orange(), discord.Colour.light_grey()]
return_role = await guild.create_role(name=role, colour=random.choice(colours))
return return_role
return None
async def get_textchannel_by_name(channel: str, guild: discord.Guild,
delete: bool = False, create: bool = False, category: str = None, exact: bool = True):
"""
Returns a text channel based on a given name.
:param channel: The channel to be gathered. -> str
:param guild: The guild to retrieve the channel from. -> discord.Guild
    :param delete: If set to True, deletes the channel. (If found!)
    :param create: If set to True, creates the channel. (If not found!)
    :param category: The category to create the channel into. (If create is True!)
    :param exact: If set to True, the channel name needs to match exactly. Otherwise a partial match is accepted.
:return: discord.TextChannel, None if not found.
"""
for text_channel in guild.text_channels:
if exact:
if text_channel.name == channel.lower():
if delete is True:
await text_channel.delete()
continue
return text_channel
else:
if channel.lower() in text_channel.name:
if delete is True:
await text_channel.delete()
continue
return text_channel
if create is True:
text_channel = await guild.create_text_channel(channel, category=category)
return text_channel
return None
async def get_category_by_name(category_name: str, guild: discord.Guild, delete: bool = False, create: bool = False,
exact: bool = True):
"""
Returns a category based on a given name.
    :param exact: If set to True, matches the name exactly as it is.
:param category_name: The category to be gathered. -> str
:param guild: The guild to retrieve the category from. -> discord.Guild
:param delete: If set to True, deletes the category. (If found!)
:param create: If set to True, creates the category. (If not found!)
:return: discord.Category, None if not found.
"""
for category in guild.categories:
if exact and category.name == category_name:
if delete is True:
await category.delete()
continue
return category
elif not exact and category_name in category.name:
if delete is True:
await category.delete()
continue
return category
if create is True:
category = await guild.create_category(category_name)
return category
return None
async def twochars_async(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
async def as_get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
        date_string = f"{await twochars_async(str(now.day))}/{await twochars_async(str(now.month))}/{await twochars_async(str(now.year))} - " \
                      f"{await twochars_async(str(now.hour))}:{await twochars_async(str(now.minute))}"
    elif formatting == 2:
        date_string = f"{await twochars_async(str(now.day))}.{await twochars_async(str(now.month))}.{await twochars_async(str(now.year))}_" \
                      f"{await twochars_async(str(now.hour))}.{await twochars_async(str(now.minute))}"
    else:
        date_string = f"{await twochars_async(str(now.day))}/{await twochars_async(str(now.month))}/{await twochars_async(str(now.year))} - " \
                      f"{await twochars_async(str(now.hour))}:{await twochars_async(str(now.minute))}"
    if include_seconds:
        date_string += f":{await twochars_async(str(now.second))}"
return date_string
async def get_formatted_date_async(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
    date_string = f"{await twochars_async(str(date.day))}/{await twochars_async(str(date.month))}/{await twochars_async(str(date.year))} - " \
                  f"{await twochars_async(str(date.hour))}:{await twochars_async(str(date.minute))}"
    if include_seconds:
        date_string += f":{await twochars_async(str(date.second))}"
return date_string
async def send_loading(channel: discord.TextChannel, colour=discord.Colour.red()):
"""
Sends a loading embed to a specified channel.
:param channel: The channel for the message to be sent to. -> discord.TextChannel
:param colour: The embed colour. -> discord.Colour
:return: discord.Embed
"""
loading_embed = discord.Embed(
title='Loading...',
colour=colour
)
loading = await channel.send(embed=loading_embed)
return loading
async def get_textchannel_chatlog(text_channel: discord.TextChannel, limit: int = None):
"""
Returns a TextChannel chatlog
:param text_channel: The text channel for the data to be gathered from
:param limit: An integer to limit the amount of messages retrieved.
:return: String
"""
all_messages = await text_channel.history(limit=limit).flatten()
all_messages.reverse()
# Parses out and legibilises the messages into a chatlog
chatlog = ""
for message in all_messages:
if message.embeds:
content = message.embeds[0].title
elif message.attachments:
content = f"FILE(s) :{[file.filename for file in message.attachments]}"
else:
content = message.content
content = content.split("```")
content = '\n'.join(content)
chatlog += f"[{await get_formatted_date_async(message.created_at, include_seconds=True)}] [- MSG ID: {message.id}]" \
f" [- AUTHOR ID: {message.author.id}] <{message.author}> {content}\n"
return chatlog
async def get_textchannel_firstmessage(text_channel: discord.TextChannel):
"""
Returns the first message on a TextChannel
:param text_channel: The textchannel to retrieve the message from. -> discord.TextChannel
:return: discord.Message
"""
all_messages = await text_channel.history(limit=None).flatten()
all_messages.reverse()
return all_messages[0]
async def get_member_object(member_id: int, guild: discord.Guild):
"""
Returns a discord.Member object of a member from a given ID
:param member_id: The member ID. -> int
:param guild: The guild to retrieve the member from. -> discord.Guild
:return: discord.Member, None if not found.
"""
for member in guild.members:
if int(member.id) == int(member_id):
return member
return None
async def show_help_menu(ctx, bot: commands.Bot, colour=discord.Colour.red(), reverse=False):
"""
Standard help menu used between bots created by Alex, with loads of quirks to make the UI more appealing.
The help menu is completely computer-generated.
Description management:
> Leaving the description of a command without text will it not be shown in the UI
> Writing |String| at the beggining of a command description will have it sorted into a category
(Replace "String" with the category name)
> Categories are sorted alphabetically, aswell as bot_commands.
> Not specifying a category will result in the command being thrown into a "General" category
:param reverse:
:param ctx: discord context.
:param bot: discord BOT instance.
:param colour: Help menu embed colour
:return: discord.Embed
"""
help_menu_base = discord.Embed(
title=f"{bot.user.name}'s Help Menu - ",
description=f"Prefix: `{ctx.prefix}`",
colour=colour
)
dev = await bot.fetch_user(740969223681212507)
commands_dictionary = dict()
embed_list = list()
for command in bot.commands:
# Iterates through all the registered bot_commands
if not command.description:
# Skips over the command if no description is provided
continue
category_name = "General"
if command.description.startswith("|") and command.description.count(
"|") == 2 and not command.description.endswith("||"):
# Parses out the category of a command if a match is detected
category_name = command.description.split("|")[1].strip().title()
command.description = command.description.split("|")[2].strip()
params = ""
alias_list = "No aliases found"
for param in command.clean_params:
# Parses out the command parameters for usage in the command info
params += f" <{param}> "
if command.aliases:
# If any command aliases exist, parse them out for usage in the command info
alias_list = ""
for alias in command.aliases:
alias_list += f"|{ctx.prefix}{alias}| "
# Build the dict update
try:
_ = commands_dictionary[category_name]
commands_dictionary[category_name].append([command.name, command.description, alias_list, params])
except KeyError:
command_registration = {category_name: [[command.name, command.description, alias_list, params]]}
commands_dictionary.update(command_registration)
for category in sorted(commands_dictionary):
# Loads in the categories with their bot_commands to the help menu
# Loads in the embed for the category
category_embed = help_menu_base.copy()
category_embed.title += f"{category} Commands"
for command in sorted(commands_dictionary[category]):
# Gets the command info
name = command[0]
description = command[1]
aliases = command[2]
params = command[3]
category_embed.add_field(name=name.title(), value=f"{description}\n`USAGE: {ctx.prefix}{name}{params}`\n"
f"`ALIASES: {aliases}`", inline=False)
category_embed.timestamp = datetime.datetime.now()
category_embed.set_footer(text=f"Developed by {dev}")
category_embed.set_thumbnail(url=bot.user.avatar_url)
embed_list.append(category_embed)
if reverse:
embed_list = reversed(embed_list)
for embed in embed_list:
# Sends all the embeds in the list
await ctx.send(embed=embed)
async def convert_txt_to_pdf(path: str):
"""
Converts a .txt file to a .pdf file
:param path: The path for the file. -> str
:return:
"""
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=15)
output_path = str(os.path.splitext(path)[0]) + ".pdf"
with open(path, 'r') as txtfile:
lines = txtfile.readlines()
for line in lines:
if line == '\n':
pdf.cell(200, 10, txt='\n', ln=1, align="L")
continue
if line[0] == "|" and line[2] == "|":
pdf.cell(200, 10, txt=line[3:].strip(), ln=1, align=line[1])
continue
pdf.cell(200, 10, txt=line.strip(), ln=1, align="L")
pdf.output(output_path)
async def load_missing_perms_embed(colour=discord.Colour.red()):
"""
Quickly loads a missing permissions embed
:param colour: The embed colour
:return: discord.Embed
"""
embed = discord.Embed(
title="Missing permissions!",
description="Sorry, you can't use this command.",
colour=colour
)
embed.timestamp = datetime.datetime.now()
return embed
async def interactive_dialog(**kwargs):
"""
    Creates an "interactive dialog", as I name it; an embed used together with the wait_for() function to facilitate
    the creation of dialogs.
:param kwargs: expects ctx, channel, check, title, body and optionally emojis, colour.
> PILLAR ARGUMENTS are arguments that are mandatory; Vital for the function to be used.
> OPTIONAL ARGUMENTS are... optional arguments. What did you expect?
> "Ctx" is the command context. (PILLAR ARGUMENT)
    > "Check" is the type of event to happen, as well as the wait_for check to perform on the response. (PILLAR ARGUMENT)
> "Title" is the dialog embed title. (PILLAR ARGUMENT)
> "Body" is the dialog embed description. (PILLAR ARGUMENT)
> "Channel" is the place where to send the dialog to. (OPTIONAL ARGUMENT)
> "Emojis" is a list with a list of reactions, (UTF-8 Symbols) to add into the dialog. (OPTIONAL ARGUMENT)
> "Colour" is the dialog embed colour. Defaults to discord.Colours.red() (OPTIONAL ARGUMENT)
> "Picture" is the dialog image, the big picture at the bottom of the embed. (OPTIONAL ARGUMENT)
> "Thumbnail" is the dialog embed thumbnail, the small picture that gets placed on the top right side of the embed. (OPTIONAL ARGUMENT)
> "Footer" is the dialog footer, the small text at the bottom of the embed. (OPTIONAL ARGUMENT)
:return: The user's response.
"""
# Performs a kwargs check to raise errors if any of the pillar arguments are missing
if "ctx" not in kwargs: raise TypeError("Missing CTX argument in interactive dialog.")
if "check" not in kwargs: raise TypeError("Missing CHECK argument in interactive dialog.")
if "title" not in kwargs: raise TypeError("Missing TITLE argument in interactive dialog.")
if "body" not in kwargs: raise TypeError("Missing BODY argument in interactive dialog.")
# Performs a kwargs check to default the arguments if any of the optional arguments are missing
if "channel" not in kwargs: kwargs["channel"] = kwargs["ctx"].author
if "emojis" not in kwargs: kwargs["emojis"] = None
if "colour" not in kwargs: kwargs["colour"] = discord.Colour.red()
if "picture" not in kwargs: kwargs["picture"] = None
if "thumbnail" not in kwargs: kwargs["thumbnail"] = None
if "footer" not in kwargs: kwargs["footer"] = None
# Loads the dialog embed
dialog_embed = discord.Embed(
title=kwargs["title"],
description=kwargs["body"],
colour=kwargs["colour"]
)
dialog_embed.timestamp = datetime.datetime.now()
dialog_embed.set_thumbnail(url=kwargs["thumbnail"])
dialog_embed.set_image(url=kwargs["picture"])
dialog_embed.set_footer(text=kwargs["footer"])
# Sends the embed to the desired channel
dialog_message = await kwargs["channel"].send(embed=dialog_embed)
    # Starts the event type checks and their proper handlers
if kwargs["check"][0] == "message":
try:
            msg = await kwargs["ctx"].bot.wait_for("message", check=kwargs["check"][1], timeout=120.0)
return msg
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
if kwargs["check"][0] == "reaction":
if kwargs["emojis"] is not None:
# Adds the reactions to a message, if the emojis kwarg is not missing.
for emoji in kwargs["emojis"]:
await dialog_message.add_reaction(emoji)
try:
            reaction, user = await kwargs["ctx"].bot.wait_for("reaction_add", check=kwargs["check"][1], timeout=120.0)
return reaction, user
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
| # -*- coding: utf-8 -*-
"""
This module is distributed as part of the Laminaria Core (Python Version).
Get the Source Code in GitHub:
https://github.com/MrKelpy/LaminariaCore
The LaminariaCore is Open Source and distributed under the
MIT License
"""
# Built-in Imports
import datetime
import random
import asyncio
import os
# Third Party Imports
import screeninfo
from discord.ext import commands
import discord
from fpdf import FPDF
# Local Application Imports
###############################################################################
### DATE & TIME ###
###############################################################################
def twochars(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
def get_formatted_date(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
date_string = f"{twochars(str(date.day))}/{twochars(str(date.month))}/{twochars(str(date.year))} - " \
f"{twochars(str(date.hour))}:{twochars(str(date.minute))}"
if include_seconds:
date_string += f":{twochars(str(date.second))}"
return date_string
def get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
elif formatting == 2:
date_string = f"{twochars(str(now.day))}.{twochars(str(now.month))}.{twochars(str(now.year))}_" \
f"{twochars(str(now.hour))}.{twochars(str(now.minute))}"
else:
date_string = f"{twochars(str(now.day))}/{twochars(str(now.month))}/{twochars(str(now.year))} - " \
f"{twochars(str(now.hour))}:{twochars(str(now.minute))}"
if include_seconds:
date_string += f":{twochars(str(now.second))}"
return date_string
def time_until_midnight():
"""
Get seconds left until midnight
"""
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
timedelta_until_midnight = datetime.datetime.combine(tomorrow, datetime.time.min) - datetime.datetime.now()
return timedelta_until_midnight.seconds
###############################################################################
### GENERAL ###
###############################################################################
def get_absolute_screen_coords(relx, rely):
"""
Returns absolute screen coordinates based off the given relative
coordinates. For instance, in a 1920x720 screen, the x50, y50 input would be
x960, y360.
:param relx: Relative X Coordinate
:param rely: Relative Y Coordinate
:return: Absolute Coordinates
"""
monitor = screeninfo.get_monitors()[0]
x = (relx*monitor.width)/100
y = (rely*monitor.height)/100
return x, y
def get_relative_screen_coords(x, y):
"""
Returns relative screen coordinates based off the given absolute
    coordinates. The relative coordinates are percentage-based values calculated
    relative to the monitor specs and the given coords.
:param x: Absolute X
:param y: Absolute Y
:return:
"""
monitor = screeninfo.get_monitors()[0]
relx = (x*100)/monitor.width
rely = (y*100)/monitor.height
return relx, rely
###############################################################################
### PLACEHOLDERS ###
###############################################################################
async def small_ipsum():
"""
    Returns the small version of the lorem ipsum placeholder
:return:
"""
return "Lorem ipsum dolor sit amet."
async def big_ipsum():
"""
Returns a bigger version of the lorem ipsum text than the small_ipsum function does.
:return:
"""
return "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt " \
"ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco " \
"laboris nisi ut aliquip ex ea commodo consequat."
###############################################################################
### DISCORD.PY ###
###############################################################################
async def hasrole(role: discord.Role, user: discord.Member, add: bool = False):
"""
Checks if a user has a certain role.
:param role: The role to be checked for. -> discord.Role
:param user: The user. -> discord.Member
:param add: If set to True, adds the role to the user, will always return True.
:return: True, if user has the role. False otherwise.
"""
for r in user.roles:
if r == role:
return True
else:
if add is True:
await user.add_roles(role)
return True
return False
async def getrolenamed(role: str, guild: discord.Guild, create: bool = False, exact: bool = True):
"""
Returns a role inside a Guild based on a given name.
:param role: The role to be gathered. -> str
:param guild: The guild to retrieve the role from. -> discord.Guild
    :param create: If set to True, creates the role. (If non-existent!)
:param exact: If set to True, matches the role exactly
:return: discord.Role, None if not found.
"""
for r in guild.roles:
if exact and r.name == role:
return r
elif role in r.name:
return r
else:
if create is True:
colours = [discord.Colour.red(), discord.Colour.dark_teal(), discord.Colour.teal(), discord.Colour.gold(),
discord.Colour.blurple(), discord.Colour.purple(), discord.Colour.green(),
discord.Colour.greyple(),
discord.Colour.orange(), discord.Colour.light_grey()]
return_role = await guild.create_role(name=role, colour=random.choice(colours))
return return_role
return None
async def get_textchannel_by_name(channel: str, guild: discord.Guild,
delete: bool = False, create: bool = False, category: str = None, exact: bool = True):
"""
Returns a text channel based on a given name.
:param channel: The channel to be gathered. -> str
:param guild: The guild to retrieve the channel from. -> discord.Guild
    :param delete: If set to True, deletes the channel. (If found!)
    :param create: If set to True, creates the channel. (If not found!)
    :param category: The category to create the channel into. (If create is True!)
    :param exact: If set to True, the channel name needs to match exactly. Otherwise a partial match is accepted.
:return: discord.TextChannel, None if not found.
"""
for text_channel in guild.text_channels:
if exact:
if text_channel.name == channel.lower():
if delete is True:
await text_channel.delete()
continue
return text_channel
else:
if channel.lower() in text_channel.name:
if delete is True:
await text_channel.delete()
continue
return text_channel
if create is True:
text_channel = await guild.create_text_channel(channel, category=category)
return text_channel
return None
async def get_category_by_name(category_name: str, guild: discord.Guild, delete: bool = False, create: bool = False,
exact: bool = True):
"""
Returns a category based on a given name.
    :param exact: If set to True, matches the name exactly as it is.
:param category_name: The category to be gathered. -> str
:param guild: The guild to retrieve the category from. -> discord.Guild
:param delete: If set to True, deletes the category. (If found!)
:param create: If set to True, creates the category. (If not found!)
:return: discord.Category, None if not found.
"""
for category in guild.categories:
if exact and category.name == category_name:
if delete is True:
await category.delete()
continue
return category
elif not exact and category_name in category.name:
if delete is True:
await category.delete()
continue
return category
if create is True:
category = await guild.create_category(category_name)
return category
return None
async def twochars_async(arg):
"""
Formats a string of two characters into the format of (0X), useful for date formatting.
:param arg: The string
:return: String
"""
if len(arg) == 1:
return f"0{arg}"
return arg
async def as_get_formatted_date_now(include_seconds: bool = False, formatting: int = 1):
"""
Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one.
:param formatting: Format type -> int
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
now = datetime.datetime.now()
if formatting == 1:
        date_string = f"{await twochars_async(str(now.day))}/{await twochars_async(str(now.month))}/{await twochars_async(str(now.year))} - " \
                      f"{await twochars_async(str(now.hour))}:{await twochars_async(str(now.minute))}"
    elif formatting == 2:
        date_string = f"{await twochars_async(str(now.day))}.{await twochars_async(str(now.month))}.{await twochars_async(str(now.year))}_" \
                      f"{await twochars_async(str(now.hour))}.{await twochars_async(str(now.minute))}"
    else:
        date_string = f"{await twochars_async(str(now.day))}/{await twochars_async(str(now.month))}/{await twochars_async(str(now.year))} - " \
                      f"{await twochars_async(str(now.hour))}:{await twochars_async(str(now.minute))}"
    if include_seconds:
        date_string += f":{await twochars_async(str(now.second))}"
return date_string
async def get_formatted_date_async(date: datetime, include_seconds: bool = False):
"""
Returns a given date in the handy DD/MM/YY - HH:MM:SS format.
:param date: The date to be formatted -> datetime.datetime
:param include_seconds: If set to True, include seconds in the format.
:return: String
"""
    date_string = f"{await twochars_async(str(date.day))}/{await twochars_async(str(date.month))}/{await twochars_async(str(date.year))} - " \
                  f"{await twochars_async(str(date.hour))}:{await twochars_async(str(date.minute))}"
    if include_seconds:
        date_string += f":{await twochars_async(str(date.second))}"
return date_string
async def send_loading(channel: discord.TextChannel, colour=discord.Colour.red()):
"""
Sends a loading embed to a specified channel.
:param channel: The channel for the message to be sent to. -> discord.TextChannel
:param colour: The embed colour. -> discord.Colour
:return: discord.Embed
"""
loading_embed = discord.Embed(
title='Loading...',
colour=colour
)
loading = await channel.send(embed=loading_embed)
return loading
async def get_textchannel_chatlog(text_channel: discord.TextChannel, limit: int = None):
"""
Returns a TextChannel chatlog
:param text_channel: The text channel for the data to be gathered from
:param limit: An integer to limit the amount of messages retrieved.
:return: String
"""
all_messages = await text_channel.history(limit=limit).flatten()
all_messages.reverse()
# Parses out and legibilises the messages into a chatlog
chatlog = ""
for message in all_messages:
if message.embeds:
content = message.embeds[0].title
elif message.attachments:
content = f"FILE(s) :{[file.filename for file in message.attachments]}"
else:
content = message.content
content = content.split("```")
content = '\n'.join(content)
chatlog += f"[{await get_formatted_date_async(message.created_at, include_seconds=True)}] [- MSG ID: {message.id}]" \
f" [- AUTHOR ID: {message.author.id}] <{message.author}> {content}\n"
return chatlog
async def get_textchannel_firstmessage(text_channel: discord.TextChannel):
"""
Returns the first message on a TextChannel
:param text_channel: The textchannel to retrieve the message from. -> discord.TextChannel
:return: discord.Message
"""
all_messages = await text_channel.history(limit=None).flatten()
all_messages.reverse()
return all_messages[0]
async def get_member_object(member_id: int, guild: discord.Guild):
"""
Returns a discord.Member object of a member from a given ID
:param member_id: The member ID. -> int
:param guild: The guild to retrieve the member from. -> discord.Guild
:return: discord.Member, None if not found.
"""
for member in guild.members:
if int(member.id) == int(member_id):
return member
return None
async def show_help_menu(ctx, bot: commands.Bot, colour=discord.Colour.red(), reverse=False):
"""
Standard help menu used between bots created by Alex, with loads of quirks to make the UI more appealing.
The help menu is completely computer-generated.
Description management:
> Leaving the description of a command without text will it not be shown in the UI
> Writing |String| at the beggining of a command description will have it sorted into a category
(Replace "String" with the category name)
> Categories are sorted alphabetically, aswell as bot_commands.
> Not specifying a category will result in the command being thrown into a "General" category
:param reverse:
:param ctx: discord context.
:param bot: discord BOT instance.
:param colour: Help menu embed colour
:return: discord.Embed
"""
help_menu_base = discord.Embed(
title=f"{bot.user.name}'s Help Menu - ",
description=f"Prefix: `{ctx.prefix}`",
colour=colour
)
dev = await bot.fetch_user(740969223681212507)
commands_dictionary = dict()
embed_list = list()
for command in bot.commands:
# Iterates through all the registered bot_commands
if not command.description:
# Skips over the command if no description is provided
continue
category_name = "General"
if command.description.startswith("|") and command.description.count(
"|") == 2 and not command.description.endswith("||"):
# Parses out the category of a command if a match is detected
category_name = command.description.split("|")[1].strip().title()
command.description = command.description.split("|")[2].strip()
params = ""
alias_list = "No aliases found"
for param in command.clean_params:
# Parses out the command parameters for usage in the command info
params += f" <{param}> "
if command.aliases:
# If any command aliases exist, parse them out for usage in the command info
alias_list = ""
for alias in command.aliases:
alias_list += f"|{ctx.prefix}{alias}| "
# Build the dict update
try:
_ = commands_dictionary[category_name]
commands_dictionary[category_name].append([command.name, command.description, alias_list, params])
except KeyError:
command_registration = {category_name: [[command.name, command.description, alias_list, params]]}
commands_dictionary.update(command_registration)
for category in sorted(commands_dictionary):
# Loads in the categories with their bot_commands to the help menu
# Loads in the embed for the category
category_embed = help_menu_base.copy()
category_embed.title += f"{category} Commands"
for command in sorted(commands_dictionary[category]):
# Gets the command info
name = command[0]
description = command[1]
aliases = command[2]
params = command[3]
category_embed.add_field(name=name.title(), value=f"{description}\n`USAGE: {ctx.prefix}{name}{params}`\n"
f"`ALIASES: {aliases}`", inline=False)
category_embed.timestamp = datetime.datetime.now()
category_embed.set_footer(text=f"Developed by {dev}")
category_embed.set_thumbnail(url=bot.user.avatar_url)
embed_list.append(category_embed)
if reverse:
embed_list = reversed(embed_list)
for embed in embed_list:
# Sends all the embeds in the list
await ctx.send(embed=embed)
async def convert_txt_to_pdf(path: str):
"""
Converts a .txt file to a .pdf file
:param path: The path for the file. -> str
:return:
"""
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=15)
output_path = str(os.path.splitext(path)[0]) + ".pdf"
with open(path, 'r') as txtfile:
lines = txtfile.readlines()
for line in lines:
if line == '\n':
pdf.cell(200, 10, txt='\n', ln=1, align="L")
continue
if line[0] == "|" and line[2] == "|":
pdf.cell(200, 10, txt=line[3:].strip(), ln=1, align=line[1])
continue
pdf.cell(200, 10, txt=line.strip(), ln=1, align="L")
pdf.output(output_path)
async def load_missing_perms_embed(colour=discord.Colour.red()):
"""
Quickly loads a missing permissions embed
:param colour: The embed colour
:return: discord.Embed
"""
embed = discord.Embed(
title="Missing permissions!",
description="Sorry, you can't use this command.",
colour=colour
)
embed.timestamp = datetime.datetime.now()
return embed
async def interactive_dialog(**kwargs):
"""
    Creates an "interactive dialog", as I name it; an embed used together with the wait_for() function to facilitate
    the creation of dialogs.
:param kwargs: expects ctx, channel, check, title, body and optionally emojis, colour.
> PILLAR ARGUMENTS are arguments that are mandatory; Vital for the function to be used.
> OPTIONAL ARGUMENTS are... optional arguments. What did you expect?
> "Ctx" is the command context. (PILLAR ARGUMENT)
    > "Check" is the type of event to happen, as well as the wait_for check to perform on the response. (PILLAR ARGUMENT)
> "Title" is the dialog embed title. (PILLAR ARGUMENT)
> "Body" is the dialog embed description. (PILLAR ARGUMENT)
> "Channel" is the place where to send the dialog to. (OPTIONAL ARGUMENT)
> "Emojis" is a list with a list of reactions, (UTF-8 Symbols) to add into the dialog. (OPTIONAL ARGUMENT)
> "Colour" is the dialog embed colour. Defaults to discord.Colours.red() (OPTIONAL ARGUMENT)
> "Picture" is the dialog image, the big picture at the bottom of the embed. (OPTIONAL ARGUMENT)
> "Thumbnail" is the dialog embed thumbnail, the small picture that gets placed on the top right side of the embed. (OPTIONAL ARGUMENT)
> "Footer" is the dialog footer, the small text at the bottom of the embed. (OPTIONAL ARGUMENT)
:return: The user's response.
"""
# Performs a kwargs check to raise errors if any of the pillar arguments are missing
if "ctx" not in kwargs: raise TypeError("Missing CTX argument in interactive dialog.")
if "check" not in kwargs: raise TypeError("Missing CHECK argument in interactive dialog.")
if "title" not in kwargs: raise TypeError("Missing TITLE argument in interactive dialog.")
if "body" not in kwargs: raise TypeError("Missing BODY argument in interactive dialog.")
# Performs a kwargs check to default the arguments if any of the optional arguments are missing
if "channel" not in kwargs: kwargs["channel"] = kwargs["ctx"].author
if "emojis" not in kwargs: kwargs["emojis"] = None
if "colour" not in kwargs: kwargs["colour"] = discord.Colour.red()
if "picture" not in kwargs: kwargs["picture"] = None
if "thumbnail" not in kwargs: kwargs["thumbnail"] = None
if "footer" not in kwargs: kwargs["footer"] = None
# Loads the dialog embed
dialog_embed = discord.Embed(
title=kwargs["title"],
description=kwargs["body"],
colour=kwargs["colour"]
)
dialog_embed.timestamp = datetime.datetime.now()
dialog_embed.set_thumbnail(url=kwargs["thumbnail"])
dialog_embed.set_image(url=kwargs["picture"])
dialog_embed.set_footer(text=kwargs["footer"])
# Sends the embed to the desired channel
dialog_message = await kwargs["channel"].send(embed=dialog_embed)
    # Starts the event type checks and their proper handlers
if kwargs["check"][0] == "message":
try:
            msg = await kwargs["ctx"].bot.wait_for("message", check=kwargs["check"][1], timeout=120.0)
return msg
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return
if kwargs["check"][0] == "reaction":
if kwargs["emojis"] is not None:
# Adds the reactions to a message, if the emojis kwarg is not missing.
for emoji in kwargs["emojis"]:
await dialog_message.add_reaction(emoji)
try:
            reaction, user = await kwargs["ctx"].bot.wait_for("reaction_add", check=kwargs["check"][1], timeout=120.0)
return reaction, user
except asyncio.TimeoutError:
# Returns an empty response if a timeout occurs.
return | en | 0.668702 | # -*- coding: utf-8 -*- This module is distributed as part of the Laminaria Core (Python Version). Get the Source Code in GitHub: https://github.com/MrKelpy/LaminariaCore The LaminariaCore is Open Source and distributed under the MIT License # Built-in Imports # Third Party Imports # Local Application Imports ############################################################################### ### DATE & TIME ### ############################################################################### Formats a string of two characters into the format of (0X), useful for date formatting. :param arg: The string :return: String Returns a given date in the handy DD/MM/YY - HH:MM:SS format. :param date: The date to be formatted -> datetime.datetime :param include_seconds: If set to True, include seconds in the format. :return: String Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one. :param formatting: Format type -> int :param include_seconds: If set to True, include seconds in the format. :return: String Get seconds left until midnight ############################################################################### ### GENERAL ### ############################################################################### Returns absolute screen coordinates based off the given relative coordinates. For instance, in a 1920x720 screen, the x50, y50 input would be x960, y360. :param relx: Relative X Coordinate :param rely: Relative Y Coordinate :return: Absolute Coordinates Returns relative screen coordinates based off the given absolute coordinates. The relative coordinates are percentage-based values calculates relatively to the monitor specs and the given coords. :param x: Absolute X :param y: Absolute Y :return: ############################################################################### ### PLACEHOLDERS ### ############################################################################### Returns the small version of the lorem impsum placeholder :return: Returns a bigger version of the lorem ipsum text than the small_ipsum function does. :return: ############################################################################### ### DISCORD.PY ### ############################################################################### Checks if a user has a certain role. :param role: The role to be checked for. -> discord.Role :param user: The user. -> discord.Member :param add: If set to True, adds the role to the user, will always return True. :return: True, if user has the role. False otherwise. Returns a role inside a Guild based on a given name. :param role: The role to be gathered. -> str :param guild: The guild to retrieve the role from. -> discord.Guild :param create: If set to True, creates the role. (If non existant!) :param exact: If set to True, matches the role exactly :return: discord.Role, None if not found. Returns a text channel based on a given name. :param channel: The channel to be gathered. -> str :param guild: The guild to retrieve the channel from. -> discord.Guild :param delete: If set to True, deletes the role. (If found!) :param create: If set to True, creates the role. (If not found!) :param category: The category to create the channel into. (If create is True!) :param exact: If set to True, the channelname needs to match the channel at 100%. Else, no. :return: discord.TextChannel, None if not found. Returns a category based on a given name. 
:param exact: If set to True, matches the name exactly as it is.* :param category_name: The category to be gathered. -> str :param guild: The guild to retrieve the category from. -> discord.Guild :param delete: If set to True, deletes the category. (If found!) :param create: If set to True, creates the category. (If not found!) :return: discord.Category, None if not found. Formats a string of two characters into the format of (0X), useful for date formatting. :param arg: The string :return: String Returns the current date in the handy DD/MM/YY - HH:MM:SS format (default) or in the specified one. :param formatting: Format type -> int :param include_seconds: If set to True, include seconds in the format. :return: String Returns a given date in the handy DD/MM/YY - HH:MM:SS format. :param date: The date to be formatted -> datetime.datetime :param include_seconds: If set to True, include seconds in the format. :return: String Sends a loading embed to a specified channel. :param channel: The channel for the message to be sent to. -> discord.TextChannel :param colour: The embed colour. -> discord.Colour :return: discord.Embed Returns a TextChannel chatlog :param text_channel: The text channel for the data to be gathered from :param limit: An integer to limit the amount of messages retrieved. :return: String # Parses out and legibilises the messages into a chatlog Returns the first message on a TextChannel :param text_channel: The textchannel to retrieve the message from. -> discord.TextChannel :return: discord.Message Returns a discord.Member object of a member from a given ID :param member_id: The member ID. -> int :param guild: The guild to retrieve the member from. -> discord.Guild :return: discord.Member, None if not found. Standard help menu used between bots created by Alex, with loads of quirks to make the UI more appealing. The help menu is completely computer-generated. Description management: > Leaving the description of a command without text will it not be shown in the UI > Writing |String| at the beggining of a command description will have it sorted into a category (Replace "String" with the category name) > Categories are sorted alphabetically, aswell as bot_commands. > Not specifying a category will result in the command being thrown into a "General" category :param reverse: :param ctx: discord context. :param bot: discord BOT instance. :param colour: Help menu embed colour :return: discord.Embed # Iterates through all the registered bot_commands # Skips over the command if no description is provided # Parses out the category of a command if a match is detected # Parses out the command parameters for usage in the command info # If any command aliases exist, parse them out for usage in the command info # Build the dict update # Loads in the categories with their bot_commands to the help menu # Loads in the embed for the category # Gets the command info # Sends all the embeds in the list Converts a .txt file to a .pdf file :param path: The path for the file. -> str :return: Quickly loads a missing permissions embed :param colour: The embed colour :return: discord.Embed Creates an "interactive dialog" as i name it; An embed that uses the wait_for() function together to facilitate the creation of dialogs. :param kwargs: expects ctx, channel, check, title, body and optionally emojis, colour. > PILLAR ARGUMENTS are arguments that are mandatory; Vital for the function to be used. > OPTIONAL ARGUMENTS are... optional arguments. What did you expect? > "Ctx" is the command context. 
(PILLAR ARGUMENT) > "Check" is the type of event to happen, aswell as the wait_for check to perform on the response. (PILLAR ARGUMENT) > "Title" is the dialog embed title. (PILLAR ARGUMENT) > "Body" is the dialog embed description. (PILLAR ARGUMENT) > "Channel" is the place where to send the dialog to. (OPTIONAL ARGUMENT) > "Emojis" is a list with a list of reactions, (UTF-8 Symbols) to add into the dialog. (OPTIONAL ARGUMENT) > "Colour" is the dialog embed colour. Defaults to discord.Colours.red() (OPTIONAL ARGUMENT) > "Picture" is the dialog image, the big picture at the bottom of the embed. (OPTIONAL ARGUMENT) > "Thumbnail" is the dialog embed thumbnail, the small picture that gets placed on the top right side of the embed. (OPTIONAL ARGUMENT) > "Footer" is the dialog footer, the small text at the bottom of the embed. (OPTIONAL ARGUMENT) :return: The user's response. # Performs a kwargs check to raise errors if any of the pillar arguments are missing # Performs a kwargs check to default the arguments if any of the optional arguments are missing # Loads the dialog embed # Sends the embed to the desired channel # Starts the event type cheks, and their proper handles # Returns an empty response if a timeout occurs. # Adds the reactions to a message, if the emojis kwarg is not missing. # Returns an empty response if a timeout occurs. | 2.785235 | 3 |
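The docstrings above describe, among other helpers, a percentage-based screen-coordinate conversion (e.g. x50, y50 on a 1920x720 display maps to x960, y360). The implementation itself is not visible in this excerpt, so the following is only a minimal sketch of that conversion; the function names and the explicit screen_size parameter are assumptions, not the library's actual API.

def get_absolute_coords(relx, rely, screen_size=(1920, 720)):
    # Convert percentage-based coordinates (0-100) into absolute pixel coordinates.
    width, height = screen_size
    return int(width * relx / 100), int(height * rely / 100)

def get_relative_coords(x, y, screen_size=(1920, 720)):
    # Convert absolute pixel coordinates back into percentages of the screen size.
    width, height = screen_size
    return 100.0 * x / width, 100.0 * y / height

# Matches the example in the docstring: on a 1920x720 screen, (50, 50) -> (960, 360).
assert get_absolute_coords(50, 50) == (960, 360)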
examples/api/default_value.py | clamdad/atom | 222 | 10118 | <filename>examples/api/default_value.py
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2021, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
""" Demonstrate all the ways to initialize a value
1. Pass the value directly
2. Assign the default value explicitly
3. Provide the value during initialization of the object
4. Provide factory callable that returns a value
5. Use a _default_* static method
"""
import sys
from atom.api import Atom, Int, Str
def get_mother():
return "Maude " + get_last_name()
def get_last_name():
"""Return a last name based on the system byteorder."""
return sys.byteorder.capitalize()
class Person(Atom):
"""A simple class representing a person object."""
first_name = Str("Bob")
age = Int(default=40)
address = Str()
mother = Str(factory=get_mother)
last_name = Str()
def _default_last_name(self):
return get_last_name()
if __name__ == "__main__":
bob = Person(address="101 Main")
print((bob.first_name, bob.last_name, bob.age))
print(bob.mother)
| <filename>examples/api/default_value.py
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2021, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
""" Demonstrate all the ways to initialize a value
1. Pass the value directly
2. Assign the default value explicitly
3. Provide the value during initialization of the object
4. Provide factory callable that returns a value
5. Use a _default_* static method
"""
import sys
from atom.api import Atom, Int, Str
def get_mother():
return "Maude " + get_last_name()
def get_last_name():
"""Return a last name based on the system byteorder."""
return sys.byteorder.capitalize()
class Person(Atom):
"""A simple class representing a person object."""
first_name = Str("Bob")
age = Int(default=40)
address = Str()
mother = Str(factory=get_mother)
last_name = Str()
def _default_last_name(self):
return get_last_name()
if __name__ == "__main__":
bob = Person(address="101 Main")
print((bob.first_name, bob.last_name, bob.age))
print(bob.mother)
| en | 0.572225 | # -------------------------------------------------------------------------------------- # Copyright (c) 2013-2021, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # -------------------------------------------------------------------------------------- Demonstrate all the ways to initialize a value 1. Pass the value directly 2. Assign the default value explicitly 3. Provide the value during initialization of the object 4. Provide factory callable that returns a value 5. Use a _default_* static method Return a last name based on the system byteorder. A simple class representing a person object. | 4.029145 | 4 |
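A small, hypothetical extension of the atom example above, showing that values passed to the constructor take precedence over every default mechanism the file demonstrates (literal default, default=, factory=, and _default_* methods); the specific values are illustrative only.

# Assumes the Person class defined in the example above.
carol = Person(first_name="Carol", age=35, last_name="Smith", mother="Alice Smith")
assert carol.first_name == "Carol"    # overrides Str("Bob")
assert carol.age == 35                # overrides Int(default=40)
assert carol.last_name == "Smith"     # overrides the _default_last_name() method
assert carol.mother == "Alice Smith"  # overrides factory=get_mother

# Members not passed to the constructor still fall back to their declared defaults.
dave = Person()
assert dave.age == 40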
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Codec/YUV4MPEG.py | mdavid/nuxleus | 1 | 10119 | #!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================
YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends
video frame data structures to its "outbox" outbox.
FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox.
The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.
Example Usage
-------------
Playback a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
YUV4MPEGToFrame(),
VideoOverlay()
).run()
Decode a dirac encoded video file to a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
DiracDecoder(),
FrameToYUV4MPEG(),
SimpleFileWriter("output.yuv4mpeg")
).run()
YUV4MPEGToFrame Behaviour
-------------------------
Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox
and frame data structures will be sent out of the "outbox" outbox as soon as
they are parsed.
See below for a description of the uncompressed frame data structure format.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
FrameToYUV4MPEG Behaviour
-------------------------
Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.
See below for a description of the uncompressed frame data structure format.
The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format and
size, otherwise the output data will not be valid YUV4MPEG.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
A frame is a dictionary data structure. It must, at minimum contain the first 3
("yuv", "size" and "pixformat")::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"pixformat" : pixelformat # format of raw video data
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixel_aspect" : fraction # aspect ratio of pixels
"sequence_meta" : metadata # string containing extended metadata
# (no whitespace or control characters)
}
All other fields are optional when providing frames to FrameToYUV4MPEG.
YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other fields
will be filled in if the relevant header data is detected in the file.
The pixel formats recognised (and therefore supported) are::
"YUV420_planar"
"YUV411_planar"
"YUV422_planar"
"YUV444_planar"
"YUV4444_planar"
"Y_planar"
"""
from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re
from Kamaelia.Support.Data.Rationals import rational
class YUV4MPEGToFrame(component):
"""\
YUV4MPEGToFrame() -> new YUV4MPEGToFrame component.
    Parses YUV4MPEG format binary data, sent as strings to its "inbox" inbox
and outputs uncompressed video frame data structures to its "outbox" outbox.
"""
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(YUV4MPEGToFrame,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def main(self):
"""Main loop"""
# parse header
for _ in self.readline(): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
line = self.bytesread
m = re.match("^YUV4MPEG2((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
seq_params = parse_seq_tags(fields)
yield 1
while 1:
for _ in self.readline(): yield _
line = self.bytesread
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
m = re.match("^FRAME((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
frame_params = parse_frame_tags(fields)
ysize = seq_params["size"][0] * seq_params["size"][1]
csize = seq_params["chroma_size"][0] * seq_params["chroma_size"][1]
for _ in self.readbytes(ysize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
y = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
u = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
v = self.bytesread
frame = { "yuv" : (y,u,v) }
frame.update(seq_params)
frame.update(frame_params)
for _ in self.safesend(frame,"outbox"): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and not self.dataReady("inbox")):
break
yield 1
if self.shutdownMsg:
self.send(self.shutdownMsg, "signal")
else:
self.send(producerFinished(), "signal")
def parse_seq_tags(fields):
"""Parses YUV4MPEG header tags"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "W" in tags and "H" in tags:
params['size'] = (int(tags["W"]), int(tags["H"]))
else:
        raise ValueError("YUV4MPEG sequence header is missing the mandatory W and/or H tags")
if "C" in tags:
C = tags["C"]
if C == "420jpeg": # 4:2:0 with JPEG/MPEG-1 siting (default)
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420mpeg2": # 4:2:0 with MPEG-2 siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420paldv": # 4:2:0 with PAL-DV siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "411": # 4:1:1, cosited
params['pixformat'] = "YUV411_planar"
params['chroma_size'] = (params['size'][0]/4, params['size'][1])
elif C == "422": # 4:2:2, cosited
params['pixformat'] = "YUV422_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1])
elif C == "444": # 4:4:4 (no subsampling)
params['pixformat'] = "YUV444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "444alpha": # 4:4:4 with an alpha channel
params['pixformat'] = "YUV4444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "mono": # luma (Y') plane only
params['pixformat'] = "Y_planar"
params['chroma_size'] = (0,0)
else:
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
if "I" in tags:
I = tags["I"]
if I == "?": # unknown (default)
pass
elif I == "p": # progressive/none
params["interlaced"] = False
elif I == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif I == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif I == "m": # mixed-mode: refer to 'I' tag in frame header
pass
if "F" in tags:
m = re.match("^(\d+):(\d+)$",tags["F"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["frame_rate"] = num/denom
if "A" in tags:
m = re.match("^(\d+):(\d+)$",tags["A"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["pixel_aspect"] = num/denom
if "X" in tags:
params["sequence_meta"] = tags["X"]
return params
def parse_frame_tags(fields):
"""\
Parses YUV4MPEG frame tags.
"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "I" in tags:
x,y,z = tags["I"][0], tags["I"][1], tags["I"][2]
if x == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "T": # top-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "B": # bottom-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "1": # single progressive frame
params["interlaced"] = False
elif x == "2": # double progressive frame (repeat)
params["interlaced"] = False
elif x == "3": # triple progressive frame (repeat)
params["interlaced"] = False
if y == "p": # fields sampled at same time
params["interlaced"] = False
elif y == "i": # fields sampled at different times
params["interlaced"] = True
if z == "p": # progressive (subsampling over whole frame)
pass
elif z == "i": # interlaced (each field subsampled independently)
pass
elif z == "?": # unknown (allowed only for non-4:2:0 subsampling)
pass
if "X" in tags:
params["meta"] = tags["X"]
return params
class FrameToYUV4MPEG(component):
"""\
FrameToYUV4MPEG() -> new FrameToYUV4MPEG component.
Parses uncompressed video frame data structures sent to its "inbox" inbox
and writes YUV4MPEG format binary data as strings to its "outbox" outbox.
"""
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
ensures self.shutdownMsg contains the highest priority one encountered
so far.
"""
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg,shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canShutdown(self):
"""\
Returns true if the component should terminate when it has finished
processing any pending data.
"""
return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))
def mustShutdown(self):
"""Returns true if the component should terminate immediately."""
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def sendoutbox(self,data):
"""\
Generator.
Sends data out of the "outbox" outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space. It keeps
retrying until it succeeds.
If the component is ordered to immediately terminate then "STOP" is
raised as an exception.
"""
while 1:
try:
self.send(data,"outbox")
return
except noSpaceInBox:
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
self.pause()
yield 1
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
def main(self):
"""Main loop"""
self.shutdownMsg = None
try:
while not self.dataReady("inbox"):
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
frame = self.recv("inbox")
for _ in self.write_header(frame):
yield _
for _ in self.write_frame(frame):
yield _
while 1:
while self.dataReady("inbox"):
frame = self.recv("inbox")
for _ in self.write_frame(frame):
yield _
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
except "STOP":
self.send(self.shutdownMsg,"signal")
def write_header(self, frame):
"""\
Generator.
Sends the YUV4MPEG format header to the "outbox" outbox, based on
attributes of the supplied frame data structure.
"""
format = "YUV4MPEG2 W%d H%d" % tuple(frame['size'])
if frame['pixformat']=="YUV420_planar":
format += " C420mpeg2"
elif frame['pixformat']=="YUV411_planar":
format += " C411"
elif frame['pixformat']=="YUV422_planar":
format += " C422"
elif frame['pixformat']=="YUV444_planar":
format += " C444"
elif frame['pixformat']=="YUV4444_planar":
format += " C444alpha"
elif frame['pixformat']=="Y_planar":
format += " Cmono"
interlace = frame.get("interlaced",False)
topfieldfirst = frame.get("topfieldfirst",False)
if interlace and topfieldfirst:
format += " It"
elif interlace and not topfieldfirst:
format += " Ib"
elif not interlace:
format += " Ip"
rate = frame.get("frame_rate", 0)
if rate > 0:
num,denom = rational(rate)
format += " F%d:%d" % (num,denom)
rate = frame.get("pixel_aspect", 0)
if rate > 0:
num,denom = rational(rate)
format += " A%d:%d" % (num,denom)
if "sequence_meta" in frame:
format += " X"+frame['sequence_meta']
format += "\x0a"
for _ in self.sendoutbox(format):
yield _
def write_frame(self, frame):
"""\
Generator.
Writes out YUV4MPEG format frame marker and data.
"""
for _ in self.sendoutbox("FRAME\x0a"):
yield _
for component in frame['yuv']:
for _ in self.sendoutbox(component):
yield _
__kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Reading import RateControlledFileReader
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)),
YUV4MPEGToFrame(),
FrameToYUV4MPEG(),
YUV4MPEGToFrame(),
VideoOverlay(),
).run()
| #!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================
YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends
video frame data structures to its "outbox" outbox.
FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox.
The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.
Example Usage
-------------
Playback a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
YUV4MPEGToFrame(),
VideoOverlay()
).run()
Decode a dirac encoded video file to a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
DiracDecoder(),
FrameToYUV4MPEG(),
SimpleFileWriter("output.yuv4mpeg")
).run()
YUV4MPEGToFrame Behaviour
-------------------------
Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox
and frame data structures will be sent out of the "outbox" outbox as soon as
they are parsed.
See below for a description of the uncompressed frame data structure format.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
FrameToYUV4MPEG Behaviour
-------------------------
Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.
See below for a description of the uncompressed frame data structure format.
The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format and
size, otherwise the output data will not be valid YUV4MPEG.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
A frame is a dictionary data structure. It must, at minimum contain the first 3
("yuv", "size" and "pixformat")::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"pixformat" : pixelformat # format of raw video data
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixel_aspect" : fraction # aspect ratio of pixels
"sequence_meta" : metadata # string containing extended metadata
# (no whitespace or control characters)
}
All other fields are optional when providing frames to FrameToYUV4MPEG.
YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other fields
will be filled in if the relevant header data is detected in the file.
The pixel formats recognised (and therefore supported) are::
"YUV420_planar"
"YUV411_planar"
"YUV422_planar"
"YUV444_planar"
"YUV4444_planar"
"Y_planar"
"""
from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re
from Kamaelia.Support.Data.Rationals import rational
class YUV4MPEGToFrame(component):
"""\
YUV4MPEGToFrame() -> new YUV4MPEGToFrame component.
    Parses YUV4MPEG format binary data, sent as strings to its "inbox" inbox
and outputs uncompressed video frame data structures to its "outbox" outbox.
"""
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(YUV4MPEGToFrame,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def main(self):
"""Main loop"""
# parse header
for _ in self.readline(): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
line = self.bytesread
m = re.match("^YUV4MPEG2((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
seq_params = parse_seq_tags(fields)
yield 1
while 1:
for _ in self.readline(): yield _
line = self.bytesread
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
m = re.match("^FRAME((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
frame_params = parse_frame_tags(fields)
ysize = seq_params["size"][0] * seq_params["size"][1]
csize = seq_params["chroma_size"][0] * seq_params["chroma_size"][1]
for _ in self.readbytes(ysize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
y = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
u = self.bytesread
for _ in self.readbytes(csize): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
v = self.bytesread
frame = { "yuv" : (y,u,v) }
frame.update(seq_params)
frame.update(frame_params)
for _ in self.safesend(frame,"outbox"): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and not self.dataReady("inbox")):
break
yield 1
if self.shutdownMsg:
self.send(self.shutdownMsg, "signal")
else:
self.send(producerFinished(), "signal")
def parse_seq_tags(fields):
"""Parses YUV4MPEG header tags"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "W" in tags and "H" in tags:
params['size'] = (int(tags["W"]), int(tags["H"]))
else:
        raise ValueError("YUV4MPEG sequence header is missing the mandatory W and/or H tags")
if "C" in tags:
C = tags["C"]
if C == "420jpeg": # 4:2:0 with JPEG/MPEG-1 siting (default)
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420mpeg2": # 4:2:0 with MPEG-2 siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "420paldv": # 4:2:0 with PAL-DV siting
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
elif C == "411": # 4:1:1, cosited
params['pixformat'] = "YUV411_planar"
params['chroma_size'] = (params['size'][0]/4, params['size'][1])
elif C == "422": # 4:2:2, cosited
params['pixformat'] = "YUV422_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1])
elif C == "444": # 4:4:4 (no subsampling)
params['pixformat'] = "YUV444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "444alpha": # 4:4:4 with an alpha channel
params['pixformat'] = "YUV4444_planar"
params['chroma_size'] = (params['size'][0], params['size'][1])
elif C == "mono": # luma (Y') plane only
params['pixformat'] = "Y_planar"
params['chroma_size'] = (0,0)
else:
params['pixformat'] = "YUV420_planar"
params['chroma_size'] = (params['size'][0]/2, params['size'][1]/2)
if "I" in tags:
I = tags["I"]
if I == "?": # unknown (default)
pass
elif I == "p": # progressive/none
params["interlaced"] = False
elif I == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif I == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif I == "m": # mixed-mode: refer to 'I' tag in frame header
pass
if "F" in tags:
m = re.match("^(\d+):(\d+)$",tags["F"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["frame_rate"] = num/denom
if "A" in tags:
m = re.match("^(\d+):(\d+)$",tags["A"])
num, denom = float(m.groups()[0]), float(m.groups()[1])
if denom > 0:
params["pixel_aspect"] = num/denom
if "X" in tags:
params["sequence_meta"] = tags["X"]
return params
def parse_frame_tags(fields):
"""\
Parses YUV4MPEG frame tags.
"""
params = {}
tags = {}
while fields:
m = re.match("^ (.)(\S*)(.*)$", fields)
(tag,value,fields) = m.groups()
tags[tag] = value
if "I" in tags:
x,y,z = tags["I"][0], tags["I"][1], tags["I"][2]
if x == "t": # top-field-first
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "T": # top-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = True
elif x == "b": # bottom-field-first
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "B": # bottom-field-first and repeat
params["interlaced"] = True
params["topfieldfirst"] = False
elif x == "1": # single progressive frame
params["interlaced"] = False
elif x == "2": # double progressive frame (repeat)
params["interlaced"] = False
elif x == "3": # triple progressive frame (repeat)
params["interlaced"] = False
if y == "p": # fields sampled at same time
params["interlaced"] = False
elif y == "i": # fields sampled at different times
params["interlaced"] = True
if z == "p": # progressive (subsampling over whole frame)
pass
elif z == "i": # interlaced (each field subsampled independently)
pass
elif z == "?": # unknown (allowed only for non-4:2:0 subsampling)
pass
if "X" in tags:
params["meta"] = tags["X"]
return params
class FrameToYUV4MPEG(component):
"""\
FrameToYUV4MPEG() -> new FrameToYUV4MPEG component.
Parses uncompressed video frame data structures sent to its "inbox" inbox
and writes YUV4MPEG format binary data as strings to its "outbox" outbox.
"""
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
ensures self.shutdownMsg contains the highest priority one encountered
so far.
"""
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg,shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canShutdown(self):
"""\
Returns true if the component should terminate when it has finished
processing any pending data.
"""
return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))
def mustShutdown(self):
"""Returns true if the component should terminate immediately."""
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def sendoutbox(self,data):
"""\
Generator.
Sends data out of the "outbox" outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space. It keeps
retrying until it succeeds.
If the component is ordered to immediately terminate then "STOP" is
raised as an exception.
"""
while 1:
try:
self.send(data,"outbox")
return
except noSpaceInBox:
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
self.pause()
yield 1
self.checkShutdown()
if self.mustShutdown():
raise "STOP"
def main(self):
"""Main loop"""
self.shutdownMsg = None
try:
while not self.dataReady("inbox"):
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
frame = self.recv("inbox")
for _ in self.write_header(frame):
yield _
for _ in self.write_frame(frame):
yield _
while 1:
while self.dataReady("inbox"):
frame = self.recv("inbox")
for _ in self.write_frame(frame):
yield _
self.checkShutdown()
if self.canShutdown():
raise "STOP"
self.pause()
yield 1
except "STOP":
self.send(self.shutdownMsg,"signal")
def write_header(self, frame):
"""\
Generator.
Sends the YUV4MPEG format header to the "outbox" outbox, based on
attributes of the supplied frame data structure.
"""
format = "YUV4MPEG2 W%d H%d" % tuple(frame['size'])
if frame['pixformat']=="YUV420_planar":
format += " C420mpeg2"
elif frame['pixformat']=="YUV411_planar":
format += " C411"
elif frame['pixformat']=="YUV422_planar":
format += " C422"
elif frame['pixformat']=="YUV444_planar":
format += " C444"
elif frame['pixformat']=="YUV4444_planar":
format += " C444alpha"
elif frame['pixformat']=="Y_planar":
format += " Cmono"
interlace = frame.get("interlaced",False)
topfieldfirst = frame.get("topfieldfirst",False)
if interlace and topfieldfirst:
format += " It"
elif interlace and not topfieldfirst:
format += " Ib"
elif not interlace:
format += " Ip"
rate = frame.get("frame_rate", 0)
if rate > 0:
num,denom = rational(rate)
format += " F%d:%d" % (num,denom)
rate = frame.get("pixel_aspect", 0)
if rate > 0:
num,denom = rational(rate)
format += " A%d:%d" % (num,denom)
if "sequence_meta" in frame:
format += " X"+frame['sequence_meta']
format += "\x0a"
for _ in self.sendoutbox(format):
yield _
def write_frame(self, frame):
"""\
Generator.
Writes out YUV4MPEG format frame marker and data.
"""
for _ in self.sendoutbox("FRAME\x0a"):
yield _
for component in frame['yuv']:
for _ in self.sendoutbox(component):
yield _
__kamaelia_components__ = ( YUV4MPEGToFrame, FrameToYUV4MPEG, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Reading import RateControlledFileReader
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
Pipeline( RateControlledFileReader("/data/stream.yuv",readmode="bytes",rate=25*(608256+128)),
YUV4MPEGToFrame(),
FrameToYUV4MPEG(),
YUV4MPEGToFrame(),
VideoOverlay(),
).run()
| en | 0.773649 | #!/usr/bin/env python # # Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1) # All Rights Reserved. # # You may only modify and redistribute this under the terms of any of the # following licenses(2): Mozilla Public License, V1.1, GNU General # Public License, V2.0, GNU Lesser General Public License, V2.1 # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://kamaelia.sourceforge.net/AUTHORS - please extend this file, # not this notice. # (2) Reproduced in the COPYING file, and at: # http://kamaelia.sourceforge.net/COPYING # Under section 3.5 of the MPL, we are using this text since we deem the MPL # notice inappropriate for this file. As per MPL/GPL/LGPL removal of this # notice is prohibited. # # Please contact us via: <EMAIL> # to discuss alternative licensing. # ------------------------------------------------------------------------- # \ ============================================= Parsing and Creation of YUV4MPEG format files ============================================= YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends video fram data structures to its "outbox" outbox. FrameToYUV4MPEG does the reverse - taking frame data structures sent to its "inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox." The YUV4MPEG file format is supported by many tools, such as mjpegtools, mplayer/mencoder, and ffmpeg. Example Usage ------------- Playback a YUV4MPEG format file:: Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...), YUV4MPEGToFrame(), VideoOverlay() ).run() Decode a dirac encoded video file to a YUV4MPEG format file:: Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...), DiracDecoder(), FrameToYUV4MPEG(), SimpleFileWriter("output.yuv4mpeg") ).run() YUV4MPEGToFrame Behaviour ------------------------- Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox and frame data structures will be sent out of the "outbox" outbox as soon as they are parsed. See below for a description of the uncompressed frame data structure format. This component supports sending data out of its outbox to a size limited inbox. If the size limited inbox is full, this component will pause until it is able to send out the data. Data will not be consumed from the inbox if this component is waiting to send to the outbox. If a producerFinished message is received on the "control" inbox, this component will complete parsing any data pending in its inbox, and finish sending any resulting data to its outbox. It will then send the producerFinished message on out of its "signal" outbox and terminate. If a shutdownMicroprocess message is received on the "control" inbox, this component will immediately send it on out of its "signal" outbox and immediately terminate. It will not complete processing, or sending on any pending data. FrameToYUV4MPEG Behaviour ------------------------- Send frame data structures to the "inbox" inbox of this component. YUV4MPEG format binary string data will be sent out of the "outbox" outbox. See below for a description of the uncompressed frame data structure format. The header data for the YUV4MPEG file is determined from the first frame. All frames sent to this component must therefore be in the same pixel format and size, otherwise the output data will not be valid YUV4MPEG. This component supports sending data out of its outbox to a size limited inbox. 
If the size limited inbox is full, this component will pause until it is able to send out the data. Data will not be consumed from the inbox if this component is waiting to send to the outbox. If a producerFinished message is received on the "control" inbox, this component will complete parsing any data pending in its inbox, and finish sending any resulting data to its outbox. It will then send the producerFinished message on out of its "signal" outbox and terminate. If a shutdownMicroprocess message is received on the "control" inbox, this component will immediately send it on out of its "signal" outbox and immediately terminate. It will not complete processing, or sending on any pending data. ========================= UNCOMPRESSED FRAME FORMAT ========================= A frame is a dictionary data structure. It must, at minimum contain the first 3 ("yuv", "size" and "pixformat"):: { "yuv" : (y_data, u_data, v_data) # a tuple of strings "size" : (width, height) # in pixels "pixformat" : pixelformat # format of raw video data "frame_rate" : fps # frames per second "interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields "topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data "pixel_aspect" : fraction # aspect ratio of pixels "sequence_meta" : metadata # string containing extended metadata # (no whitespace or control characters) } All other fields are optional when providing frames to FrameToYUV4MPEG. YUV4MPEGToFrame only guarantees to fill inthe YUV data itself. All other fields will be filled in if the relevant header data is detected in the file. The pixel formats recognised (and therefore supported) are:: "YUV420_planar" "YUV411_planar" "YUV422_planar" "YUV444_planar" "YUV4444_planar" "Y_planar" #from Axon.Ipc import WaitComplete \ YUV4MPEGToFrame() -> new YUV4MPEGToFrame component. Parses YUV4MPEG format binarydata, sent as strings to its "inbox" inbox and outputs uncompressed video frame data structures to its "outbox" outbox. x.__init__(...) initializes x; see x.__class__.__doc__ for signature \ Collects any new shutdown messages arriving at the "control" inbox, and returns "NOW" if immediate shutdown is required, or "WHENEVER" if the component can shutdown when it has finished processing pending data. \ Generator. Read up to the next newline char from the stream of chunks of binary string data arriving at the "inbox" inbox. Any excess data is placed into self.remainder ready for the next call to self.readline or self.readbytes. Data is only read from the inbox when required. It is not preemptively fetched. The read data is placed into self.bytesread If a shutdown is detected, self.bytesread is set to "" and this generator immediately returns. \ Generator. Read the specified number of bytes from the stream of chunks of binary string data arriving at the "inbox" inbox. Any excess data is placed into self.remainder ready for the next call to self.readline or self.readbytes. Data is only read from the inbox when required. It is not preemptively fetched. The read data is placed into self.bytesread If a shutdown is detected, self.bytesread is set to "" and this generator immediately returns. \ Generator. Sends data out of the named outbox. If the destination is full (noSpaceInBox exception) then it waits until there is space and retries until it succeeds. If a shutdownMicroprocess message is received, returns early. 
Main loop # parse header Parses YUV4MPEG header tags # 4:2:0 with JPEG/MPEG-1 siting (default) # 4:2:0 with MPEG-2 siting # 4:2:0 with PAL-DV siting # 4:1:1, cosited # 4:2:2, cosited # 4:4:4 (no subsampling) # 4:4:4 with an alpha channel # luma (Y') plane only # unknown (default) # progressive/none # top-field-first # bottom-field-first # mixed-mode: refer to 'I' tag in frame header \ Parses YUV4MPEG frame tags. # top-field-first # top-field-first and repeat # bottom-field-first # bottom-field-first and repeat # single progressive frame # double progressive frame (repeat) # triple progressive frame (repeat) # fields sampled at same time # fields sampled at different times # progressive (subsampling over whole frame) # interlaced (each field subsampled independently) # unknown (allowed only for non-4:2:0 subsampling) \ FrameToYUV4MPEG() -> new FrameToYUV4MPEG component. Parses uncompressed video frame data structures sent to its "inbox" inbox and writes YUV4MPEG format binary data as strings to its "outbox" outbox. \ Collects any new shutdown messages arriving at the "control" inbox, and ensures self.shutdownMsg contains the highest priority one encountered so far. \ Returns true if the component should terminate when it has finished processing any pending data. Returns true if the component should terminate immediately. \ Generator. Sends data out of the "outbox" outbox. If the destination is full (noSpaceInBox exception) then it waits until there is space. It keeps retrying until it succeeds. If the component is ordered to immediately terminate then "STOP" is raised as an exception. Main loop \ Generator. Sends the YUV4MPEG format header to the "outbox" outbox, based on attributes of the supplied frame data structure. \ Generator. Writes out YUV4MPEG format frame marker and data. | 1.440109 | 1 |
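The UNCOMPRESSED FRAME FORMAT section above fully specifies the dictionary that FrameToYUV4MPEG consumes. As a rough illustration, here is a hand-built 4x2 YUV420 planar frame with only the three mandatory keys plus a frame rate; the sample values are arbitrary, and the header noted in the comment follows from write_header() as defined above.

width, height = 4, 2
y_plane = "\x80" * (width * height)                # 8 luma samples, mid-grey
u_plane = "\x80" * ((width // 2) * (height // 2))  # 2 chroma samples per plane for 4:2:0
v_plane = "\x80" * ((width // 2) * (height // 2))

frame = {
    "yuv": (y_plane, u_plane, v_plane),  # mandatory
    "size": (width, height),             # mandatory
    "pixformat": "YUV420_planar",        # mandatory
    "frame_rate": 25,                    # optional; emitted as the F25:1 header tag
}

# Sending a stream of such frames into FrameToYUV4MPEG() would produce the header
# "YUV4MPEG2 W4 H2 C420mpeg2 Ip F25:1" followed by one FRAME block per frame.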
project/cli/event.py | DanielGrams/gsevp | 1 | 10120 | import click
from flask.cli import AppGroup
from project import app, db
from project.dateutils import berlin_tz
from project.services.event import (
get_recurring_events,
update_event_dates_with_recurrence_rule,
)
event_cli = AppGroup("event")
@event_cli.command("update-recurring-dates")
def update_recurring_dates():
# Setting the timezone is neccessary for cli command
db.session.execute("SET timezone TO :val;", {"val": berlin_tz.zone})
events = get_recurring_events()
for event in events:
update_event_dates_with_recurrence_rule(event)
db.session.commit()
click.echo(f"{len(events)} event(s) were updated.")
app.cli.add_command(event_cli)
| import click
from flask.cli import AppGroup
from project import app, db
from project.dateutils import berlin_tz
from project.services.event import (
get_recurring_events,
update_event_dates_with_recurrence_rule,
)
event_cli = AppGroup("event")
@event_cli.command("update-recurring-dates")
def update_recurring_dates():
    # Setting the timezone is necessary for the CLI command
db.session.execute("SET timezone TO :val;", {"val": berlin_tz.zone})
events = get_recurring_events()
for event in events:
update_event_dates_with_recurrence_rule(event)
db.session.commit()
click.echo(f"{len(events)} event(s) were updated.")
app.cli.add_command(event_cli)
| en | 0.748015 | # Setting the timezone is necessary for the CLI command | 2.326978 | 2 |
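A hypothetical test sketch for the command registered above, using Flask's built-in CLI test runner; it assumes the app object imported in the record and a configured test database, and the expected output string mirrors the click.echo() call.

def test_update_recurring_dates_cli():
    # Invoke "flask event update-recurring-dates" through the test runner.
    runner = app.test_cli_runner()
    result = runner.invoke(args=["event", "update-recurring-dates"])
    assert result.exit_code == 0
    assert "event(s) were updated." in result.output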
test/functional/examples/test_examples.py | ymn1k/testplan | 0 | 10121 | import os
import re
import sys
import subprocess
import pytest
from testplan.common.utils.path import change_directory
import platform
ON_WINDOWS = platform.system() == 'Windows'
KNOWN_EXCEPTIONS = [
"TclError: Can't find a usable init\.tcl in the following directories:", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: No module named sklearn.*", # Missing module sklearn. Will skip Data Science example.
"ImportError: No module named Tkinter", # Missing module Tkinter. Will skip Data Science example.
"ImportError: No module named _tkinter.*", # Missing module Tkinter. Will skip Data Science example.
"RuntimeError: Download pyfixmsg library .*", # Missing module pyfixmsg. Will skip FIX example.
"No spec file set\. You should download .*", # Missing FIX spec file. Will skip FIX example.
"AttributeError: 'module' object has no attribute 'poll'",
"RuntimeError: You need to compile test binary first." # Need to compile cpp binary first. Will skip GTest example.
]
SKIP_ON_WINDOWS = [
os.path.join('Cpp', 'GTest', 'test_plan.py'),
]
ROOT_DIR_CONTENTS = [
"setup.py",
"requirements.txt",
"README.rst",
"LICENSE.md"
]
def _depth_from_repo_root():
cwd = os.getcwd()
depth = []
while True:
contents = os.listdir(cwd)
if all([entry in contents for entry in ROOT_DIR_CONTENTS]):
return depth
parent_dir = os.path.dirname(cwd)
if os.path.realpath(cwd) == os.path.realpath(parent_dir):
raise RuntimeError('Could not find repo directory')
depth.append(os.pardir)
cwd = parent_dir
def _relative_dir(directory):
path_args = _depth_from_repo_root() + [directory]
return os.path.join(*path_args)
def _param_formatter(param):
if 'examples' in param:
return repr(param.rsplit('examples')[1])
return repr(param)
@pytest.mark.parametrize(
'root,filename',
[
(os.path.abspath(root), filename)
for root, _, files in os.walk(
_relative_dir(os.path.join('testplan', 'examples')))
for filename in files
if 'test_plan' in filename
],
ids=_param_formatter,
)
def test_example(root, filename):
file_path = os.path.join(root, filename)
if ON_WINDOWS and any(
[file_path.endswith(skip_name) for skip_name in SKIP_ON_WINDOWS]
):
pytest.skip()
with change_directory(root), open(filename) as file_obj:
file_obj.readline()
second_line = file_obj.readline()
try:
subprocess.check_output(
[sys.executable, filename],
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
out = e.output.decode()
for exception in KNOWN_EXCEPTIONS:
if re.search(exception, out):
pytest.xfail()
assert 'Exception in test_plan definition' not in out, \
'Exception raised in test_plan definition.'
assert 'Traceback (most recent call last):' not in out, \
'Exception raised during test:\n{}'.format(out)
assert \
('# This plan contains tests that demonstrate failures '
'as well.') == second_line.strip(), \
"Expected \'{}\' example to pass, it failed.\n{}".format(
file_path,
out
)
| import os
import re
import sys
import subprocess
import pytest
from testplan.common.utils.path import change_directory
import platform
ON_WINDOWS = platform.system() == 'Windows'
KNOWN_EXCEPTIONS = [
"TclError: Can't find a usable init\.tcl in the following directories:", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: No module named sklearn.*", # Missing module sklearn. Will skip Data Science example.
"ImportError: No module named Tkinter", # Missing module Tkinter. Will skip Data Science example.
"ImportError: No module named _tkinter.*", # Missing module Tkinter. Will skip Data Science example.
"RuntimeError: Download pyfixmsg library .*", # Missing module pyfixmsg. Will skip FIX example.
"No spec file set\. You should download .*", # Missing FIX spec file. Will skip FIX example.
"AttributeError: 'module' object has no attribute 'poll'",
"RuntimeError: You need to compile test binary first." # Need to compile cpp binary first. Will skip GTest example.
]
SKIP_ON_WINDOWS = [
os.path.join('Cpp', 'GTest', 'test_plan.py'),
]
ROOT_DIR_CONTENTS = [
"setup.py",
"requirements.txt",
"README.rst",
"LICENSE.md"
]
def _depth_from_repo_root():
cwd = os.getcwd()
depth = []
while True:
contents = os.listdir(cwd)
if all([entry in contents for entry in ROOT_DIR_CONTENTS]):
return depth
parent_dir = os.path.dirname(cwd)
if os.path.realpath(cwd) == os.path.realpath(parent_dir):
raise RuntimeError('Could not find repo directory')
depth.append(os.pardir)
cwd = parent_dir
def _relative_dir(directory):
path_args = _depth_from_repo_root() + [directory]
return os.path.join(*path_args)
def _param_formatter(param):
if 'examples' in param:
return repr(param.rsplit('examples')[1])
return repr(param)
@pytest.mark.parametrize(
'root,filename',
[
(os.path.abspath(root), filename)
for root, _, files in os.walk(
_relative_dir(os.path.join('testplan', 'examples')))
for filename in files
if 'test_plan' in filename
],
ids=_param_formatter,
)
def test_example(root, filename):
file_path = os.path.join(root, filename)
if ON_WINDOWS and any(
[file_path.endswith(skip_name) for skip_name in SKIP_ON_WINDOWS]
):
pytest.skip()
with change_directory(root), open(filename) as file_obj:
file_obj.readline()
second_line = file_obj.readline()
try:
subprocess.check_output(
[sys.executable, filename],
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
out = e.output.decode()
for exception in KNOWN_EXCEPTIONS:
if re.search(exception, out):
pytest.xfail()
assert 'Exception in test_plan definition' not in out, \
'Exception raised in test_plan definition.'
assert 'Traceback (most recent call last):' not in out, \
'Exception raised during test:\n{}'.format(out)
assert \
('# This plan contains tests that demonstrate failures '
'as well.') == second_line.strip(), \
"Expected \'{}\' example to pass, it failed.\n{}".format(
file_path,
out
)
| en | 0.421396 | # Matplotlib module improperly installed. Will skip Data Science example. # Matplotlib module improperly installed. Will skip Data Science example. # Missing module sklearn. Will skip Data Science example. # Missing module Tkinter. Will skip Data Science example. # Missing module Tkinter. Will skip Data Science example. # Missing module pyfixmsg. Will skip FIX example. # Missing FIX spec file. Will skip FIX example. # Need to compile cpp binary first. Will skip GTest example. | 2.130805 | 2 |
peco/template/template.py | Tikubonn/peco | 0 | 10122 | <reponame>Tikubonn/peco
from io import StringIO
class Template:
"""
this holds the information parsed from the source code.
you can get rendered text with .render() and .render_string()
"""
def __init__(self, sentencenode, scope):
self.sentencenode = sentencenode
self.scope = scope
def render(self, stream, **parameters):
"""
render template to stream with parameters.
Parameters
----------
stream: io.TextIOBase
this file-like object is used for output.
parameters:
these are used for rendering.
"""
with self.scope:
for name, value in parameters.items():
self.scope.set_value(name, value)
self.sentencenode.write(stream)
def render_string(self, **parameters):
"""
render template with parameters then return rendered text.
Parameters
----------
parameters:
these are used for rendering.
Returns
-------
rendered: str
this is the rendered string.
"""
with StringIO() as stream:
self.render(stream, **parameters)
rendered = stream.getvalue()
return rendered
| from io import StringIO
class Template:
"""
this holds the information parsed from the source code.
you can get rendered text with .render() and .render_string()
"""
def __init__(self, sentencenode, scope):
self.sentencenode = sentencenode
self.scope = scope
def render(self, stream, **parameters):
"""
render template to stream with parameters.
Parameters
----------
stream: io.TextIOBase
this file-like object is used for output.
parameters:
these are used for rendering.
"""
with self.scope:
for name, value in parameters.items():
self.scope.set_value(name, value)
self.sentencenode.write(stream)
def render_string(self, **parameters):
"""
render template with parameters then return rendered text.
Parameters
----------
parameters:
these are used for rendering.
Returns
-------
rendered: str
this is the rendered string.
"""
with StringIO() as stream:
self.render(stream, **parameters)
rendered = stream.getvalue()
return rendered | en | 0.506188 | this has information that parsed source code. you can get rendered text with .render() and .render_string() render template to stream with parameters. Parameters ---------- stream: io.TextIOBase this file-like object used to output. parameters: this used to rendering. render template with parameters then return rendered text. Parameters ---------- parameters: this used to rendering. Returns ------- rendered: str this is rendered string. | 3.086943 | 3 |
mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_with_blank_string_filters.py | ministryofjustice/mtp-api | 5 | 10123 | <reponame>ministryofjustice/mtp-api<gh_stars>1-10
from core import getattr_path
from rest_framework import status
from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase
class CreditListWithBlankStringFiltersTestCase(SecurityCreditListTestCase):
def assertAllResponsesHaveBlankField(self, filters, blank_fields, expected_filter): # noqa: N802
expected_results = list(filter(expected_filter, self._get_managed_prison_credits()))
url = self._get_url(**filters)
response = self.client.get(
url, format='json',
HTTP_AUTHORIZATION=self.get_http_authorization_for_user(self._get_authorised_user())
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = []
for result in response.data.get('results', []):
results.append(result['id'])
for blank_field in blank_fields:
self.assertIn(result[blank_field], ['', None])
self.assertListEqual(
sorted(results),
sorted(expected_result.id for expected_result in expected_results)
)
def test_blank_sender_name(self):
self.assertAllResponsesHaveBlankField(
{
'sender_name__isblank': 'True'
},
['sender_name'],
lambda credit: getattr_path(credit, 'transaction.sender_name', None) == ''
)
def test_blank_sender_sort_code(self):
self.assertAllResponsesHaveBlankField(
{
'sender_sort_code__isblank': 'True'
},
['sender_sort_code'],
lambda credit: getattr_path(credit, 'transaction.sender_sort_code', None) == ''
)
def test_blank_sender_account_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_account_number__isblank': 'True'
},
['sender_account_number'],
lambda credit: getattr_path(credit, 'transaction.sender_account_number', None) == ''
)
def test_blank_sender_roll_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_roll_number__isblank': 'True'
},
['sender_roll_number'],
lambda credit: getattr_path(credit, 'transaction.sender_roll_number', None) == ''
)
| from core import getattr_path
from rest_framework import status
from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase
class CreditListWithBlankStringFiltersTestCase(SecurityCreditListTestCase):
def assertAllResponsesHaveBlankField(self, filters, blank_fields, expected_filter): # noqa: N802
expected_results = list(filter(expected_filter, self._get_managed_prison_credits()))
url = self._get_url(**filters)
response = self.client.get(
url, format='json',
HTTP_AUTHORIZATION=self.get_http_authorization_for_user(self._get_authorised_user())
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
results = []
for result in response.data.get('results', []):
results.append(result['id'])
for blank_field in blank_fields:
self.assertIn(result[blank_field], ['', None])
self.assertListEqual(
sorted(results),
sorted(expected_result.id for expected_result in expected_results)
)
def test_blank_sender_name(self):
self.assertAllResponsesHaveBlankField(
{
'sender_name__isblank': 'True'
},
['sender_name'],
lambda credit: getattr_path(credit, 'transaction.sender_name', None) == ''
)
def test_blank_sender_sort_code(self):
self.assertAllResponsesHaveBlankField(
{
'sender_sort_code__isblank': 'True'
},
['sender_sort_code'],
lambda credit: getattr_path(credit, 'transaction.sender_sort_code', None) == ''
)
def test_blank_sender_account_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_account_number__isblank': 'True'
},
['sender_account_number'],
lambda credit: getattr_path(credit, 'transaction.sender_account_number', None) == ''
)
def test_blank_sender_roll_number(self):
self.assertAllResponsesHaveBlankField(
{
'sender_roll_number__isblank': 'True'
},
['sender_roll_number'],
lambda credit: getattr_path(credit, 'transaction.sender_roll_number', None) == ''
) | it | 0.199817 | # noqa: N802 | 2.326842 | 2 |
vipermonkey/core/filetype.py | lap1nou/ViperMonkey | 874 | 10124 | <reponame>lap1nou/ViperMonkey<filename>vipermonkey/core/filetype.py
"""
Check for Office file types
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2016 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Office magic numbers.
magic_nums = {
"office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97
"office2007" : "50 4B 3 4", # Office 2007+ (PKZip)
}
# PE magic number.
pe_magic_num = "4D 5A"
def get_1st_8_bytes(fname, is_data):
info = None
is_data = (is_data or (len(fname) > 200))
if (not is_data):
try:
tmp = open(fname, 'rb')
tmp.close()
except:
is_data = True
if (not is_data):
with open(fname, 'rb') as f:
info = f.read(8)
else:
info = fname[:9]
curr_magic = ""
for b in info:
curr_magic += hex(ord(b)).replace("0x", "").upper() + " "
return curr_magic
def is_pe_file(fname, is_data):
"""
Check to see if the given file is a PE executable.
return - True if it is a PE file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the known magic #.
return (curr_magic.startswith(pe_magic_num))
def is_office_file(fname, is_data):
"""
Check to see if the given file is a MS Office file format.
return - True if it is an Office file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have 1 of the known magic #s.
for typ in magic_nums.keys():
magic = magic_nums[typ]
if (curr_magic.startswith(magic)):
return True
return False
def is_office97_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office97 magic #.
return (curr_magic.startswith(magic_nums["office97"]))
def is_office2007_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office 2007 magic #.
return (curr_magic.startswith(magic_nums["office2007"]))
| """
Check for Office file types
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2016 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Office magic numbers.
magic_nums = {
"office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97
"office2007" : "50 4B 3 4", # Office 2007+ (PKZip)
}
# PE magic number.
pe_magic_num = "4D 5A"
def get_1st_8_bytes(fname, is_data):
info = None
is_data = (is_data or (len(fname) > 200))
if (not is_data):
try:
tmp = open(fname, 'rb')
tmp.close()
except:
is_data = True
if (not is_data):
with open(fname, 'rb') as f:
info = f.read(8)
else:
info = fname[:9]
curr_magic = ""
for b in info:
curr_magic += hex(ord(b)).replace("0x", "").upper() + " "
return curr_magic
def is_pe_file(fname, is_data):
"""
Check to see if the given file is a PE executable.
return - True if it is a PE file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the known magic #.
return (curr_magic.startswith(pe_magic_num))
def is_office_file(fname, is_data):
"""
Check to see if the given file is a MS Office file format.
return - True if it is an Office file, False if not.
"""
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have 1 of the known magic #s.
for typ in magic_nums.keys():
magic = magic_nums[typ]
if (curr_magic.startswith(magic)):
return True
return False
def is_office97_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office97 magic #.
return (curr_magic.startswith(magic_nums["office97"]))
def is_office2007_file(fname, is_data):
# Read the 1st 8 bytes of the file.
curr_magic = get_1st_8_bytes(fname, is_data)
# See if we have the Office 2007 magic #.
return (curr_magic.startswith(magic_nums["office2007"])) | en | 0.717827 | Check for Office file types ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft VBA macros (Visual Basic for Applications), mainly for malware analysis. Author: <NAME> - http://www.decalage.info License: BSD, see source code or documentation Project Repository: https://github.com/decalage2/ViperMonkey # === LICENSE ================================================================== # ViperMonkey is copyright (c) 2015-2016 <NAME> (http://www.decalage.info) # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Office magic numbers. # Office 97 # Office 2007+ (PKZip) # PE magic number. Check to see if the given file is a PE executable. return - True if it is a PE file, False if not. # Read the 1st 8 bytes of the file. # See if we the known magic #. Check to see if the given file is a MS Office file format. return - True if it is an Office file, False if not. # Read the 1st 8 bytes of the file. # See if we have 1 of the known magic #s. # Read the 1st 8 bytes of the file. # See if we have the Office97 magic #. # Read the 1st 8 bytes of the file. # See if we have the Office 2007 magic #. | 1.552327 | 2 |
packs/kubernetes/tests/test_third_party_resource.py | userlocalhost2000/st2contrib | 164 | 10125 | <gh_stars>100-1000
from st2tests.base import BaseSensorTestCase
from third_party_resource import ThirdPartyResource
class ThirdPartyResourceTestCase(BaseSensorTestCase):
sensor_cls = ThirdPartyResource
def test_k8s_object_to_st2_trigger_bad_object(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians'
# uid missing
# label missing
}
}
}
sensor = self.get_sensor_instance()
self.assertRaises(KeyError, sensor._k8s_object_to_st2_trigger, k8s_obj)
def test_k8s_object_to_st2_trigger(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians',
'uid': 'coinye',
'labels': ['rapper', 'train wrecker']
}
}
}
sensor = self.get_sensor_instance()
payload = sensor._k8s_object_to_st2_trigger(k8s_obj)
self.assertTrue('resource' in payload)
self.assertEqual(payload['resource'], k8s_obj['type'])
self.assertTrue('object_kind' in payload)
self.assertEqual(payload['object_kind'], k8s_obj['object']['kind'])
self.assertTrue('name' in payload)
self.assertEqual(payload['name'], k8s_obj['object']['metadata']['name'])
self.assertTrue('labels' in payload)
self.assertListEqual(payload['labels'], k8s_obj['object']['metadata']['labels'])
self.assertTrue('namespace' in payload)
self.assertEqual(payload['namespace'], k8s_obj['object']['metadata']['namespace'])
self.assertTrue('uid' in payload)
self.assertEqual(payload['uid'], k8s_obj['object']['metadata']['uid'])
def test_get_trigger_payload_from_line(self):
line = '{"object": {"kind": "president", ' + \
'"metadata": {"labels": ["rapper", "train wrecker"], ' + \
'"namespace": "westashians", ' + \
'"name": "west", "uid": "coinye"}}, "type": "kanye"}'
sensor = self.get_sensor_instance()
payload = sensor._get_trigger_payload_from_line(line)
self.assertTrue(payload is not None)
self.assertTrue('resource' in payload)
self.assertTrue('object_kind' in payload)
self.assertTrue('name' in payload)
self.assertTrue('labels' in payload)
self.assertTrue('namespace' in payload)
self.assertTrue('uid' in payload)
| from st2tests.base import BaseSensorTestCase
from third_party_resource import ThirdPartyResource
class ThirdPartyResourceTestCase(BaseSensorTestCase):
sensor_cls = ThirdPartyResource
def test_k8s_object_to_st2_trigger_bad_object(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians'
# uid missing
# label missing
}
}
}
sensor = self.get_sensor_instance()
self.assertRaises(KeyError, sensor._k8s_object_to_st2_trigger, k8s_obj)
def test_k8s_object_to_st2_trigger(self):
k8s_obj = {
'type': 'kanye',
'object': {
'kind': 'president',
'metadata': {
'name': 'west',
'namespace': 'westashians',
'uid': 'coinye',
'labels': ['rapper', 'train wrecker']
}
}
}
sensor = self.get_sensor_instance()
payload = sensor._k8s_object_to_st2_trigger(k8s_obj)
self.assertTrue('resource' in payload)
self.assertEqual(payload['resource'], k8s_obj['type'])
self.assertTrue('object_kind' in payload)
self.assertEqual(payload['object_kind'], k8s_obj['object']['kind'])
self.assertTrue('name' in payload)
self.assertEqual(payload['name'], k8s_obj['object']['metadata']['name'])
self.assertTrue('labels' in payload)
self.assertListEqual(payload['labels'], k8s_obj['object']['metadata']['labels'])
self.assertTrue('namespace' in payload)
self.assertEqual(payload['namespace'], k8s_obj['object']['metadata']['namespace'])
self.assertTrue('uid' in payload)
self.assertEqual(payload['uid'], k8s_obj['object']['metadata']['uid'])
def test_get_trigger_payload_from_line(self):
line = '{"object": {"kind": "president", ' + \
'"metadata": {"labels": ["rapper", "train wrecker"], ' + \
'"namespace": "westashians", ' + \
'"name": "west", "uid": "coinye"}}, "type": "kanye"}'
sensor = self.get_sensor_instance()
payload = sensor._get_trigger_payload_from_line(line)
self.assertTrue(payload is not None)
self.assertTrue('resource' in payload)
self.assertTrue('object_kind' in payload)
self.assertTrue('name' in payload)
self.assertTrue('labels' in payload)
self.assertTrue('namespace' in payload)
self.assertTrue('uid' in payload) | en | 0.966757 | # uid missing # label missing | 2.171325 | 2 |
PrometheusScrapper/scrapper.py | masterchef/webscraper | 0 | 10126 | import datetime
import getpass
import logging
import os
import pathlib
import platform
import re
import smtplib
import sys
from contextlib import contextmanager
from email.message import EmailMessage
from functools import wraps
import azure.functions as func
import click
import gspread
import pandas as pd
from apscheduler.schedulers.background import BlockingScheduler
from oauth2client.service_account import ServiceAccountCredentials
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
@contextmanager
def get_driver(*args, **kwargs):
options = Options()
options.headless = True
options.add_argument("--window-size=1920,1200")
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-crash-reporter')
options.add_argument('--disable-logging')
options.add_argument('--log-level=3')
if platform.system() == 'Linux':
DRIVER_PATH = 'chromedriver'
elif platform.system() == "Darwin":
DRIVER_PATH = (pathlib.Path(__file__).parent.parent /
'chromedriver').resolve()
else:
log.error('Unsupported OS')
exit(0)
driver = webdriver.Chrome(options=options, executable_path=DRIVER_PATH)
yield driver
driver.close()
driver.quit()
def get_browser(func):
@wraps(func)
def wrapper(*args, **kwargs):
with get_driver() as d:
kwargs['driver'] = d
return func(*args, **kwargs)
return wrapper
@click.group()
@click.option('--email', is_flag=True, help='A flag for sending email with results.')
@click.option('--email_to', help='CSV of email addresses to send notification to.')
@click.option('--username', help='SMTP account username.')
@click.option('--gsheet', is_flag=True, help='A flag for updating google sheet with results')
@click.option('--doc_key', help='Google Doc Key to update')
@click.pass_context
def cli(ctx, email, email_to, username, gsheet, doc_key):
ctx.ensure_object(dict)
if email and (not username or not email_to):
log.error('Please provide email sending parameters')
exit(0)
elif email:
password = getpass.getpass(
"Please enter your google account password for sending email:\n")
ctx.obj['password'] = password
if gsheet and not doc_key:
log.error('Please provide a gsheet doc key')
exit(0)
pass
@cli.command('schedule')
@click.option('--hour', default='*/1', help='Cron hour expression')
@click.pass_context
def schedule(ctx, hour):
email = ctx.parent.params['email']
username = ctx.parent.params['username']
email_to = ctx.parent.params['email_to']
password = ctx.obj.get('password', None)
gsheet = ctx.parent.params['gsheet']
doc_key = ctx.parent.params['doc_key']
schedule = BlockingScheduler()
schedule.add_job(run, kwargs={"email": email, "gsheet": gsheet, "doc_key": doc_key,
"username": username, "email_to": email_to, "password": password}, trigger='cron', hour=hour)
try:
schedule.start()
except (KeyboardInterrupt, SystemExit):
schedule.shutdown()
@cli.command('run')
@click.pass_context
def once(ctx):
email = ctx.parent.params['email']
gsheet = ctx.parent.params['gsheet']
username = ctx.parent.params['username']
email_to = ctx.parent.params['email_to']
password = ctx.obj.get('password', None)
doc_key = ctx.parent.params['doc_key']
run(email, username, email_to, password, gsheet, doc_key)
def run(email, username, email_to, password, gsheet, doc_key):
log.info('In run')
content = []
for link in os.environ["searchLinks"].split():
content += get_prometheus_apartments(link)
formatted_content = format_email(content)
if gsheet:
log.info('Updating gsheet')
update_historical_data(doc_key, content)
formatted_content += f'For historical data click the link below:\nhttps://docs.google.com/spreadsheets/d/1XZocxmyQ91e1exBvwDAaSR8Rhavy9WPnwLSz0Z5SKsM/edit?usp=sharing'
if email:
log.info('Sending email')
send_email(username, password, email_to, formatted_content)
log.info(content)
@get_browser
def get_prometheus_apartments(url, driver):
driver.get(url)
content = []
log.info(f'Getting apartments: {url}')
try:
anchors = driver.find_elements_by_xpath(
"//div[@id='results-cards']/div/a[@class='card-wrapper']")
except Exception as e:
log.exception(f'{e}')
return content
links = [a.get_attribute('href') for a in anchors]
apartments = []
for apt in links:
name = apt.strip('/').split('/')[-1]
apartments.append({'name': name, 'url': f'{apt}lease'})
# Scrape each apartment in parallel
for apt in apartments:
results = get_availability(apt)
if results:
content.append(results)
# with Pool() as pool:
# results = [pool.apply_async(get_availability, args=(apt,)) for apt in apartments]
# for result in results:
# data = result.get()
# if data:
# content.append(data)
return content
def update_historical_data(doc_key, content):
date = datetime.datetime.today().strftime('%Y-%m-%d')
all_content = []
for apt in content:
complex = apt['meta']['name']
data = apt['data']
for row in data:
cleaned_values = [f'{date}', f'{complex}'] + \
[value.replace('$', '').replace(',', '') for value in row]
all_content.append(cleaned_values)
update_gdoc(doc_key, all_content)
def format_email(content):
result = ''
for apt in content:
complex = apt['meta']['name']
data = apt['data']
if complex != 'mansion-grove':
continue
result += f'------------ {complex} ----------------\n'
total_available = sum(int(row[-1]) for row in data)
result += '\n'.join(', '.join(row) for row in data)
result += f'\nTotal Available: {total_available}\n'
return result
@get_browser
def get_availability(data, driver):
"""
Returns apartment availability information
"""
url = data['url']
content = []
log.info(f'Processing {url}')
driver.get(url)
delay = 60 # seconds
try:
WebDriverWait(driver, delay).until(
EC.frame_to_be_available_and_switch_to_it('rp-leasing-widget'))
WebDriverWait(driver, delay).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(@class, 'primary')][contains(text(), 'Start')]")))
except TimeoutException:
log.info(f'Page did not load: {url}')
return content
try:
driver.find_element_by_xpath(
"//button[contains(@class, 'primary')][contains(text(), 'Start')]").click()
WebDriverWait(driver, delay).until(
EC.presence_of_element_located((By.XPATH, "//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'name')]")))
# Print plan prices
names = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'name')]")]
specs = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'specs')]")]
prices = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'range')]")]
availability = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div[@class='tile-buttons']/button")]
except Exception:
log.exception(f'Unable to parse {url}')
return content
for i in range(len(names)):
match = re.match(
r'\((\d+)\).*', availability[i]) if len(availability) > i else None
units = int(match.groups()[0]) if match else '0'
match = re.match(
r'(\$\d*)( \- \$\d*\*)*', prices[i].split(' - ')[0].replace(',', '').replace('From ', '')) if len(prices) > i else None
min_price = match.groups()[0] if match else '$0'
content.append((names[i], specs[i], min_price, str(units)))
return {'meta': data, 'data': content}
def send_email(username, password, to, content):
if not content:
log.info('Nothing to send')
return
msg = EmailMessage()
msg.set_content(content)
msg['Subject'] = f'Apartment availability'
msg['From'] = username
msg['To'] = to
# Send the message via our own SMTP server.
s = smtplib.SMTP_SSL('smtp.gmail.com', 465)
s.login(username, password)
s.send_message(msg)
s.quit()
def update_gdoc(doc_key, cells):
scope = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
CREDENTIALS_PATH = pathlib.Path(__file__).parent.parent / 'credentials.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
CREDENTIALS_PATH.resolve(), scope,
)
docs = gspread.authorize(credentials)
sheet = docs.open_by_key(doc_key).sheet1
new = pd.DataFrame(cells)
new.columns = ['Date', 'Complex', 'Plan', 'Specs', 'Price', 'Availability']
existing = pd.DataFrame(sheet.get_all_values()[1:])
if existing.size:
existing.columns = ['Date', 'Complex',
'Plan', 'Specs', 'Price', 'Availability']
updated = existing.append(new)
updated = updated.groupby(['Date', 'Complex', 'Plan', 'Specs']).min()
updated.reset_index(inplace=True)
sheet.update([updated.columns.values.tolist()] +
updated.values.tolist(), value_input_option='USER_ENTERED')
if __name__ == '__main__':
cli()
def azurefunc(PrometheusScrapper: func.TimerRequest) -> None:
email = os.environ["SendEmail"]
email_to = os.environ["EmailTo"]
username = os.environ["GmailUsername"]
password = os.environ["GmailPassword"]
gsheet = os.environ["UpdateGSheet"]
doc_key = os.environ["GSheetKey"]
run(email, username, email_to, password, gsheet, doc_key)
| import datetime
import getpass
import logging
import os
import pathlib
import platform
import re
import smtplib
import sys
from contextlib import contextmanager
from email.message import EmailMessage
from functools import wraps
import azure.functions as func
import click
import gspread
import pandas as pd
from apscheduler.schedulers.background import BlockingScheduler
from oauth2client.service_account import ServiceAccountCredentials
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
@contextmanager
def get_driver(*args, **kwargs):
options = Options()
options.headless = True
options.add_argument("--window-size=1920,1200")
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-crash-reporter')
options.add_argument('--disable-logging')
options.add_argument('--log-level=3')
if platform.system() == 'Linux':
DRIVER_PATH = 'chromedriver'
elif platform.system() == "Darwin":
DRIVER_PATH = (pathlib.Path(__file__).parent.parent /
'chromedriver').resolve()
else:
log.error('Unsupported OS')
exit(0)
driver = webdriver.Chrome(options=options, executable_path=DRIVER_PATH)
yield driver
driver.close()
driver.quit()
def get_browser(func):
@wraps(func)
def wrapper(*args, **kwargs):
with get_driver() as d:
kwargs['driver'] = d
return func(*args, **kwargs)
return wrapper
@click.group()
@click.option('--email', is_flag=True, help='A flag for sending email with results.')
@click.option('--email_to', help='CSV of email addresses to send notification to.')
@click.option('--username', help='SMTP account username.')
@click.option('--gsheet', is_flag=True, help='A flag for updating google sheet with results')
@click.option('--doc_key', help='Google Doc Key to update')
@click.pass_context
def cli(ctx, email, email_to, username, gsheet, doc_key):
ctx.ensure_object(dict)
if email and (not username or not email_to):
log.error('Please provide email sending parameters')
exit(0)
elif email:
password = getpass.getpass(
"Please enter your google account password for sending email:\n")
ctx.obj['password'] = password
if gsheet and not doc_key:
log.error('Please provide a gsheet doc key')
exit(0)
pass
@cli.command('schedule')
@click.option('--hour', default='*/1', help='Cron hour expression')
@click.pass_context
def schedule(ctx, hour):
email = ctx.parent.params['email']
username = ctx.parent.params['username']
email_to = ctx.parent.params['email_to']
password = ctx.obj.get('password', None)
gsheet = ctx.parent.params['gsheet']
doc_key = ctx.parent.params['doc_key']
schedule = BlockingScheduler()
schedule.add_job(run, kwargs={"email": email, "gsheet": gsheet, "doc_key": doc_key,
"username": username, "email_to": email_to, "password": password}, trigger='cron', hour=hour)
try:
schedule.start()
except (KeyboardInterrupt, SystemExit):
schedule.shutdown()
@cli.command('run')
@click.pass_context
def once(ctx):
email = ctx.parent.params['email']
gsheet = ctx.parent.params['gsheet']
username = ctx.parent.params['username']
email_to = ctx.parent.params['email_to']
password = ctx.obj.get('password', None)
doc_key = ctx.parent.params['doc_key']
run(email, username, email_to, password, gsheet, doc_key)
def run(email, username, email_to, password, gsheet, doc_key):
log.info('In run')
content = []
for link in os.environ["searchLinks"].split():
content += get_prometheus_apartments(link)
formatted_content = format_email(content)
if gsheet:
log.info('Updating gsheet')
update_historical_data(doc_key, content)
formatted_content += f'For historical data click the link below:\nhttps://docs.google.com/spreadsheets/d/1XZocxmyQ91e1exBvwDAaSR8Rhavy9WPnwLSz0Z5SKsM/edit?usp=sharing'
if email:
log.info('Sending email')
send_email(username, password, email_to, formatted_content)
log.info(content)
@get_browser
def get_prometheus_apartments(url, driver):
driver.get(url)
content = []
log.info(f'Getting apartments: {url}')
try:
anchors = driver.find_elements_by_xpath(
"//div[@id='results-cards']/div/a[@class='card-wrapper']")
except Exception as e:
log.exception(f'{e}')
return content
links = [a.get_attribute('href') for a in anchors]
apartments = []
for apt in links:
name = apt.strip('/').split('/')[-1]
apartments.append({'name': name, 'url': f'{apt}lease'})
# Scrape each apartment in parallel
for apt in apartments:
results = get_availability(apt)
if results:
content.append(results)
# with Pool() as pool:
# results = [pool.apply_async(get_availability, args=(apt,)) for apt in apartments]
# for result in results:
# data = result.get()
# if data:
# content.append(data)
return content
def update_historical_data(doc_key, content):
date = datetime.datetime.today().strftime('%Y-%m-%d')
all_content = []
for apt in content:
complex = apt['meta']['name']
data = apt['data']
for row in data:
cleaned_values = [f'{date}', f'{complex}'] + \
[value.replace('$', '').replace(',', '') for value in row]
all_content.append(cleaned_values)
update_gdoc(doc_key, all_content)
def format_email(content):
result = ''
for apt in content:
complex = apt['meta']['name']
data = apt['data']
if complex != 'mansion-grove':
continue
result += f'------------ {complex} ----------------\n'
total_available = sum(int(row[-1]) for row in data)
result += '\n'.join(', '.join(row) for row in data)
result += f'\nTotal Available: {total_available}\n'
return result
@get_browser
def get_availability(data, driver):
"""
Returns apartment availability information
"""
url = data['url']
content = []
log.info(f'Processing {url}')
driver.get(url)
delay = 60 # seconds
try:
WebDriverWait(driver, delay).until(
EC.frame_to_be_available_and_switch_to_it('rp-leasing-widget'))
WebDriverWait(driver, delay).until(EC.presence_of_element_located(
(By.XPATH, "//button[contains(@class, 'primary')][contains(text(), 'Start')]")))
except TimeoutException:
log.info(f'Page did not load: {url}')
return content
try:
driver.find_element_by_xpath(
"//button[contains(@class, 'primary')][contains(text(), 'Start')]").click()
WebDriverWait(driver, delay).until(
EC.presence_of_element_located((By.XPATH, "//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'name')]")))
# Print plan prices
names = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'name')]")]
specs = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'specs')]")]
prices = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div/span[contains(@class, 'range')]")]
availability = [n.text for n in driver.find_elements_by_xpath(
"//div[contains(@class, 'floorplan-tile')]/div[@class='tile-buttons']/button")]
except Exception:
log.exception(f'Unable to parse {url}')
return content
for i in range(len(names)):
match = re.match(
r'\((\d+)\).*', availability[i]) if len(availability) > i else None
units = int(match.groups()[0]) if match else '0'
match = re.match(
r'(\$\d*)( \- \$\d*\*)*', prices[i].split(' - ')[0].replace(',', '').replace('From ', '')) if len(prices) > i else None
min_price = match.groups()[0] if match else '$0'
content.append((names[i], specs[i], min_price, str(units)))
return {'meta': data, 'data': content}
def send_email(username, password, to, content):
if not content:
log.info('Nothing to send')
return
msg = EmailMessage()
msg.set_content(content)
msg['Subject'] = f'Apartment availability'
msg['From'] = username
msg['To'] = to
# Send the message via our own SMTP server.
s = smtplib.SMTP_SSL('smtp.gmail.com', 465)
s.login(username, password)
s.send_message(msg)
s.quit()
def update_gdoc(doc_key, cells):
scope = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
CREDENTIALS_PATH = pathlib.Path(__file__).parent.parent / 'credentials.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
CREDENTIALS_PATH.resolve(), scope,
)
docs = gspread.authorize(credentials)
sheet = docs.open_by_key(doc_key).sheet1
new = pd.DataFrame(cells)
new.columns = ['Date', 'Complex', 'Plan', 'Specs', 'Price', 'Availability']
existing = pd.DataFrame(sheet.get_all_values()[1:])
if existing.size:
existing.columns = ['Date', 'Complex',
'Plan', 'Specs', 'Price', 'Availability']
updated = existing.append(new)
updated = updated.groupby(['Date', 'Complex', 'Plan', 'Specs']).min()
updated.reset_index(inplace=True)
sheet.update([updated.columns.values.tolist()] +
updated.values.tolist(), value_input_option='USER_ENTERED')
if __name__ == '__main__':
cli()
def azurefunc(PrometheusScrapper: func.TimerRequest) -> None:
email = os.environ["SendEmail"]
email_to = os.environ["EmailTo"]
username = os.environ["GmailUsername"]
password = os.environ["GmailPassword"]
gsheet = os.environ["UpdateGSheet"]
doc_key = os.environ["GSheetKey"]
run(email, username, email_to, password, gsheet, doc_key)
| en | 0.705838 | # Scrape each appartment in parallel # with Pool() as pool: # results = [pool.apply_async(get_availability, args=(apt,)) for apt in apartments] # for result in results: # data = result.get() # if data: # content.append(data) Returns apartment availability information # seconds # Print plan prices # Send the message via our own SMTP server. | 1.943456 | 2 |
MrWorldwide.py | AnonymousHacker1279/MrWorldwide | 0 | 10127 | <filename>MrWorldwide.py
from PyQt6.QtWidgets import QApplication, QWidget, QFileDialog
import PyQt6.QtCore as QtCore
import PyQt6.QtGui as QtGui
import sys, time, json, requests, traceback, configparser, os
import MrWorldwideUI, ConfigurationUI, UpdateManagerUI
version = "v1.0.0"
class LangTypes:
ENGLISH = "English"
ARABIC = "Arabic"
CHINESE = "Chinese"
DUTCH = "Dutch"
FRENCH = "French"
GERMAN = "German"
HINDI = "Hindi"
INDONESIAN = "Indonesian"
IRISH = "Irish"
ITALIAN = "Italian"
JAPANESE = "Japanese"
KOREAN = "Korean"
POLISH = "Polish"
PORTUGUESE = "Portuguese"
RUSSIAN = "Russian"
SPANISH = "Spanish"
TURKISH = "Turkish"
UKRANIAN = "Ukranian"
VIETNAMESE = "Vietnamese"
class WorkerSignals(QtCore.QObject):
callback = QtCore.pyqtSignal(str)
class Worker(QtCore.QRunnable):
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progressCallback'] = self.signals.callback
@QtCore.pyqtSlot()
def run(self):
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
print(traceback.print_exc())
else:
self.signals.callback.emit(result)
def readConfigurationFile(config):
try:
configFile = open("config.ini")
configFile.close()
return config.read("config.ini")
except:
config['general'] = {}
config['general']['libretranslate_mirror'] = 'https://translate.astian.org/translate'
config['defaults'] = {}
config['defaults']['default_source_language'] = LangTypes.ENGLISH
config['defaults']['default_target_language'] = LangTypes.SPANISH
with open('config.ini', 'w') as configFile:
config.write(configFile)
configFile.close()
return config
class MrWorldwide(QWidget, MrWorldwideUI.Ui_Dialog, QtCore.QThread):
selectedFile = ""
selectedTargetLocation = ""
sourceFileKeys = []
sourceFileValues = []
totalLangFileLines = 0
shouldAbort = False
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/MrWorldwide.png"))
icon = QtGui.QIcon(resource_path("gui_resources/MrWorldwide.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
self.config = configparser.ConfigParser()
readConfigurationFile(self.config)
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.abortButton.clicked.connect(self.abortEvent)
self.startButton.clicked.connect(self.preTranslate)
self.openFileButton.clicked.connect(self.openFileEvent)
self.targetLocationButton.clicked.connect(self.selectFileLocationEvent)
self.configButton.clicked.connect(self.openConfiguration)
# Setup dropdown boxes
self.sourceLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.targetLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.sourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.targetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
self.apiMirror = self.config["general"]["libretranslate_mirror"]
# Open the configuration GUI
def openConfiguration(self, event):
self.configurationDialog = ConfigurationDialog()
self.configurationDialog.setup(self)
self.configurationDialog.show()
# Refresh the configuration
def refreshConfiguration(self):
readConfigurationFile(self.config)
self.sourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.targetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
self.apiMirror = self.config["general"]["libretranslate_mirror"]
# Close event, for handling closing of the program
def closeEvent(self, event):
global app
self.close()
app.exit()
# Abort event, for shutting down translation functions
def abortEvent(self, event):
global shouldAbort
global totalLangFileLines
self.shouldAbort = True
self.progressBar.setValue(0)
self.progressBarLabel.setText("Idle")
self.logAction("ABORT: Translation process canceled.")
# Open file event, for selecting a language file and starting the read process
def openFileEvent(self, event):
self.totalLangFileLines = 0
self.selectedFile = QFileDialog.getOpenFileName(self, 'Select a Minecraft language file', '','JSON Files (*.json)')[0]
self.fileSelectionBox.setText(str(self.selectedFile))
self.readLangFile()
# Select output file location event, for setting the target location
def selectFileLocationEvent(self, event):
self.selectedTargetLocation = QFileDialog.getSaveFileName(self, 'Select an output location', 'target.json','JSON Files (*.json)')[0]
self.targetLocationBox.setText(str(self.selectedTargetLocation))
# Read a language file and get the keys, values, and set various content on the GUI
def readLangFile(self):
global sourceFileValues
global totalLangFileLines
self.sourceFileValues = []
self.sourceFileKeys = []
# Read input JSON and make it usable
startReadInputTime = time.time()
if self.selectedFile != "":
with open(self.selectedFile, 'r') as f:
data = json.load(f)
self.sourceFileKeys = data.keys()
for item in data:
if self.shouldAbort:
return
self.sourceFileValues.append(data[item])
self.totalLangFileLines = self.totalLangFileLines + 1
self.logAction("Reading input file took " + str(((time.time() - startReadInputTime) * 1000).__round__(3)) + " ms.")
self.langFileEntryCounter.display(self.totalLangFileLines)
self.logAction("Found " + str(self.totalLangFileLines) + " entries.")
def preTranslate(self, event):
global totalLangFileLines
global selectedFile
global selectedTargetLocation
canProceed = True
self.shouldAbort = False
if self.selectedFile == "":
self.logAction("ERROR: No language file selected.")
canProceed = False
elif self.totalLangFileLines == 0:
self.logAction("ERROR: The selected language file is empty.")
canProceed = False
elif self.selectedTargetLocation == "":
self.logAction("ERROR: No target location specified.")
canProceed = False
elif self.sourceLangBox.currentText() == self.targetLangBox.currentText():
self.logAction("ERROR: Target language is the same as the source")
canProceed = False
if canProceed:
self.logAction("Beginning translations with a source language of " + self.sourceLangBox.currentText() + " and a target language of " + self.targetLangBox.currentText())
self.logAction("Using LibreTranslate mirror: " + self.config["general"]["libretranslate_mirror"])
self.disableButtonsDuringTranslations()
self.threadpool = QtCore.QThreadPool()
self.worker = Worker(self.startTranslations)
self.worker.signals.callback.connect(self.threadCallbackHandler)
self.threadpool.start(self.worker)
def disableButtonsDuringTranslations(self):
self.startButton.setDisabled(True)
self.openFileButton.setDisabled(True)
self.targetLocationButton.setDisabled(True)
self.closeButton.setDisabled(True)
self.configButton.setDisabled(True)
def enableButtonsAfterTranslations(self):
self.startButton.setDisabled(False)
self.openFileButton.setDisabled(False)
self.targetLocationButton.setDisabled(False)
self.closeButton.setDisabled(False)
self.configButton.setDisabled(False)
def threadCallbackHandler(self, callback):
try:
exec(callback)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
exctype, value, traceback.format_exc()
app.exit()
def startTranslations(self, progressCallback):
global sourceFileValues
global totalLangFileLines
global shouldAbort
progressCallback.emit('self.progressBarLabel.setText("Starting translations")')
# Set query headers
headers = {
'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
# Really inefficient but it works ¯\_(ツ)_/¯
startQueryTime = time.time()
responseJSON = []
progressCallback.emit('self.progressBarLabel.setText("Translating...")')
itemLoopIteration = 1
try:
requests.post(self.config["general"]["libretranslate_mirror"], headers=headers, data=None)
hasFailedResolve = False
except:
requests.post('https://translate.astian.org/translate', headers=headers, data=None)
progressCallback.emit('self.logAction("Failed to resolve LibreTranslate mirror. Defaulting to https://translate.astian.org/translate")')
hasFailedResolve = True
for item in self.sourceFileValues:
if self.shouldAbort:
return
# Setup the progress bar, by mapping the total translation count to 100
progressCallback.emit('self.progressBar.setValue(int(((' + str(itemLoopIteration) + ' / self.totalLangFileLines) * 100).__round__(0)))')
# Set query data
data = {
'q': item,
'source': self.getLangIdentifier(self.sourceLangBox.currentText()),
'target': self.getLangIdentifier(self.targetLangBox.currentText())
}
# Send the query and get the response
if hasFailedResolve == True:
response = requests.post('https://translate.astian.org/translate', headers=headers, data=data)
else:
response = requests.post(self.config["general"]["libretranslate_mirror"], headers=headers, data=data)
responseData = json.loads(response.content.decode(response.encoding))["translatedText"]
responseJSON.append(str(responseData).rstrip('"').replace('\u00ab', '').lstrip('"').replace('\u00bb', ''))
itemLoopIteration = itemLoopIteration + 1
progressCallback.emit('self.logAction("Query time was " + str(time.time() - ' + str(startQueryTime) + ') + " seconds.")')
progressCallback.emit('self.progressBarLabel.setText("Translations complete")')
progressCallback.emit('self.saveToFile(' + str(responseJSON) + ')')
# Save the JSON data to file
def saveToFile(self, responseJSON):
global sourceFileKeys
global shouldAbort
self.progressBarLabel.setText("Writing to file...")
self.progressBar.setValue(0)
with open(self.targetLocationBox.text(), 'w', encoding="UTF-8") as f:
compiledDict = dict()
responseJSONList = list(responseJSON)
currentIteration = 0
for item in self.sourceFileKeys:
if self.shouldAbort:
return
compiledDict.update({item: str(responseJSONList[currentIteration])})
currentIteration = currentIteration + 1
progBarVal = int(((currentIteration / self.totalLangFileLines) * 100).__round__(0))
self.progressBar.setValue(progBarVal)
json.dump(compiledDict, f, separators=(',', ': '), indent=" ", ensure_ascii=False)
self.enableButtonsAfterTranslations()
self.logAction("Translations written to file.")
self.progressBarLabel.setText("All tasks completed.")
# Log information to the console
def logAction(self, text: str):
if self.logBox.text() == "No log information available. ":
self.logBox.setText("")
preparedLogText = ">> " + text
else:
preparedLogText = self.logBox.text() + "\n>> " + text
self.logBox.setText(preparedLogText)
self.logBoxScrollArea.verticalScrollBar().setValue(self.logBoxScrollArea.verticalScrollBar().maximum())
def getLangIdentifier(self, lang):
if lang == LangTypes.ENGLISH:
return "en"
if lang == LangTypes.ARABIC:
return "ar"
if lang == LangTypes.CHINESE:
return "zh"
if lang == LangTypes.DUTCH:
return "nl"
if lang == LangTypes.FRENCH:
return "fr"
if lang == LangTypes.GERMAN:
return "de"
if lang == LangTypes.HINDI:
return "hi"
if lang == LangTypes.INDONESIAN:
return "id"
if lang == LangTypes.IRISH:
return "ga"
if lang == LangTypes.ITALIAN:
return "it"
if lang == LangTypes.JAPANESE:
return "ja"
if lang == LangTypes.KOREAN:
return "ko"
if lang == LangTypes.POLISH:
return "pl"
if lang == LangTypes.PORTUGUESE:
return "pt"
if lang == LangTypes.RUSSIAN:
return "ru"
if lang == LangTypes.SPANISH:
return "es"
if lang == LangTypes.TURKISH:
return "tr"
if lang == LangTypes.UKRANIAN:
return "uk"
if lang == LangTypes.VIETNAMESE:
return "vi"
# Initialize the program
def __init__(self, parent=None):
global app
super(MrWorldwide, self).__init__(parent)
self.setupUi(self)
self.run()
class ConfigurationDialog(QWidget, ConfigurationUI.Ui_Dialog):
def __init__(self, parent=None):
super(ConfigurationDialog, self).__init__(parent)
self.setupUi(self)
self.run()
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/Configuration.png"))
icon = QtGui.QIcon(resource_path("gui_resources/Configuration.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
# Read configuration
self.config = configparser.ConfigParser()
readConfigurationFile(self.config)
# Setup dropdown boxes
self.defaultSourceLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.defaultTargetLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
# Apply current configuration
self.apiMirror.setText(self.config["general"]["libretranslate_mirror"])
self.defaultSourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.defaultTargetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.applyButton.clicked.connect(self.applyEvent)
self.updateButton.clicked.connect(self.openUpdateManager)
# Setup variables
def setup(self, parent):
self.parent = parent
# Close event, for handling closing of the program
def closeEvent(self, event):
self.close()
# Update event, for opening the update manager
# Open the update manager GUI
def openUpdateManager(self, event):
self.updateManagerDialog = UpdateManagerDialog()
self.updateManagerDialog.setup(self)
self.updateManagerDialog.show()
# Apply event, for handling applying of configurations
def applyEvent(self, event):
self.config = configparser.ConfigParser()
self.config['general'] = {}
self.config['general']['libretranslate_mirror'] = self.apiMirror.text()
self.config['defaults'] = {}
self.config['defaults']['default_source_language'] = self.defaultSourceLangBox.currentText()
self.config['defaults']['default_target_language'] = self.defaultTargetLangBox.currentText()
with open('config.ini', 'w') as configFile:
self.config.write(configFile)
configFile.close()
self.parent.refreshConfiguration()
self.close()
class UpdateManagerDialog(QWidget, UpdateManagerUI.Ui_Dialog):
def __init__(self, parent=None):
super(UpdateManagerDialog, self).__init__(parent)
self.setupUi(self)
self.run()
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/Updates.png"))
icon = QtGui.QIcon(resource_path("gui_resources/Updates.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.checkUpdatesButton.clicked.connect(self.checkForUpdatesEvent)
global version
self.currentVersionBox.setText(version)
# Setup variables
def setup(self, parent):
self.parent = parent
# Close event, for handling closing of the program
def closeEvent(self, event):
self.close()
# Check for updates event
def checkForUpdatesEvent(self, event):
self.updateData = json.loads(requests.get("https://raw.githubusercontent.com/AnonymousHacker1279/MrWorldwide/master/update.json").text)
self.latestVersionBox.setText(self.updateData["latest"])
self.changelogBox.setText(self.updateData["changelog"] + "\n\nDownload the update here: " + self.updateData["link"])
def main():
global app
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
app.setStyle("Fusion")
form = MrWorldwide()
form.show()
app.exec()
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath('.'), relative_path)
if __name__ == '__main__':
main() | <filename>MrWorldwide.py
tools/az_cli.py | google/cloud-forensics-utls | 0 | 10128 | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo CLI tool for Azure."""
import os
from datetime import datetime
from typing import TYPE_CHECKING
from Crypto.PublicKey import RSA
from libcloudforensics import logging_utils
from libcloudforensics.providers.azure.internal import account
from libcloudforensics.providers.azure.internal import monitoring
from libcloudforensics.providers.azure import forensics
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
if TYPE_CHECKING:
import argparse
def ListInstances(args: 'argparse.Namespace') -> None:
"""List instances in Azure subscription.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
instances = az_account.compute.ListInstances(
resource_group_name=args.resource_group_name)
logger.info('Instances found:')
for instance in instances.values():
boot_disk = instance.GetBootDisk()
logger.info(
'Name: {0:s}, Boot disk: {1:s}'.format(instance.name, boot_disk.name))
def ListDisks(args: 'argparse.Namespace') -> None:
"""List disks in Azure subscription.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
disks = az_account.compute.ListDisks(
resource_group_name=args.resource_group_name)
logger.info('Disks found:')
for disk_name, disk in disks.items():
logger.info('Name: {0:s}, Region: {1:s}'.format(disk_name, disk.region))
def CreateDiskCopy(args: 'argparse.Namespace') -> None:
"""Create an Azure disk copy.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
logger.info('Starting disk copy...')
disk_copy = forensics.CreateDiskCopy(args.default_resource_group_name,
instance_name=args.instance_name,
disk_name=args.disk_name,
disk_type=args.disk_type,
region=args.region,
src_profile=args.src_profile,
dst_profile=args.dst_profile)
logger.info(
'Done! Disk {0:s} successfully created. You will find it in '
'your Azure subscription under the name {1:s}.'.format(
disk_copy.resource_id, disk_copy.name))
def StartAnalysisVm(args: 'argparse.Namespace') -> None:
"""Start forensic analysis VM.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
attach_disks = []
if args.attach_disks:
attach_disks = args.attach_disks.split(',')
# Check if attach_disks parameter exists and if there
# are any empty entries.
if not (attach_disks and all(elements for elements in attach_disks)):
logger.error('error: parameter --attach_disks: {0:s}'.format(
args.attach_disks))
return
ssh_public_key = args.ssh_public_key
if not ssh_public_key:
# According to https://docs.microsoft.com/cs-cz/samples/azure-samples/
# resource-manager-python-template-deployment/resource-manager-python-
# template-deployment/ there's no API to generate a new SSH key pair in
# Azure, so we do this manually...
ssh_public_key = _GenerateSSHKeyPair(args.instance_name)
logger.info('Starting analysis VM...')
vm = forensics.StartAnalysisVm(args.default_resource_group_name,
args.instance_name,
int(args.disk_size),
ssh_public_key,
cpu_cores=int(args.cpu_cores),
memory_in_mb=int(args.memory_in_mb),
region=args.region,
attach_disks=attach_disks,
dst_profile=args.dst_profile)
logger.info('Analysis VM started.')
logger.info('Name: {0:s}, Started: {1:s}'.format(vm[0].name, str(vm[1])))
def _GenerateSSHKeyPair(vm_name: str) -> str:
"""Generate a SSH key pair and returns its public key.
Both public and private keys will be saved in the current directory.
Args:
vm_name (str): The VM name for which to generate the key pair.
Returns:
str: The public key for the generated SSH key pair.
Raises:
ValueError: If vm_name is None.
"""
if not vm_name:
raise ValueError('Parameter vm_name must not be None.')
logger.info('Generating a new SSH key pair for VM: {0:s}'.format(vm_name))
key = RSA.generate(2048)
key_name = '{0:s}-ssh'.format(vm_name)
public_key = key.publickey().exportKey('OpenSSH')
path_public_key = os.path.join(os.getcwd(), key_name + '.pub')
private_key = key.exportKey('PEM')
path_private_key = os.path.join(os.getcwd(), key_name + '.pem')
with open(path_private_key, 'wb') as f:
f.write(private_key)
with open(path_public_key, 'wb') as f:
f.write(public_key)
logger.info('SSH key pair generated. Public key saved in {0:s}, private key '
'saved in {1:s}'.format(path_public_key, path_private_key))
return public_key.decode('utf-8')
def ListMetrics(args: 'argparse.Namespace') -> None:
"""List Azure Monitoring metrics for a resource.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
az_monitoring = monitoring.AZMonitoring(az_account)
metrics = az_monitoring.ListAvailableMetricsForResource(args.resource_id)
for metric in metrics:
logger.info('Available metric: {0:s}'.format(metric))
def QueryMetrics(args: 'argparse.Namespace') -> None:
"""Query Azure Monitoring metrics for a resource.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
RuntimeError: If from_date or to_date could not be parsed.
"""
az_account = account.AZAccount(args.default_resource_group_name)
az_monitoring = monitoring.AZMonitoring(az_account)
from_date, to_date = args.from_date, args.to_date
if from_date and to_date:
try:
from_date = datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%SZ')
to_date = datetime.strptime(to_date, '%Y-%m-%dT%H:%M:%SZ')
except ValueError as exception:
raise RuntimeError(
'Cannot parse date: {0!s}'.format(exception)) from exception
metrics = az_monitoring.GetMetricsForResource(
args.resource_id,
metrics=args.metrics,
from_date=from_date,
to_date=to_date,
interval=args.interval,
aggregation=args.aggregation or 'Total',
qfilter=args.qfilter)
for metric, metric_value in metrics.items():
logger.info('Metric: {0:s}'.format(metric))
for timestamp, value in metric_value.items():
logger.info(' Timestamp: {0:s}, value: {1:s}'.format(timestamp, value))
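# Wiring sketch (not part of the original module): one way these handlers could
# be attached to an argparse CLI. The flag names simply mirror the attributes
# each handler reads from `args`; the real project's CLI may differ.
if __name__ == '__main__':
  import argparse
  parser = argparse.ArgumentParser(description='Azure forensics demo CLI')
  parser.add_argument('default_resource_group_name')
  subparsers = parser.add_subparsers(dest='command', required=True)
  list_instances = subparsers.add_parser('listinstances')
  list_instances.add_argument('--resource_group_name', default=None)
  list_instances.set_defaults(func=ListInstances)
  list_disks = subparsers.add_parser('listdisks')
  list_disks.add_argument('--resource_group_name', default=None)
  list_disks.set_defaults(func=ListDisks)
  parsed = parser.parse_args()
  parsed.func(parsed)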
bbio/bbio.py | timgates42/PyBBIO | 102 | 10129 | <filename>bbio/bbio.py
"""
PyBBIO - bbio.py
Copyright (c) 2012-2015 - <NAME> <<EMAIL>>
Released under the MIT license
https://github.com/graycatlabs/PyBBIO
"""
import sys, atexit
from .platform import platform_init, platform_cleanup
from .common import ADDITIONAL_CLEANUP, util_init
def bbio_init():
""" Pre-run initialization, i.e. starting module clocks, etc. """
util_init()
platform_init()
def bbio_cleanup():
""" Post-run cleanup, i.e. stopping module clocks, etc. """
# Run user cleanup routines:
for cleanup in ADDITIONAL_CLEANUP:
try:
cleanup()
except Exception as e:
# Something went wrong with one of the cleanup routines, but we
# want to keep going; just print the error and continue
print "*Exception raised trying to call cleanup routine '%s':\n %s" %\
(cleanup, e)
platform_cleanup()
# The following code detects if Python is running interactively,
# and if so initializes PyBBIO on import and registers PyBBIO's
# cleanup to be called at exit, otherwise it defines the run() and
# stop() methods for the file based control flow:
import __main__
if not hasattr(__main__, '__file__'):
# We're in the interpreter, see:
# http://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode
bbio_init()
print "PyBBIO initialized"
def interactive_cleanup():
bbio_cleanup()
print "Finished PyBBIO cleanup"
atexit.register(interactive_cleanup)
else:
bbio_init()
atexit.register(bbio_cleanup)
# Imported in a Python file, define run() and stop():
def run(setup, loop):
""" The main loop; must be passed a setup and a loop function.
First the setup function will be called once, then the loop
function wil be called continuously until a stop signal is
raised, e.g. CTRL-C or a call to the stop() function from
within the loop. """
try:
setup()
while (True):
loop()
except KeyboardInterrupt:
# Manual exit signal, clean up and exit happy
exit(0)
def stop():
""" Preferred way for a program to stop itself. """
raise KeyboardInterrupt # Expected happy stop condition in run()
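# Usage sketch (added comment, not original code): the setup/loop pattern that
# run() documents above, shown commented out so nothing executes on import; the
# function bodies are placeholders.
#
#   def setup():
#     pass  # one-time initialisation, runs once
#
#   def loop():
#     stop()  # raise the expected stop condition after one pass
#
#   run(setup, loop)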
app/models/endeavors.py | theLaborInVain/kdm-manager-api | 2 | 10130 | <gh_stars>1-10
"""
The Endeavors asset collection has a number of irregular assets. Be careful
writing any custom code here.
"""
from app.assets import endeavors
from app import models
class Assets(models.AssetCollection):
def __init__(self, *args, **kwargs):
self.root_module = endeavors
models.AssetCollection.__init__(self, *args, **kwargs)
| """
The Endeavors asset collection has a number of irregular assets. Be careful
writing any custom code here.
"""
from app.assets import endeavors
from app import models
class Assets(models.AssetCollection):
def __init__(self, *args, **kwargs):
self.root_module = endeavors
models.AssetCollection.__init__(self, *args, **kwargs) | en | 0.886812 | The Endeavors asset collection has a number of irregular assets. Be careful writing any custom code here. | 1.87897 | 2 |
interface/inter5.py | CeciliaDornelas/Python | 0 | 10131 | <reponame>CeciliaDornelas/Python<gh_stars>0
import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget
from PyQt5.QtCore import QSize
class HelloWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setMinimumSize(QSize(280, 120))
self.setWindowTitle("Olá, Mundo! Exemplo PyQT5")
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
gridLayout = QGridLayout(self)
centralWidget.setLayout(gridLayout)
title = QLabel("Olá Mundo para PyQt", self)
title.setAlignment(QtCore.Qt.AlignCenter)
gridLayout.addWidget(title, 0, 0)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = HelloWindow()
mainWin.show()
sys.exit( app.exec_() )
setup.py | notwa/scipybiteopt | 0 | 10132 | <filename>setup.py
#!/usr/bin/env python
import os
import sys
import numpy
from setuptools import setup, Extension
#include markdown description in pip page
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# https://github.com/pypa/packaging-problems/issues/84
# no sensible way to include header files by default
headers = ['scipybiteopt/biteopt.h',
'scipybiteopt/biteoptort.h',
'scipybiteopt/spheropt.h',
'scipybiteopt/biteaux.h',
'scipybiteopt/nmsopt.h']
def get_c_sources(files, include_headers=False):
return files + (headers if include_headers else [])
module1 = Extension('scipybiteopt.biteopt',
sources=get_c_sources(['scipybiteopt/biteopt_py_ext.cpp'], include_headers=(sys.argv[1] == "sdist")),
language="c++",
include_dirs=[numpy.get_include()],
extra_compile_args=['-std=c++11', '-O3'] if os.name != 'nt' else ['-O3'])
setup(name='scipybiteopt',
version='1.1.1',
description="Scipy style wrapper for Aleksey Vaneev's BiteOpt",
author='dschmitz89',
author_email='<EMAIL>',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url = 'https://github.com/dschmitz89/scipybiteopt',
packages = ['scipybiteopt'],
ext_modules = [module1],
install_requires=[
'numpy']
)
qiskit_experiments/data_processing/__init__.py | yoshida-ryuhei/qiskit-experiments | 0 | 10133 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===========================================================
Data Processing (:mod:`qiskit_experiments.data_processing`)
===========================================================
.. currentmodule:: qiskit_experiments.data_processing
Data processing is the act of taking the data returned by the backend and
converting it into a format that can be analyzed.
It is implemented as a chain of data processing steps that transform various input data,
e.g. IQ data, into a desired format, e.g. population, which can be analyzed.
These data transformations may consist of multiple steps, such as kerneling and discrimination.
Each step is implemented by a :class:`~qiskit_experiments.data_processing.data_action.DataAction`
also called a `node`.
The data processor implements the :meth:`__call__` method. Once initialized, it
can thus be used as a standard python function:
.. code-block:: python
processor = DataProcessor(input_key="memory", [Node1(), Node2(), ...])
out_data = processor(in_data)
The data input to the processor is a sequence of dictionaries each representing the result
of a single circuit. The output of the processor is a numpy array whose shape and data type
depend on the combination of the nodes in the data processor.
Uncertainties that arise from quantum measurements or finite sampling can be taken into account
in the nodes: a standard error can be generated in a node and can be propagated
through the subsequent nodes in the data processor.
Correlation between computed values is also considered.
Classes
=======
.. autosummary::
:toctree: ../stubs/
DataProcessor
DataAction
TrainableDataAction
Data Processing Nodes
=====================
.. autosummary::
:toctree: ../stubs/
Probability
MarginalizeCounts
ToImag
ToReal
SVD
AverageData
BasisExpectationValue
MinMaxNormalize
"""
from .data_action import DataAction, TrainableDataAction
from .nodes import (
Probability,
MarginalizeCounts,
ToImag,
ToReal,
SVD,
AverageData,
BasisExpectationValue,
MinMaxNormalize,
)
from .data_processor import DataProcessor
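# Usage sketch (added comment, illustrative only): a two-node chain that turns
# counts into normalized ground-state populations. The input layout, a list of
# per-circuit dicts keyed by "counts", is an assumption.
#
# processor = DataProcessor("counts", [Probability("0"), MinMaxNormalize()])
# populations = processor([{"counts": {"0": 900, "1": 124}}])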
models/pointnet2_sem_seg_msg_haptic.py | yufeiwang63/Pointnet_Pointnet2_pytorch | 0 | 10134 | import torch.nn as nn
import torch.nn.functional as F
from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg,PointNetFeaturePropagation
class get_shared_model(nn.Module):
def __init__(self, use_batch_norm, num_classes, num_input_channel=7):
super(get_shared_model, self).__init__()
self.sa1 = PointNetSetAbstractionMsg(1024, [0.05, 0.1], [16, 32], num_input_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(256, [0.1, 0.2], [16, 32], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(64, [0.2, 0.4], [16, 32], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(16, [0.4, 0.8], [16, 32], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
# for normal prediction
self.conv_normal = nn.Conv1d(128, 3, 1)
# for force prediction
self.conv_force = nn.Conv1d(128, 1, 1)
self.use_batch_norm = use_batch_norm
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.conv1(l0_points))
contact = self.conv2(x)
normal = self.conv_normal(x)
normal = F.normalize(normal, dim=1)
force = self.conv_force(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
contact = contact.permute(0, 2, 1)
normal = normal.permute(0, 2, 1)
force = force.permute(0, 2, 1)
return (contact, normal, force), l4_points
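# Shape sketch (added comment; the example values are assumptions): for a batch
# of B point clouds with N points and num_input_channel=7 channels,
# get_shared_model returns contact (B, N, num_classes), unit-length normal
# (B, N, 3) and force (B, N, 1), plus the deepest abstraction features.
#
# model = get_shared_model(use_batch_norm=True, num_classes=1)
# pts = torch.rand(2, 7, 2048)  # (B, C, N); the first 3 channels are xyz
# (contact, normal, force), _ = model(pts)
# contact: (2, 2048, 1), normal: (2, 2048, 3), force: (2, 2048, 1)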
class get_model(nn.Module):
def __init__(self, use_batch_norm, num_out_channel, num_in_channel=7, target='contact',
radius_list=[[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]],
npoint_list=[1024, 256, 64, 16],
sample_point_1_list=[16, 16, 16, 16],
sample_point_2_list=[32, 32, 32, 32],
layer=4,
downsample=True,
dropout=True,
track_running_stats=True,
mlp1_size=[16, 16, 32],
mlp2_size=[32, 32, 64],
interpolation_mlp_size=[128, 128, 128]
):
print("using layer: ", layer)
super(get_model, self).__init__()
self.layer = layer
if self.layer == 4:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(npoint_list[3], radius_list[3], [sample_point_1_list[3], sample_point_2_list[3]], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 3:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 2:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+128+128, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 1:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [mlp1_size, mlp2_size], use_batch_norm=use_batch_norm,
downsample=downsample, track_running_stats=track_running_stats)
self.fp1 = PointNetFeaturePropagation(mlp1_size[-1] + mlp2_size[-1], interpolation_mlp_size, use_batch_norm=use_batch_norm, track_running_stats=track_running_stats)
self.drop_out = dropout
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128, track_running_stats=track_running_stats)
if self.drop_out:
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_out_channel, 1)
self.use_batch_norm = use_batch_norm
self.target = target
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
if self.layer == 4:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 3:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 2:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 1:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
if self.drop_out:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.bn1(self.conv1(l0_points)))
else:
x = F.relu(self.conv1(l0_points))
x = self.conv2(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
if self.target == 'normal':
x = F.normalize(x, dim=1)
x = x.permute(0, 2, 1)
# return x, l4_points
return x, None
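# Shape reference for get_model.forward, inferred from the code above (exact channel
# counts depend on the constructor arguments):
#   input  xyz : (batch, num_in_channel, num_points); channels 0-2 are xyz coordinates
#   output x   : (batch, num_points, num_out_channel) after the final permute
#   the second return value is always None (kept for interface compatibility)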
class get_loss_original(nn.Module):
def __init__(self):
super(get_loss_original, self).__init__()
def forward(self, pred, target, trans_feat, weight):
total_loss = F.nll_loss(pred, target, weight=weight)
return total_loss
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
self.loss = nn.BCEWithLogitsLoss()
def forward(self, pred, target, trans_feat, weight):
total_loss = self.loss(pred, target)
return total_loss
if __name__ == '__main__':
import torch
    model = get_model(use_batch_norm=True, num_out_channel=13, num_in_channel=9)  # match the constructor signature above
    xyz = torch.rand(6, 9, 2048)  # (batch, channels, num_points); channels 0-2 are xyz coordinates
(model(xyz)) | import torch.nn as nn
import torch.nn.functional as F
from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg,PointNetFeaturePropagation
class get_shared_model(nn.Module):
def __init__(self, use_batch_norm, num_classes, num_input_channel=7):
super(get_shared_model, self).__init__()
self.sa1 = PointNetSetAbstractionMsg(1024, [0.05, 0.1], [16, 32], num_input_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(256, [0.1, 0.2], [16, 32], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(64, [0.2, 0.4], [16, 32], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(16, [0.4, 0.8], [16, 32], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
# for normal prediction
self.conv_normal = nn.Conv1d(128, 3, 1)
# for force prediction
self.conv_force = nn.Conv1d(128, 1, 1)
self.use_batch_norm = use_batch_norm
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.conv1(l0_points))
contact = self.conv2(x)
normal = self.conv_normal(x)
normal = F.normalize(normal, dim=1)
force = self.conv_force(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
contact = contact.permute(0, 2, 1)
normal = normal.permute(0, 2, 1)
force = force.permute(0, 2, 1)
return (contact, normal, force), l4_points
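# Note: get_shared_model runs a single PointNet++ encoder/decoder and returns three
# per-point heads -- contact logits (num_classes channels), unit normals (3 channels,
# L2-normalised), and a scalar force -- each permuted to (batch, num_points, channels).
# This summary is inferred from the code above, not from external documentation.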
class get_model(nn.Module):
def __init__(self, use_batch_norm, num_out_channel, num_in_channel=7, target='contact',
radius_list=[[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]],
npoint_list=[1024, 256, 64, 16],
sample_point_1_list=[16, 16, 16, 16],
sample_point_2_list=[32, 32, 32, 32],
layer=4,
downsample=True,
dropout=True,
track_running_stats=True,
mlp1_size=[16, 16, 32],
mlp2_size=[32, 32, 64],
interpolation_mlp_size=[128, 128, 128]
):
print("using layer: ", layer)
super(get_model, self).__init__()
self.layer = layer
if self.layer == 4:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(npoint_list[3], radius_list[3], [sample_point_1_list[3], sample_point_2_list[3]], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 3:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 2:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+128+128, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 1:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [mlp1_size, mlp2_size], use_batch_norm=use_batch_norm,
downsample=downsample, track_running_stats=track_running_stats)
self.fp1 = PointNetFeaturePropagation(mlp1_size[-1] + mlp2_size[-1], interpolation_mlp_size, use_batch_norm=use_batch_norm, track_running_stats=track_running_stats)
self.drop_out = dropout
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128, track_running_stats=track_running_stats)
if self.drop_out:
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_out_channel, 1)
self.use_batch_norm = use_batch_norm
self.target = target
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
if self.layer == 4:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 3:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 2:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 1:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
if self.drop_out:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.bn1(self.conv1(l0_points)))
else:
x = F.relu(self.conv1(l0_points))
x = self.conv2(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
if self.target == 'normal':
x = F.normalize(x, dim=1)
x = x.permute(0, 2, 1)
# return x, l4_points
return x, None
class get_loss_original(nn.Module):
def __init__(self):
super(get_loss_original, self).__init__()
def forward(self, pred, target, trans_feat, weight):
total_loss = F.nll_loss(pred, target, weight=weight)
return total_loss
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
self.loss = nn.BCEWithLogitsLoss()
def forward(self, pred, target, trans_feat, weight):
total_loss = self.loss(pred, target)
return total_loss
if __name__ == '__main__':
import torch
    model = get_model(use_batch_norm=True, num_out_channel=13, num_in_channel=9)  # match the constructor signature above
    xyz = torch.rand(6, 9, 2048)  # (batch, channels, num_points); channels 0-2 are xyz coordinates
(model(xyz)) | en | 0.785908 | # for normal prediction # for force prediction # this is not needed with BCElogit loss # x = F.log_softmax(x, dim=1) # this is not needed with BCElogit loss # x = F.log_softmax(x, dim=1) # return x, l4_points | 2.047124 | 2 |
backend/src/notifications/admin.py | YujithIsura/request-management | 3 | 10135 | <reponame>YujithIsura/request-management
from django.contrib import admin
from .models import Notification
admin.site.register(Notification) | from django.contrib import admin
from .models import Notification
admin.site.register(Notification) | none | 1 | 1.131761 | 1 |
|
HoverSlam.py | GiantWaffleCode/WafflePython | 13 | 10136 | import krpc
import time
import math
from simple_pid import PID
conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity
current_met = conn.add_stream(getattr, vessel, 'met')
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
current_drag = conn.add_stream(getattr, vessel.flight(), 'drag')
current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force')
current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed')
vessel.control.activate_next_stage()
vessel.control.sas = True
time.sleep(.2)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde
def bottom_altitude():
return max(0, current_alt() - abs(lowest()[0][0]))
for engine in vessel.parts.engines:
engine.gimbal_locked = True
while True:
aero_amp = math.sqrt(current_aero()[0] ** 2
+ current_aero()[1] ** 2
+ current_aero()[2] ** 2)
time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass)
+ vessel.orbit.body.surface_gravity)
if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed():
print(current_speed())
print(f"Start Hover Slam Burn")
vessel.control.throttle = .9
break
while current_speed() > 50:
print(current_speed())
time.sleep(.01)
pass
print(f"Switch to Stab")
for leg in vessel.parts.legs:
leg.deployed = True
pid1 = PID(.15, 0, .5, setpoint=0)
pid1.output_limits = (0, 1)
pid1.sample_time = 0.01
while bottom_altitude() > 1:
vessel.control.throttle = pid1(bottom_altitude())
# pid1.setpoint *= .98
time.sleep(.01)
vessel.control.sas_mode = conn.space_center.SASMode.radial
vessel.control.throttle = 0
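# Rough math behind the burn-start check above (explanatory note, not part of the
# original script): with thrust T at 90% throttle, aerodynamic force magnitude A,
# vessel mass m and surface gravity g, the estimated time to cancel the current
# speed v is t = v / ((0.9*T + A) / m + g), and the burn starts once t * v (an
# approximate stopping distance) reaches the altitude of the lowest point of the
# craft minus a one-second speed margin.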
| import krpc
import time
import math
from simple_pid import PID
conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity
current_met = conn.add_stream(getattr, vessel, 'met')
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
current_drag = conn.add_stream(getattr, vessel.flight(), 'drag')
current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force')
current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed')
vessel.control.activate_next_stage()
vessel.control.sas = True
time.sleep(.2)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde
def bottom_altitude():
return max(0, current_alt() - abs(lowest()[0][0]))
for engine in vessel.parts.engines:
engine.gimbal_locked = True
while True:
aero_amp = math.sqrt(current_aero()[0] ** 2
+ current_aero()[1] ** 2
+ current_aero()[2] ** 2)
time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass)
+ vessel.orbit.body.surface_gravity)
if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed():
print(current_speed())
print(f"Start Hover Slam Burn")
vessel.control.throttle = .9
break
while current_speed() > 50:
print(current_speed())
time.sleep(.01)
pass
print(f"Switch to Stab")
for leg in vessel.parts.legs:
leg.deployed = True
pid1 = PID(.15, 0, .5, setpoint=0)
pid1.output_limits = (0, 1)
pid1.sample_time = 0.01
while bottom_altitude() > 1:
vessel.control.throttle = pid1(bottom_altitude())
# pid1.setpoint *= .98
time.sleep(.01)
vessel.control.sas_mode = conn.space_center.SASMode.radial
vessel.control.throttle = 0
| en | 0.417771 | # pid1.setpoint *= .98 | 2.345641 | 2 |
tests/unit_tests/cx_core/integration/integration_test.py | clach04/controllerx | 204 | 10137 | <gh_stars>100-1000
from cx_core import integration as integration_module
from cx_core.controller import Controller
def test_get_integrations(fake_controller: Controller):
integrations = integration_module.get_integrations(fake_controller, {})
inteagration_names = {i.name for i in integrations}
assert inteagration_names == {
"z2m",
"zha",
"deconz",
"state",
"mqtt",
"lutron_caseta",
}
| from cx_core import integration as integration_module
from cx_core.controller import Controller
def test_get_integrations(fake_controller: Controller):
integrations = integration_module.get_integrations(fake_controller, {})
inteagration_names = {i.name for i in integrations}
assert inteagration_names == {
"z2m",
"zha",
"deconz",
"state",
"mqtt",
"lutron_caseta",
} | none | 1 | 2.22558 | 2 |
|
asystem-adoc/src/main/template/python/script_util.py | ggear/asystem_archive | 0 | 10138 | <reponame>ggear/asystem_archive
###############################################################################
#
# Python script utilities as included from the cloudera-framework-assembly,
# do not edit directly
#
###############################################################################
import os
import re
def qualify(path):
return path if (re.match(r'[.]*://[.]*', path)
or 'CF_HADOOP_DEFAULT_FS' not in os.environ) \
else os.environ['CF_HADOOP_DEFAULT_FS'] + path
| ###############################################################################
#
# Python script utilities as included from the cloudera-framework-assembly,
# do not edit directly
#
###############################################################################
import os
import re
def qualify(path):
    return path if (re.match(r'.*://.*', path)  # any path that already carries a scheme is left as-is
or 'CF_HADOOP_DEFAULT_FS' not in os.environ) \
else os.environ['CF_HADOOP_DEFAULT_FS'] + path | de | 0.636154 | ############################################################################### # # Python script utilities as included from the cloudera-framework-assembly, # do not edit directly # ############################################################################### | 2.098034 | 2 |
datapackage_pipelines/web/server.py | gperonato/datapackage-pipelines | 109 | 10139 | import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(lvl):
pipelines = list(filter(lambda x: len(x['id']) == 1, lvl))
children_ = list(filter(lambda x: len(x['id']) > 1, lvl))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows')
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
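# Deployment notes (assumptions drawn from the configuration handling above, not from
# upstream documentation): the dashboard honours DPP_BASE_PATH as the URL prefix and
# enables HTTP basic auth only when both DPP_BASIC_AUTH_USERNAME and
# DPP_BASIC_AUTH_PASSWORD are set. A hypothetical local run and badge check:
#   FLASK_APP=datapackage_pipelines.web.server flask run
#   curl http://localhost:5000/badge/my-pipeline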
| import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(lvl):
pipelines = list(filter(lambda x: len(x['id']) == 1, lvl))
children_ = list(filter(lambda x: len(x['id']) > 1, lvl))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows')
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
| en | 0.88711 | A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars. # If we have a pipeline_path, filter the pipeline ids. # can get the full details from api/raw/<path:pipeline_id> An individual pipeline status Status badge for a collection of pipelines. # Filter pipeline ids to only include those that start with pipeline_path. | 2.152242 | 2 |
MoveSim/code/models/losses.py | tobinsouth/privacy-preserving-synthetic-mobility-data | 0 | 10140 | <gh_stars>0
# coding: utf-8
import numpy as np
import torch
import torch.nn as nn
class distance_loss(nn.Module):
    def __init__(self):
        super(distance_loss, self).__init__()  # nn.Module initialisation is required before registering tensors
with open('../data/raw/Cellular_Baselocation_baidu') as f:
gpss = f.readlines()
self.X = []
self.Y = []
for gps in gpss:
x, y = float(gps.split()[0]), float(gps.split()[1])
self.X.append(x)
self.Y.append(y)
self.X = torch.Tensor(np.array(self.X)).float()
self.Y = torch.Tensor(np.array(self.Y)).float()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
x1 = torch.index_select(self.X, 0, x[:, :-1].view(-1))
x2 = torch.index_select(self.X, 0, x[:, 1:].view(-1))
y1 = torch.index_select(self.Y, 0, x[:, :-1].view(-1))
        y2 = torch.index_select(self.Y, 0, x[:, 1:].view(-1))  # pair with x2: the next location in the sequence
dx = x1 - x2
dy = y1 - y2
loss = dx**2 + dy**2
return loss
class period_loss(nn.Module):
    def __init__(self, time_interval):
        super(period_loss, self).__init__()  # nn.Module initialisation is required before creating submodules
        self.time_interval = time_interval
self.mse = nn.MSELoss()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
loss = 0.
for i in range(0, x.size(1) - self.time_interval):
loss += self.mse(x[:, i], x[:, i + self.time_interval])
return loss
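# Minimal usage sketch (illustrative only; assumes the baidu base-location file read
# in distance_loss.__init__ exists and that x holds integer location ids with shape
# (batch_size, seq_len)):
#
#   dist_criterion = distance_loss()
#   period_criterion = period_loss(time_interval=24)
#   x = torch.randint(0, len(dist_criterion.X), (8, 48))
#   d_loss = dist_criterion(x).mean()      # per-step squared jump distances, averaged
#   p_loss = period_criterion(x.float())   # MSE between points one period apart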
| # coding: utf-8
import numpy as np
import torch
import torch.nn as nn
class distance_loss(nn.Module):
    def __init__(self):
        super(distance_loss, self).__init__()  # nn.Module initialisation is required before registering tensors
with open('../data/raw/Cellular_Baselocation_baidu') as f:
gpss = f.readlines()
self.X = []
self.Y = []
for gps in gpss:
x, y = float(gps.split()[0]), float(gps.split()[1])
self.X.append(x)
self.Y.append(y)
self.X = torch.Tensor(np.array(self.X)).float()
self.Y = torch.Tensor(np.array(self.Y)).float()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
x1 = torch.index_select(self.X, 0, x[:, :-1].view(-1))
x2 = torch.index_select(self.X, 0, x[:, 1:].view(-1))
y1 = torch.index_select(self.Y, 0, x[:, :-1].view(-1))
        y2 = torch.index_select(self.Y, 0, x[:, 1:].view(-1))  # pair with x2: the next location in the sequence
dx = x1 - x2
dy = y1 - y2
loss = dx**2 + dy**2
return loss
class period_loss(nn.Module):
    def __init__(self, time_interval):
        super(period_loss, self).__init__()  # nn.Module initialisation is required before creating submodules
        self.time_interval = time_interval
self.mse = nn.MSELoss()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
loss = 0.
for i in range(0, x.size(1) - self.time_interval):
loss += self.mse(x[:, i], x[:, i + self.time_interval])
return loss | en | 0.664807 | # coding: utf-8 :param x: generated sequence, batch_size * seq_len :return: :param x: generated sequence, batch_size * seq_len :return: | 2.454776 | 2 |
board/game.py | petthauk/chess_ml | 0 | 10141 | import pygame as pg
from pygame.locals import *
import sys
import board.chess_board as board
w = 60 * 8
h = 60 * 8
class Game:
"""
Class to setup and start a game
"""
def __init__(self):
self.b = board.Board(w, h)
def get_board(self):
"""
Returns board
:return: Board-class
"""
return self.b
def run(self):
"""
Where the game is created and launched
:return:
"""
# While loop to show display
while True:
for event in pg.event.get():
# Quitting game
if event.type == QUIT:
pg.quit()
sys.exit()
# If game can continue
if self.b.get_status() == "-":
# Pressing mouse
if event.type == MOUSEBUTTONDOWN:
pos = pg.mouse.get_pos()
for r in self.b.get_board_array():
for square in r:
if square.get_visual().collidepoint(pos):
square.click()
self.b.update_board()
if __name__ == "__main__":
# Launch main-function if running this script
game = Game()
game.run()
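# Assumptions about board.chess_board.Board used above (inferred from this file only):
# Board(w, h) creates the pygame window, get_status() returns "-" while the game is in
# progress, get_board_array() yields rows of squares that expose get_visual() (a pygame
# Rect) and click(), and update_board() redraws the display after each move.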
| import pygame as pg
from pygame.locals import *
import sys
import board.chess_board as board
w = 60 * 8
h = 60 * 8
class Game:
"""
Class to setup and start a game
"""
def __init__(self):
self.b = board.Board(w, h)
def get_board(self):
"""
Returns board
:return: Board-class
"""
return self.b
def run(self):
"""
Where the game is created and launched
:return:
"""
# While loop to show display
while True:
for event in pg.event.get():
# Quitting game
if event.type == QUIT:
pg.quit()
sys.exit()
# If game can continue
if self.b.get_status() == "-":
# Pressing mouse
if event.type == MOUSEBUTTONDOWN:
pos = pg.mouse.get_pos()
for r in self.b.get_board_array():
for square in r:
if square.get_visual().collidepoint(pos):
square.click()
self.b.update_board()
if __name__ == "__main__":
# Launch main-function if running this script
game = Game()
game.run()
| en | 0.85728 | Class to setup and start a game Returns board :return: Board-class Where the game is created and launched :return: # While loop to show display # Quitting game # If game can continue # Pressing mouse # Launch main-function if running this script | 3.537511 | 4 |
pix2pix/Dataset_util.py | Atharva-Phatak/Season-Tranfer | 2 | 10142 | #importing libraries
import torch
import torch.utils.data as data
import os
import random
from PIL import Image
class CreateDataset(data.Dataset):
def __init__(self , imagedir , subfolder='train' , direction = 'AtoB' , flip = False , transform = None ,resize_scale = None , crop_size = None):
super(CreateDataset , self).__init__()
self.images_path = os.path.join(imagedir , subfolder)
self.image_filenames = [name for name in sorted(os.listdir(self.images_path))]
self.flip = flip
self.transform = transform
self.resize_scale = resize_scale
self.crop_size = crop_size
self.direction = direction
def __getitem__(self , index):
image_path = os.path.join(self.images_path , self.image_filenames[index])
img = Image.open(image_path)
if self.direction == 'AtoB':
inp_img = img.crop((0,0,img.width//2 , img.height))
target_img = img.crop((img.width//2 , 0 , img.width , img.height))
elif self.direction == 'BtoA':
inp_img = img.crop((img.width//2 , 0 , img.width , img.height))
target_img = img.crop((0,0,img.width//2 , img.height))
if self.resize_scale:
inp_img = inp_img.resize((self.resize_scale , self.resize_scale) , Image.BILINEAR)
target_img = target_img.resize((self.resize_scale , self.resize_scale) , Image.BILINEAR)
if self.crop_size:
x = random.randint(0 , self.resize_scale - self.crop_size + 1)
y = random.randint(0 , self.resize_scale - self.crop_size + 1)
inp_img = inp_img.crop((x , y , x + self.crop_size , y + self.crop_size))
target_img = target_img.crop((x , y , x + self.crop_size , y + self.crop_size))
if self.flip:
if random.random() < 0.5:
inp_img = inp_img.transpose(Image.FLIP_LEFT_RIGHT)
target_img = target_img.transpose(Image.FLIP_LEFT_RIGHT)
if self.transform is not None:
inp_img = self.transform(inp_img)
target_img = self.transform(target_img)
return inp_img , target_img
def __len__(self):
return len(self.image_filenames)
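# Usage sketch (illustrative; 'datasets/facades' is a placeholder path):
#
#   import torchvision.transforms as transforms
#   from torch.utils.data import DataLoader
#
#   transform = transforms.Compose([
#       transforms.ToTensor(),
#       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
#   ])
#   train_data = CreateDataset('datasets/facades', subfolder='train', direction='AtoB',
#                              flip=True, transform=transform,
#                              resize_scale=286, crop_size=256)
#   train_loader = DataLoader(train_data, batch_size=4, shuffle=True)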
| #importing libraries
import torch
import torch.utils.data as data
import os
import random
from PIL import Image
class CreateDataset(data.Dataset):
def __init__(self , imagedir , subfolder='train' , direction = 'AtoB' , flip = False , transform = None ,resize_scale = None , crop_size = None):
super(CreateDataset , self).__init__()
self.images_path = os.path.join(imagedir , subfolder)
self.image_filenames = [name for name in sorted(os.listdir(self.images_path))]
self.flip = flip
self.transform = transform
self.resize_scale = resize_scale
self.crop_size = crop_size
self.direction = direction
def __getitem__(self , index):
image_path = os.path.join(self.images_path , self.image_filenames[index])
img = Image.open(image_path)
if self.direction == 'AtoB':
inp_img = img.crop((0,0,img.width//2 , img.height))
target_img = img.crop((img.width//2 , 0 , img.width , img.height))
elif self.direction == 'BtoA':
inp_img = img.crop((img.width//2 , 0 , img.width , img.height))
target_img = img.crop((0,0,img.width//2 , img.height))
if self.resize_scale:
inp_img = inp_img.resize((self.resize_scale , self.resize_scale) , Image.BILINEAR)
target_img = target_img.resize((self.resize_scale , self.resize_scale) , Image.BILINEAR)
if self.crop_size:
x = random.randint(0 , self.resize_scale - self.crop_size + 1)
y = random.randint(0 , self.resize_scale - self.crop_size + 1)
inp_img = inp_img.crop((x , y , x + self.crop_size , y + self.crop_size))
target_img = target_img.crop((x , y , x + self.crop_size , y + self.crop_size))
if self.flip:
if random.random() < 0.5:
inp_img = inp_img.transpose(Image.FLIP_LEFT_RIGHT)
target_img = target_img.transpose(Image.FLIP_LEFT_RIGHT)
if self.transform is not None:
inp_img = self.transform(inp_img)
target_img = self.transform(target_img)
return inp_img , target_img
def __len__(self):
return len(self.image_filenames)
| en | 0.501408 | #importing libraries | 2.547027 | 3 |
crslab/system/C2CRS_System.py | Zyh716/WSDM2022-C2CRS | 4 | 10143 | <filename>crslab/system/C2CRS_System.py
# @Time : 2022/1/1
# @Author : <NAME>
# @email : <EMAIL>
import os
from math import floor
import torch
from loguru import logger
from typing import List, Dict
from copy import copy, deepcopy
import pickle
import numpy
import ipdb
from crslab.config import PRETRAIN_PATH, SAVE_PATH
from crslab.data import get_dataloader, dataset_language_map
from crslab.evaluator.metrics.base import AverageMetric
from crslab.evaluator.metrics.gen import PPLMetric
from crslab.system.base import BaseSystem
from crslab.system.utils.functions import ind2txt, ind2txt2
import random
from tqdm import tqdm
class C2CRS_System(BaseSystem):
"""This is the system for TGReDial model"""
def __init__(self, opt, train_dataloader, valid_dataloader, test_dataloader, vocab, side_data, restore_system=False,
interact=False, debug=False):
"""
Args:
opt (dict): Indicating the hyper parameters.
train_dataloader (BaseDataLoader): Indicating the train dataloader of corresponding dataset.
valid_dataloader (BaseDataLoader): Indicating the valid dataloader of corresponding dataset.
test_dataloader (BaseDataLoader): Indicating the test dataloader of corresponding dataset.
vocab (dict): Indicating the vocabulary.
side_data (dict): Indicating the side data.
restore_system (bool, optional): Indicating if we store system after training. Defaults to False.
interact (bool, optional): Indicating if we interact with system. Defaults to False.
debug (bool, optional): Indicating if we train in debug mode. Defaults to False.
"""
super(C2CRS_System, self).__init__(opt, train_dataloader, valid_dataloader,
test_dataloader, vocab, side_data, restore_system, interact, debug)
self._init_token_attribute(vocab)
self._init_rec_attribute(side_data, vocab)
self._init_conv_attribute(side_data, vocab)
self._init_pretrain_attribute(side_data, vocab)
self.language = dataset_language_map[self.opt['dataset']]
self.pertrain_save_epoches = [epoch-1 for epoch in eval(opt['pertrain_save_epoches'])]
def _init_token_attribute(self, vocab):
self.ind2tok = vocab['rec']['ind2tok']
self.end_token_idx = vocab['rec']['end']
self.unk_token_idx = vocab['rec']['unk']
self.unk = self.ind2tok.get(self.unk_token_idx, '<unk>')
def _init_rec_attribute(self, side_data, vocab):
self.item_ids = side_data['rec']['item_entity_ids']
self.id2entity = side_data['rec']['entity_kg']['id2entity']
self.dpath = side_data['rec']['dpath']
self.rec_ind2tok = vocab['rec']['ind2tok']
self.rec_optim_opt = deepcopy(self.opt['rec'])
self.rec_batch_size = self.opt['rec_batch_size'] if self.opt['rec_batch_size'] != -1 else self.rec_optim_opt['batch_size']
self.rec_epoch = self.opt['rec_epoch'] if self.opt['rec_epoch'] != -1 else self.rec_optim_opt['epoch']
def _init_conv_attribute(self, side_data, vocab):
        self.conv_optim_opt = self.opt['conv']
        # set batch size and epoch first: they are needed below to size the lr scheduler
        self.conv_batch_size = self.opt['conv_batch_size'] if self.opt['conv_batch_size'] != -1 else self.conv_optim_opt['batch_size']
        self.conv_epoch = self.opt['conv_epoch'] if self.opt['conv_epoch'] != -1 else self.conv_optim_opt['epoch']
        if self.conv_optim_opt.get('lr_scheduler', None) and 'Transformers' in self.conv_optim_opt['lr_scheduler']['name']:
            batch_num = 0
            for _ in self.train_dataloader['rec'].get_conv_data(batch_size=self.conv_batch_size, shuffle=False):
                batch_num += 1
            conv_training_steps = self.conv_epoch * floor(batch_num / self.conv_optim_opt.get('update_freq', 1))
            self.conv_optim_opt['lr_scheduler']['training_steps'] = conv_training_steps
def _init_pretrain_attribute(self, side_data, vocab):
if 'pretrain' in self.opt:
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
self.pretrain_epoch = self.opt['pretrain_epoch'] if self.opt['pretrain_epoch'] != -1 else self.pretrain_optim_opt['pretrain_epoch']
self.pretrain_batch_size = self.opt['pretrain_batch_size'] if self.opt['pretrain_batch_size'] != -1 else self.pretrain_optim_opt['batch_size']
def rec_evaluate(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
item_label = item_label.tolist()
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
self.evaluator.rec_evaluate(rec_rank, item)
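    # Note on rec_evaluate: scores are first restricted to the columns listed in
    # self.item_ids (entity ids that are items), so the ground-truth entity id is
    # remapped to its index within item_ids before the ranking metrics are computed.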
def rec_evaluate_and_return_score(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
_, fully_rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
fully_rec_ranks = fully_rec_ranks.tolist()
item_label = item_label.tolist()
scores = []
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
scores.append(self.evaluator.rec_evaluate_and_return_score(rec_rank, fully_rec_ranks, item, self.opt['score_type']))
return scores, rec_ranks
def conv_evaluate(self, prediction, response):
"""
Args:
prediction: torch.LongTensor, shape=(bs, response_truncate-1)
response: torch.LongTensor, shape=(bs, response_truncate)
the first token in response is <|endoftext|>, it is not in prediction
"""
prediction = prediction.tolist()
response = response.tolist()
for p, r in zip(prediction, response):
p_str, p_ListStr = ind2txt2(p, self.ind2tok, self.end_token_idx)
r_str, r_ListStr = ind2txt2(r[1:], self.ind2tok, self.end_token_idx)
self.evaluator.gen_evaluate(p_str, [r_str], p_ListStr, [r_ListStr])
def step(self, batch, stage, mode, epoch=-1):
batch, unbatchify_batch = batch
self.step_default(batch, stage, mode, epoch)
def step_default(self, batch, stage, mode, epoch=-1):
"""
stage: ['policy', 'rec', 'conv']
mode: ['train', 'val', 'test]
"""
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device)
if stage == 'pretrain_rec':
loss = self.rec_model.pretrain(batch, mode, epoch)
if loss:
if mode == "train":
self.backward(loss)
loss = loss.item()
self.evaluator.optim_metrics.add("loss", AverageMetric(loss))
elif stage == 'policy':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
policy_loss, policy_predict = self.rec_model.guide(batch, mode)
if mode == "train" and policy_loss is not None:
self.backward(policy_loss)
else:
self.policy_evaluate(policy_predict, batch[-1])
if isinstance(policy_loss, torch.Tensor):
policy_loss = policy_loss.item()
self.evaluator.optim_metrics.add("policy_loss",
AverageMetric(policy_loss))
elif stage == 'rec':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
rec_loss, rec_predict = self.rec_model.recommend(batch, mode)
if mode == "train":
self.backward(rec_loss)
else:
self.rec_evaluate(rec_predict, batch['movie_to_rec'])
rec_loss = rec_loss.item()
self.evaluator.optim_metrics.add("rec_loss",
AverageMetric(rec_loss))
elif stage == "conv":
if mode != "test":
gen_loss, pred = self.rec_model.converse(batch, mode)
if mode == 'train':
self.backward(gen_loss)
else:
self.conv_evaluate(pred, batch['response'])
gen_loss = gen_loss.item()
self.evaluator.optim_metrics.add("gen_loss",
AverageMetric(gen_loss))
self.evaluator.gen_metrics.add("ppl", PPLMetric(gen_loss))
else:
# generate response in rec_model.step
_, pred = self.rec_model.converse(batch, mode)
response = batch['response']
self.conv_evaluate(pred, response)
self.record_conv_gt_pred(response, pred, epoch)
self.record_conv_gt(response, pred)
self.record_conv_pred(response, pred, epoch)
        else:
            raise ValueError(f'Unknown stage: {stage}')
def record_conv_gt_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_gt_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_gt(self, batch_response, batch_pred):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer('record_conv_gt', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def get_file_writer(self, file_keywords: str, file_type: str):
file_name = file_keywords + file_type
file_path = os.path.join(self.opt['LOG_PATH'], file_name)
if os.path.exists(file_path):
file_writer = open(file_path, 'a', encoding='utf-8')
else:
file_writer = open(file_path, 'w', encoding='utf-8')
return file_writer
def convert_tensor_ids_to_tokens(self, token_ids):
tokens = []
token_ids = token_ids.tolist() # List[int]
if not token_ids:
return tokens
for token_id in token_ids:
if token_id == self.end_token_idx:
return tokens
tokens.append(self.ind2tok.get(token_id, self.unk))
return tokens
def is_early_stop(self, valid_metric, epoch):
early_stop_result = self.early_stop(valid_metric)
# logger.info(f'valid_metric = {valid_metric}, early_stop_result = {early_stop_result}, stop_mode = {self.stop_mode}')
if early_stop_result == 'Stop':
return True
elif early_stop_result == 'New Model':
self.save_model(epoch=epoch, valid_metric=valid_metric)
elif early_stop_result == 'Patience':
pass
return False
def fit(self):
self.extend_datasets()
self.pre_training()
self.train_recommender_default()
self.train_conversation_using_rec_model()
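    # Training schedule summary (derived from the methods above): fit() first extends
    # the train/valid/test datasets with availability information, then runs the
    # recommender pre-training stage for pretrain_epoch epochs (checkpointing at the
    # epochs listed in pertrain_save_epoches), then trains the recommender for up to
    # rec_epoch epochs with early stopping on hit@1 + hit@50, and finally trains the
    # conversation module via train_conversation_using_rec_model().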
def extend_datasets(self):
extend_train_dataset = self.train_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.train_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.valid_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.valid_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.test_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.test_dataloader['rec'].replace_dataset(extend_train_dataset)
def pre_training(self):
self.init_pretrain_optim()
self.pretrain_recommender_convergence()
def init_pretrain_optim(self):
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.pretrain_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.pretrain_optim_opt), params)
def pretrain_recommender_convergence(self):
for epoch in range(self.pretrain_epoch):
self.pretrain_recommender_one_epoch(epoch)
valid_metric = self.valid_pretrain_recommender(epoch)
if epoch in self.pertrain_save_epoches:
self.save_model(post_fix='epoch_{}'.format(epoch), epoch=epoch, valid_metric=valid_metric)
if self.is_early_stop(valid_metric, epoch):
break
def pretrain_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Pretrain | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=True):
self.step(batch, stage='pretrain_rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_pretrain_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=False):
self.step(batch, stage='pretrain_rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.optim_metrics['loss']
return metric
def train_recommender_default(self):
self.init_rec_optim()
self.train_recommender_convergence()
# test
if self.rec_epoch != 0:
self.restore_model_from_save()
self.test_recommender('final')
def init_rec_optim(self):
self.rec_optim_opt = deepcopy(self.opt['rec'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.rec_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.rec_optim_opt), params)
    def train_recommender_convergence(self) -> None:
for epoch in range(self.rec_epoch):
self.train_recommender_one_epoch(epoch)
valid_metric = self.valid_recommender(epoch)
if self.is_early_stop(valid_metric, epoch):
break
def train_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Train | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=True):
self.step(batch, stage='rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.rec_metrics['hit@1'] + self.evaluator.rec_metrics['hit@50']
return metric
def test_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Test ]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='test', epoch=epoch)
self.evaluator.report()
def train_conversation_using_rec_model(self):
self.init_optim(deepcopy(self.conv_optim_opt), self.rec_model.parameters())
if self.opt['freeze_parameters']:
self.rec_model.freeze_parameters()
self.train_conversation_convergence()
if self.conv_epoch != 0:
self.restore_model_from_save()
self.test_conversation('final')
def train_conversation_convergence(self):
for epoch in range(self.conv_epoch):
self.train_conversation_one_epoch(epoch)
valid_metric = self.valid_conversation(epoch)
self.test_conversation('final')
if self.is_early_stop(valid_metric, epoch):
break
def train_conversation_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Train | epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=True):
self.step(batch, stage='conv', mode='train', epoch=epoch)
self.evaluator.report()
def valid_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Valid | epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='val', epoch=epoch)
self.evaluator.report()
valid_metric = self.get_sum_dist_metric()
# early stop
return valid_metric
def get_sum_dist_metric(self):
sum_dist = 0
for k in range(1, 5):
try:
sum_dist += self.evaluator.gen_metrics[f'dist@{k}']
            except Exception:
                # dist@k may be absent if no responses were generated yet
                pass
return sum_dist
def test_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Test]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='test', epoch=epoch)
self.evaluator.report()
def interact(self):
pass | <filename>crslab/system/C2CRS_System.py
# @Time : 2022/1/1
# @Author : <NAME>
# @email : <EMAIL>
import os
from math import floor
import torch
from loguru import logger
from typing import List, Dict
from copy import copy, deepcopy
import pickle
import os
import numpy
import ipdb
from crslab.config import PRETRAIN_PATH, SAVE_PATH
from crslab.data import get_dataloader, dataset_language_map
from crslab.evaluator.metrics.base import AverageMetric
from crslab.evaluator.metrics.gen import PPLMetric
from crslab.system.base import BaseSystem
from crslab.system.utils.functions import ind2txt, ind2txt2
import random
from tqdm import tqdm
class C2CRS_System(BaseSystem):
"""This is the system for TGReDial model"""
def __init__(self, opt, train_dataloader, valid_dataloader, test_dataloader, vocab, side_data, restore_system=False,
interact=False, debug=False):
"""
Args:
opt (dict): Indicating the hyper parameters.
train_dataloader (BaseDataLoader): Indicating the train dataloader of corresponding dataset.
valid_dataloader (BaseDataLoader): Indicating the valid dataloader of corresponding dataset.
test_dataloader (BaseDataLoader): Indicating the test dataloader of corresponding dataset.
vocab (dict): Indicating the vocabulary.
side_data (dict): Indicating the side data.
restore_system (bool, optional): Indicating if we store system after training. Defaults to False.
interact (bool, optional): Indicating if we interact with system. Defaults to False.
debug (bool, optional): Indicating if we train in debug mode. Defaults to False.
"""
super(C2CRS_System, self).__init__(opt, train_dataloader, valid_dataloader,
test_dataloader, vocab, side_data, restore_system, interact, debug)
self._init_token_attribute(vocab)
self._init_rec_attribute(side_data, vocab)
self._init_conv_attribute(side_data, vocab)
self._init_pretrain_attribute(side_data, vocab)
self.language = dataset_language_map[self.opt['dataset']]
self.pertrain_save_epoches = [epoch-1 for epoch in eval(opt['pertrain_save_epoches'])]
def _init_token_attribute(self, vocab):
self.ind2tok = vocab['rec']['ind2tok']
self.end_token_idx = vocab['rec']['end']
self.unk_token_idx = vocab['rec']['unk']
self.unk = self.ind2tok.get(self.unk_token_idx, '<unk>')
def _init_rec_attribute(self, side_data, vocab):
self.item_ids = side_data['rec']['item_entity_ids']
self.id2entity = side_data['rec']['entity_kg']['id2entity']
self.dpath = side_data['rec']['dpath']
self.rec_ind2tok = vocab['rec']['ind2tok']
self.rec_optim_opt = deepcopy(self.opt['rec'])
self.rec_batch_size = self.opt['rec_batch_size'] if self.opt['rec_batch_size'] != -1 else self.rec_optim_opt['batch_size']
self.rec_epoch = self.opt['rec_epoch'] if self.opt['rec_epoch'] != -1 else self.rec_optim_opt['epoch']
def _init_conv_attribute(self, side_data, vocab):
self.conv_optim_opt = self.opt['conv']
if self.conv_optim_opt.get('lr_scheduler', None) and 'Transformers' in self.conv_optim_opt['lr_scheduler']['name']:
batch_num = 0
for _ in self.train_dataloader['rec'].get_conv_data(batch_size=self.conv_batch_size, shuffle=False):
batch_num += 1
conv_training_steps = self.conv_epoch * floor(batch_num / self.conv_optim_opt.get('update_freq', 1))
self.conv_optim_opt['lr_scheduler']['training_steps'] = conv_training_steps
self.conv_batch_size = self.opt['conv_batch_size'] if self.opt['conv_batch_size'] != -1 else self.conv_optim_opt['batch_size']
self.conv_epoch = self.opt['conv_epoch'] if self.opt['conv_epoch'] != -1 else self.conv_optim_opt['epoch']
def _init_pretrain_attribute(self, side_data, vocab):
if 'pretrain' in self.opt:
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
self.pretrain_epoch = self.opt['pretrain_epoch'] if self.opt['pretrain_epoch'] != -1 else self.pretrain_optim_opt['pretrain_epoch']
self.pretrain_batch_size = self.opt['pretrain_batch_size'] if self.opt['pretrain_batch_size'] != -1 else self.pretrain_optim_opt['batch_size']
def rec_evaluate(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
item_label = item_label.tolist()
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
self.evaluator.rec_evaluate(rec_rank, item)
def rec_evaluate_and_return_score(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
_, fully_rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
fully_rec_ranks = fully_rec_ranks.tolist()
item_label = item_label.tolist()
scores = []
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
scores.append(self.evaluator.rec_evaluate_and_return_score(rec_rank, fully_rec_ranks, item, self.opt['score_type']))
return scores, rec_ranks
def conv_evaluate(self, prediction, response):
"""
Args:
prediction: torch.LongTensor, shape=(bs, response_truncate-1)
response: torch.LongTensor, shape=(bs, response_truncate)
the first token in response is <|endoftext|>, it is not in prediction
"""
prediction = prediction.tolist()
response = response.tolist()
for p, r in zip(prediction, response):
p_str, p_ListStr = ind2txt2(p, self.ind2tok, self.end_token_idx)
r_str, r_ListStr = ind2txt2(r[1:], self.ind2tok, self.end_token_idx)
self.evaluator.gen_evaluate(p_str, [r_str], p_ListStr, [r_ListStr])
def step(self, batch, stage, mode, epoch=-1):
batch, unbatchify_batch = batch
self.step_default(batch, stage, mode, epoch)
def step_default(self, batch, stage, mode, epoch=-1):
"""
stage: ['policy', 'rec', 'conv']
        mode: ['train', 'val', 'test']
"""
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device)
if stage == 'pretrain_rec':
loss = self.rec_model.pretrain(batch, mode, epoch)
if loss:
if mode == "train":
self.backward(loss)
loss = loss.item()
self.evaluator.optim_metrics.add("loss", AverageMetric(loss))
elif stage == 'policy':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
policy_loss, policy_predict = self.rec_model.guide(batch, mode)
if mode == "train" and policy_loss is not None:
self.backward(policy_loss)
else:
self.policy_evaluate(policy_predict, batch[-1])
if isinstance(policy_loss, torch.Tensor):
policy_loss = policy_loss.item()
self.evaluator.optim_metrics.add("policy_loss",
AverageMetric(policy_loss))
elif stage == 'rec':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
rec_loss, rec_predict = self.rec_model.recommend(batch, mode)
if mode == "train":
self.backward(rec_loss)
else:
self.rec_evaluate(rec_predict, batch['movie_to_rec'])
rec_loss = rec_loss.item()
self.evaluator.optim_metrics.add("rec_loss",
AverageMetric(rec_loss))
elif stage == "conv":
if mode != "test":
gen_loss, pred = self.rec_model.converse(batch, mode)
if mode == 'train':
self.backward(gen_loss)
else:
self.conv_evaluate(pred, batch['response'])
gen_loss = gen_loss.item()
self.evaluator.optim_metrics.add("gen_loss",
AverageMetric(gen_loss))
self.evaluator.gen_metrics.add("ppl", PPLMetric(gen_loss))
else:
# generate response in rec_model.step
_, pred = self.rec_model.converse(batch, mode)
response = batch['response']
self.conv_evaluate(pred, response)
self.record_conv_gt_pred(response, pred, epoch)
self.record_conv_gt(response, pred)
self.record_conv_pred(response, pred, epoch)
        else:
            raise ValueError(f'Unknown stage: {stage}')
def record_conv_gt_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_gt_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_gt(self, batch_response, batch_pred):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer('record_conv_gt', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def get_file_writer(self, file_keywords: str, file_type: str):
file_name = file_keywords + file_type
file_path = os.path.join(self.opt['LOG_PATH'], file_name)
if os.path.exists(file_path):
file_writer = open(file_path, 'a', encoding='utf-8')
else:
file_writer = open(file_path, 'w', encoding='utf-8')
return file_writer
def convert_tensor_ids_to_tokens(self, token_ids):
tokens = []
token_ids = token_ids.tolist() # List[int]
if not token_ids:
return tokens
for token_id in token_ids:
if token_id == self.end_token_idx:
return tokens
tokens.append(self.ind2tok.get(token_id, self.unk))
return tokens
def is_early_stop(self, valid_metric, epoch):
early_stop_result = self.early_stop(valid_metric)
# logger.info(f'valid_metric = {valid_metric}, early_stop_result = {early_stop_result}, stop_mode = {self.stop_mode}')
if early_stop_result == 'Stop':
return True
elif early_stop_result == 'New Model':
self.save_model(epoch=epoch, valid_metric=valid_metric)
elif early_stop_result == 'Patience':
pass
return False
def fit(self):
self.extend_datasets()
self.pre_training()
self.train_recommender_default()
self.train_conversation_using_rec_model()
def extend_datasets(self):
extend_train_dataset = self.train_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.train_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.valid_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.valid_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.test_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.test_dataloader['rec'].replace_dataset(extend_train_dataset)
def pre_training(self):
self.init_pretrain_optim()
self.pretrain_recommender_convergence()
def init_pretrain_optim(self):
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.pretrain_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.pretrain_optim_opt), params)
def pretrain_recommender_convergence(self):
for epoch in range(self.pretrain_epoch):
self.pretrain_recommender_one_epoch(epoch)
valid_metric = self.valid_pretrain_recommender(epoch)
if epoch in self.pertrain_save_epoches:
self.save_model(post_fix='epoch_{}'.format(epoch), epoch=epoch, valid_metric=valid_metric)
if self.is_early_stop(valid_metric, epoch):
break
def pretrain_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Pretrain | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=True):
self.step(batch, stage='pretrain_rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_pretrain_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=False):
self.step(batch, stage='pretrain_rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.optim_metrics['loss']
return metric
def train_recommender_default(self):
self.init_rec_optim()
self.train_recommender_convergence()
# test
if self.rec_epoch != 0:
self.restore_model_from_save()
self.test_recommender('final')
def init_rec_optim(self):
self.rec_optim_opt = deepcopy(self.opt['rec'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.rec_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.rec_optim_opt), params)
    def train_recommender_convergence(self) -> None:
for epoch in range(self.rec_epoch):
self.train_recommender_one_epoch(epoch)
valid_metric = self.valid_recommender(epoch)
if self.is_early_stop(valid_metric, epoch):
break
def train_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Train | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=True):
self.step(batch, stage='rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.rec_metrics['hit@1'] + self.evaluator.rec_metrics['hit@50']
return metric
def test_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Test ]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='test', epoch=epoch)
self.evaluator.report()
def train_conversation_using_rec_model(self):
self.init_optim(deepcopy(self.conv_optim_opt), self.rec_model.parameters())
if self.opt['freeze_parameters']:
self.rec_model.freeze_parameters()
self.train_conversation_convergence()
if self.conv_epoch != 0:
self.restore_model_from_save()
self.test_conversation('final')
def train_conversation_convergence(self):
for epoch in range(self.conv_epoch):
self.train_conversation_one_epoch(epoch)
valid_metric = self.valid_conversation(epoch)
self.test_conversation('final')
if self.is_early_stop(valid_metric, epoch):
break
def train_conversation_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Train | epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=True):
self.step(batch, stage='conv', mode='train', epoch=epoch)
self.evaluator.report()
def valid_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Valid | epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='val', epoch=epoch)
self.evaluator.report()
valid_metric = self.get_sum_dist_metric()
# early stop
return valid_metric
def get_sum_dist_metric(self):
sum_dist = 0
for k in range(1, 5):
try:
sum_dist += self.evaluator.gen_metrics[f'dist@{k}']
            except Exception:
                # dist@k may be absent if no responses were generated yet
                pass
return sum_dist
def test_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Test]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='test', epoch=epoch)
self.evaluator.report()
def interact(self):
pass | en | 0.624887 | # @Time : 2022/1/1 # @Author : <NAME> # @email : <EMAIL> This is the system for TGReDial model Args: opt (dict): Indicating the hyper parameters. train_dataloader (BaseDataLoader): Indicating the train dataloader of corresponding dataset. valid_dataloader (BaseDataLoader): Indicating the valid dataloader of corresponding dataset. test_dataloader (BaseDataLoader): Indicating the test dataloader of corresponding dataset. vocab (dict): Indicating the vocabulary. side_data (dict): Indicating the side data. restore_system (bool, optional): Indicating if we store system after training. Defaults to False. interact (bool, optional): Indicating if we interact with system. Defaults to False. debug (bool, optional): Indicating if we train in debug mode. Defaults to False. Args: prediction: torch.LongTensor, shape=(bs, response_truncate-1) response: torch.LongTensor, shape=(bs, response_truncate) the first token in response is <|endoftext|>, it is not in prediction stage: ['policy', 'rec', 'conv'] mode: ['train', 'val', 'test] # generate response in rec_model.step # (bs, response_truncate), (bs, response_truncate) # (bs, response_truncate), (bs, response_truncate) # (bs, response_truncate), (bs, response_truncate) # List[int] # logger.info(f'valid_metric = {valid_metric}, early_stop_result = {early_stop_result}, stop_mode = {self.stop_mode}') # get params and training setting # test # get params and training setting # early stop | 2.148383 | 2 |
morepath/__init__.py | hugovk/morepath | 314 | 10144 | # flake8: noqa
"""This is the main public API of Morepath.
Additional public APIs can be imported from the :mod:`morepath.error`
and :mod:`morepath.pdbsupport` modules. For custom directive
implementations that interact with core directives for grouping or
subclassing purposes, or that need to use one of the Morepath
registries, you may need to import from :mod:`morepath.directive`.
The other submodules are considered private. If you find yourself
needing to import from them in application or extension code, please
report an issue about it on the Morepath issue tracker.
"""
from dectate import commit
from .app import App, dispatch_method
from .core import (
excview_tween_factory as EXCVIEW,
poisoned_host_header_protection_tween_factory as HOST_HEADER_PROTECTION,
model_predicate,
name_predicate,
request_method_predicate,
)
from .core import request_method_predicate as LAST_VIEW_PREDICATE
from .view import render_json, render_html, redirect
from .request import Request, Response
from .autosetup import scan, autoscan
from .authentication import Identity, IdentityPolicy, NO_IDENTITY
from .converter import Converter
from .reify import reify
from .run import run
| # flake8: noqa
"""This is the main public API of Morepath.
Additional public APIs can be imported from the :mod:`morepath.error`
and :mod:`morepath.pdbsupport` modules. For custom directive
implementations that interact with core directives for grouping or
subclassing purposes, or that need to use one of the Morepath
registries, you may need to import from :mod:`morepath.directive`.
The other submodules are considered private. If you find yourself
needing to import from them in application or extension code, please
report an issue about it on the Morepath issue tracker.
"""
from dectate import commit
from .app import App, dispatch_method
from .core import (
excview_tween_factory as EXCVIEW,
poisoned_host_header_protection_tween_factory as HOST_HEADER_PROTECTION,
model_predicate,
name_predicate,
request_method_predicate,
)
from .core import request_method_predicate as LAST_VIEW_PREDICATE
from .view import render_json, render_html, redirect
from .request import Request, Response
from .autosetup import scan, autoscan
from .authentication import Identity, IdentityPolicy, NO_IDENTITY
from .converter import Converter
from .reify import reify
from .run import run
| en | 0.830472 | # flake8: noqa This is the main public API of Morepath. Additional public APIs can be imported from the :mod:`morepath.error` and :mod:`morepath.pdbsupport` modules. For custom directive implementations that interact with core directives for grouping or subclassing purposes, or that need to use one of the Morepath registries, you may need to import from :mod:`morepath.directive`. The other submodules are considered private. If you find yourself needing to import from them in application or extension code, please report an issue about it on the Morepath issue tracker. | 1.188717 | 1 |
src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py | GosthMan/AuShadha | 46 | 10145 | EMAIL_AND_FAX_FORM_CONSTANTS = {
} | EMAIL_AND_FAX_FORM_CONSTANTS = {
} | none | 1 | 1.115755 | 1 |
|
marketDataRetrieval.py | amertx/Monte-Carlo-Simulation | 0 | 10146 | #Prediction model using an instance of the Monte Carlo simulation and Brownian Motion equation
#import of libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm
#ticker selection
def mainFunction(tradingSymbol):
data = pd.DataFrame()
data[tradingSymbol] = wb.DataReader(tradingSymbol, data_source='yahoo', start='2019-1-1')['Adj Close']
#percent change of asset price
log_returns = np.log(1+ data.pct_change())
#graph showing growth over time beginning from 2015
data.plot(figsize = (10,6));
plt.show()
#graph of log returns of input ticker
#returns are normally distributed and have a consistent mean
log_returns.plot(figsize = (10,6))
plt.show()
#calculations
averageDailyReturn = log_returns.mean()
variance = log_returns.var()
drift = averageDailyReturn-(variance/2)
standardDeviation = log_returns.std()
    #Brownian Motion equation (geometric Brownian motion)
    #daily return = e^(drift + standardDeviation * Z), where Z is a standard normal draw
#prediction of future stock price based on simulation below using numpy for storing data into array
np.array(drift)
drift.values
standardDeviation.values
#Brownian motion variable correlating to the distance between the mean and the number of standard deviation
norm.ppf(0.95)
#10 x 2 Matrix
x = np.random.rand(10,2)
norm.ppf(x)
#stores distances from the mean value, 0, into the 10 x 2 matrix
Z = norm.ppf(np.random.rand(10,2))
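    #note: x and Z above only demonstrate norm.ppf on a 10 x 2 uniform sample; neither is reused in the simulation below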
#time interval for the stock price forecast
timeInterval = 365
iterations = 5
    #daily return = e^(drift + standardDeviation * Z)
    #5 sets (one per iteration) of 365 random future daily returns for the ticker symbol
dailyReturns = np.exp(drift.values + standardDeviation.values * norm.ppf(np.random.rand(timeInterval,iterations)))
#returns into price points
presentPrice = data.iloc[-1]
priceList = np.zeros_like(dailyReturns)
priceList[0] = presentPrice
#iteration for the time interavl of 365
for t in range(1, timeInterval):
priceList[t] = priceList[t-1] * dailyReturns[t]
    #showcases the 5 simulated paths of the future stock price
plt.figure(figsize =(10,6))
plt.plot(priceList)
plt.show()
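#example invocation (added for illustration; 'AAPL' is an arbitrary ticker symbol)
if __name__ == "__main__":
    mainFunction('AAPL')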
| #Prediction model using an instance of the Monte Carlo simulation and Brownian Motion equation
#import of libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm
#ticker selection
def mainFunction(tradingSymbol):
data = pd.DataFrame()
data[tradingSymbol] = wb.DataReader(tradingSymbol, data_source='yahoo', start='2019-1-1')['Adj Close']
#percent change of asset price
log_returns = np.log(1+ data.pct_change())
#graph showing growth over time beginning from 2015
data.plot(figsize = (10,6));
plt.show()
#graph of log returns of input ticker
#returns are normally distributed and have a consistent mean
log_returns.plot(figsize = (10,6))
plt.show()
#calculations
averageDailyReturn = log_returns.mean()
variance = log_returns.var()
drift = averageDailyReturn-(variance/2)
standardDeviation = log_returns.std()
    #Brownian Motion equation (geometric Brownian motion)
    #daily return = e^(drift + standardDeviation * Z), where Z is a standard normal draw
#prediction of future stock price based on simulation below using numpy for storing data into array
np.array(drift)
drift.values
standardDeviation.values
#Brownian motion variable correlating to the distance between the mean and the number of standard deviation
norm.ppf(0.95)
#10 x 2 Matrix
x = np.random.rand(10,2)
norm.ppf(x)
#stores distances from the mean value, 0, into the 10 x 2 matrix
Z = norm.ppf(np.random.rand(10,2))
#time interval for the stock price forecast
timeInterval = 365
iterations = 5
    #daily return = e^(drift + standardDeviation * Z)
    #5 sets (one per iteration) of 365 random future daily returns for the ticker symbol
dailyReturns = np.exp(drift.values + standardDeviation.values * norm.ppf(np.random.rand(timeInterval,iterations)))
#returns into price points
presentPrice = data.iloc[-1]
priceList = np.zeros_like(dailyReturns)
priceList[0] = presentPrice
#iteration for the time interavl of 365
for t in range(1, timeInterval):
priceList[t] = priceList[t-1] * dailyReturns[t]
    #showcases the 5 simulated paths of the future stock price
plt.figure(figsize =(10,6))
plt.plot(priceList)
plt.show()
| en | 0.791201 | #Prediction model using an instance of the Monte Carlo simulation and Brownian Motion equation #import of libraries #ticker selection #percent change of asset price #graph showing growth over time beginning from 2015 #graph of log returns of input ticker #returns are normally distributed and have a consistent mean #calculations #Brownian Motion equation #r = drift + standardDeviation * (e^r) #prediction of future stock price based on simulation below using numpy for storing data into array #Brownian motion variable correlating to the distance between the mean and the number of standard deviation #10 x 2 Matrix #stores distances from the mean value, 0, into the 10 x 2 matrix #time interval for the stock price forecast #r = drift + standardDeviation * (e^r) #10 sets of 365 random future stock prices of the ticker symbol #returns into price points #iteration for the time interavl of 365 #showcases 10 paths of the future stock price | 3.385495 | 3 |
HelloDeepSpeed/train_bert_ds.py | mrwyattii/DeepSpeedExamples | 0 | 10147 | <gh_stars>0
"""
Modified version of train_bert.py that adds DeepSpeed
"""
import os
import datetime
import json
import pathlib
import re
import string
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import random
import datasets
import fire
import logging
import loguru
import numpy as np
import pytz
import sh
import torch
import torch.nn as nn
import deepspeed
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.models.roberta import RobertaConfig, RobertaModel
from transformers.models.roberta.modeling_roberta import (
RobertaLMHead,
RobertaPreTrainedModel,
)
def is_rank_0() -> bool:
return int(os.environ.get("RANK", "0")) == 0
######################################################################
####################### Logging Functions ############################
######################################################################
logger = loguru.logger
def log_dist(message: str,
ranks: List[int] = [],
level: int = logging.INFO) -> None:
"""Log messages for specified ranks only"""
my_rank = int(os.environ.get("RANK", "0"))
if my_rank in ranks:
if level == logging.INFO:
logger.info(f'[Rank {my_rank}] {message}')
if level == logging.ERROR:
logger.error(f'[Rank {my_rank}] {message}')
if level == logging.DEBUG:
logger.debug(f'[Rank {my_rank}] {message}')
######################################################################
############### Dataset Creation Related Functions ###################
######################################################################
TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
def collate_function(batch: List[Tuple[List[int], List[int]]],
pad_token_id: int) -> Dict[str, torch.Tensor]:
"""Collect a list of masked token indices, and labels, and
batch them, padding to max length in the batch.
"""
max_length = max(len(token_ids) for token_ids, _ in batch)
padded_token_ids = [
token_ids +
[pad_token_id for _ in range(0, max_length - len(token_ids))]
for token_ids, _ in batch
]
padded_labels = [
labels + [pad_token_id for _ in range(0, max_length - len(labels))]
for _, labels in batch
]
src_tokens = torch.LongTensor(padded_token_ids)
tgt_tokens = torch.LongTensor(padded_labels)
attention_mask = src_tokens.ne(pad_token_id).type_as(src_tokens)
return {
"src_tokens": src_tokens,
"tgt_tokens": tgt_tokens,
"attention_mask": attention_mask,
}
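# --- Illustrative helper (added; not part of the original script) -----------
# Minimal sketch of how collate_function pads a ragged batch. The token ids
# below are made up; pad_token_id=1 mirrors the roberta-base pad id.
def _demo_collate_function() -> None:
    batch = [([0, 5, 7, 2], [1, 1, 7, 1]),
             ([0, 9, 2], [1, 9, 1])]
    out = collate_function(batch, pad_token_id=1)
    # every tensor is padded to the longest example in the batch
    assert out["src_tokens"].shape == (2, 4)
    # padded positions are zeroed out of the attention mask
    assert out["attention_mask"][1, -1].item() == 0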
def masking_function(
text: str,
tokenizer: TokenizerType,
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
max_length: int,
) -> Tuple[List[int], List[int]]:
"""Given a text string, randomly mask wordpieces for Bert MLM
training.
Args:
text (str):
The input text
tokenizer (TokenizerType):
The tokenizer for tokenization
mask_prob (float):
What fraction of tokens to mask
random_replace_prob (float):
Of the masked tokens, how many should be replaced with
random tokens (improves performance)
unmask_replace_prob (float):
Of the masked tokens, how many should be replaced with
the original token (improves performance)
max_length (int):
The maximum sequence length to consider. Note that for
Bert style models, this is a function of the number of
positional embeddings you learn
Returns:
Tuple[List[int], List[int]]:
The masked token ids (based on the tokenizer passed),
and the output labels (padded with `tokenizer.pad_token_id`)
"""
# Note: By default, encode does add the BOS and EOS token
# Disabling that behaviour to make this more clear
tokenized_ids = ([tokenizer.bos_token_id] +
tokenizer.encode(text,
add_special_tokens=False,
truncation=True,
max_length=max_length - 2) +
[tokenizer.eos_token_id])
seq_len = len(tokenized_ids)
tokenized_ids = np.array(tokenized_ids)
subword_mask = np.full(len(tokenized_ids), False)
# Masking the BOS and EOS token leads to slightly worse performance
low = 1
high = len(subword_mask) - 1
mask_choices = np.arange(low, high)
num_subwords_to_mask = max(
int((mask_prob * (high - low)) + np.random.rand()), 1)
subword_mask[np.random.choice(mask_choices,
num_subwords_to_mask,
replace=False)] = True
# Create the labels first
labels = np.full(seq_len, tokenizer.pad_token_id)
labels[subword_mask] = tokenized_ids[subword_mask]
tokenized_ids[subword_mask] = tokenizer.mask_token_id
# Now of the masked tokens, choose how many to replace with random and how many to unmask
rand_or_unmask_prob = random_replace_prob + unmask_replace_prob
if rand_or_unmask_prob > 0:
rand_or_unmask = subword_mask & (np.random.rand(len(tokenized_ids)) <
rand_or_unmask_prob)
if random_replace_prob == 0:
unmask = rand_or_unmask
rand_mask = None
elif unmask_replace_prob == 0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = unmask_replace_prob / rand_or_unmask_prob
decision = np.random.rand(len(tokenized_ids)) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
if unmask is not None:
tokenized_ids[unmask] = labels[unmask]
if rand_mask is not None:
weights = np.ones(tokenizer.vocab_size)
weights[tokenizer.all_special_ids] = 0
probs = weights / weights.sum()
num_rand = rand_mask.sum()
tokenized_ids[rand_mask] = np.random.choice(tokenizer.vocab_size,
num_rand,
p=probs)
return tokenized_ids.tolist(), labels.tolist()
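# --- Illustrative helper (added; not part of the original script) -----------
# Shows how masking_function is typically called; the sentence is arbitrary
# and "roberta-base" matches the script's default tokenizer.
def _demo_masking_function() -> Tuple[List[int], List[int]]:
    tokenizer = AutoTokenizer.from_pretrained("roberta-base")
    return masking_function(
        "DeepSpeed makes large model training easier.",
        tokenizer=tokenizer,
        mask_prob=0.15,
        random_replace_prob=0.1,
        unmask_replace_prob=0.1,
        max_length=32,
    )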
class WikiTextMLMDataset(Dataset):
"""A [Map style dataset](https://pytorch.org/docs/stable/data.html)
for iterating over the wikitext dataset. Note that this assumes
the dataset can fit in memory. For larger datasets
you'd want to shard them and use an iterable dataset (eg: see
[Infinibatch](https://github.com/microsoft/infinibatch))
Args:
Dataset (datasets.arrow_dataset.Dataset):
The wikitext dataset
masking_function (Callable[[str], Tuple[List[int], List[int]]])
The masking function. To generate one training instance,
the masking function is applied to the `text` of a dataset
record
"""
def __init__(
self,
dataset: datasets.arrow_dataset.Dataset,
masking_function: Callable[[str], Tuple[List[int], List[int]]],
) -> None:
self.dataset = dataset
self.masking_function = masking_function
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[List[int], List[int]]:
tokens, labels = self.masking_function(self.dataset[idx]["text"])
return (tokens, labels)
T = TypeVar("T")
class InfiniteIterator(object):
def __init__(self, iterable: Iterable[T]) -> None:
self._iterable = iterable
self._iterator = iter(self._iterable)
def __iter__(self):
return self
def __next__(self) -> T:
next_item = None
try:
next_item = next(self._iterator)
except StopIteration:
self._iterator = iter(self._iterable)
next_item = next(self._iterator)
return next_item
def create_data_iterator(
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
batch_size: int,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
) -> InfiniteIterator:
"""Create the dataloader.
Args:
mask_prob (float):
Fraction of tokens to mask
random_replace_prob (float):
Fraction of masked tokens to replace with random token
unmask_replace_prob (float):
Fraction of masked tokens to replace with the actual token
batch_size (int):
The batch size of the generated tensors
max_seq_length (int, optional):
The maximum sequence length for the MLM task. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
Returns:
InfiniteIterator:
The torch DataLoader, wrapped in an InfiniteIterator class, to
be able to continuously generate samples
"""
wikitext_dataset = datasets.load_dataset("wikitext",
"wikitext-2-v1",
split="train")
wikitext_dataset = wikitext_dataset.filter(
lambda record: record["text"] != "").map(
lambda record: {"text": record["text"].rstrip("\n")})
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
masking_function_partial = partial(
masking_function,
tokenizer=tokenizer,
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
max_length=max_seq_length,
)
dataset = WikiTextMLMDataset(wikitext_dataset, masking_function_partial)
collate_fn_partial = partial(collate_function,
pad_token_id=tokenizer.pad_token_id)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=collate_fn_partial)
return InfiniteIterator(dataloader)
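# --- Illustrative helper (added; not part of the original script) -----------
# Pulls a single batch from the infinite iterator to inspect tensor shapes
# (downloads wikitext-2 and the roberta-base tokenizer on first use).
def _demo_data_iterator() -> None:
    data_iter = create_data_iterator(mask_prob=0.15,
                                     random_replace_prob=0.1,
                                     unmask_replace_prob=0.1,
                                     batch_size=2,
                                     max_seq_length=128)
    batch = next(data_iter)
    # keys: src_tokens, tgt_tokens, attention_mask; all shaped (batch, seq_len)
    assert batch["src_tokens"].shape == batch["attention_mask"].shape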
######################################################################
############### Model Creation Related Functions #####################
######################################################################
class RobertaLMHeadWithMaskedPredict(RobertaLMHead):
def __init__(self,
config: RobertaConfig,
embedding_weight: Optional[torch.Tensor] = None) -> None:
super(RobertaLMHeadWithMaskedPredict, self).__init__(config)
if embedding_weight is not None:
self.decoder.weight = embedding_weight
def forward( # pylint: disable=arguments-differ
self,
features: torch.Tensor,
masked_token_indices: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""The current `transformers` library does not provide support
for masked_token_indices. This function provides the support, by
running the final forward pass only for the masked indices. This saves
memory
Args:
features (torch.Tensor):
The features to select from. Shape (batch, seq_len, h_dim)
masked_token_indices (torch.Tensor, optional):
The indices of masked tokens for index select. Defaults to None.
Shape: (num_masked_tokens,)
Returns:
torch.Tensor:
The index selected features. Shape (num_masked_tokens, h_dim)
"""
if masked_token_indices is not None:
features = torch.index_select(
features.view(-1, features.shape[-1]), 0, masked_token_indices)
return super().forward(features)
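# Added note: the memory saving above matters because the LM head projects to
# the full vocabulary. With the script defaults (batch 8, seq_len up to 512,
# vocab 50265, mask_prob 0.15), scoring every position would build a
# (4096, 50265) logits matrix, while index-selecting the masked positions
# first shrinks it to roughly (600, 50265).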
class RobertaMLMModel(RobertaPreTrainedModel):
def __init__(self, config: RobertaConfig, encoder: RobertaModel) -> None:
super().__init__(config)
self.encoder = encoder
self.lm_head = RobertaLMHeadWithMaskedPredict(
config, self.encoder.embeddings.word_embeddings.weight)
self.lm_head.apply(self._init_weights)
def forward(
self,
src_tokens: torch.Tensor,
attention_mask: torch.Tensor,
tgt_tokens: torch.Tensor,
) -> torch.Tensor:
"""The forward pass for the MLM task
Args:
src_tokens (torch.Tensor):
The masked token indices. Shape: (batch, seq_len)
attention_mask (torch.Tensor):
The attention mask, since the batches are padded
to the largest sequence. Shape: (batch, seq_len)
tgt_tokens (torch.Tensor):
The output tokens (padded with `config.pad_token_id`)
Returns:
torch.Tensor:
The MLM loss
"""
# shape: (batch, seq_len, h_dim)
sequence_output, *_ = self.encoder(input_ids=src_tokens,
attention_mask=attention_mask,
return_dict=False)
pad_token_id = self.config.pad_token_id
# (labels have also been padded with pad_token_id)
# filter out all masked labels
# shape: (num_masked_tokens,)
masked_token_indexes = torch.nonzero(
(tgt_tokens != pad_token_id).view(-1)).view(-1)
# shape: (num_masked_tokens, vocab_size)
prediction_scores = self.lm_head(sequence_output, masked_token_indexes)
# shape: (num_masked_tokens,)
target = torch.index_select(tgt_tokens.view(-1), 0,
masked_token_indexes)
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), target)
return masked_lm_loss
def create_model(num_layers: int, num_heads: int, ff_dim: int, h_dim: int,
dropout: float) -> RobertaMLMModel:
"""Create a Bert model with the specified `num_heads`, `ff_dim`,
`h_dim` and `dropout`
Args:
num_layers (int):
The number of layers
num_heads (int):
The number of attention heads
ff_dim (int):
The intermediate hidden size of
the feed forward block of the
transformer
h_dim (int):
The hidden dim of the intermediate
representations of the transformer
dropout (float):
The value of dropout to be used.
Note that we apply the same dropout
to both the attention layers and the
FF layers
Returns:
RobertaMLMModel:
A Roberta model for MLM task
"""
roberta_config_dict = {
"attention_probs_dropout_prob": dropout,
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": dropout,
"hidden_size": h_dim,
"initializer_range": 0.02,
"intermediate_size": ff_dim,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "roberta",
"num_attention_heads": num_heads,
"num_hidden_layers": num_layers,
"pad_token_id": 1,
"type_vocab_size": 1,
"vocab_size": 50265,
}
roberta_config = RobertaConfig.from_dict(roberta_config_dict)
roberta_encoder = RobertaModel(roberta_config)
roberta_model = RobertaMLMModel(roberta_config, roberta_encoder)
return roberta_model
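# --- Illustrative helper (added; not part of the original script) -----------
# Builds the default small configuration and reports its parameter count.
def _demo_create_model() -> None:
    model = create_model(num_layers=6, num_heads=8, ff_dim=512, h_dim=256,
                         dropout=0.1)
    num_params = sum(p.numel() for p in model.parameters())
    print(f"Demo Roberta MLM model has {num_params:,d} parameters")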
######################################################################
########### Experiment Management Related Functions ##################
######################################################################
def get_unique_identifier(length: int = 8) -> str:
"""Create a unique identifier by choosing `length`
random characters from list of ascii characters and numbers
"""
alphabet = string.ascii_lowercase + string.digits
uuid = "".join(alphabet[ix]
for ix in np.random.choice(len(alphabet), length))
return uuid
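# Added note: get_unique_identifier() yields strings like "k3x91qa7" -- eight
# characters drawn from lowercase letters and digits (example value is made up).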
def create_experiment_dir(checkpoint_dir: pathlib.Path,
all_arguments: Dict[str, Any]) -> pathlib.Path:
"""Create an experiment directory and save all arguments in it.
Additionally, also store the githash and gitdiff. Finally create
a directory for `Tensorboard` logs. The structure would look something
like
checkpoint_dir
`-experiment-name
|- hparams.json
|- githash.log
|- gitdiff.log
`- tb_dir/
Args:
checkpoint_dir (pathlib.Path):
The base checkpoint directory
all_arguments (Dict[str, Any]):
The arguments to save
Returns:
pathlib.Path: The experiment directory
"""
# experiment name follows the following convention
# {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid}
current_time = datetime.datetime.now(pytz.timezone("US/Pacific"))
expname = "bert_pretrain.{0}.{1}.{2}.{3}.{4}.{5}.{6}".format(
current_time.year,
current_time.month,
current_time.day,
current_time.hour,
current_time.minute,
current_time.second,
get_unique_identifier(),
)
exp_dir = checkpoint_dir / expname
if not is_rank_0():
return exp_dir
exp_dir.mkdir(exist_ok=False)
hparams_file = exp_dir / "hparams.json"
with hparams_file.open("w") as handle:
json.dump(obj=all_arguments, fp=handle, indent=2)
# Save the git hash
try:
gitlog = sh.git.log("-1", format="%H", _tty_out=False, _fg=False)
with (exp_dir / "githash.log").open("w") as handle:
handle.write(gitlog.stdout.decode("utf-8"))
except sh.ErrorReturnCode_128:
log_dist(
"Seems like the code is not running from"
" within a git repo, so hash will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# And the git diff
try:
gitdiff = sh.git.diff(_fg=False, _tty_out=False)
with (exp_dir / "gitdiff.log").open("w") as handle:
handle.write(gitdiff.stdout.decode("utf-8"))
except sh.ErrorReturnCode_129:
log_dist(
"Seems like the code is not running from"
" within a git repo, so diff will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# Finally create the Tensorboard Dir
tb_dir = exp_dir / "tb_dir"
tb_dir.mkdir(exist_ok=False)
return exp_dir
######################################################################
################ Checkpoint Related Functions ########################
######################################################################
def load_model_checkpoint(
load_checkpoint_dir: pathlib.Path,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
) -> Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
"""Loads the optimizer state dict and model state dict from the load_checkpoint_dir
into the passed model and optimizer. Searches for the most recent checkpoint to
load from
Args:
load_checkpoint_dir (pathlib.Path):
The base checkpoint directory to load from
model (torch.nn.Module):
The model to load the checkpoint weights into
optimizer (torch.optim.Optimizer):
            The optimizer to load the checkpoint weights into
Returns:
Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
The checkpoint step, model with state_dict loaded and
optimizer with state_dict loaded
"""
log_dist(
f"Loading model and optimizer checkpoint from {load_checkpoint_dir}",
ranks=[0],
level=logging.INFO)
checkpoint_files = list(
filter(
lambda path: re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name) is
not None,
load_checkpoint_dir.glob("*.pt"),
))
assert len(checkpoint_files) > 0, "No checkpoints found in directory"
checkpoint_files = sorted(
checkpoint_files,
key=lambda path: int(
re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name).group("iter_no")
),
)
latest_checkpoint_path = checkpoint_files[-1]
checkpoint_step = int(
re.search(r"iter_(?P<iter_no>\d+)\.pt",
latest_checkpoint_path.name).group("iter_no"))
state_dict = torch.load(latest_checkpoint_path)
model.load_state_dict(state_dict["model"], strict=True)
optimizer.load_state_dict(state_dict["optimizer"])
log_dist(
f"Loading model and optimizer checkpoints done. Loaded from {latest_checkpoint_path}",
ranks=[0],
level=logging.INFO)
return checkpoint_step, model, optimizer
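# Added note: this helper expects plain "iter_<step>.pt" files and appears to be
# retained from the original train_bert.py flow; the train() function below
# resumes via the DeepSpeed engine's model.load_checkpoint() and saves with
# model.save_checkpoint() instead.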
######################################################################
######################## Driver Functions ############################
######################################################################
def train(
checkpoint_dir: str = None,
load_checkpoint_dir: str = None,
# Dataset Parameters
mask_prob: float = 0.15,
random_replace_prob: float = 0.1,
unmask_replace_prob: float = 0.1,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
# Model Parameters
num_layers: int = 6,
num_heads: int = 8,
ff_dim: int = 512,
h_dim: int = 256,
dropout: float = 0.1,
# Training Parameters
batch_size: int = 8,
num_iterations: int = 10000,
checkpoint_every: int = 1000,
log_every: int = 10,
local_rank: int = -1,
) -> pathlib.Path:
"""Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf)
(transformer encoder only) model for MLM Task
Args:
        checkpoint_dir (str):
            The base experiment directory to save experiments to
        load_checkpoint_dir (str):
            An existing experiment directory to resume training from
            (mutually exclusive with checkpoint_dir)
mask_prob (float, optional):
The fraction of tokens to mask. Defaults to 0.15.
random_replace_prob (float, optional):
The fraction of masked tokens to replace with random token.
Defaults to 0.1.
unmask_replace_prob (float, optional):
The fraction of masked tokens to leave unchanged.
Defaults to 0.1.
max_seq_length (int, optional):
The maximum sequence length of the examples. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
num_layers (int, optional):
The number of layers in the Bert model. Defaults to 6.
num_heads (int, optional):
Number of attention heads to use. Defaults to 8.
ff_dim (int, optional):
Size of the intermediate dimension in the FF layer.
Defaults to 512.
h_dim (int, optional):
Size of intermediate representations.
Defaults to 256.
dropout (float, optional):
            Amount of Dropout to use. Defaults to 0.1.
batch_size (int, optional):
The minibatch size. Defaults to 8.
num_iterations (int, optional):
Total number of iterations to run the model for.
Defaults to 10000.
checkpoint_every (int, optional):
Save checkpoint after these many steps.
            .. note::
You want this to be frequent enough that you can
resume training in case it crashes, but not so much
that you fill up your entire storage !
Defaults to 1000.
log_every (int, optional):
Print logs after these many steps. Defaults to 10.
local_rank (int, optional):
Which GPU to run on (-1 for CPU). Defaults to -1.
Returns:
pathlib.Path: The final experiment directory
"""
device = (torch.device("cuda", local_rank) if (local_rank > -1)
and torch.cuda.is_available() else torch.device("cpu"))
################################
###### Create Exp. Dir #########
################################
if checkpoint_dir is None and load_checkpoint_dir is None:
log_dist(
"Need to specify one of checkpoint_dir"
" or load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir is not None and load_checkpoint_dir is not None:
log_dist(
"Cannot specify both checkpoint_dir"
" and load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir:
log_dist("Creating Experiment Directory",
ranks=[0],
level=logging.INFO)
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(exist_ok=True)
all_arguments = {
# Dataset Params
"mask_prob": mask_prob,
"random_replace_prob": random_replace_prob,
"unmask_replace_prob": unmask_replace_prob,
"max_seq_length": max_seq_length,
"tokenizer": tokenizer,
# Model Params
"num_layers": num_layers,
"num_heads": num_heads,
"ff_dim": ff_dim,
"h_dim": h_dim,
"dropout": dropout,
# Training Params
"batch_size": batch_size,
"num_iterations": num_iterations,
"checkpoint_every": checkpoint_every,
}
exp_dir = create_experiment_dir(checkpoint_dir, all_arguments)
log_dist(f"Experiment Directory created at {exp_dir}",
ranks=[0],
level=logging.INFO)
else:
log_dist("Loading from Experiment Directory",
ranks=[0],
level=logging.INFO)
load_checkpoint_dir = pathlib.Path(load_checkpoint_dir)
assert load_checkpoint_dir.exists()
with (load_checkpoint_dir / "hparams.json").open("r") as handle:
hparams = json.load(handle)
# Set the hparams
# Dataset Params
mask_prob = hparams.get("mask_prob", mask_prob)
tokenizer = hparams.get("tokenizer", tokenizer)
random_replace_prob = hparams.get("random_replace_prob",
random_replace_prob)
unmask_replace_prob = hparams.get("unmask_replace_prob",
unmask_replace_prob)
max_seq_length = hparams.get("max_seq_length", max_seq_length)
# Model Params
ff_dim = hparams.get("ff_dim", ff_dim)
h_dim = hparams.get("h_dim", h_dim)
dropout = hparams.get("dropout", dropout)
num_layers = hparams.get("num_layers", num_layers)
num_heads = hparams.get("num_heads", num_heads)
# Training Params
batch_size = hparams.get("batch_size", batch_size)
_num_iterations = hparams.get("num_iterations", num_iterations)
num_iterations = max(num_iterations, _num_iterations)
checkpoint_every = hparams.get("checkpoint_every", checkpoint_every)
exp_dir = load_checkpoint_dir
# Tensorboard writer
if is_rank_0():
tb_dir = exp_dir / "tb_dir"
assert tb_dir.exists()
summary_writer = SummaryWriter(log_dir=tb_dir)
################################
###### Create Datasets #########
################################
log_dist("Creating Datasets", ranks=[0], level=logging.INFO)
data_iterator = create_data_iterator(
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
batch_size=batch_size,
)
log_dist("Dataset Creation Done", ranks=[0], level=logging.INFO)
################################
###### Create Model ############
################################
log_dist("Creating Model", ranks=[0], level=logging.INFO)
model = create_model(
num_layers=num_layers,
num_heads=num_heads,
ff_dim=ff_dim,
h_dim=h_dim,
dropout=dropout,
)
log_dist("Model Creation Done", ranks=[0], level=logging.INFO)
################################
###### DeepSpeed engine ########
################################
log_dist("Creating DeepSpeed engine", ranks=[0], level=logging.INFO)
ds_config = {
"train_micro_batch_size_per_gpu": batch_size,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "cpu"
}
}
}
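    # Added note: the config above requests fp16 training and ZeRO stage 1 with
    # the optimizer state offloaded to CPU; "train_micro_batch_size_per_gpu" is
    # the batch size each GPU processes per forward/backward pass.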
model, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
log_dist("DeepSpeed engine created", ranks=[0], level=logging.INFO)
################################
#### Load Model checkpoint #####
################################
start_step = 1
if load_checkpoint_dir is not None:
_, client_state = model.load_checkpoint(load_dir=load_checkpoint_dir)
checkpoint_step = client_state['checkpoint_step']
start_step = checkpoint_step + 1
################################
####### The Training Loop ######
################################
log_dist(
f"Total number of model parameters: {sum([p.numel() for p in model.parameters()]):,d}",
ranks=[0],
level=logging.INFO)
model.train()
losses = []
for step, batch in enumerate(data_iterator, start=start_step):
if step >= num_iterations:
break
# Move the tensors to device
for key, value in batch.items():
batch[key] = value.to(device)
# Forward pass
loss = model(**batch)
# Backward pass
model.backward(loss)
# Optimizer Step
model.step()
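        # Added note: with the DeepSpeed engine, model.backward() and
        # model.step() replace the usual loss.backward() / optimizer.step()
        # calls and also handle fp16 loss scaling and gradient zeroing.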
losses.append(loss.item())
if step % log_every == 0:
log_dist("Loss: {0:.4f}".format(np.mean(losses)),
ranks=[0],
level=logging.INFO)
if is_rank_0():
summary_writer.add_scalar(f"Train/loss", np.mean(losses), step)
if step % checkpoint_every == 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
# Save the last checkpoint if not saved yet
if step % checkpoint_every != 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
return exp_dir
if __name__ == "__main__":
torch.manual_seed(42)
np.random.seed(0)
random.seed(0)
fire.Fire(train)
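# Added note: an illustrative launch command (the path is a placeholder):
#   deepspeed train_bert_ds.py --checkpoint_dir ./experiments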
| """
Modified version of train_bert.py that adds DeepSpeed
"""
import os
import datetime
import json
import pathlib
import re
import string
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import random
import datasets
import fire
import logging
import loguru
import numpy as np
import pytz
import sh
import torch
import torch.nn as nn
import deepspeed
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.models.roberta import RobertaConfig, RobertaModel
from transformers.models.roberta.modeling_roberta import (
RobertaLMHead,
RobertaPreTrainedModel,
)
def is_rank_0() -> bool:
return int(os.environ.get("RANK", "0")) == 0
######################################################################
####################### Logging Functions ############################
######################################################################
logger = loguru.logger
def log_dist(message: str,
ranks: List[int] = [],
level: int = logging.INFO) -> None:
"""Log messages for specified ranks only"""
my_rank = int(os.environ.get("RANK", "0"))
if my_rank in ranks:
if level == logging.INFO:
logger.info(f'[Rank {my_rank}] {message}')
if level == logging.ERROR:
logger.error(f'[Rank {my_rank}] {message}')
if level == logging.DEBUG:
logger.debug(f'[Rank {my_rank}] {message}')
######################################################################
############### Dataset Creation Related Functions ###################
######################################################################
TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
def collate_function(batch: List[Tuple[List[int], List[int]]],
pad_token_id: int) -> Dict[str, torch.Tensor]:
"""Collect a list of masked token indices, and labels, and
batch them, padding to max length in the batch.
"""
max_length = max(len(token_ids) for token_ids, _ in batch)
padded_token_ids = [
token_ids +
[pad_token_id for _ in range(0, max_length - len(token_ids))]
for token_ids, _ in batch
]
padded_labels = [
labels + [pad_token_id for _ in range(0, max_length - len(labels))]
for _, labels in batch
]
src_tokens = torch.LongTensor(padded_token_ids)
tgt_tokens = torch.LongTensor(padded_labels)
attention_mask = src_tokens.ne(pad_token_id).type_as(src_tokens)
return {
"src_tokens": src_tokens,
"tgt_tokens": tgt_tokens,
"attention_mask": attention_mask,
}
def masking_function(
text: str,
tokenizer: TokenizerType,
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
max_length: int,
) -> Tuple[List[int], List[int]]:
"""Given a text string, randomly mask wordpieces for Bert MLM
training.
Args:
text (str):
The input text
tokenizer (TokenizerType):
The tokenizer for tokenization
mask_prob (float):
What fraction of tokens to mask
random_replace_prob (float):
Of the masked tokens, how many should be replaced with
random tokens (improves performance)
unmask_replace_prob (float):
Of the masked tokens, how many should be replaced with
the original token (improves performance)
max_length (int):
The maximum sequence length to consider. Note that for
Bert style models, this is a function of the number of
positional embeddings you learn
Returns:
Tuple[List[int], List[int]]:
The masked token ids (based on the tokenizer passed),
and the output labels (padded with `tokenizer.pad_token_id`)
"""
# Note: By default, encode does add the BOS and EOS token
# Disabling that behaviour to make this more clear
tokenized_ids = ([tokenizer.bos_token_id] +
tokenizer.encode(text,
add_special_tokens=False,
truncation=True,
max_length=max_length - 2) +
[tokenizer.eos_token_id])
seq_len = len(tokenized_ids)
tokenized_ids = np.array(tokenized_ids)
subword_mask = np.full(len(tokenized_ids), False)
# Masking the BOS and EOS token leads to slightly worse performance
low = 1
high = len(subword_mask) - 1
mask_choices = np.arange(low, high)
num_subwords_to_mask = max(
int((mask_prob * (high - low)) + np.random.rand()), 1)
subword_mask[np.random.choice(mask_choices,
num_subwords_to_mask,
replace=False)] = True
# Create the labels first
labels = np.full(seq_len, tokenizer.pad_token_id)
labels[subword_mask] = tokenized_ids[subword_mask]
tokenized_ids[subword_mask] = tokenizer.mask_token_id
# Now of the masked tokens, choose how many to replace with random and how many to unmask
rand_or_unmask_prob = random_replace_prob + unmask_replace_prob
if rand_or_unmask_prob > 0:
rand_or_unmask = subword_mask & (np.random.rand(len(tokenized_ids)) <
rand_or_unmask_prob)
if random_replace_prob == 0:
unmask = rand_or_unmask
rand_mask = None
elif unmask_replace_prob == 0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = unmask_replace_prob / rand_or_unmask_prob
decision = np.random.rand(len(tokenized_ids)) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
if unmask is not None:
tokenized_ids[unmask] = labels[unmask]
if rand_mask is not None:
weights = np.ones(tokenizer.vocab_size)
weights[tokenizer.all_special_ids] = 0
probs = weights / weights.sum()
num_rand = rand_mask.sum()
tokenized_ids[rand_mask] = np.random.choice(tokenizer.vocab_size,
num_rand,
p=probs)
return tokenized_ids.tolist(), labels.tolist()
class WikiTextMLMDataset(Dataset):
"""A [Map style dataset](https://pytorch.org/docs/stable/data.html)
for iterating over the wikitext dataset. Note that this assumes
the dataset can fit in memory. For larger datasets
you'd want to shard them and use an iterable dataset (eg: see
[Infinibatch](https://github.com/microsoft/infinibatch))
Args:
Dataset (datasets.arrow_dataset.Dataset):
The wikitext dataset
masking_function (Callable[[str], Tuple[List[int], List[int]]])
The masking function. To generate one training instance,
the masking function is applied to the `text` of a dataset
record
"""
def __init__(
self,
dataset: datasets.arrow_dataset.Dataset,
masking_function: Callable[[str], Tuple[List[int], List[int]]],
) -> None:
self.dataset = dataset
self.masking_function = masking_function
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[List[int], List[int]]:
tokens, labels = self.masking_function(self.dataset[idx]["text"])
return (tokens, labels)
T = TypeVar("T")
class InfiniteIterator(object):
def __init__(self, iterable: Iterable[T]) -> None:
self._iterable = iterable
self._iterator = iter(self._iterable)
def __iter__(self):
return self
def __next__(self) -> T:
next_item = None
try:
next_item = next(self._iterator)
except StopIteration:
self._iterator = iter(self._iterable)
next_item = next(self._iterator)
return next_item
def create_data_iterator(
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
batch_size: int,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
) -> InfiniteIterator:
"""Create the dataloader.
Args:
mask_prob (float):
Fraction of tokens to mask
random_replace_prob (float):
Fraction of masked tokens to replace with random token
unmask_replace_prob (float):
Fraction of masked tokens to replace with the actual token
batch_size (int):
The batch size of the generated tensors
max_seq_length (int, optional):
The maximum sequence length for the MLM task. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
Returns:
InfiniteIterator:
The torch DataLoader, wrapped in an InfiniteIterator class, to
be able to continuously generate samples
"""
wikitext_dataset = datasets.load_dataset("wikitext",
"wikitext-2-v1",
split="train")
wikitext_dataset = wikitext_dataset.filter(
lambda record: record["text"] != "").map(
lambda record: {"text": record["text"].rstrip("\n")})
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
masking_function_partial = partial(
masking_function,
tokenizer=tokenizer,
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
max_length=max_seq_length,
)
dataset = WikiTextMLMDataset(wikitext_dataset, masking_function_partial)
collate_fn_partial = partial(collate_function,
pad_token_id=tokenizer.pad_token_id)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=collate_fn_partial)
return InfiniteIterator(dataloader)
######################################################################
############### Model Creation Related Functions #####################
######################################################################
class RobertaLMHeadWithMaskedPredict(RobertaLMHead):
def __init__(self,
config: RobertaConfig,
embedding_weight: Optional[torch.Tensor] = None) -> None:
super(RobertaLMHeadWithMaskedPredict, self).__init__(config)
if embedding_weight is not None:
self.decoder.weight = embedding_weight
def forward( # pylint: disable=arguments-differ
self,
features: torch.Tensor,
masked_token_indices: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""The current `transformers` library does not provide support
for masked_token_indices. This function provides the support, by
running the final forward pass only for the masked indices. This saves
memory
Args:
features (torch.Tensor):
The features to select from. Shape (batch, seq_len, h_dim)
masked_token_indices (torch.Tensor, optional):
The indices of masked tokens for index select. Defaults to None.
Shape: (num_masked_tokens,)
Returns:
torch.Tensor:
The index selected features. Shape (num_masked_tokens, h_dim)
"""
if masked_token_indices is not None:
features = torch.index_select(
features.view(-1, features.shape[-1]), 0, masked_token_indices)
return super().forward(features)
class RobertaMLMModel(RobertaPreTrainedModel):
def __init__(self, config: RobertaConfig, encoder: RobertaModel) -> None:
super().__init__(config)
self.encoder = encoder
self.lm_head = RobertaLMHeadWithMaskedPredict(
config, self.encoder.embeddings.word_embeddings.weight)
self.lm_head.apply(self._init_weights)
def forward(
self,
src_tokens: torch.Tensor,
attention_mask: torch.Tensor,
tgt_tokens: torch.Tensor,
) -> torch.Tensor:
"""The forward pass for the MLM task
Args:
src_tokens (torch.Tensor):
The masked token indices. Shape: (batch, seq_len)
attention_mask (torch.Tensor):
The attention mask, since the batches are padded
to the largest sequence. Shape: (batch, seq_len)
tgt_tokens (torch.Tensor):
The output tokens (padded with `config.pad_token_id`)
Returns:
torch.Tensor:
The MLM loss
"""
# shape: (batch, seq_len, h_dim)
sequence_output, *_ = self.encoder(input_ids=src_tokens,
attention_mask=attention_mask,
return_dict=False)
pad_token_id = self.config.pad_token_id
# (labels have also been padded with pad_token_id)
# filter out all masked labels
# shape: (num_masked_tokens,)
masked_token_indexes = torch.nonzero(
(tgt_tokens != pad_token_id).view(-1)).view(-1)
# shape: (num_masked_tokens, vocab_size)
prediction_scores = self.lm_head(sequence_output, masked_token_indexes)
# shape: (num_masked_tokens,)
target = torch.index_select(tgt_tokens.view(-1), 0,
masked_token_indexes)
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), target)
return masked_lm_loss
def create_model(num_layers: int, num_heads: int, ff_dim: int, h_dim: int,
dropout: float) -> RobertaMLMModel:
"""Create a Bert model with the specified `num_heads`, `ff_dim`,
`h_dim` and `dropout`
Args:
num_layers (int):
The number of layers
num_heads (int):
The number of attention heads
ff_dim (int):
The intermediate hidden size of
the feed forward block of the
transformer
h_dim (int):
The hidden dim of the intermediate
representations of the transformer
dropout (float):
The value of dropout to be used.
Note that we apply the same dropout
to both the attention layers and the
FF layers
Returns:
RobertaMLMModel:
A Roberta model for MLM task
"""
roberta_config_dict = {
"attention_probs_dropout_prob": dropout,
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": dropout,
"hidden_size": h_dim,
"initializer_range": 0.02,
"intermediate_size": ff_dim,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "roberta",
"num_attention_heads": num_heads,
"num_hidden_layers": num_layers,
"pad_token_id": 1,
"type_vocab_size": 1,
"vocab_size": 50265,
}
roberta_config = RobertaConfig.from_dict(roberta_config_dict)
roberta_encoder = RobertaModel(roberta_config)
roberta_model = RobertaMLMModel(roberta_config, roberta_encoder)
return roberta_model
######################################################################
########### Experiment Management Related Functions ##################
######################################################################
def get_unique_identifier(length: int = 8) -> str:
"""Create a unique identifier by choosing `length`
random characters from list of ascii characters and numbers
"""
alphabet = string.ascii_lowercase + string.digits
uuid = "".join(alphabet[ix]
for ix in np.random.choice(len(alphabet), length))
return uuid
def create_experiment_dir(checkpoint_dir: pathlib.Path,
all_arguments: Dict[str, Any]) -> pathlib.Path:
"""Create an experiment directory and save all arguments in it.
Additionally, also store the githash and gitdiff. Finally create
a directory for `Tensorboard` logs. The structure would look something
like
checkpoint_dir
`-experiment-name
|- hparams.json
|- githash.log
|- gitdiff.log
`- tb_dir/
Args:
checkpoint_dir (pathlib.Path):
The base checkpoint directory
all_arguments (Dict[str, Any]):
The arguments to save
Returns:
pathlib.Path: The experiment directory
"""
# experiment name follows the following convention
# {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid}
current_time = datetime.datetime.now(pytz.timezone("US/Pacific"))
expname = "bert_pretrain.{0}.{1}.{2}.{3}.{4}.{5}.{6}".format(
current_time.year,
current_time.month,
current_time.day,
current_time.hour,
current_time.minute,
current_time.second,
get_unique_identifier(),
)
exp_dir = checkpoint_dir / expname
if not is_rank_0():
return exp_dir
exp_dir.mkdir(exist_ok=False)
hparams_file = exp_dir / "hparams.json"
with hparams_file.open("w") as handle:
json.dump(obj=all_arguments, fp=handle, indent=2)
# Save the git hash
try:
gitlog = sh.git.log("-1", format="%H", _tty_out=False, _fg=False)
with (exp_dir / "githash.log").open("w") as handle:
handle.write(gitlog.stdout.decode("utf-8"))
except sh.ErrorReturnCode_128:
log_dist(
"Seems like the code is not running from"
" within a git repo, so hash will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# And the git diff
try:
gitdiff = sh.git.diff(_fg=False, _tty_out=False)
with (exp_dir / "gitdiff.log").open("w") as handle:
handle.write(gitdiff.stdout.decode("utf-8"))
except sh.ErrorReturnCode_129:
log_dist(
"Seems like the code is not running from"
" within a git repo, so diff will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# Finally create the Tensorboard Dir
tb_dir = exp_dir / "tb_dir"
tb_dir.mkdir(exist_ok=False)
return exp_dir
######################################################################
################ Checkpoint Related Functions ########################
######################################################################
def load_model_checkpoint(
load_checkpoint_dir: pathlib.Path,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
) -> Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
"""Loads the optimizer state dict and model state dict from the load_checkpoint_dir
into the passed model and optimizer. Searches for the most recent checkpoint to
load from
Args:
load_checkpoint_dir (pathlib.Path):
The base checkpoint directory to load from
model (torch.nn.Module):
The model to load the checkpoint weights into
optimizer (torch.optim.Optimizer):
The optimizer to load the checkpoint weigths into
Returns:
Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
The checkpoint step, model with state_dict loaded and
optimizer with state_dict loaded
"""
log_dist(
f"Loading model and optimizer checkpoint from {load_checkpoint_dir}",
ranks=[0],
level=logging.INFO)
checkpoint_files = list(
filter(
lambda path: re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name) is
not None,
load_checkpoint_dir.glob("*.pt"),
))
assert len(checkpoint_files) > 0, "No checkpoints found in directory"
checkpoint_files = sorted(
checkpoint_files,
key=lambda path: int(
re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name).group("iter_no")
),
)
latest_checkpoint_path = checkpoint_files[-1]
checkpoint_step = int(
re.search(r"iter_(?P<iter_no>\d+)\.pt",
latest_checkpoint_path.name).group("iter_no"))
state_dict = torch.load(latest_checkpoint_path)
model.load_state_dict(state_dict["model"], strict=True)
optimizer.load_state_dict(state_dict["optimizer"])
log_dist(
f"Loading model and optimizer checkpoints done. Loaded from {latest_checkpoint_path}",
ranks=[0],
level=logging.INFO)
return checkpoint_step, model, optimizer
######################################################################
######################## Driver Functions ############################
######################################################################
def train(
checkpoint_dir: str = None,
load_checkpoint_dir: str = None,
# Dataset Parameters
mask_prob: float = 0.15,
random_replace_prob: float = 0.1,
unmask_replace_prob: float = 0.1,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
# Model Parameters
num_layers: int = 6,
num_heads: int = 8,
ff_dim: int = 512,
h_dim: int = 256,
dropout: float = 0.1,
# Training Parameters
batch_size: int = 8,
num_iterations: int = 10000,
checkpoint_every: int = 1000,
log_every: int = 10,
local_rank: int = -1,
) -> pathlib.Path:
"""Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf)
(transformer encoder only) model for MLM Task
Args:
checkpoint_dir (str):
The base experiment directory to save experiments to
mask_prob (float, optional):
The fraction of tokens to mask. Defaults to 0.15.
random_replace_prob (float, optional):
The fraction of masked tokens to replace with random token.
Defaults to 0.1.
unmask_replace_prob (float, optional):
The fraction of masked tokens to leave unchanged.
Defaults to 0.1.
max_seq_length (int, optional):
The maximum sequence length of the examples. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
num_layers (int, optional):
The number of layers in the Bert model. Defaults to 6.
num_heads (int, optional):
Number of attention heads to use. Defaults to 8.
ff_dim (int, optional):
Size of the intermediate dimension in the FF layer.
Defaults to 512.
h_dim (int, optional):
Size of intermediate representations.
Defaults to 256.
dropout (float, optional):
Amout of Dropout to use. Defaults to 0.1.
batch_size (int, optional):
The minibatch size. Defaults to 8.
num_iterations (int, optional):
Total number of iterations to run the model for.
Defaults to 10000.
checkpoint_every (int, optional):
Save checkpoint after these many steps.
..note ::
You want this to be frequent enough that you can
resume training in case it crashes, but not so much
that you fill up your entire storage !
Defaults to 1000.
log_every (int, optional):
Print logs after these many steps. Defaults to 10.
local_rank (int, optional):
Which GPU to run on (-1 for CPU). Defaults to -1.
Returns:
pathlib.Path: The final experiment directory
"""
device = (torch.device("cuda", local_rank) if (local_rank > -1)
and torch.cuda.is_available() else torch.device("cpu"))
################################
###### Create Exp. Dir #########
################################
if checkpoint_dir is None and load_checkpoint_dir is None:
log_dist(
"Need to specify one of checkpoint_dir"
" or load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir is not None and load_checkpoint_dir is not None:
log_dist(
"Cannot specify both checkpoint_dir"
" and load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir:
log_dist("Creating Experiment Directory",
ranks=[0],
level=logging.INFO)
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(exist_ok=True)
all_arguments = {
# Dataset Params
"mask_prob": mask_prob,
"random_replace_prob": random_replace_prob,
"unmask_replace_prob": unmask_replace_prob,
"max_seq_length": max_seq_length,
"tokenizer": tokenizer,
# Model Params
"num_layers": num_layers,
"num_heads": num_heads,
"ff_dim": ff_dim,
"h_dim": h_dim,
"dropout": dropout,
# Training Params
"batch_size": batch_size,
"num_iterations": num_iterations,
"checkpoint_every": checkpoint_every,
}
exp_dir = create_experiment_dir(checkpoint_dir, all_arguments)
log_dist(f"Experiment Directory created at {exp_dir}",
ranks=[0],
level=logging.INFO)
else:
log_dist("Loading from Experiment Directory",
ranks=[0],
level=logging.INFO)
load_checkpoint_dir = pathlib.Path(load_checkpoint_dir)
assert load_checkpoint_dir.exists()
with (load_checkpoint_dir / "hparams.json").open("r") as handle:
hparams = json.load(handle)
# Set the hparams
# Dataset Params
mask_prob = hparams.get("mask_prob", mask_prob)
tokenizer = hparams.get("tokenizer", tokenizer)
random_replace_prob = hparams.get("random_replace_prob",
random_replace_prob)
unmask_replace_prob = hparams.get("unmask_replace_prob",
unmask_replace_prob)
max_seq_length = hparams.get("max_seq_length", max_seq_length)
# Model Params
ff_dim = hparams.get("ff_dim", ff_dim)
h_dim = hparams.get("h_dim", h_dim)
dropout = hparams.get("dropout", dropout)
num_layers = hparams.get("num_layers", num_layers)
num_heads = hparams.get("num_heads", num_heads)
# Training Params
batch_size = hparams.get("batch_size", batch_size)
_num_iterations = hparams.get("num_iterations", num_iterations)
num_iterations = max(num_iterations, _num_iterations)
checkpoint_every = hparams.get("checkpoint_every", checkpoint_every)
exp_dir = load_checkpoint_dir
# Tensorboard writer
if is_rank_0():
tb_dir = exp_dir / "tb_dir"
assert tb_dir.exists()
summary_writer = SummaryWriter(log_dir=tb_dir)
################################
###### Create Datasets #########
################################
log_dist("Creating Datasets", ranks=[0], level=logging.INFO)
data_iterator = create_data_iterator(
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
batch_size=batch_size,
)
log_dist("Dataset Creation Done", ranks=[0], level=logging.INFO)
################################
###### Create Model ############
################################
log_dist("Creating Model", ranks=[0], level=logging.INFO)
model = create_model(
num_layers=num_layers,
num_heads=num_heads,
ff_dim=ff_dim,
h_dim=h_dim,
dropout=dropout,
)
log_dist("Model Creation Done", ranks=[0], level=logging.INFO)
################################
###### DeepSpeed engine ########
################################
log_dist("Creating DeepSpeed engine", ranks=[0], level=logging.INFO)
ds_config = {
"train_micro_batch_size_per_gpu": batch_size,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "cpu"
}
}
}
model, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
log_dist("DeepSpeed engine created", ranks=[0], level=logging.INFO)
################################
#### Load Model checkpoint #####
################################
start_step = 1
if load_checkpoint_dir is not None:
_, client_state = model.load_checkpoint(load_dir=load_checkpoint_dir)
checkpoint_step = client_state['checkpoint_step']
start_step = checkpoint_step + 1
################################
####### The Training Loop ######
################################
log_dist(
f"Total number of model parameters: {sum([p.numel() for p in model.parameters()]):,d}",
ranks=[0],
level=logging.INFO)
model.train()
losses = []
for step, batch in enumerate(data_iterator, start=start_step):
if step >= num_iterations:
break
# Move the tensors to device
for key, value in batch.items():
batch[key] = value.to(device)
# Forward pass
loss = model(**batch)
# Backward pass
model.backward(loss)
# Optimizer Step
model.step()
losses.append(loss.item())
if step % log_every == 0:
log_dist("Loss: {0:.4f}".format(np.mean(losses)),
ranks=[0],
level=logging.INFO)
if is_rank_0():
summary_writer.add_scalar(f"Train/loss", np.mean(losses), step)
if step % checkpoint_every == 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
# Save the last checkpoint if not saved yet
if step % checkpoint_every != 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
return exp_dir
if __name__ == "__main__":
torch.manual_seed(42)
np.random.seed(0)
random.seed(0)
fire.Fire(train) | en | 0.506649 | Modified version of train_bert.py that adds DeepSpeed ###################################################################### ####################### Logging Functions ############################ ###################################################################### Log messages for specified ranks only ###################################################################### ############### Dataset Creation Related Functions ################### ###################################################################### Collect a list of masked token indices, and labels, and batch them, padding to max length in the batch. Given a text string, randomly mask wordpieces for Bert MLM training. Args: text (str): The input text tokenizer (TokenizerType): The tokenizer for tokenization mask_prob (float): What fraction of tokens to mask random_replace_prob (float): Of the masked tokens, how many should be replaced with random tokens (improves performance) unmask_replace_prob (float): Of the masked tokens, how many should be replaced with the original token (improves performance) max_length (int): The maximum sequence length to consider. Note that for Bert style models, this is a function of the number of positional embeddings you learn Returns: Tuple[List[int], List[int]]: The masked token ids (based on the tokenizer passed), and the output labels (padded with `tokenizer.pad_token_id`) # Note: By default, encode does add the BOS and EOS token # Disabling that behaviour to make this more clear # Masking the BOS and EOS token leads to slightly worse performance # Create the labels first # Now of the masked tokens, choose how many to replace with random and how many to unmask A [Map style dataset](https://pytorch.org/docs/stable/data.html) for iterating over the wikitext dataset. Note that this assumes the dataset can fit in memory. For larger datasets you'd want to shard them and use an iterable dataset (eg: see [Infinibatch](https://github.com/microsoft/infinibatch)) Args: Dataset (datasets.arrow_dataset.Dataset): The wikitext dataset masking_function (Callable[[str], Tuple[List[int], List[int]]]) The masking function. To generate one training instance, the masking function is applied to the `text` of a dataset record Create the dataloader. Args: mask_prob (float): Fraction of tokens to mask random_replace_prob (float): Fraction of masked tokens to replace with random token unmask_replace_prob (float): Fraction of masked tokens to replace with the actual token batch_size (int): The batch size of the generated tensors max_seq_length (int, optional): The maximum sequence length for the MLM task. Defaults to 512. tokenizer (str, optional): The tokenizer to use. Defaults to "roberta-base". Returns: InfiniteIterator: The torch DataLoader, wrapped in an InfiniteIterator class, to be able to continuously generate samples ###################################################################### ############### Model Creation Related Functions ##################### ###################################################################### # pylint: disable=arguments-differ The current `transformers` library does not provide support for masked_token_indices. This function provides the support, by running the final forward pass only for the masked indices. This saves memory Args: features (torch.Tensor): The features to select from. Shape (batch, seq_len, h_dim) masked_token_indices (torch.Tensor, optional): The indices of masked tokens for index select. Defaults to None. 
Shape: (num_masked_tokens,) Returns: torch.Tensor: The index selected features. Shape (num_masked_tokens, h_dim) The forward pass for the MLM task Args: src_tokens (torch.Tensor): The masked token indices. Shape: (batch, seq_len) attention_mask (torch.Tensor): The attention mask, since the batches are padded to the largest sequence. Shape: (batch, seq_len) tgt_tokens (torch.Tensor): The output tokens (padded with `config.pad_token_id`) Returns: torch.Tensor: The MLM loss # shape: (batch, seq_len, h_dim) # (labels have also been padded with pad_token_id) # filter out all masked labels # shape: (num_masked_tokens,) # shape: (num_masked_tokens, vocab_size) # shape: (num_masked_tokens,) Create a Bert model with the specified `num_heads`, `ff_dim`, `h_dim` and `dropout` Args: num_layers (int): The number of layers num_heads (int): The number of attention heads ff_dim (int): The intermediate hidden size of the feed forward block of the transformer h_dim (int): The hidden dim of the intermediate representations of the transformer dropout (float): The value of dropout to be used. Note that we apply the same dropout to both the attention layers and the FF layers Returns: RobertaMLMModel: A Roberta model for MLM task ###################################################################### ########### Experiment Management Related Functions ################## ###################################################################### Create a unique identifier by choosing `length` random characters from list of ascii characters and numbers Create an experiment directory and save all arguments in it. Additionally, also store the githash and gitdiff. Finally create a directory for `Tensorboard` logs. The structure would look something like checkpoint_dir `-experiment-name |- hparams.json |- githash.log |- gitdiff.log `- tb_dir/ Args: checkpoint_dir (pathlib.Path): The base checkpoint directory all_arguments (Dict[str, Any]): The arguments to save Returns: pathlib.Path: The experiment directory # experiment name follows the following convention # {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid} # Save the git hash # And the git diff # Finally create the Tensorboard Dir ###################################################################### ################ Checkpoint Related Functions ######################## ###################################################################### Loads the optimizer state dict and model state dict from the load_checkpoint_dir into the passed model and optimizer. Searches for the most recent checkpoint to load from Args: load_checkpoint_dir (pathlib.Path): The base checkpoint directory to load from model (torch.nn.Module): The model to load the checkpoint weights into optimizer (torch.optim.Optimizer): The optimizer to load the checkpoint weigths into Returns: Tuple[int, torch.nn.Module, torch.optim.Optimizer]: The checkpoint step, model with state_dict loaded and optimizer with state_dict loaded ###################################################################### ######################## Driver Functions ############################ ###################################################################### # Dataset Parameters # Model Parameters # Training Parameters Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf) (transformer encoder only) model for MLM Task Args: checkpoint_dir (str): The base experiment directory to save experiments to mask_prob (float, optional): The fraction of tokens to mask. Defaults to 0.15. 
random_replace_prob (float, optional): The fraction of masked tokens to replace with random token. Defaults to 0.1. unmask_replace_prob (float, optional): The fraction of masked tokens to leave unchanged. Defaults to 0.1. max_seq_length (int, optional): The maximum sequence length of the examples. Defaults to 512. tokenizer (str, optional): The tokenizer to use. Defaults to "roberta-base". num_layers (int, optional): The number of layers in the Bert model. Defaults to 6. num_heads (int, optional): Number of attention heads to use. Defaults to 8. ff_dim (int, optional): Size of the intermediate dimension in the FF layer. Defaults to 512. h_dim (int, optional): Size of intermediate representations. Defaults to 256. dropout (float, optional): Amout of Dropout to use. Defaults to 0.1. batch_size (int, optional): The minibatch size. Defaults to 8. num_iterations (int, optional): Total number of iterations to run the model for. Defaults to 10000. checkpoint_every (int, optional): Save checkpoint after these many steps. ..note :: You want this to be frequent enough that you can resume training in case it crashes, but not so much that you fill up your entire storage ! Defaults to 1000. log_every (int, optional): Print logs after these many steps. Defaults to 10. local_rank (int, optional): Which GPU to run on (-1 for CPU). Defaults to -1. Returns: pathlib.Path: The final experiment directory ################################ ###### Create Exp. Dir ######### ################################ # Dataset Params # Model Params # Training Params # Set the hparams # Dataset Params # Model Params # Training Params # Tensorboard writer ################################ ###### Create Datasets ######### ################################ ################################ ###### Create Model ############ ################################ ################################ ###### DeepSpeed engine ######## ################################ ################################ #### Load Model checkpoint ##### ################################ ################################ ####### The Training Loop ###### ################################ # Move the tensors to device # Forward pass # Backward pass # Optimizer Step # Save the last checkpoint if not saved yet | 2.194234 | 2 |
programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py | vamsitallapudi/Coderefer-Python-Projects | 1 | 10148 | <gh_stars>1-10
# Given a singly linked list, determine if it is a palindrome.
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
fast = slow = head
# find the mid node
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# reverse the second half
node = None
while slow:
nxt = slow.next
slow.next = node
node = slow
slow = nxt
# compare first and second half of nodes
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
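# A small usage sketch (not part of the original snippet):
if __name__ == "__main__":
    # 1 -> 2 -> 2 -> 1 is a palindrome, so this prints True
    head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
    print(Solution().isPalindrome(head))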
__init__.py | CloudCIX/rolly | 6 | 10149 | <reponame>CloudCIX/rolly
"""
Rocky is a CLI-based provisioning and management tool for CloudCIX Cloud software.
Rocky is designed to operate in an out-of-band (OOB) network, separated from other CloudCIX networks.
Rocky's purpose is to facilitate monitoring, testing, debugging and recovery.
"""
__version__ = '0.3.5'
| """
Rocky is a CLI based provisioning and management tool for CloudCIX Cloud software.
Rocky is designed to operate in an out of band (OOB) network, serarated from other CloudCIX networks.
Rocky's purpose is to facilitate monitoring, testing, debug and recovery
"""
__version__ = '0.3.5' | en | 0.941058 | Rocky is a CLI based provisioning and management tool for CloudCIX Cloud software. Rocky is designed to operate in an out of band (OOB) network, serarated from other CloudCIX networks. Rocky's purpose is to facilitate monitoring, testing, debug and recovery | 0.714859 | 1 |
calculator.py | harshitbansal373/Python-Games | 0 | 10150 | <reponame>harshitbansal373/Python-Games
from tkinter import *
import time
root=Tk()
root.title('Calculator')
root.config(bg='wheat')
def display(x):
global s
s=s+x
text.set(s)
def solve():
global s
try:
s=str(eval(text.get()))
except Exception as e:
text.set(e)
s=''
else:
text.set(s)
def clear():
global s
s=''
text.set(s)
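# "CE" button handler: drop only the last character of the current expression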
def clear1():
global s
s=text.get()
s=s[:len(s)-1]
text.set(s)
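# con(): show the current time in the status label and reschedule itself every second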
def con():
label['text']=time.ctime()
label.after(1000,con)
s=''
text=StringVar()
f=Frame(root,bg='#dcdde1')
e=Entry(f,textvariable=text,bg='#f5f6fa',fg='#353b48',font='roboto 34 bold',justify='right',relief=RAISED)
e.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
l=['#aabbcc','#bbccdd','#ccddee','#ddeeff']
for i in ['789/','456*','123+','.0-=']:
f=Frame(root,bg=l.pop())
for j in i:
b=Button(f,text=j,bg='#00a8ff',fg='#353b48',font='roboto 34 italic',command=(lambda x=j:display(x)) if j!='=' else solve)
b.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
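# Note: the default argument in `lambda x=j: display(x)` captures the current
# button label at definition time; a plain `lambda: display(j)` would make every
# button send the final value of j once the loops finish.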
f1=Frame(root,bg='#dcdde1')
clear=Button(f1,text='C',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear)
clear.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
clear1=Button(f1,text='CE',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear1)
clear1.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f1.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
f2=Frame(root,bg='#dcdde1')
label=Label(f2,bg='#00a8ff',fg='#353b48',font='roboto 34')
label.pack(padx=10,pady=10,expand=YES,fill=BOTH)
f2.pack(padx=10,pady=10,expand=YES,fill=BOTH)
con()
root.mainloop()
src/Main.py | OlavH96/Master | 0 | 10151 | <gh_stars>0
import glob
import os
import keras
import tensorflow as tf
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import src.util.Files as Files
from src.util.ImageLoader import load_images_generator, resize_image, load_images_generator_with_filename
import numpy as np
import logging as log
import random
from src.util.Arguments import anomaly_arguments, get_model_choice
import src.util.Arguments as Arguments
from scipy.stats import norm
from PIL import Image
from src.train.Models import autoencoder, conv_autoencoder, vae_autoencoder, vae_loss, get_dummy_loss, from_argument_choice
import src.train.Models as Models
import src.util.Filenames as Filenames
import math
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_generator(path, max_x, max_y, color_mode="RGB"):
for i in load_images_generator(path, color_mode=color_mode):
i = resize_image(i, max_x, max_y)
i = np.array(i)
i = np.expand_dims(i, axis=0)
i = i / 255
yield (i, i)
def image_generator_with_filename(path, max_x, max_y, color_mode="RGB"):
for i, f in load_images_generator_with_filename(path, color_mode=color_mode):
i = resize_image(i, max_x, max_y)
i = np.array(i)
i = np.expand_dims(i, axis=0)
i = i / 255
yield (i, f)
def centered_image_generator(path, max_x, max_y, color_mode="RGB"):
while True:
for i, o in image_generator(path, max_x, max_y, color_mode=color_mode):
yield (i, o)
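# Note: this generator loops forever over the directory, so the epoch length is
# bounded by the steps_per_epoch / validation_steps passed to fit_generator below.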
def train_on_images(epochs, max_x, max_y, path, model_type, model_name, arg_steps, validation_path, color_mode="RGB"):
sess = tf.Session()
keras.backend.set_session(sess)
# max_x = max([i.shape[0] for i in images])
# max_y = max([i.shape[1] for i in images])
# max_x, max_y = find_max_min_image_size(path = 'detected_images/*.png')
# print(max_x, max_y) # 304, 298
epochs = epochs
shape = (max_y, max_x, 3)
model = Models.from_argument_choice(model_type, shape)
steps = len(glob.glob(path))
if arg_steps != 0:
steps = arg_steps
model.summary()
# define the checkpoint
checkpoint = ModelCheckpoint(model_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
log.info('Fitting model...')
if validation_path:
history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode),
validation_data=centered_image_generator(validation_path, max_x, max_y, color_mode=color_mode),
validation_steps=100,
epochs=epochs,
steps_per_epoch=steps,
callbacks=callbacks_list)
else:
history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode),
epochs=epochs,
steps_per_epoch=steps,
callbacks=callbacks_list)
model.save(model_name)
loss = history.history['loss']
try:
plt.plot(loss)
if validation_path:
val_loss = history.history['val_loss']
plt.plot(val_loss, color='g')
plt.title(model_name)
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.savefig(f'training_loss_{model_name}.png')
    except Exception:
log.info("Failed to create loss graph")
log.info('Finished fitting model')
return model
def load_model_and_predict(model_path, num_predictions, path, max_x, max_y, model_type, model=None, color_mode="RGB", template_only=False):
# vae_loss(image_shape=(max_x, max_y, 3), log_var=0.5, mu=0.5)
im_shape = (max_x, max_y, 3)
if model_type == get_model_choice(Arguments.VAE) and not model:
model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)})
mu = model.get_layer('mu').output
log_var = model.get_layer('log').output
model.summary()
print(mu, log_var)
model.compile(optimizer='rmsprop', loss=vae_loss(im_shape, log_var, mu))
if model_type == get_model_choice(Arguments.CONVVAE) and not model:
model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)})
encoder = model.get_layer('encoder')
decoder = model.get_layer('decoder')
mu = encoder.get_layer('mu').output
log_var = encoder.get_layer('log').output
model.compile(optimizer='adam', loss=vae_loss(im_shape, log_var, mu))
if model_type != get_model_choice(Arguments.VAE) and not model:
model = load_model(model_path)
model.summary()
print("Loaded Model", model, model.input_shape)
max_x = model.input_shape[1]
max_y = model.input_shape[2]
images = list(image_generator_with_filename(path, max_x, max_y, color_mode=color_mode))
random.shuffle(images)
index = 0
print(f'Loaded {len(images)} images')
model_name = model_path.split('.')[0]
save_dir = Files.makedir_else_cleardir(f'./predictions/{model_name}_{Filenames.remove_path(Filenames.strip_path_modifier(path))}')
for i, filename in images: # centered_image_generator(path, max_x, max_y):
hashed = Filenames.md5hash(filename)
anomaly = "anomaly" in filename
extra = "_anomaly_" if anomaly else "_normal_"
pred = model.predict(i)
print(pred.shape)
for ii in i:
if color_mode == 'HSV':
ii = Image.fromarray((ii * 255).astype(np.uint8), 'HSV')
ii = ii.convert("RGB")
ii = np.array(ii)
plt.imsave(str(save_dir / f'orig{extra}{hashed}_{index}.png'), ii)
#plt.imsave(str(save_dir / f'temp.png'), pred[0], vmin=0, vmax=1)
print("input shape",i.shape)
evaluate = model.evaluate(i, i)
if type(evaluate) is list:
evaluate = evaluate[0]
print(index, evaluate)
for p in pred:
#print("prediction",p)
p = p / np.max(p)
if color_mode == 'HSV':
p = Image.fromarray((p * 255).astype(np.uint8), 'HSV')
p = p.convert('RGB')
p = np.array(p)
if template_only:
# Hacky solution, oh well
template_path = './src/sign_detection/image_generation/images/signs/png/362.50/362_5.png'
im = Image.open(template_path)
im = im.convert('RGB')
im = im.resize(size=(64,64))
im = np.array(im)
score = image_mse(i[0], im)
plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{score}.png'), im)
else:
plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{str(evaluate)}.png'), p)
index += 1
if index == num_predictions:
break
def image_mse(imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
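# Worked example (illustrative): for 2x2 RGB images of zeros and ones the squared
# error sums to 12 and is divided by 2*2, so
# image_mse(np.zeros((2, 2, 3)), np.ones((2, 2, 3))) == 3.0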
if __name__ == '__main__':
args = anomaly_arguments()
    log.info('Arguments: %s', args)
print("Arguments", args)
model = None
if args.do_training:
model = train_on_images(
epochs=args.epochs,
path=args.path,
max_x=args.max_x,
max_y=args.max_y,
model_type=args.model_type,
model_name=args.model,
arg_steps=args.steps,
color_mode=args.color,
validation_path=args.validation_path
)
if args.do_predict:
load_model_and_predict(
model_path=args.model,
num_predictions=args.num_predictions,
max_x=args.max_x,
max_y=args.max_y,
path=args.pred_path if args.pred_path else args.path,
model_type=args.model_type,
model=model,
color_mode=args.color,
template_only=args.template
)
daproli/manipulation.py | ermshaua/daproli | 0 | 10152 | from .utils import _get_return_type
def windowed(data, size, step=1, ret_type=None):
'''
dp.windowed applies a window function to a collection of data items.
Parameters
-----------
:param data: an iterable collection of data
:param size: the window size
:param step: the window step
    :param ret_type: the return type to use; if not provided, it is determined from data via _get_return_type
:return: the windowed data list
Examples
-----------
>>> import daproli as dp
>>> numbers = range(10)
>>> dp.windowed(numbers, 2, step=2)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
'''
if ret_type is None: ret_type = _get_return_type(data)
return [ret_type(data[i:i+size]) for i in range(0, len(data)-(size-1), step)]
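# With the default step=1 the windows overlap, e.g. (return type inferred as list,
# as in the docstring example): windowed(range(5), 3) -> [[0, 1, 2], [1, 2, 3], [2, 3, 4]]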
def flatten(data, ret_type=None):
'''
dp.flatten applies a flatten function to a collection of data items.
Parameters
-----------
:param data: an iterable collection of data
    :param ret_type: the return type to use; if not provided, it is determined from data via _get_return_type
:return: the flattened data collection
Examples
-----------
>>> import daproli as dp
>>> dp.flatten([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
if ret_type is None: ret_type = _get_return_type(data)
return ret_type([item for sub in data for item in sub])
ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py | gcxtx/ambari | 1 | 10153 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import json
import sys
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestMetadataServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package"
STACK_VERSION = "2.3"
def configureResourcesCalled(self):
self.assertResourceCalled('Directory', '/var/run/atlas',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/etc/atlas/conf',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/var/log/atlas',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/hbase/logs',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/data',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/data',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0644
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/server/webapp',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0644
)
self.assertResourceCalled('File', '/usr/hdp/current/atlas-server/server/webapp/atlas.war',
content = StaticFile('/usr/hdp/current/atlas-server/server/webapp/atlas.war'),
)
appprops = dict(self.getConfig()['configurations'][
'application-properties'])
appprops['atlas.server.bind.address'] = 'c6401.ambari.apache.org'
self.assertResourceCalled('PropertiesFile',
'/etc/atlas/conf/application.properties',
properties=appprops,
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/atlas-env.sh',
content=InlineTemplate(
self.getConfig()['configurations'][
'atlas-env']['content']),
owner='atlas',
group='hadoop',
mode=0755,
)
self.assertResourceCalled('File', '/etc/atlas/conf/atlas-log4j.xml',
content=InlineTemplate(
self.getConfig()['configurations'][
'atlas-log4j']['content']),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/users-credentials.properties',
content=StaticFile('users-credentials.properties'),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/policy-store.txt',
content=StaticFile('policy-store.txt'),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'atlas',
group = 'hadoop',
conf_dir = '/usr/hdp/current/atlas-server/hbase/conf',
configurations = self.getConfig()['configurations']['atlas-hbase-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['atlas-hbase-site']
)
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertNoMoreResources()
def test_configure_secure(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "configure",
config_file="secure.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('TemplateConfig', '/etc/atlas/conf/atlas_jaas.conf',
owner = 'atlas',
)
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh ; /usr/hdp/current/atlas-server/bin/atlas_start.py',
not_if = 'ls /var/run/atlas/atlas.pid >/dev/null 2>&1 && ps -p `cat /var/run/atlas/atlas.pid` >/dev/null 2>&1',
user = 'atlas',
)
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh; /usr/hdp/current/atlas-server/bin/atlas_stop.py',
user = 'atlas',
)
self.assertResourceCalled('File', '/var/run/atlas/atlas.pid',
action = ['delete'],
)
| #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import json
import sys
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestMetadataServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package"
STACK_VERSION = "2.3"
def configureResourcesCalled(self):
self.assertResourceCalled('Directory', '/var/run/atlas',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/etc/atlas/conf',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/var/log/atlas',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/hbase/logs',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/data',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/data',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0644
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/server/webapp',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0644
)
self.assertResourceCalled('File', '/usr/hdp/current/atlas-server/server/webapp/atlas.war',
content = StaticFile('/usr/hdp/current/atlas-server/server/webapp/atlas.war'),
)
appprops = dict(self.getConfig()['configurations'][
'application-properties'])
appprops['atlas.server.bind.address'] = 'c6401.ambari.apache.org'
self.assertResourceCalled('PropertiesFile',
'/etc/atlas/conf/application.properties',
properties=appprops,
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/atlas-env.sh',
content=InlineTemplate(
self.getConfig()['configurations'][
'atlas-env']['content']),
owner='atlas',
group='hadoop',
mode=0755,
)
self.assertResourceCalled('File', '/etc/atlas/conf/atlas-log4j.xml',
content=InlineTemplate(
self.getConfig()['configurations'][
'atlas-log4j']['content']),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/users-credentials.properties',
content=StaticFile('users-credentials.properties'),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/policy-store.txt',
content=StaticFile('policy-store.txt'),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'atlas',
group = 'hadoop',
conf_dir = '/usr/hdp/current/atlas-server/hbase/conf',
configurations = self.getConfig()['configurations']['atlas-hbase-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['atlas-hbase-site']
)
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertNoMoreResources()
def test_configure_secure(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "configure",
config_file="secure.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('TemplateConfig', '/etc/atlas/conf/atlas_jaas.conf',
owner = 'atlas',
)
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh ; /usr/hdp/current/atlas-server/bin/atlas_start.py',
not_if = 'ls /var/run/atlas/atlas.pid >/dev/null 2>&1 && ps -p `cat /var/run/atlas/atlas.pid` >/dev/null 2>&1',
user = 'atlas',
)
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh; /usr/hdp/current/atlas-server/bin/atlas_stop.py',
user = 'atlas',
)
self.assertResourceCalled('File', '/var/run/atlas/atlas.pid',
action = ['delete'],
)
| en | 0.857483 | #!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.728014 | 2 |
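The tests above all follow the same pattern: run the management script against a recorded resource layer and assert on the resources it declared. A generic sketch of that record-and-assert idea using only the standard library (this is an illustration, not Ambari's RMFTestCase API):

from unittest import mock

def configure(resource):  # hypothetical stand-in for the script under test
    resource('Directory', '/var/run/atlas', owner='atlas', mode=0o755)
    resource('File', '/etc/atlas/conf/atlas-env.sh', owner='atlas')

recorder = mock.Mock()
configure(recorder)
recorder.assert_any_call('Directory', '/var/run/atlas', owner='atlas', mode=0o755)
assert recorder.call_count == 2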
Udacity P3 Additional Files/model.py | sayeayed/Udacity-Project4 | 0 | 10154 | import os
import csv
import numpy as np
from sklearn.utils import shuffle
## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile: #open the log file
reader = csv.reader(csvfile) #as a readable csv
for line in reader:
samples.append(line) #add each line of the log file to samples
samples = samples[1:] # to remove table header
samples = shuffle(samples) # shuffle entire sample set before splitting into training and validation so that training isn't biased
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2) #split samples into 80% training, 20% validation
from scipy import ndimage #because cv2.imread() imports the image as BGR, and we want RGB
## Define generator to handle small portions of images at a time so that training is not as memory-heavy
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
# shuffle(samples) #shuffle within the training/validation sets, NOT NECESSARY SINCE SHUFFLING ALREADY SHUFFLED
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size] #collect the images for this batch
images = []
angles = []
for batch_sample in batch_samples:
path = '/../opt/carnd_p3/data/IMG/' #assign the location from which to read images
# read in images from all 3 cameras MAKING SURE TO READ IN AS RGB
center_image = ndimage.imread(path+batch_sample[0].split('/')[-1])
left_image = ndimage.imread(path+batch_sample[1].split('/')[-1])
right_image = ndimage.imread(path+batch_sample[2].split('/')[-1])
# read in steering angle
center_angle = float(batch_sample[3]) #read the steering angle
# apply a steering correction for the left and right images, in a way to generate "new" samples
correction = 0.2
left_angle = center_angle + correction
right_angle = center_angle - correction
# add images and angles to batch set
images.extend([center_image, left_image, right_image])
angles.extend([center_angle, left_angle, right_angle])
# copy all batches' images to final numpy array
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train) #shuffle before yielding result
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 160, 320 # Full image format
#import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
# BUILD MODEL
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row,col,ch)))
# Crop incoming data (training, validation, and autonomous so that everything is consistent)
model.add(Cropping2D(cropping=((60,20), (0,0)))) # could be first layer to reduce memory used in Lambda calculation, and thus faster training
# Begin CNN (similar to NVIDIA architecture)
# Convolution layers 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48,(5,5),strides=(2,2),activation='relu'))
# Convolution layers 4-5, kernel size 3 with stride of 1
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
# Flatten convolution output to yield single numerical result
model.add(Flatten())
# Fully connected layers to complete computations, gradually decreasing in parameters until final value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
## Training hyper parameters to play with
## Stop training checkpoints...
# save_path = 'model{epoch:02d}-{val_loss:.2f}.h5'
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
## OR
batch_size = 32
epochs = 5 #***
## Compile and train the model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) #use Mean Squared Error to measure loss, use Adam optimizer for tuning
model.fit_generator(train_generator, steps_per_epoch= len(train_samples)/batch_size,validation_data=validation_generator, validation_steps=len(validation_samples)/batch_size, epochs=5, verbose = 1) # train using generators
#save the trained model
model.save('model.h5') | import os
import csv
import numpy as np
from sklearn.utils import shuffle
## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile: #open the log file
reader = csv.reader(csvfile) #as a readable csv
for line in reader:
samples.append(line) #add each line of the log file to samples
samples = samples[1:] # to remove table header
samples = shuffle(samples) # shuffle entire sample set before splitting into training and validation so that training isn't biased
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2) #split samples into 80% training, 20% validation
from scipy import ndimage #because cv2.imread() imports the image as BGR, and we want RGB
## Define generator to handle small portions of images at a time so that training is not as memory-heavy
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
# shuffle(samples) #shuffle within the training/validation sets, NOT NECESSARY SINCE SHUFFLING ALREADY SHUFFLED
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size] #collect the images for this batch
images = []
angles = []
for batch_sample in batch_samples:
path = '/../opt/carnd_p3/data/IMG/' #assign the location from which to read images
# read in images from all 3 cameras MAKING SURE TO READ IN AS RGB
center_image = ndimage.imread(path+batch_sample[0].split('/')[-1])
left_image = ndimage.imread(path+batch_sample[1].split('/')[-1])
right_image = ndimage.imread(path+batch_sample[2].split('/')[-1])
# read in steering angle
center_angle = float(batch_sample[3]) #read the steering angle
# apply a steering correction for the left and right images, in a way to generate "new" samples
correction = 0.2
left_angle = center_angle + correction
right_angle = center_angle - correction
# add images and angles to batch set
images.extend([center_image, left_image, right_image])
angles.extend([center_angle, left_angle, right_angle])
# copy all batches' images to final numpy array
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train) #shuffle before yielding result
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 160, 320 # Full image format
#import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
# BUILD MODEL
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row,col,ch)))
# Crop incoming data (training, validation, and autonomous so that everything is consistent)
model.add(Cropping2D(cropping=((60,20), (0,0)))) # could be first layer to reduce memory used in Lambda calculation, and thus faster training
# Begin CNN (similar to NVIDIA architecture)
# Convolution layers 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48,(5,5),strides=(2,2),activation='relu'))
# Convolution layers 4-5, kernel size 3 with stride of 1
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
# Flatten convolution output to yield single numerical result
model.add(Flatten())
# Fully connected layers to complete computations, gradually decreasing in parameters until final value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
## Training hyper parameters to play with
## Stop training checkpoints...
# save_path = 'model{epoch:02d}-{val_loss:.2f}.h5'
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
## OR
batch_size = 32
epochs = 5 #***
## Compile and train the model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) #use Mean Squared Error to measure loss, use Adam optimizer for tuning
model.fit_generator(train_generator, steps_per_epoch= len(train_samples)/batch_size,validation_data=validation_generator, validation_steps=len(validation_samples)/batch_size, epochs=5, verbose = 1) # train using generators
#save the trained model
model.save('model.h5') | en | 0.772373 | ## Read in frame data #open the log file #as a readable csv #add each line of the log file to samples # to remove table header # shuffle entire sample set before splitting into training and validation so that training isn't biased #split samples into 80% training, 20% validation #because cv2.imread() imports the image as BGR, and we want RGB ## Define generator to handle small portions of images at a time so that training is not as memory-heavy # Loop forever so the generator never terminates # shuffle(samples) #shuffle within the training/validation sets, NOT NECESSARY SINCE SHUFFLING ALREADY SHUFFLED #collect the images for this batch #assign the location from which to read images # read in images from all 3 cameras MAKING SURE TO READ IN AS RGB # read in steering angle #read the steering angle # apply a steering correction for the left and right images, in a way to generate "new" samples # add images and angles to batch set # copy all batches' images to final numpy array #shuffle before yielding result # compile and train the model using the generator function # Full image format #import Keras model layers # BUILD MODEL # Preprocess incoming data, centered around zero with small standard deviation # Crop incoming data (training, validation, and autonomous so that everything is consistent) # could be first layer to reduce memory used in Lambda calculation, and thus faster training # Begin CNN (similar to NVIDIA architecture) # Convolution layer 1-3, kernel size 5 with stride of 2 # Convolution layers 4-5, kernel size 3 wth stride of 1 # Flatten convolution output to yield single numerical result # Fully connected layers to complete computations, gradually decreasing in parameters until final value ## Training hyper parameters to play with ## Stop training checkpoints... # save_path = 'model{epoch:02d}-{val_loss:.2f}.h5' # checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True) # stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5) ## OR #*** ## Compile and train the model #use Mean Squared Error to measure loss, use Adam optimizer for tuning # train using generators #save the trained model | 3.029101 | 3 |
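Before training, one quick sanity check on the generator defined above is to pull a single batch and confirm the shapes line up; three images are yielded per CSV row because the centre, left and right cameras are all used (this reuses the script's own generator and train_samples):

batch_images, batch_angles = next(generator(train_samples, batch_size=8))
print(batch_images.shape)   # expected (24, 160, 320, 3)
print(batch_angles.shape)   # expected (24,)
assert len(batch_images) == len(batch_angles)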
utils/wavelengthfit_prim.py | GeminiDRSoftware/GHOSTDR | 1 | 10155 | #!/usr/bin/env python3
""" A script containing the basic principles of the extraction primitive inner
workings"""
from __future__ import division, print_function
from ghostdr import polyfit
import numpy as np
import astropy.io.fits as pyfits  # assumed FITS backend for the pyfits.getdata/writeto calls below
# Firstly, let's find all the needed files
fitsdir='/Users/mireland/data/ghost/cal_frames/'
#Define the files in use (NB xmod.txt and wavemod.txt should be correct)
arc_file = fitsdir+"arc_extracted.fits"
# load it in now:
extracted_flux,extracted_vars=pyfits.getdata(arc_file)
# Where is the default location for the model? By default it is a parameter
# in the ghost class. If this needs to be overwritten, go ahead.
# This is the xmod file. Wherever it is saved from the flat reduction.
xmodel_file=fitsdir+'GHOST_1_1_blue_std_xmodPolyfit.fits'
# All the other models... which are currently in the "test" directory.
wmodel_file=test_files_dir+'wparams_blue_std.fits'
spatmod_file=test_files_dir+'spatmod.fits'
specmod_file=test_files_dir+'specmod.fits'
rotmod_file=test_files_dir+'rotmod2.fits'
# Find the arc line list file
arclinefile='/home/jbento/code/ghostdr/ghostdr/ADCONFIG_GHOST/lookups/GHOST/Polyfit/mnras0378-0221-SD1.txt'
arcwaves, arcfluxes= np.loadtxt(arclinefile,usecols=[1,2]).T
#Get the initial default model from the lookup location
xpars=pyfits.getdata(xmodel_file)
wpars=pyfits.getdata(wmodel_file)
spatpars=pyfits.getdata(spatmod_file)
specpars=pyfits.getdata(specmod_file)
rotpars=pyfits.getdata(rotmod_file)
#instantiate the ghost arm and apply the model, now that the parameters are loaded
arm = polyfit.GhostArm('blue',mode='std')
arm.spectral_format_with_matrix(xpars,wpars,spatpars,specpars,rotpars)
slitview = polyfit.SlitView(image_array, flat_image_array, mode='std')
# The extractor is given the polyfit "arm" object, and a slitview object which has
# been instantiated with the slit viewer data.
extractor = polyfit.Extractor(arm, slitview)
#Now find the other lines, after first re-loading into the extractor.
# the inspect parameter is a verbose option for visualising the line
# finding results
lines_out=extractor.find_lines(extracted_flux, arcwaves, inspect=False)
#Now finally do the wavelength fit!
fitted_params, wave_and_resid = arm.read_lines_and_fit(wpars,lines_out,ydeg=3,xdeg=3)
# Optionally show residuals?
#Now write the output to a file, in whatever format suits the recipe system best.
pyfits.writeto('outputs.fits',fitted_params)
| #!/usr/bin/env python3
""" A script containing the basic principles of the extraction primitive inner
workings"""
from __future__ import division, print_function
from ghostdr import polyfit
import numpy as np
import astropy.io.fits as pyfits  # assumed FITS backend for the pyfits.getdata/writeto calls below
# Firstly, let's find all the needed files
fitsdir='/Users/mireland/data/ghost/cal_frames/'
#Define the files in use (NB xmod.txt and wavemod.txt should be correct)
arc_file = fitsdir+"arc_extracted.fits"
# load it in now:
extracted_flux,extracted_vars=pyfits.getdata(arc_file)
# Where is the default location for the model? By default it is a parameter
# in the ghost class. If this needs to be overwritten, go ahead.
# This is the xmod file. Wherever it is saved from the flat reduction.
xmodel_file=fitsdir+'GHOST_1_1_blue_std_xmodPolyfit.fits'
# All the other models... which are currently in the "test" directory.
wmodel_file=test_files_dir+'wparams_blue_std.fits'
spatmod_file=test_files_dir+'spatmod.fits'
specmod_file=test_files_dir+'specmod.fits'
rotmod_file=test_files_dir+'rotmod2.fits'
# Find the arc line list file
arclinefile='/home/jbento/code/ghostdr/ghostdr/ADCONFIG_GHOST/lookups/GHOST/Polyfit/mnras0378-0221-SD1.txt'
arcwaves, arcfluxes= np.loadtxt(arclinefile,usecols=[1,2]).T
#Get the initial default model from the lookup location
xpars=pyfits.getdata(xmodel_file)
wpars=pyfits.getdata(wmodel_file)
spatpars=pyfits.getdata(spatmod_file)
specpars=pyfits.getdata(specmod_file)
rotpars=pyfits.getdata(rotmod_file)
#instantiate the ghost arm and apply the model, now that the parameters are loaded
arm = polyfit.GhostArm('blue',mode='std')
arm.spectral_format_with_matrix(xpars,wpars,spatpars,specpars,rotpars)
slitview = polyfit.SlitView(image_array, flat_image_array, mode='std')
# The extractor is given the polyfit "arm" object, and a slitview object which has
# been instantiated with the slit viewer data.
extractor = polyfit.Extractor(arm, slitview)
#Now find the other lines, after first re-loading into the extractor.
# the inspect parameter is a verbose option for visualising the line
# finding results
lines_out=extractor.find_lines(extracted_flux, arcwaves, inspect=False)
#Now finally do the wavelength fit!
fitted_params, wave_and_resid = arm.read_lines_and_fit(wpars,lines_out,ydeg=3,xdeg=3)
# Optionally show residuals?
#Now write the output to a file, in whatever format suits the recipe system best.
pyfits.writeto('outputs.fits',fitted_params)
| en | 0.828556 | #!/usr/bin/env python3 A script containing the basic principles of the extraction primitive inner workings # Firstly, let's find all the needed files #Define the files in use (NB xmod.txt and wavemod.txt should be correct) # load it in now: # Where is the default location for the model? By default it is a parameter # in the ghost class. If this needs to be overwritten, go ahead. # This is the xmod file. Wherever it is saved from the flat reduction. # All the other models... which are currently in the "test" directory. # Find the arc line list file #instantiate the ghost arm #Get the initial default model from the lookup location # The extractor is given the polyfit "arm" object, and a slitview object which has # been instantiated with the slit viewer data. #Now find the other lines, after first re-loading into the extractor. # the inspect parameter is a verbose option for visualising the line # finding results #Now finally do the wavelength fit! # Optionally show residuals? #Now write the output to a file, in whatever format suits the recipe system best. | 2.45145 | 2 |
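As a conceptual aside (not the polyfit module's 2-D fit), the per-order idea behind read_lines_and_fit can be shown in one dimension: fit the measured pixel centroids of identified arc lines against their catalogue wavelengths and inspect the residuals. All numbers below are made up for illustration.

import numpy as np

pixel = np.array([102.3, 480.9, 955.2, 1403.8, 1901.1])               # measured line centroids
wavelength = np.array([4046.56, 4158.59, 4300.10, 4426.00, 4545.05])  # catalogue wavelengths (Angstrom)

coeffs = np.polyfit(pixel, wavelength, deg=3)   # low-order polynomial dispersion model
dispersion = np.poly1d(coeffs)
residuals = wavelength - dispersion(pixel)
print("RMS residual (Angstrom):", np.sqrt(np.mean(residuals**2)))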
time_management/test/kronos_test.py | AyushRawal/time-management | 1 | 10156 | import unittest
import datetime
import kronos
string_format_time = "%Y-%m-%d %H:%M:%S"
date_time_str = "2020-07-19 18:14:21"
class KronosTest(unittest.TestCase):
def test_get_day_of_week(self):
for i in range(len(kronos.week_days)):
date = kronos.get_date_time_from_string(f"2020-08-{10 + i} 13:00:00")
self.assertEqual(kronos.week_days.get(i), kronos.get_day_of_week(date))
def test_is_yesterday(self):
date_time = kronos.get_date_time_from_string("2020-07-20 18:14:21")
self.assertTrue(kronos.is_yesterday(date_time_str, today=date_time))
date_time = kronos.get_date_time_from_string("2020-07-19 18:14:21")
self.assertFalse(kronos.is_yesterday(date_time_str, today=date_time))
def test_is_previous_friday(self):
last_friday = "2020-08-14 13:00:00"
last_monday = kronos.get_date_time_from_string("2020-08-17 13:00:00")
self.assertTrue(kronos.is_previous_friday(last_friday, last_monday))
last_tuesday = kronos.get_date_time_from_string("2020-08-18 13:00:00")
self.assertFalse(kronos.is_previous_friday(last_friday, last_tuesday))
def test_is_overdue_checks_correctly(self):
creation_date = "2020-08-10 13:00:00"
completion_goal = 5
self.assertTrue(kronos.is_overdue(creation_date, completion_goal))
on_time_date = kronos.get_date_time_as_string()
on_time_goal = 100
self.assertFalse(kronos.is_overdue(on_time_date, on_time_goal))
| import unittest
import datetime
import kronos
string_format_time = "%Y-%m-%d %H:%M:%S"
date_time_str = "2020-07-19 18:14:21"
class KronosTest(unittest.TestCase):
def test_get_day_of_week(self):
for i in range(len(kronos.week_days)):
date = kronos.get_date_time_from_string(f"2020-08-{10 + i} 13:00:00")
self.assertEqual(kronos.week_days.get(i), kronos.get_day_of_week(date))
def test_is_yesterday(self):
date_time = kronos.get_date_time_from_string("2020-07-20 18:14:21")
self.assertTrue(kronos.is_yesterday(date_time_str, today=date_time))
date_time = kronos.get_date_time_from_string("2020-07-19 18:14:21")
self.assertFalse(kronos.is_yesterday(date_time_str, today=date_time))
def test_is_previous_friday(self):
last_friday = "2020-08-14 13:00:00"
last_monday = kronos.get_date_time_from_string("2020-08-17 13:00:00")
self.assertTrue(kronos.is_previous_friday(last_friday, last_monday))
last_tuesday = kronos.get_date_time_from_string("2020-08-18 13:00:00")
self.assertFalse(kronos.is_previous_friday(last_friday, last_tuesday))
def test_is_overdue_checks_correctly(self):
creation_date = "2020-08-10 13:00:00"
completion_goal = 5
self.assertTrue(kronos.is_overdue(creation_date, completion_goal))
on_time_date = kronos.get_date_time_as_string()
on_time_goal = 100
self.assertFalse(kronos.is_overdue(on_time_date, on_time_goal))
| none | 1 | 3.387139 | 3 |
|
mfc/mfc.py | FuelCellUAV/FC_datalogger | 0 | 10157 | ##!/usr/bin/env python3
# Mass Flow Controller Arduino driver
# Copyright (C) 2015 <NAME>, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# Import libraries
from time import sleep
#from quick2wire.i2c import I2CMaster, reading
# Define class
class mfc:
@staticmethod
def _getRaw(fun, ch):
return fun(ch)
# External getter
def get(self, fun, ch):
raw = self._getRaw(fun, ch)
rate = raw/5.0*1.5
return rate
# External getter
def getMoles(self, fun, ch):
rate = self.get(fun,ch)*(7.0/6280.0) # TODO should be *125.718/134.82 (density H2 at 1.5bar)
return rate
| ##!/usr/bin/env python3
# Mass Flow Controller Arduino driver
# Copyright (C) 2015 <NAME>, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# Import libraries
from time import sleep
#from quick2wire.i2c import I2CMaster, reading
# Define class
class mfc:
@staticmethod
def _getRaw(fun, ch):
return fun(ch)
# External getter
def get(self, fun, ch):
raw = self._getRaw(fun, ch)
rate = raw/5.0*1.5
return rate
# External getter
def getMoles(self, fun, ch):
rate = self.get(fun,ch)*(7.0/6280.0) # TODO should be *125.718/134.82 (density H2 at 1.5bar)
return rate
| en | 0.757022 | ##!/usr/bin/env python3 # Mass Flow Controller Arduino driver # Copyright (C) 2015 <NAME>, <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ############################################################################# # Import libraries #from quick2wire.i2c import I2CMaster, reading # Define class # External getter # External getter # TODO should be *125.718/134.82 (density H2 at 1.5bar) | 2.157583 | 2 |
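A minimal usage sketch for the mfc driver above; read_volts is a made-up stand-in for whatever callable returns the raw 0-5 V flow signal for a channel, and the import path depends on how the package is laid out.

from mfc.mfc import mfc

def read_volts(ch):
    return 2.5                              # pretend the ADC reports 2.5 V on this channel

controller = mfc()
rate = controller.get(read_volts, 0)        # 2.5 / 5.0 * 1.5 = 0.75
moles = controller.getMoles(read_volts, 0)  # rate scaled by 7.0/6280.0 (see the TODO above)
print(rate, moles)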
odm/dialects/postgresql/green.py | quantmind/pulsar-odm | 16 | 10158 | from asyncio import Future
from greenlet import getcurrent
import psycopg2
from psycopg2 import * # noqa
from psycopg2 import extensions, OperationalError
__version__ = psycopg2.__version__
def psycopg2_wait_callback(conn):
"""A wait callback to allow greenlet to work with Psycopg.
The caller must be from a greenlet other than the main one.
:param conn: psycopg2 connection or file number
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
"""
while True:
state = conn.poll()
if state == extensions.POLL_OK:
# Done with waiting
break
elif state == extensions.POLL_READ:
_wait_fd(conn)
elif state == extensions.POLL_WRITE:
_wait_fd(conn, read=False)
else: # pragma nocover
raise OperationalError("Bad result from poll: %r" % state)
# INTERNALS
def _wait_fd(conn, read=True):
'''Wait for an event on file descriptor ``fd``.
:param conn: file descriptor
:param read: wait for a read event if ``True``, otherwise a wait
for write event.
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
'''
current = getcurrent()
parent = current.parent
assert parent, '"_wait_fd" must be called by greenlet with a parent'
try:
fileno = conn.fileno()
except AttributeError:
fileno = conn
future = Future()
# When the event on fd occurs switch back to the current greenlet
if read:
future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
else:
future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
# switch back to parent greenlet
parent.switch(future)
# Back on the child greenlet. Raise error if there is one
future.result()
def _done_wait_fd(fd, future, read):
try:
if read:
future._loop.remove_reader(fd)
else:
future._loop.remove_writer(fd)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(None)
try:
extensions.POLL_OK
except AttributeError: # pragma nocover
from pulsar import ImproperlyConfigured
raise ImproperlyConfigured(
'Psycopg2 does not have support for asynchronous connections. '
'You need at least version 2.2.0 of Psycopg2.')
extensions.set_wait_callback(psycopg2_wait_callback)
| from asyncio import Future
from greenlet import getcurrent
import psycopg2
from psycopg2 import * # noqa
from psycopg2 import extensions, OperationalError
__version__ = psycopg2.__version__
def psycopg2_wait_callback(conn):
"""A wait callback to allow greenlet to work with Psycopg.
The caller must be from a greenlet other than the main one.
:param conn: psycopg2 connection or file number
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
"""
while True:
state = conn.poll()
if state == extensions.POLL_OK:
# Done with waiting
break
elif state == extensions.POLL_READ:
_wait_fd(conn)
elif state == extensions.POLL_WRITE:
_wait_fd(conn, read=False)
else: # pragma nocover
raise OperationalError("Bad result from poll: %r" % state)
# INTERNALS
def _wait_fd(conn, read=True):
'''Wait for an event on file descriptor ``fd``.
:param conn: file descriptor
:param read: wait for a read event if ``True``, otherwise a wait
for write event.
This function must be invoked from a coroutine with parent, therefore
invoking it from the main greenlet will raise an exception.
'''
current = getcurrent()
parent = current.parent
assert parent, '"_wait_fd" must be called by greenlet with a parent'
try:
fileno = conn.fileno()
except AttributeError:
fileno = conn
future = Future()
# When the event on fd occurs switch back to the current greenlet
if read:
future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
else:
future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
# switch back to parent greenlet
parent.switch(future)
# Back on the child greenlet. Raise error if there is one
future.result()
def _done_wait_fd(fd, future, read):
try:
if read:
future._loop.remove_reader(fd)
else:
future._loop.remove_writer(fd)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(None)
try:
extensions.POLL_OK
except AttributeError: # pragma nocover
from pulsar import ImproperlyConfigured
raise ImproperlyConfigured(
'Psycopg2 does not have support for asynchronous connections. '
'You need at least version 2.2.0 of Psycopg2.')
extensions.set_wait_callback(psycopg2_wait_callback)
| en | 0.801586 | # noqa A wait callback to allow greenlet to work with Psycopg. The caller must be from a greenlet other than the main one. :param conn: psycopg2 connection or file number This function must be invoked from a coroutine with parent, therefore invoking it from the main greenlet will raise an exception. # Done with waiting # pragma nocover # INTERNALS Wait for an event on file descriptor ``fd``. :param conn: file descriptor :param read: wait for a read event if ``True``, otherwise a wait for write event. This function must be invoked from a coroutine with parent, therefore invoking it from the main greenlet will raise an exception. # When the event on fd occurs switch back to the current greenlet # switch back to parent greenlet # Back on the child greenlet. Raise error if there is one # pragma nocover | 2.718807 | 3 |
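The module above hinges on a parent/child greenlet hand-off: the child hands a Future to its parent, the parent runs the event loop until that Future resolves, then resumes the child. A stripped-down sketch of that hand-off (run_green and child_work are illustrative names, not part of the module):

import asyncio
from greenlet import greenlet, getcurrent

def child_work(loop):
    future = loop.create_future()
    loop.call_later(0.1, future.set_result, 'fd ready')
    getcurrent().parent.switch(future)   # hand the future back, as _wait_fd does
    return future.result()               # only reached after the parent resumes us

def run_green(func):
    loop = asyncio.new_event_loop()
    child = greenlet(func)
    value = child.switch(loop)
    while isinstance(value, asyncio.Future):
        loop.run_until_complete(value)   # the parent drives the loop while the child waits
        value = child.switch()           # switch back into the child greenlet
    loop.close()
    return value

print(run_green(child_work))             # -> 'fd ready'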
test/test_replica_set_connection.py | h4ck3rm1k3/mongo-python-driver | 1 | 10159 | # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
class TestReplicaSetConnectionAgainstStandalone(unittest.TestCase):
"""This is a funny beast -- we want to run tests for ReplicaSetConnection
but only if the database at DB_IP and DB_PORT is a standalone.
"""
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
raise SkipTest()
def test_connect(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='anything',
connectTimeoutMS=600)
class TestConnectionReplicaSetBase(unittest.TestCase):
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
self.name = str(response['setName'])
self.w = len(response['hosts'])
self.hosts = set([_partition_node(h)
for h in response["hosts"]])
self.arbiters = set([_partition_node(h)
for h in response.get("arbiters", [])])
repl_set_status = conn.admin.command('replSetGetStatus')
primary_info = [
m for m in repl_set_status['members']
if m['stateStr'] == 'PRIMARY'
][0]
self.primary = _partition_node(primary_info['name'])
self.secondaries = [
_partition_node(m['name']) for m in repl_set_status['members']
if m['stateStr'] == 'SECONDARY'
]
else:
raise SkipTest()
def _get_connection(self, **kwargs):
return ReplicaSetConnection(pair,
replicaSet=self.name,
**kwargs)
class TestConnection(TestConnectionReplicaSetBase):
def test_connect(self):
self.assertRaises(ConnectionFailure, ReplicaSetConnection,
"somedomainthatdoesntexist.org:27017",
replicaSet=self.name,
connectTimeoutMS=600)
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='fdlksjfdslkjfd')
self.assertTrue(ReplicaSetConnection(pair, replicaSet=self.name))
def test_repr(self):
connection = self._get_connection()
self.assertEqual(repr(connection),
"ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in
self.hosts],))
def test_properties(self):
c = ReplicaSetConnection(pair, replicaSet=self.name)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 10)
self.assertEqual(c.document_class, dict)
self.assertEqual(c.tz_aware, False)
# Make sure RSC's properties are copied to Database and Collection
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
self.assertEqual(obj.tag_sets, [{}])
self.assertEqual(obj.secondary_acceptable_latency_ms, 15)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, False)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.PRIMARY, cursor._Cursor__read_preference)
self.assertEqual([{}], cursor._Cursor__tag_sets)
self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
c.close()
tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
c = ReplicaSetConnection(pair, replicaSet=self.name, max_pool_size=25,
document_class=SON, tz_aware=True,
slaveOk=False, safe=True,
read_preference=ReadPreference.SECONDARY,
tag_sets=copy.deepcopy(tag_sets),
secondary_acceptable_latency_ms=77)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 25)
self.assertEqual(c.document_class, SON)
self.assertEqual(c.tz_aware, True)
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.SECONDARY)
self.assertEqual(obj.tag_sets, tag_sets)
self.assertEqual(obj.secondary_acceptable_latency_ms, 77)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, True)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.SECONDARY, cursor._Cursor__read_preference)
self.assertEqual(tag_sets, cursor._Cursor__tag_sets)
self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
cursor = c.pymongo_test.test.find(
read_preference=ReadPreference.NEAREST,
tag_sets=[{'dc':'ny'}, {}],
secondary_acceptable_latency_ms=123)
self.assertEqual(
ReadPreference.NEAREST, cursor._Cursor__read_preference)
self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets)
self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
if version.at_least(c, (1, 7, 4)):
self.assertEqual(c.max_bson_size, 16777216)
else:
self.assertEqual(c.max_bson_size, 4194304)
c.close()
def test_get_db(self):
connection = self._get_connection()
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assertTrue(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
connection.close()
def test_auto_reconnect_exception_when_read_preference_is_secondary(self):
c = self._get_connection()
db = c.pymongo_test
def raise_socket_error(*args, **kwargs):
raise socket.error
old_sendall = socket.socket.sendall
socket.socket.sendall = raise_socket_error
try:
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
self.assertRaises(AutoReconnect, cursor.next)
finally:
socket.socket.sendall = old_sendall
def test_operations(self):
c = self._get_connection()
# Check explicitly for a case we've commonly hit in tests:
# a replica set is started with a tiny oplog, a previous
# test does a big insert that leaves the secondaries
# permanently "RECOVERING", and our insert(w=self.w) hangs
# forever.
rs_status = c.admin.command('replSetGetStatus')
members = rs_status['members']
self.assertFalse(
[m for m in members if m['stateStr'] == 'RECOVERING'],
"Replica set is recovering, try a larger oplogSize next time"
)
db = c.pymongo_test
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.insert({'foo': 'x'}, safe=True, w=self.w, wtimeout=10000)
self.assertEqual(1, db.test.count())
cursor = db.test.find()
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we read from the primary
self.assertEqual(c.primary, cursor._Cursor__connection_id)
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we didn't read from the primary
self.assertTrue(cursor._Cursor__connection_id in c.secondaries)
self.assertEqual(1, db.test.count())
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.drop()
c.close()
def test_database_names(self):
connection = self._get_connection()
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
connection.close()
def test_drop_database(self):
connection = self._get_connection()
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.close()
def test_copy_db(self):
c = self._get_connection()
self.assertTrue(c.in_request())
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
# copy_database() didn't accidentally end the request
self.assertTrue(c.in_request())
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.end_request()
self.assertFalse(c.in_request())
c.copy_database("pymongo_test", "pymongo_test2", pair)
# copy_database() didn't accidentally restart the request
self.assertFalse(c.in_request())
time.sleep(1)
self.assertTrue("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="<PASSWORD>")
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="<PASSWORD>")
self.assertFalse("pymongo_test1" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assertTrue("pymongo_test1" in c.database_names())
time.sleep(2)
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.close()
def test_iteration(self):
connection = self._get_connection()
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
connection.close()
def test_disconnect(self):
c = self._get_connection()
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
def test_fork(self):
"""Test using a connection before and after a fork.
"""
if sys.platform == "win32":
raise SkipTest()
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest()
db = self._get_connection().pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
#db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
traceback.print_exc()
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
db.connection.close()
def test_document_class(self):
c = self._get_connection()
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.close()
c = self._get_connection(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.close()
def test_network_timeout(self):
no_timeout = self._get_connection()
timeout_sec = 1
timeout = self._get_connection(socketTimeoutMS=timeout_sec*1000)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
# A $where clause that takes a second longer than the timeout
where_func = delay(1 + timeout_sec)
def get_x(db):
doc = db.test.find().where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
doc = db.test.find(network_timeout=t).where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
no_timeout.close()
timeout.close()
def test_tz_aware(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
tz_aware='foo', replicaSet=self.name)
aware = self._get_connection(tz_aware=True)
naive = self._get_connection()
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
time.sleep(1)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
def test_ipv6(self):
try:
connection = ReplicaSetConnection("[::1]:%d" % (port,),
replicaSet=self.name)
except:
# Either mongod was started without --ipv6
# or the OS doesn't support it (or both).
raise SkipTest()
# Try a few simple things
connection = ReplicaSetConnection("mongodb://[::1]:%d" % (port,),
replicaSet=self.name)
connection = ReplicaSetConnection("mongodb://[::1]:%d/?safe=true;"
"replicaSet=%s" % (port, self.name))
connection = ReplicaSetConnection("[::1]:%d,localhost:"
"%d" % (port, port),
replicaSet=self.name)
connection = ReplicaSetConnection("localhost:%d,[::1]:"
"%d" % (port, port),
replicaSet=self.name)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_bernie.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
connection.close()
def _test_kill_cursor_explicit(self, read_pref):
c = self._get_connection(read_preference=read_pref)
db = c.pymongo_test
db.drop_collection("test")
test = db.test
test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
cursor.next()
self.assertNotEqual(0, cursor.cursor_id)
connection_id = cursor._Cursor__connection_id
writer = c._ReplicaSetConnection__writer
if read_pref == ReadPreference.PRIMARY:
msg = "Expected cursor's connection_id to be %s, got %s" % (
writer, connection_id)
self.assertEqual(connection_id, writer, msg)
else:
self.assertNotEqual(connection_id, writer,
"Expected cursor's connection_id not to be primary")
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_kill_cursor_explicit_primary(self):
self._test_kill_cursor_explicit(ReadPreference.PRIMARY)
def test_kill_cursor_explicit_secondary(self):
self._test_kill_cursor_explicit(ReadPreference.SECONDARY)
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure Connection closes its
# socket if it gets an interrupt while waiting to recv() from it.
c = self._get_connection()
db = c.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert({'_id': 1}, safe=True)
old_signal_handler = None
try:
# Platform-specific hacks for raising a KeyboardInterrupt on the main
# thread while find() is in-progress: On Windows, SIGALRM is unavailable
            # so we use a second thread. In our Bamboo setup on Linux, the thread
# technique causes an error in the test at sock.recv():
# TypeError: 'int' object is not callable
# We don't know what causes this in Bamboo, so we hack around it.
if sys.platform == 'win32':
def interrupter():
time.sleep(0.25)
# Raises KeyboardInterrupt in the main thread
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
else:
# Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one
# second in the future, but easy to schedule SIGALRM.
def sigalarm(num, frame):
raise KeyboardInterrupt
old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
signal.alarm(1)
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
db.foo.find({'$where': where}).next()
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
            self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
db.foo.find().next()
)
finally:
if old_signal_handler:
signal.signal(signal.SIGALRM, old_signal_handler)
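    # Illustration (not from the original suite): the SIGALRM-to-KeyboardInterrupt
    # trick above in isolation, assuming a Unix platform; `slow_call` is a
    # hypothetical stand-in for any blocking driver call.
    #
    #     def _interrupt(signum, frame):
    #         raise KeyboardInterrupt
    #
    #     old = signal.signal(signal.SIGALRM, _interrupt)
    #     signal.alarm(1)                 # deliver SIGALRM in ~1 second
    #     try:
    #         slow_call()                 # interrupted mid-recv()
    #     except KeyboardInterrupt:
    #         pass
    #     finally:
    #         signal.signal(signal.SIGALRM, old)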
def test_auto_start_request(self):
for bad_horrible_value in (None, 5, 'hi!'):
self.assertRaises(
(TypeError, ConfigurationError),
lambda: self._get_connection(auto_start_request=bad_horrible_value)
)
# auto_start_request should default to True
conn = self._get_connection()
pools = [mongo.pool for mongo in
conn._ReplicaSetConnection__members.values()]
self.assertTrue(conn.auto_start_request)
self.assertTrue(conn.in_request())
# Trigger the RSC to actually start a request
conn.test.test.find_one()
for pool in pools:
self.assertTrue(pool.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
for pool in pools:
self.assertFalse(pool.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.close()
conn = self._get_connection(auto_start_request=False)
self.assertFalse(conn.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
conn.close()
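    # Illustration (not from the original suite): in application code the request
    # API exercised above is typically used to pin a thread to one socket per
    # member, e.g.:
    #
    #     conn = ReplicaSetConnection(pair, replicaSet=name, auto_start_request=False)
    #     conn.start_request()
    #     try:
    #         conn.db.things.insert({"n": 1})
    #         conn.db.things.find_one({"n": 1})   # read on the same socket as the insert
    #     finally:
    #         conn.end_request()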
def test_schedule_refresh(self):
# Monitor thread starts waiting for _refresh_interval, 30 seconds
conn = self._get_connection()
# Reconnect if necessary
conn.pymongo_test.test.find_one()
secondaries = conn.secondaries
for secondary in secondaries:
conn._ReplicaSetConnection__members[secondary].up = False
conn._ReplicaSetConnection__members[conn.primary].up = False
# Wake up monitor thread
conn._ReplicaSetConnection__schedule_refresh()
# Refresh interval is 30 seconds; scheduling a refresh tells the
# monitor thread / greenlet to start a refresh now. We still need to
# sleep a few seconds for it to complete.
time.sleep(5)
for secondary in secondaries:
self.assertTrue(conn._ReplicaSetConnection__members[secondary].up,
"ReplicaSetConnection didn't detect secondary is up")
self.assertTrue(conn._ReplicaSetConnection__members[conn.primary].up,
"ReplicaSetConnection didn't detect primary is up")
conn.close()
def test_pinned_member(self):
latency = 1000 * 1000
conn = self._get_connection(
auto_start_request=False, secondary_acceptable_latency_ms=latency)
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
# No pinning since we're not in a request
assertReadFromAll(
self, conn, conn.secondaries,
ReadPreference.SECONDARY, None, latency)
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
conn.start_request()
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Repin
primary = read_from_which_host(conn, ReadPreference.PRIMARY)
self.assertEqual(conn.primary, primary)
assertReadFrom(self, conn, primary, ReadPreference.NEAREST)
# Repin again
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Unpin
conn.end_request()
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
if __name__ == "__main__":
unittest.main()
| # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
class TestReplicaSetConnectionAgainstStandalone(unittest.TestCase):
"""This is a funny beast -- we want to run tests for ReplicaSetConnection
but only if the database at DB_IP and DB_PORT is a standalone.
"""
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
raise SkipTest()
def test_connect(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='anything',
connectTimeoutMS=600)
class TestConnectionReplicaSetBase(unittest.TestCase):
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
self.name = str(response['setName'])
self.w = len(response['hosts'])
self.hosts = set([_partition_node(h)
for h in response["hosts"]])
self.arbiters = set([_partition_node(h)
for h in response.get("arbiters", [])])
repl_set_status = conn.admin.command('replSetGetStatus')
primary_info = [
m for m in repl_set_status['members']
if m['stateStr'] == 'PRIMARY'
][0]
self.primary = _partition_node(primary_info['name'])
self.secondaries = [
_partition_node(m['name']) for m in repl_set_status['members']
if m['stateStr'] == 'SECONDARY'
]
else:
raise SkipTest()
def _get_connection(self, **kwargs):
return ReplicaSetConnection(pair,
replicaSet=self.name,
**kwargs)
class TestConnection(TestConnectionReplicaSetBase):
def test_connect(self):
self.assertRaises(ConnectionFailure, ReplicaSetConnection,
"somedomainthatdoesntexist.org:27017",
replicaSet=self.name,
connectTimeoutMS=600)
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='fdlksjfdslkjfd')
self.assertTrue(ReplicaSetConnection(pair, replicaSet=self.name))
def test_repr(self):
connection = self._get_connection()
self.assertEqual(repr(connection),
"ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in
self.hosts],))
def test_properties(self):
c = ReplicaSetConnection(pair, replicaSet=self.name)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 10)
self.assertEqual(c.document_class, dict)
self.assertEqual(c.tz_aware, False)
# Make sure RSC's properties are copied to Database and Collection
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
self.assertEqual(obj.tag_sets, [{}])
self.assertEqual(obj.secondary_acceptable_latency_ms, 15)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, False)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.PRIMARY, cursor._Cursor__read_preference)
self.assertEqual([{}], cursor._Cursor__tag_sets)
self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
c.close()
tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
c = ReplicaSetConnection(pair, replicaSet=self.name, max_pool_size=25,
document_class=SON, tz_aware=True,
slaveOk=False, safe=True,
read_preference=ReadPreference.SECONDARY,
tag_sets=copy.deepcopy(tag_sets),
secondary_acceptable_latency_ms=77)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 25)
self.assertEqual(c.document_class, SON)
self.assertEqual(c.tz_aware, True)
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.SECONDARY)
self.assertEqual(obj.tag_sets, tag_sets)
self.assertEqual(obj.secondary_acceptable_latency_ms, 77)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, True)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.SECONDARY, cursor._Cursor__read_preference)
self.assertEqual(tag_sets, cursor._Cursor__tag_sets)
self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
cursor = c.pymongo_test.test.find(
read_preference=ReadPreference.NEAREST,
tag_sets=[{'dc':'ny'}, {}],
secondary_acceptable_latency_ms=123)
self.assertEqual(
ReadPreference.NEAREST, cursor._Cursor__read_preference)
self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets)
self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
if version.at_least(c, (1, 7, 4)):
self.assertEqual(c.max_bson_size, 16777216)
else:
self.assertEqual(c.max_bson_size, 4194304)
c.close()
def test_get_db(self):
connection = self._get_connection()
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assertTrue(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
connection.close()
def test_auto_reconnect_exception_when_read_preference_is_secondary(self):
c = self._get_connection()
db = c.pymongo_test
def raise_socket_error(*args, **kwargs):
raise socket.error
old_sendall = socket.socket.sendall
socket.socket.sendall = raise_socket_error
try:
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
self.assertRaises(AutoReconnect, cursor.next)
finally:
socket.socket.sendall = old_sendall
def test_operations(self):
c = self._get_connection()
# Check explicitly for a case we've commonly hit in tests:
# a replica set is started with a tiny oplog, a previous
# test does a big insert that leaves the secondaries
# permanently "RECOVERING", and our insert(w=self.w) hangs
# forever.
rs_status = c.admin.command('replSetGetStatus')
members = rs_status['members']
self.assertFalse(
[m for m in members if m['stateStr'] == 'RECOVERING'],
"Replica set is recovering, try a larger oplogSize next time"
)
db = c.pymongo_test
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.insert({'foo': 'x'}, safe=True, w=self.w, wtimeout=10000)
self.assertEqual(1, db.test.count())
cursor = db.test.find()
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we read from the primary
self.assertEqual(c.primary, cursor._Cursor__connection_id)
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we didn't read from the primary
self.assertTrue(cursor._Cursor__connection_id in c.secondaries)
self.assertEqual(1, db.test.count())
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.drop()
c.close()
def test_database_names(self):
connection = self._get_connection()
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
connection.close()
def test_drop_database(self):
connection = self._get_connection()
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.close()
def test_copy_db(self):
c = self._get_connection()
self.assertTrue(c.in_request())
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
# copy_database() didn't accidentally end the request
self.assertTrue(c.in_request())
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.end_request()
self.assertFalse(c.in_request())
c.copy_database("pymongo_test", "pymongo_test2", pair)
# copy_database() didn't accidentally restart the request
self.assertFalse(c.in_request())
time.sleep(1)
self.assertTrue("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="<PASSWORD>")
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="<PASSWORD>")
self.assertFalse("pymongo_test1" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assertTrue("pymongo_test1" in c.database_names())
time.sleep(2)
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.close()
def test_iteration(self):
connection = self._get_connection()
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
connection.close()
def test_disconnect(self):
c = self._get_connection()
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
def test_fork(self):
"""Test using a connection before and after a fork.
"""
if sys.platform == "win32":
raise SkipTest()
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest()
db = self._get_connection().pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
#db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
traceback.print_exc()
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
db.connection.close()
def test_document_class(self):
c = self._get_connection()
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.close()
c = self._get_connection(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.close()
def test_network_timeout(self):
no_timeout = self._get_connection()
timeout_sec = 1
timeout = self._get_connection(socketTimeoutMS=timeout_sec*1000)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
# A $where clause that takes a second longer than the timeout
where_func = delay(1 + timeout_sec)
def get_x(db):
doc = db.test.find().where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
doc = db.test.find(network_timeout=t).where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
no_timeout.close()
timeout.close()
def test_tz_aware(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
tz_aware='foo', replicaSet=self.name)
aware = self._get_connection(tz_aware=True)
naive = self._get_connection()
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
time.sleep(1)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
def test_ipv6(self):
try:
connection = ReplicaSetConnection("[::1]:%d" % (port,),
replicaSet=self.name)
except:
# Either mongod was started without --ipv6
# or the OS doesn't support it (or both).
raise SkipTest()
# Try a few simple things
connection = ReplicaSetConnection("mongodb://[::1]:%d" % (port,),
replicaSet=self.name)
connection = ReplicaSetConnection("mongodb://[::1]:%d/?safe=true;"
"replicaSet=%s" % (port, self.name))
connection = ReplicaSetConnection("[::1]:%d,localhost:"
"%d" % (port, port),
replicaSet=self.name)
connection = ReplicaSetConnection("localhost:%d,[::1]:"
"%d" % (port, port),
replicaSet=self.name)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_bernie.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
connection.close()
def _test_kill_cursor_explicit(self, read_pref):
c = self._get_connection(read_preference=read_pref)
db = c.pymongo_test
db.drop_collection("test")
test = db.test
test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
cursor.next()
self.assertNotEqual(0, cursor.cursor_id)
connection_id = cursor._Cursor__connection_id
writer = c._ReplicaSetConnection__writer
if read_pref == ReadPreference.PRIMARY:
msg = "Expected cursor's connection_id to be %s, got %s" % (
writer, connection_id)
self.assertEqual(connection_id, writer, msg)
else:
self.assertNotEqual(connection_id, writer,
"Expected cursor's connection_id not to be primary")
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_kill_cursor_explicit_primary(self):
self._test_kill_cursor_explicit(ReadPreference.PRIMARY)
def test_kill_cursor_explicit_secondary(self):
self._test_kill_cursor_explicit(ReadPreference.SECONDARY)
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure Connection closes its
# socket if it gets an interrupt while waiting to recv() from it.
c = self._get_connection()
db = c.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert({'_id': 1}, safe=True)
old_signal_handler = None
try:
# Platform-specific hacks for raising a KeyboardInterrupt on the main
# thread while find() is in-progress: On Windows, SIGALRM is unavailable
            # so we use a second thread. In our Bamboo setup on Linux, the thread
# technique causes an error in the test at sock.recv():
# TypeError: 'int' object is not callable
# We don't know what causes this in Bamboo, so we hack around it.
if sys.platform == 'win32':
def interrupter():
time.sleep(0.25)
# Raises KeyboardInterrupt in the main thread
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
else:
# Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one
# second in the future, but easy to schedule SIGALRM.
def sigalarm(num, frame):
raise KeyboardInterrupt
old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
signal.alarm(1)
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
db.foo.find({'$where': where}).next()
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
            self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
db.foo.find().next()
)
finally:
if old_signal_handler:
signal.signal(signal.SIGALRM, old_signal_handler)
def test_auto_start_request(self):
for bad_horrible_value in (None, 5, 'hi!'):
self.assertRaises(
(TypeError, ConfigurationError),
lambda: self._get_connection(auto_start_request=bad_horrible_value)
)
# auto_start_request should default to True
conn = self._get_connection()
pools = [mongo.pool for mongo in
conn._ReplicaSetConnection__members.values()]
self.assertTrue(conn.auto_start_request)
self.assertTrue(conn.in_request())
# Trigger the RSC to actually start a request
conn.test.test.find_one()
for pool in pools:
self.assertTrue(pool.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
for pool in pools:
self.assertFalse(pool.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.close()
conn = self._get_connection(auto_start_request=False)
self.assertFalse(conn.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
conn.close()
def test_schedule_refresh(self):
# Monitor thread starts waiting for _refresh_interval, 30 seconds
conn = self._get_connection()
# Reconnect if necessary
conn.pymongo_test.test.find_one()
secondaries = conn.secondaries
for secondary in secondaries:
conn._ReplicaSetConnection__members[secondary].up = False
conn._ReplicaSetConnection__members[conn.primary].up = False
# Wake up monitor thread
conn._ReplicaSetConnection__schedule_refresh()
# Refresh interval is 30 seconds; scheduling a refresh tells the
# monitor thread / greenlet to start a refresh now. We still need to
# sleep a few seconds for it to complete.
time.sleep(5)
for secondary in secondaries:
self.assertTrue(conn._ReplicaSetConnection__members[secondary].up,
"ReplicaSetConnection didn't detect secondary is up")
self.assertTrue(conn._ReplicaSetConnection__members[conn.primary].up,
"ReplicaSetConnection didn't detect primary is up")
conn.close()
def test_pinned_member(self):
latency = 1000 * 1000
conn = self._get_connection(
auto_start_request=False, secondary_acceptable_latency_ms=latency)
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
# No pinning since we're not in a request
assertReadFromAll(
self, conn, conn.secondaries,
ReadPreference.SECONDARY, None, latency)
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
conn.start_request()
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Repin
primary = read_from_which_host(conn, ReadPreference.PRIMARY)
self.assertEqual(conn.primary, primary)
assertReadFrom(self, conn, primary, ReadPreference.NEAREST)
# Repin again
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Unpin
conn.end_request()
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
if __name__ == "__main__":
unittest.main()
| en | 0.869281 | # Copyright 2011-2012 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test the replica_set_connection module. This is a funny beast -- we want to run tests for ReplicaSetConnection but only if the database at DB_IP and DB_PORT is a standalone. # Make sure RSC's properties are copied to Database and Collection # Check explicitly for a case we've commonly hit in tests: # a replica set is started with a tiny oplog, a previous # test does a big insert that leaves the secondaries # permanently "RECOVERING", and our insert(w=self.w) hangs # forever. # Ensure we read from the primary # Ensure we didn't read from the primary # copy_database() didn't accidentally end the request # copy_database() didn't accidentally restart the request Test using a connection before and after a fork. # Failure occurs if the connection is used before the fork #db.connection.end_request() # recv will only have data if the subprocess failed # A $where clause that takes a second longer than the timeout # Either mongod was started without --ipv6 # or the OS doesn't support it (or both). # Try a few simple things # Partially evaluate cursor so it's left alive, then kill it # Cursor dead on server - trigger a getMore on the same cursor_id and # check that the server returns an error. # Explicitly kill cursor. # Implicitly kill it in CPython. # Test fix for PYTHON-294 -- make sure Connection closes its # socket if it gets an interrupt while waiting to recv() from it. # A $where clause which takes 1.5 sec to execute # Need exactly 1 document so find() will execute its $where clause once # Platform-specific hacks for raising a KeyboardInterrupt on the main # thread while find() is in-progress: On Windows, SIGALRM is unavailable # so we use second thread. In our Bamboo setup on Linux, the thread # technique causes an error in the test at sock.recv(): # TypeError: 'int' object is not callable # We don't know what causes this in Bamboo, so we hack around it. # Raises KeyboardInterrupt in the main thread # Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one # second in the future, but easy to schedule SIGALRM. # Will be interrupted by a KeyboardInterrupt. # Can't use self.assertRaises() because it doesn't catch system # exceptions # Raises AssertionError due to PYTHON-294 -- Mongo's response to the # previous find() is still waiting to be read on the socket, so the # request id's don't match. # auto_start_request should default to True # Trigger the RSC to actually start a request # Monitor thread starts waiting for _refresh_interval, 30 seconds # Reconnect if necessary # Wake up monitor thread # Refresh interval is 30 seconds; scheduling a refresh tells the # monitor thread / greenlet to start a refresh now. We still need to # sleep a few seconds for it to complete. # No pinning since we're not in a request # Repin # Repin again # Unpin | 1.958817 | 2 |
jqi/cmd.py | jan-g/jqi | 3 | 10160 | import argparse_helper as argparse
import config_dir
import sys
from .editor import Editor
def main(*args):
if len(args) > 0:
args = [args]
parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="cfg_file", help="query save name")
parser.add_argument("-x", default=False, action="store_true", dest="run", help="run immediately")
parser.add_argument("-l", default=False, action="count", dest="list", help="list saved queries")
parser.add_argument("-p", default=False, action="store_true", dest="previous", help="use previous query")
parser.add_argument("pattern", nargs="?", help="override saved pattern")
parser.add_argument("file", nargs="?", help="file to operate on")
args = parser.parse_args(*args)
if args.cfg_file is None and args.previous:
args.cfg_file = "previous"
if args.cfg_file is not None and args.file is None:
args.file = args.pattern
args.pattern = None
editor = Editor(file=args.cfg_file, pattern=args.pattern)
if args.list > 0:
if args.cfg_file is not None:
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=args.cfg_file, create=False)
print(cfg["pattern"])
else:
list_stored(args.list > 1)
return
if args.file is None:
text = sys.stdin.read()
else:
with open(args.file) as f:
text = f.read()
if args.run:
editor.jq(text, stdio=True)
else:
result = editor.run(text)
if result == 0:
editor.save()
editor.save("previous")
else:
sys.exit(result)
def list_stored(long=False):
d = config_dir.config_dir(name=".jqi", sub_dir="query")
for f in d.iterdir():
name = f.name
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=name, create=False)
if long:
print(name)
for line in cfg["pattern"].splitlines():
print("\t{}".format(line))
else:
print("{}\t{}".format(name, cfg["pattern"].splitlines()[0]))
if __name__ == '__main__':
main("-f", "foo", "/tmp/x")
| import argparse_helper as argparse
import config_dir
import sys
from .editor import Editor
def main(*args):
if len(args) > 0:
args = [args]
parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="cfg_file", help="query save name")
parser.add_argument("-x", default=False, action="store_true", dest="run", help="run immediately")
parser.add_argument("-l", default=False, action="count", dest="list", help="list saved queries")
parser.add_argument("-p", default=False, action="store_true", dest="previous", help="use previous query")
parser.add_argument("pattern", nargs="?", help="override saved pattern")
parser.add_argument("file", nargs="?", help="file to operate on")
args = parser.parse_args(*args)
if args.cfg_file is None and args.previous:
args.cfg_file = "previous"
if args.cfg_file is not None and args.file is None:
args.file = args.pattern
args.pattern = None
editor = Editor(file=args.cfg_file, pattern=args.pattern)
if args.list > 0:
if args.cfg_file is not None:
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=args.cfg_file, create=False)
print(cfg["pattern"])
else:
list_stored(args.list > 1)
return
if args.file is None:
text = sys.stdin.read()
else:
with open(args.file) as f:
text = f.read()
if args.run:
editor.jq(text, stdio=True)
else:
result = editor.run(text)
if result == 0:
editor.save()
editor.save("previous")
else:
sys.exit(result)
def list_stored(long=False):
d = config_dir.config_dir(name=".jqi", sub_dir="query")
for f in d.iterdir():
name = f.name
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=name, create=False)
if long:
print(name)
for line in cfg["pattern"].splitlines():
print("\t{}".format(line))
else:
print("{}\t{}".format(name, cfg["pattern"].splitlines()[0]))
if __name__ == '__main__':
main("-f", "foo", "/tmp/x")
| none | 1 | 2.597689 | 3 |
|
setup.py | ASKBOT/python-import-utils | 1 | 10161 |
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import import_utils
setup(
name = "import-utils",
version = import_utils.__version__,
description = 'A module that supports simple programmatic module imports',
packages = find_packages(),
author = 'Evgeny.Fadeev',
author_email = '<EMAIL>',
license = 'BSD',
keywords = 'import, module',
url = 'http://askbot.org',
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
long_description = import_utils.__doc__
)
| import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import import_utils
setup(
name = "import-utils",
version = import_utils.__version__,
description = 'A module that supports simple programmatic module imports',
packages = find_packages(),
author = 'Evgeny.Fadeev',
author_email = '<EMAIL>',
license = 'BSD',
keywords = 'import, module',
url = 'http://askbot.org',
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
long_description = import_utils.__doc__
) | none | 1 | 1.3185 | 1 |
|
visual_dynamics/policies/random_offset_camera_target_policy.py | alexlee-gk/visual_dynamics | 30 | 10162 | import numpy as np
from visual_dynamics.policies import CameraTargetPolicy
class RandomOffsetCameraTargetPolicy(CameraTargetPolicy):
def __init__(self, env, target_env, camera_node_name, agent_node_name, target_node_name,
height=12.0, radius=16.0, angle=(-np.pi/4, np.pi/4), tightness=0.1, hra_interpolation=True):
self.height = height
self.radius = radius
self.angle = angle
offset = self.sample_offset()
super(RandomOffsetCameraTargetPolicy, self).__init__(env, target_env, camera_node_name, agent_node_name,
target_node_name, offset, tightness=tightness,
hra_interpolation=hra_interpolation)
def reset(self):
self.offset = self.sample_offset()
state = super(RandomOffsetCameraTargetPolicy, self).reset()
# self.offset = self.sample_offset()
return state
def sample_offset(self):
height = np.random.uniform(*self.height) if isinstance(self.height, (list, tuple)) else self.height
radius = np.random.uniform(*self.radius) if isinstance(self.radius, (list, tuple)) else self.radius
angle = np.random.uniform(*self.angle) if isinstance(self.angle, (list, tuple)) else self.angle
return np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
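    # Worked example (not part of the original class): with height=12, radius=16
    # and angle=0 the offset is [16*sin(0), -16*cos(0), 12] = [0, -16, 12], i.e.
    # the camera sits 16 units behind the target along -y and 12 units above it;
    # non-zero angles swing it around the target in the horizontal plane.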
def _get_config(self):
config = super(RandomOffsetCameraTargetPolicy, self)._get_config()
config.pop('offset')
config.update({'height': self.height,
'radius': self.radius,
'angle': self.angle})
return config
| import numpy as np
from visual_dynamics.policies import CameraTargetPolicy
class RandomOffsetCameraTargetPolicy(CameraTargetPolicy):
def __init__(self, env, target_env, camera_node_name, agent_node_name, target_node_name,
height=12.0, radius=16.0, angle=(-np.pi/4, np.pi/4), tightness=0.1, hra_interpolation=True):
self.height = height
self.radius = radius
self.angle = angle
offset = self.sample_offset()
super(RandomOffsetCameraTargetPolicy, self).__init__(env, target_env, camera_node_name, agent_node_name,
target_node_name, offset, tightness=tightness,
hra_interpolation=hra_interpolation)
def reset(self):
self.offset = self.sample_offset()
state = super(RandomOffsetCameraTargetPolicy, self).reset()
# self.offset = self.sample_offset()
return state
def sample_offset(self):
height = np.random.uniform(*self.height) if isinstance(self.height, (list, tuple)) else self.height
radius = np.random.uniform(*self.radius) if isinstance(self.radius, (list, tuple)) else self.radius
angle = np.random.uniform(*self.angle) if isinstance(self.angle, (list, tuple)) else self.angle
return np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
def _get_config(self):
config = super(RandomOffsetCameraTargetPolicy, self)._get_config()
config.pop('offset')
config.update({'height': self.height,
'radius': self.radius,
'angle': self.angle})
return config
| en | 0.270382 | # self.offset = self.sample_offset() | 2.319897 | 2 |
Day3/Day3.py | ErAgOn-AmAnSiRoHi/Advent-of-Code-2021 | 0 | 10163 | with open("inputday3.txt") as f:
data = [x for x in f.read().split()]
gamma = ""
epsilon = ""
for b in range(0, len(data[0])):
one = 0
zero = 0
for c in range(0, len(data)):
if data[c][b] == '0':
zero += 1
else:
one += 1
if zero > one:
gamma += '0'
epsilon += '1'
else:
gamma += '1'
epsilon += '0'
g = int(gamma, 2)
e = int(epsilon, 2)
print("PART 1", g * e)
gamma = ""
epsilon = ""
data2 = data.copy()
index = 0
while len(data) > 1:
one = 0
zero = 0
ones = []
zeroes = []
for c in range(0, len(data)):
if data[c][index] == "0":
zero += 1
zeroes.append(data[c])
else:
one += 1
ones.append(data[c])
if zero > one:
data = zeroes
else:
data = ones
index += 1
oxygen = int(data[0], 2)
data = data2
index = 0
while len(data) > 1:
one = 0
zero = 0
ones = []
zeroes = []
for c in range(0, len(data)):
if data[c][index] == '0':
zero += 1
zeroes.append(data[c])
else:
one += 1
ones.append(data[c])
if one < zero:
data = ones
else:
data = zeroes
index += 1
co2 = int(data[0], 2)
print("PART 2", oxygen * co2)
| with open("inputday3.txt") as f:
data = [x for x in f.read().split()]
gamma = ""
epsilon = ""
for b in range(0, len(data[0])):
one = 0
zero = 0
for c in range(0, len(data)):
if data[c][b] == '0':
zero += 1
else:
one += 1
if zero > one:
gamma += '0'
epsilon += '1'
else:
gamma += '1'
epsilon += '0'
g = int(gamma, 2)
e = int(epsilon, 2)
print("PART 1", g * e)
gamma = ""
epsilon = ""
data2 = data.copy()
index = 0
while len(data) > 1:
one = 0
zero = 0
ones = []
zeroes = []
for c in range(0, len(data)):
if data[c][index] == "0":
zero += 1
zeroes.append(data[c])
else:
one += 1
ones.append(data[c])
if zero > one:
data = zeroes
else:
data = ones
index += 1
oxygen = int(data[0], 2)
data = data2
index = 0
while len(data) > 1:
one = 0
zero = 0
ones = []
zeroes = []
for c in range(0, len(data)):
if data[c][index] == '0':
zero += 1
zeroes.append(data[c])
else:
one += 1
ones.append(data[c])
if one < zero:
data = ones
else:
data = zeroes
index += 1
co2 = int(data[0], 2)
print("PART 2", oxygen * co2)
| none | 1 | 3.278127 | 3 |
|
keras2pytorch_dataset.py | MPCAICDM/MPCA | 0 | 10164 |
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
from misc import AverageMeter
from eval_accuracy import simple_accuracy
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import torch
from multiprocessing import Value
def softmax(input_tensor):
act = torch.nn.Softmax(dim=1)
return act(input_tensor).numpy()
class dataset_pytorch(data.Dataset):
def __init__(self, train_data, train_labels, test_data, test_labels, train=True,
transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.train_data = train_data # ndarray
self.train_labels = train_labels
self.test_data = test_data
self.test_labels = test_labels
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(getattr(self, 'root', None))  # no root attribute is set in __init__
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class transformer_score_dataset(data.Dataset):
def __init__(self, train_data, train_labels, data_transformer, aux_labels=None, transform=None,
target_transform=None, train_sequential=False):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data
self.train_labels = train_labels
self.aux_labels = aux_labels
self.transfomer = data_transformer
self.n_transforms = self.transfomer.n_transforms
self.train_sequential = train_sequential
if train_sequential:
self.length = self.train_data.shape[0]
self.transform_idx = 0
self.iter_count = Value('i', 0)
else:
self.length = self.train_data.shape[0] * self.transfomer.n_transforms
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.train_sequential:
with self.iter_count.get_lock():
self.iter_count.value += 1
if self.iter_count.value == self.length:
self.transform_idx = (self.transform_idx + 1) % self.n_transforms
self.iter_count.value = 0
image_idx, transform_idx = idx, self.transform_idx
nidx = image_idx * self.n_transforms + transform_idx
else:
image_idx, transform_idx = idx // self.n_transforms, idx % self.n_transforms
nidx = idx
img, target = self.transfomer.transform_one(self.train_data[image_idx], transform_idx).copy(), self.train_labels[nidx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
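# Worked example (not part of the original module): with n_transforms = 4 the flat
# index above maps as image_idx = idx // 4 and transform_idx = idx % 4, so idx = 10
# addresses transform 2 of image 2 and reads its label from train_labels[10]. In
# train_sequential mode the length is just the number of images: every pass serves
# one fixed transform for all images and advances it after a full iteration.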
class transformer_dataset(data.Dataset):
def __init__(self, train_data, train_labels, data_transformer, aux_labels=None, transform=None,
target_transform=None, train_sequential=False, is_padding=False):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data
self.train_labels = train_labels
self.aux_labels = aux_labels
self.transfomer = data_transformer
self.n_transforms = self.transfomer.n_transforms
self.train_sequential = train_sequential
self.is_padding = is_padding
if train_sequential:
self.length = self.train_data.shape[0]
self.transform_idx = 0
self.iter_count = Value('i', 0)
else:
self.length = self.train_data.shape[0] * self.transfomer.n_transforms
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.train_sequential:
with self.iter_count.get_lock():
self.iter_count.value += 1
if self.iter_count.value == self.length:
self.transform_idx = (self.transform_idx + 1) % self.n_transforms
self.iter_count.value = 0
image_idx, transform_idx = idx, self.transform_idx
nidx = image_idx * self.n_transforms + transform_idx
else:
image_idx, transform_idx = idx // self.n_transforms, idx % self.n_transforms
nidx = idx
if self.is_padding:
img = np.pad(self.train_data[image_idx].copy(), ((2, 2), (2, 2), (0, 0)), 'constant')
#print(img.shape)
img, target = self.transfomer.transform_one(img, transform_idx).copy(), self.train_labels[nidx]
else:
img, target = self.transfomer.transform_one(self.train_data[image_idx], transform_idx).copy(), self.train_labels[nidx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
class h5idx_dataset(data.Dataset):
def __init__(self, train_index, train_labels, total_data, aux_labels=None, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train_index = train_index # just a index
self.train_labels = train_labels
self.aux_labels = aux_labels
self.total_data = total_data
self.length = self.train_index.shape[0] * self.total_data.shape[1]
self.n_transform = self.total_data.shape[1]
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
image_idx, transform_idx = idx // self.n_transform, idx % self.n_transform
img, target = np.array(self.total_data[self.train_index[image_idx], transform_idx, :]), self.train_labels[idx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
class trainset_pytorch(data.Dataset):
def __init__(self, train_data, train_labels, aux_labels=None,transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data # ndarray
self.train_labels = train_labels
self.aux_labels = aux_labels
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.train_data[index], self.train_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[index])
return img, target
def __len__(self):
return len(self.train_data)
class testset_pytorch(data.Dataset):
def __init__(self, test_data, transform=None):
self.transform = transform
self.test_data = test_data # ndarray
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img = self.test_data[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.test_data)
class dataset_reorganized(data.Dataset):
def __init__(self, data, transform=None):
self.transform = transform
self.data = data # ndarray
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
imgs = self.data[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8
if self.transform is not None:
new_imgs = []
for i in range(imgs.shape[0]):
img = imgs[i]
img = self.transform(img)
new_imgs.append(img.unsqueeze(0))
new_imgs = torch.cat(new_imgs, dim=0)
else:
raise NotImplementedError
return new_imgs
def __len__(self):
return len(self.data)
def train_reorganized(trainloader, model, criterion, optimizer, epochs):
# train the model
model.train()
top1 = AverageMeter()
losses = AverageMeter()
for epoch in range(epochs):
for batch_idx, (inputs) in enumerate(trainloader):
targets = torch.LongTensor(np.tile(np.arange(inputs.size(1)), inputs.size(0)))
inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
inputs, targets = torch.autograd.Variable(inputs.cuda()), torch.autograd.Variable(targets.cuda())
outputs, _ = model(inputs)
loss = criterion(outputs, targets)
prec1 = simple_accuracy(outputs.data.cpu(), targets.data.cpu())
top1.update(prec1, inputs.size(0))
losses.update(loss.data.cpu(), inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Epoch: [{} | {}], batch: {}, loss: {}, Accuracy: {}'.format(epoch + 1, epochs, batch_idx + 1, losses.avg, top1.avg))
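# Usage sketch (assumptions, not from the original code): the model is expected to
# return (logits, features), and dataset_reorganized yields a stack of n_transforms
# views per sample that train_reorganized flattens and labels with the transform
# index 0..n_transforms-1.
#
#   train_set = dataset_reorganized(transformed_images, transform=to_tensor)
#   trainloader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
#   model = SomeBackbone(num_classes=n_transforms).cuda()   # hypothetical backbone
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
#   train_reorganized(trainloader, model, torch.nn.CrossEntropyLoss(), optimizer, epochs=10)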
def test_reorganized(testloader, model):
model.eval()
res = torch.Tensor()
for batch_idx, (inputs) in enumerate(testloader):
inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
inputs = torch.autograd.Variable(inputs.cuda())
outputs, _ = model(inputs)
res = torch.cat((res, outputs.data.cpu()), dim=0)
return res
def get_scores(outputs, targets):
scores = []
for i in range(outputs.shape[0]):
scores.append(outputs[i, targets[i]])
return np.array(scores) | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
from misc import AverageMeter
from eval_accuracy import simple_accuracy
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import torch
from multiprocessing import Value
def softmax(input_tensor):
act = torch.nn.Softmax(dim=1)
return act(input_tensor).numpy()
class dataset_pytorch(data.Dataset):
def __init__(self, train_data, train_labels, test_data, test_labels, train=True,
transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.train_data = train_data # ndarray
self.train_labels = train_labels
self.test_data = test_data
self.test_labels = test_labels
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(getattr(self, 'root', None))  # no root attribute is set in __init__
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class transformer_score_dataset(data.Dataset):
def __init__(self, train_data, train_labels, data_transformer, aux_labels=None, transform=None,
target_transform=None, train_sequential=False):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data
self.train_labels = train_labels
self.aux_labels = aux_labels
self.transfomer = data_transformer
self.n_transforms = self.transfomer.n_transforms
self.train_sequential = train_sequential
if train_sequential:
self.length = self.train_data.shape[0]
self.transform_idx = 0
self.iter_count = Value('i', 0)
else:
self.length = self.train_data.shape[0] * self.transfomer.n_transforms
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.train_sequential:
with self.iter_count.get_lock():
self.iter_count.value += 1
if self.iter_count.value == self.length:
self.transform_idx = (self.transform_idx + 1) % self.n_transforms
self.iter_count.value = 0
image_idx, transform_idx = idx, self.transform_idx
nidx = image_idx * self.n_transforms + transform_idx
else:
image_idx, transform_idx = idx // self.n_transforms, idx % self.n_transforms
nidx = idx
img, target = self.transfomer.transform_one(self.train_data[image_idx], transform_idx).copy(), self.train_labels[nidx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
class transformer_dataset(data.Dataset):
def __init__(self, train_data, train_labels, data_transformer, aux_labels=None, transform=None,
target_transform=None, train_sequential=False, is_padding=False):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data
self.train_labels = train_labels
self.aux_labels = aux_labels
self.transfomer = data_transformer
self.n_transforms = self.transfomer.n_transforms
self.train_sequential = train_sequential
self.is_padding = is_padding
if train_sequential:
self.length = self.train_data.shape[0]
self.transform_idx = 0
self.iter_count = Value('i', 0)
else:
self.length = self.train_data.shape[0] * self.transfomer.n_transforms
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.train_sequential:
with self.iter_count.get_lock():
self.iter_count.value += 1
if self.iter_count.value == self.length:
self.transform_idx = (self.transform_idx + 1) % self.n_transforms
self.iter_count.value = 0
image_idx, transform_idx = idx, self.transform_idx
nidx = image_idx * self.n_transforms + transform_idx
else:
image_idx, transform_idx = idx // self.n_transforms, idx % self.n_transforms
nidx = idx
if self.is_padding:
img = np.pad(self.train_data[image_idx].copy(), ((2, 2), (2, 2), (0, 0)), 'constant')
#print(img.shape)
img, target = self.transfomer.transform_one(img, transform_idx).copy(), self.train_labels[nidx]
else:
img, target = self.transfomer.transform_one(self.train_data[image_idx], transform_idx).copy(), self.train_labels[nidx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
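# --- Illustrative note (added; not part of the original source). ---
# transformer_dataset differs from transformer_score_dataset only in the optional
# `is_padding` branch, which zero-pads each HxWxC image by 2 pixels per side before
# transforming (e.g. 28x28 inputs become 32x32). A minimal sketch, assuming the same
# kind of transformer object as in the example above:
#
#   ds = transformer_dataset(x_train, y_train, transformer, is_padding=True)
#   img, target = ds[0]  # img spatial size is (H + 4, W + 4)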
class h5idx_dataset(data.Dataset):
def __init__(self, train_index, train_labels, total_data, aux_labels=None, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train_index = train_index # just a index
self.train_labels = train_labels
self.aux_labels = aux_labels
self.total_data = total_data
self.length = self.train_index.shape[0] * self.total_data.shape[1]
self.n_transform = self.total_data.shape[1]
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
image_idx, transform_idx = idx // self.n_transform, idx % self.n_transform
img, target = np.array(self.total_data[self.train_index[image_idx], transform_idx, :]), self.train_labels[idx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
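# --- Illustrative usage sketch (added; not part of the original source). ---
# `total_data` is assumed to be an (N, T, D) array-like that supports row indexing,
# e.g. an open h5py dataset holding T pre-computed transformed copies per sample;
# `train_index` picks the rows that belong to this split. File and key names below
# are hypothetical.
#
#   import h5py
#   total = h5py.File('transformed_features.h5', 'r')['data']   # shape (N, T, D)
#   idx = np.arange(100)                                         # first 100 samples
#   labels = np.tile(np.arange(total.shape[1]), len(idx))
#   ds = h5idx_dataset(idx, labels, total)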
class trainset_pytorch(data.Dataset):
def __init__(self, train_data, train_labels, aux_labels=None,transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data # ndarray
self.train_labels = train_labels
self.aux_labels = aux_labels
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.train_data[index], self.train_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[index])
return img, target
def __len__(self):
return len(self.train_data)
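# --- Illustrative usage sketch (added; not part of the original source). ---
# trainset_pytorch simply pairs an ndarray with its labels; `data` below is
# torch.utils.data (as used by the Dataset base class above) and torchvision is an
# assumed, not guaranteed, dependency of this project.
def _example_trainset_pytorch():
    import torchvision.transforms as T
    x = np.random.rand(16, 32, 32, 3).astype(np.float32)
    y = np.arange(16)
    ds = trainset_pytorch(x, y, transform=T.ToTensor())
    loader = data.DataLoader(ds, batch_size=4, shuffle=True)
    images, targets = next(iter(loader))  # images: (4, 3, 32, 32) float tensor
    return images.shape, targets.shape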
class testset_pytorch(data.Dataset):
def __init__(self, test_data, transform=None):
self.transform = transform
self.test_data = test_data # ndarray
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            image: the test image at the given index, transformed if a transform was supplied (no target is returned).
"""
img = self.test_data[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.test_data)
class dataset_reorganized(data.Dataset):
def __init__(self, data, transform=None):
self.transform = transform
self.data = data # ndarray
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            Tensor: all transformed views of the item at the given index, stacked along dimension 0 (no target is returned).
"""
imgs = self.data[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8
if self.transform is not None:
new_imgs = []
for i in range(imgs.shape[0]):
img = imgs[i]
img = self.transform(img)
new_imgs.append(img.unsqueeze(0))
new_imgs = torch.cat(new_imgs, dim=0)
else:
raise NotImplementedError
return new_imgs
def __len__(self):
return len(self.data)
def train_reorganized(trainloader, model, criterion, optimizer, epochs):
# train the model
model.train()
top1 = AverageMeter()
losses = AverageMeter()
for epoch in range(epochs):
for batch_idx, (inputs) in enumerate(trainloader):
targets = torch.LongTensor(np.tile(np.arange(inputs.size(1)), inputs.size(0)))
inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
inputs, targets = torch.autograd.Variable(inputs.cuda()), torch.autograd.Variable(targets.cuda())
outputs, _ = model(inputs)
loss = criterion(outputs, targets)
prec1 = simple_accuracy(outputs.data.cpu(), targets.data.cpu())
top1.update(prec1, inputs.size(0))
losses.update(loss.data.cpu(), inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Epoch: [{} | {}], batch: {}, loss: {}, Accuracy: {}'.format(epoch + 1, epochs, batch_idx + 1, losses.avg, top1.avg))
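# --- Illustrative note (added; not part of the original source). ---
# train_reorganized uses the transform index itself as the classification target:
# a (B, T, C, H, W) batch from dataset_reorganized is flattened to (B*T, C, H, W)
# and the labels simply repeat 0..T-1 for every image in the batch.
def _example_reorganized_targets():
    B, T = 2, 4
    targets = torch.LongTensor(np.tile(np.arange(T), B))
    return targets  # tensor([0, 1, 2, 3, 0, 1, 2, 3])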
def test_reorganized(testloader, model):
model.eval()
res = torch.Tensor()
for batch_idx, (inputs) in enumerate(testloader):
inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
inputs = torch.autograd.Variable(inputs.cuda())
outputs, _ = model(inputs)
res = torch.cat((res, outputs.data.cpu()), dim=0)
return res
def get_scores(outputs, targets):
scores = []
for i in range(outputs.shape[0]):
scores.append(outputs[i, targets[i]])
return np.array(scores) | en | 0.749778 | # training set or test set # ndarray Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. # doing this so that it is consistent with all other datasets # to return a PIL Image #print(img.shape) # just a index # ndarray Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. # doing this so that it is consistent with all other datasets # to return a PIL Image # img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8 # ndarray Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. # doing this so that it is consistent with all other datasets # to return a PIL Image # img = Image.fromarray(img) # ndarray Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. # doing this so that it is consistent with all other datasets # to return a PIL Image # img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8 # train the model # compute gradient and do SGD step | 2.57122 | 3 |
mir/tools/mir_repo_utils.py | fenrir-z/ymir-cmd | 1 | 10165 | import json
import logging
import os
from typing import Optional
from mir import scm
from mir.tools import mir_storage
def mir_check_repo_dvc_dirty(mir_root: str = ".") -> bool:
names = [name for name in mir_storage.get_all_mir_paths() if os.path.isfile(os.path.join(mir_root, name))]
if names:
dvc_cmd_args = ["--show-json", "--targets"]
dvc_cmd_args.extend(names)
dvc_scm = scm.Scm(mir_root, scm_executable="dvc")
dvc_result = dvc_scm.diff(dvc_cmd_args)
json_object = json.loads(dvc_result)
keys = ['added', 'deleted', 'modified', 'renamed', 'not in cache']
dvc_dirty = False
for key in keys:
dirty_value = json_object.get(key, None)
if dirty_value:
logging.info(f"{key}: {dirty_value}")
dvc_dirty = True
return dvc_dirty
else:
# if no mir files in this mir repo, it's clean
return False
def mir_check_repo_git_dirty(mir_root: str = ".") -> bool:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_result = git_scm.status("-s") # if clean, returns nothing
    if git_result:
logging.info(f"{git_result}")
return True
return False # clean
def mir_check_repo_dirty(mir_root: str = '.') -> bool:
return mir_check_repo_dvc_dirty(mir_root) or mir_check_repo_git_dirty(mir_root)
def mir_check_branch_exists(mir_root: str, branch: str) -> bool:
try:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_scm.rev_parse(branch)
return True
except Exception:
# git rev-parse will return non-zero code when can not find branch
# and cmd.py packs non-zero return code as an error
return False
def work_dir_to_monitor_file(work_dir: Optional[str]) -> Optional[str]:
return os.path.join(work_dir, 'out', 'monitor.txt') if work_dir else None
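# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Typical call sites check for a dirty repo before running a command; the paths below
# are hypothetical.
#
#   if mir_check_repo_dirty(mir_root='/path/to/mir/repo'):
#       raise RuntimeError('mir repo has uncommitted dvc/git changes')
#   monitor = work_dir_to_monitor_file('/tmp/work_dir')  # -> /tmp/work_dir/out/monitor.txt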
| import json
import logging
import os
from typing import Optional
from mir import scm
from mir.tools import mir_storage
def mir_check_repo_dvc_dirty(mir_root: str = ".") -> bool:
names = [name for name in mir_storage.get_all_mir_paths() if os.path.isfile(os.path.join(mir_root, name))]
if names:
dvc_cmd_args = ["--show-json", "--targets"]
dvc_cmd_args.extend(names)
dvc_scm = scm.Scm(mir_root, scm_executable="dvc")
dvc_result = dvc_scm.diff(dvc_cmd_args)
json_object = json.loads(dvc_result)
keys = ['added', 'deleted', 'modified', 'renamed', 'not in cache']
dvc_dirty = False
for key in keys:
dirty_value = json_object.get(key, None)
if dirty_value:
logging.info(f"{key}: {dirty_value}")
dvc_dirty = True
return dvc_dirty
else:
# if no mir files in this mir repo, it's clean
return False
def mir_check_repo_git_dirty(mir_root: str = ".") -> bool:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_result = git_scm.status("-s") # if clean, returns nothing
    if git_result:
logging.info(f"{git_result}")
return True
return False # clean
def mir_check_repo_dirty(mir_root: str = '.') -> bool:
return mir_check_repo_dvc_dirty(mir_root) or mir_check_repo_git_dirty(mir_root)
def mir_check_branch_exists(mir_root: str, branch: str) -> bool:
try:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_scm.rev_parse(branch)
return True
except Exception:
# git rev-parse will return non-zero code when can not find branch
# and cmd.py packs non-zero return code as an error
return False
def work_dir_to_monitor_file(work_dir: Optional[str]) -> Optional[str]:
return os.path.join(work_dir, 'out', 'monitor.txt') if work_dir else None
| en | 0.601488 | # if no mir files in this mir repo, it's clean # if clean, returns nothing # clean # git rev-parse will return non-zero code when can not find branch # and cmd.py packs non-zero return code as an error | 2.219279 | 2 |
utils/edit_utils.py | ermekaitygulov/STIT | 6 | 10166 | <reponame>ermekaitygulov/STIT
import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def add_texts_to_image_vertical(texts, pivot_images):
images_height = pivot_images.height
images_width = pivot_images.width
text_height = 256 + 16 - images_height % 16
num_images = len(texts)
image_width = images_width // num_images
text_image = Image.new('RGB', (images_width, text_height), (255, 255, 255))
draw = ImageDraw.Draw(text_image)
font_size = int(math.ceil(24 * image_width / 256))
try:
font = ImageFont.truetype("truetype/freefont/FreeSans.ttf", font_size)
except OSError:
font = ImageFont.load_default()
for i, text in enumerate(texts):
draw.text((image_width // 2 + i * image_width, text_height // 2), text, fill='black', anchor='ms', font=font)
out_image = Image.new('RGB', (pivot_images.width, pivot_images.height + text_image.height))
out_image.paste(text_image, (0, 0))
out_image.paste(pivot_images, (0, text_image.height))
return out_image
def get_affine_layers(synthesis):
blocks: List[SynthesisBlock] = [getattr(synthesis, f'b{res}') for res in synthesis.block_resolutions]
affine_layers = []
for block in blocks:
if hasattr(block, 'conv0'):
affine_layers.append((block.conv0.affine, True))
affine_layers.append((block.conv1.affine, True))
affine_layers.append((block.torgb.affine, False))
return affine_layers
def load_stylespace_std():
with open(paths_config.stylespace_mean_std, 'rb') as f:
_, s_std = pickle.load(f)
s_std = [torch.from_numpy(s).cuda() for s in s_std]
return s_std
def to_styles(edit: torch.Tensor, affine_layers):
idx = 0
styles = []
for layer, is_conv in affine_layers:
layer_dim = layer.weight.shape[0]
if is_conv:
styles.append(edit[idx:idx + layer_dim].clone())
idx += layer_dim
else:
styles.append(torch.zeros(layer_dim, device=edit.device, dtype=edit.dtype))
return styles
def w_to_styles(w, affine_layers):
w_idx = 0
styles = []
for affine, is_conv in affine_layers:
styles.append(affine(w[:, w_idx]))
if is_conv:
w_idx += 1
return styles
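# --- Illustrative usage sketch (added; not part of the original file). ---
# A StyleSpace edit direction (one scalar per conv channel) can be mapped to
# per-layer style offsets; `generator` below is an assumed, already-loaded
# StyleGAN generator, not something defined in this module.
#
#   affine_layers = get_affine_layers(generator.synthesis)
#   s_std = load_stylespace_std()
#   n_conv_channels = sum(layer.weight.shape[0] for layer, is_conv in affine_layers if is_conv)
#   edit = torch.zeros(n_conv_channels, device='cuda')
#   styles = to_styles(edit, affine_layers)   # torgb layers receive zero offsets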
def paste_image_mask(inverse_transform, image, dst_image, mask, radius=0, sigma=0.0):
image_masked = image.copy().convert('RGBA')
pasted_image = dst_image.copy().convert('RGBA')
if radius != 0:
mask_np = np.array(mask)
kernel_size = (radius * 2 + 1, radius * 2 + 1)
kernel = np.ones(kernel_size)
eroded = cv2.erode(mask_np, kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)
blurred_mask = cv2.GaussianBlur(eroded, kernel_size, sigmaX=sigma)
blurred_mask = Image.fromarray(blurred_mask)
image_masked.putalpha(blurred_mask)
else:
image_masked.putalpha(mask)
projected = image_masked.transform(dst_image.size, Image.PERSPECTIVE, inverse_transform,
Image.BILINEAR)
pasted_image.alpha_composite(projected)
return pasted_image
def paste_image(inverse_transform, img, orig_image):
pasted_image = orig_image.copy().convert('RGBA')
projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, inverse_transform, Image.BILINEAR)
pasted_image.paste(projected, (0, 0), mask=projected)
return pasted_image
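# --- Illustrative usage sketch (added; not part of the original file). ---
# After editing an aligned crop, the result is blended back into the original frame
# through the inverse alignment transform; `inverse_transform` is the coefficient
# tuple expected by PIL's Image.transform perspective warp, and the file names are
# hypothetical.
#
#   edited_crop = Image.open('edited_crop.png')
#   original = Image.open('original_frame.png')
#   mask = Image.open('face_mask.png').convert('L')
#   out = paste_image_mask(inverse_transform, edited_crop, original, mask, radius=10, sigma=5.0)
#   out.convert('RGB').save('result.png')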
| import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def add_texts_to_image_vertical(texts, pivot_images):
images_height = pivot_images.height
images_width = pivot_images.width
text_height = 256 + 16 - images_height % 16
num_images = len(texts)
image_width = images_width // num_images
text_image = Image.new('RGB', (images_width, text_height), (255, 255, 255))
draw = ImageDraw.Draw(text_image)
font_size = int(math.ceil(24 * image_width / 256))
try:
font = ImageFont.truetype("truetype/freefont/FreeSans.ttf", font_size)
except OSError:
font = ImageFont.load_default()
for i, text in enumerate(texts):
draw.text((image_width // 2 + i * image_width, text_height // 2), text, fill='black', anchor='ms', font=font)
out_image = Image.new('RGB', (pivot_images.width, pivot_images.height + text_image.height))
out_image.paste(text_image, (0, 0))
out_image.paste(pivot_images, (0, text_image.height))
return out_image
def get_affine_layers(synthesis):
blocks: List[SynthesisBlock] = [getattr(synthesis, f'b{res}') for res in synthesis.block_resolutions]
affine_layers = []
for block in blocks:
if hasattr(block, 'conv0'):
affine_layers.append((block.conv0.affine, True))
affine_layers.append((block.conv1.affine, True))
affine_layers.append((block.torgb.affine, False))
return affine_layers
def load_stylespace_std():
with open(paths_config.stylespace_mean_std, 'rb') as f:
_, s_std = pickle.load(f)
s_std = [torch.from_numpy(s).cuda() for s in s_std]
return s_std
def to_styles(edit: torch.Tensor, affine_layers):
idx = 0
styles = []
for layer, is_conv in affine_layers:
layer_dim = layer.weight.shape[0]
if is_conv:
styles.append(edit[idx:idx + layer_dim].clone())
idx += layer_dim
else:
styles.append(torch.zeros(layer_dim, device=edit.device, dtype=edit.dtype))
return styles
def w_to_styles(w, affine_layers):
w_idx = 0
styles = []
for affine, is_conv in affine_layers:
styles.append(affine(w[:, w_idx]))
if is_conv:
w_idx += 1
return styles
def paste_image_mask(inverse_transform, image, dst_image, mask, radius=0, sigma=0.0):
image_masked = image.copy().convert('RGBA')
pasted_image = dst_image.copy().convert('RGBA')
if radius != 0:
mask_np = np.array(mask)
kernel_size = (radius * 2 + 1, radius * 2 + 1)
kernel = np.ones(kernel_size)
eroded = cv2.erode(mask_np, kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)
blurred_mask = cv2.GaussianBlur(eroded, kernel_size, sigmaX=sigma)
blurred_mask = Image.fromarray(blurred_mask)
image_masked.putalpha(blurred_mask)
else:
image_masked.putalpha(mask)
projected = image_masked.transform(dst_image.size, Image.PERSPECTIVE, inverse_transform,
Image.BILINEAR)
pasted_image.alpha_composite(projected)
return pasted_image
def paste_image(inverse_transform, img, orig_image):
pasted_image = orig_image.copy().convert('RGBA')
projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, inverse_transform, Image.BILINEAR)
pasted_image.paste(projected, (0, 0), mask=projected)
return pasted_image | none | 1 | 2.528114 | 3 |
|
conll_df/conll_df.py | interrogator/conll-df | 27 | 10167 | <filename>conll_df/conll_df.py
import pandas as pd
# UD 1.0
CONLL_COLUMNS = ['i', 'w', 'l', 'p', 'n', 'm', 'g', 'f', 'd', 'c']
# UD 2.0
CONLL_COLUMNS_V2 = ['i', 'w', 'l', 'x', 'p', 'm', 'g', 'f', 'e', 'o']
# possible morphological attributes
MORPH_ATTS = ['type',
'animacy',
#'gender',
              'number',
"Abbr",
"Animacy",
"Aspect",
"Case",
"Definite",
"Degree",
"Evident",
"Foreign",
"Gender",
"Mood",
"NumType",
"Number",
"Person",
"Polarity",
"Polite",
"Poss",
"PronType",
"Reflex",
"Tense",
"VerbForm",
"Voice",
"Type"]
def _make_sent_csv(sentstring, fname, meta, splitter, i, skip_meta=False):
"""
Take one CONLL-U sentence and add all metadata to each row
Return: str (CSV data) and dict (sent level metadata)
"""
fixed_lines = []
raw_lines = sentstring.splitlines()
for line in raw_lines:
if not line:
continue
if line.startswith('#'):
if not skip_meta:
try:
k, v = line.lstrip('# ').split(splitter, 1)
except ValueError:
k, v = line.lstrip('# ').split(splitter.strip(), 1)
meta[k.lower().strip()] = v.strip()
else:
line = '%s\t%s\t%s' % (fname, i, line)
fixed_lines.append(line)
return '\n'.join(fixed_lines), meta
def _add_governors_to_df(df):
"""
Add governor info to a DF. Increases memory usage quite a bit.
"""
# save the original index
i = df.index.get_level_values('i')
# add g
dfg = df.set_index('g', append=True)
# remove i
dfg = dfg.reset_index('i')
dfg = df.loc[dfg.index]
dfg = dfg[['w', 'l', 'p', 'f']]
dfg['i'] = i
dfg = dfg.set_index('i', append=True)
dfg.index.names = ['file', 's', 'g', 'i']
dfg = dfg.reset_index('g', drop=True)
for c in list(dfg.columns):
try:
dfg[c] = dfg[c].cat.add_categories(['ROOT'])
except (AttributeError, ValueError):
pass
dfg = dfg.fillna('ROOT')
dfg.columns = ['gw', 'gl', 'gp', 'gf']
dfg = df.join(dfg, how="inner")
return dfg
def conll_df(path,
corpus_name=False,
corp_folder=False,
v2="auto",
skip_morph=False,
skip_meta=False,
add_gov=False,
drop=['text', 'newdoc id'],
file_index=True,
categories=True,
extra_fields='auto',
drop_redundant=True,
**kwargs):
"""
Optimised CONLL-U reader for v2.0 data
Args:
path (str): the file to prepare
Returns:
pd.DataFrame: 2d array representation of file data
"""
import os
import re
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
splitter = ' = ' if v2 else '='
with open(path, 'r') as fo:
data = fo.read().strip('\n')
if v2 == 'auto':
v2 = 'sent_id = ' in data[:9999]
fname = os.path.basename(path)
# metadata that applies filewide
# a little bonus for those with annual data
basedict = {}
if not skip_meta:
year = re.search(r'[12][0-9][0-9][0-9]', fname)
if year:
basedict['year'] = year.group(0)
sents = data.split('\n\n')
sents_meta = [_make_sent_csv(sstring, fname, dict(basedict), splitter, i, skip_meta=skip_meta) \
for i, sstring in enumerate(sents, start=1)]
sents, metadata = zip(*sents_meta)
# make the sent df
sents = '\n\n'.join(sents)
sents = StringIO(sents)
if v2:
cols = ['file', 's'] + CONLL_COLUMNS_V2
else:
cols = ['file', 's'] + CONLL_COLUMNS
df = pd.read_csv(sents, sep="\t", header=None, names=cols, quoting=kwargs.pop('quoting', 3),
index_col=[0, 1, 2], engine='c', na_filter=False, **kwargs)
if v2 and not skip_morph:
df['m'] = df['m'].fillna('')
df['o'] = df['o'].fillna('')
if extra_fields == 'auto':
# evil line to get all possible keys in the final column
extra_fields = list(df['o'].str.extractall(r'(?:^|\|)([^=]+?)=')[0].unique())
cats = MORPH_ATTS + extra_fields
if 'SpaceAfter' not in cats:
cats.append('SpaceAfter')
cats = list(set(cats))
om = df['o'].str.cat(df['m'], sep='|').str.strip('|_')
# this is a very slow list comp, but i can't think of a better way to do it.
# the 'extractall' solution makes columns for not just the value, but the key...
extra = [om.str.extract('%s=([^|$]+)' % cat.title(), expand=True) for cat in cats]
extra = pd.concat(extra, axis=1)
extra.columns = cats
df = pd.concat([df, extra], axis=1)
# make and join the meta df
if not skip_meta:
metadata = {i: d for i, d in enumerate(metadata, start=1)}
metadata = pd.DataFrame(metadata).T
metadata.index.name = 's'
df = metadata.join(df, how='inner')
# we never want these to show up as a dataframe column
badcols = ['sent_id', 's', 'i', 'file']
# if we aren't parsing morph and extra columns, we should at least keep them
if not skip_morph:
badcols += ['o', 'm']
if drop:
badcols = badcols + drop
df = df.drop(badcols, axis=1, errors='ignore')
# some evil code to handle conll-u files where g col could be a string
if 'g' in df.columns:
df['g'] = df['g'].fillna(0)
if df['g'].dtype in [object, str]:
df['g'] = df['g'].str.replace('_', '0').astype(int)
df['g'] = df['g'].astype(int)
df = df.fillna('_')
# attempt to categorise data
if categories:
for c in list(df.columns):
if c in ['g', 'date']:
continue
try:
df[c] = df[c].astype('category')
except:
pass
if add_gov:
df = _add_governors_to_df(df)
if not file_index:
df.index = df.index.droplevel('file')
if drop_redundant:
empty_cols = []
for c in df.columns:
if len(df[c].unique()) == 1:
empty_cols.append(c)
df = df.drop(empty_cols, axis=1)
#reorder columns so that important things are first
firsts = CONLL_COLUMNS_V2 if v2 else CONLL_COLUMNS
firsts = [i for i in firsts if i in list(df.columns)]
lasts = [i for i in list(df.columns) if i not in firsts]
df = df[firsts + lasts]
return df
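# --- Illustrative usage sketch (added; not part of the original file). ---
# A minimal call, assuming a CONLL-U file on disk (the path is hypothetical):
#
#   df = conll_df('en_ewt-ud-dev.conllu', file_index=False, skip_morph=True)
#   df.head()                          # MultiIndex (s, i) with w/l/p/g/f etc. columns
#   df['w'].value_counts().head(10)    # most frequent word forms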
| <filename>conll_df/conll_df.py
import pandas as pd
# UD 1.0
CONLL_COLUMNS = ['i', 'w', 'l', 'p', 'n', 'm', 'g', 'f', 'd', 'c']
# UD 2.0
CONLL_COLUMNS_V2 = ['i', 'w', 'l', 'x', 'p', 'm', 'g', 'f', 'e', 'o']
# possible morphological attributes
MORPH_ATTS = ['type',
'animacy',
#'gender',
              'number',
"Abbr",
"Animacy",
"Aspect",
"Case",
"Definite",
"Degree",
"Evident",
"Foreign",
"Gender",
"Mood",
"NumType",
"Number",
"Person",
"Polarity",
"Polite",
"Poss",
"PronType",
"Reflex",
"Tense",
"VerbForm",
"Voice",
"Type"]
def _make_sent_csv(sentstring, fname, meta, splitter, i, skip_meta=False):
"""
Take one CONLL-U sentence and add all metadata to each row
Return: str (CSV data) and dict (sent level metadata)
"""
fixed_lines = []
raw_lines = sentstring.splitlines()
for line in raw_lines:
if not line:
continue
if line.startswith('#'):
if not skip_meta:
try:
k, v = line.lstrip('# ').split(splitter, 1)
except ValueError:
k, v = line.lstrip('# ').split(splitter.strip(), 1)
meta[k.lower().strip()] = v.strip()
else:
line = '%s\t%s\t%s' % (fname, i, line)
fixed_lines.append(line)
return '\n'.join(fixed_lines), meta
def _add_governors_to_df(df):
"""
Add governor info to a DF. Increases memory usage quite a bit.
"""
# save the original index
i = df.index.get_level_values('i')
# add g
dfg = df.set_index('g', append=True)
# remove i
dfg = dfg.reset_index('i')
dfg = df.loc[dfg.index]
dfg = dfg[['w', 'l', 'p', 'f']]
dfg['i'] = i
dfg = dfg.set_index('i', append=True)
dfg.index.names = ['file', 's', 'g', 'i']
dfg = dfg.reset_index('g', drop=True)
for c in list(dfg.columns):
try:
dfg[c] = dfg[c].cat.add_categories(['ROOT'])
except (AttributeError, ValueError):
pass
dfg = dfg.fillna('ROOT')
dfg.columns = ['gw', 'gl', 'gp', 'gf']
dfg = df.join(dfg, how="inner")
return dfg
def conll_df(path,
corpus_name=False,
corp_folder=False,
v2="auto",
skip_morph=False,
skip_meta=False,
add_gov=False,
drop=['text', 'newdoc id'],
file_index=True,
categories=True,
extra_fields='auto',
drop_redundant=True,
**kwargs):
"""
Optimised CONLL-U reader for v2.0 data
Args:
path (str): the file to prepare
Returns:
pd.DataFrame: 2d array representation of file data
"""
import os
import re
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
splitter = ' = ' if v2 else '='
with open(path, 'r') as fo:
data = fo.read().strip('\n')
if v2 == 'auto':
v2 = 'sent_id = ' in data[:9999]
fname = os.path.basename(path)
# metadata that applies filewide
# a little bonus for those with annual data
basedict = {}
if not skip_meta:
year = re.search(r'[12][0-9][0-9][0-9]', fname)
if year:
basedict['year'] = year.group(0)
sents = data.split('\n\n')
sents_meta = [_make_sent_csv(sstring, fname, dict(basedict), splitter, i, skip_meta=skip_meta) \
for i, sstring in enumerate(sents, start=1)]
sents, metadata = zip(*sents_meta)
# make the sent df
sents = '\n\n'.join(sents)
sents = StringIO(sents)
if v2:
cols = ['file', 's'] + CONLL_COLUMNS_V2
else:
cols = ['file', 's'] + CONLL_COLUMNS
df = pd.read_csv(sents, sep="\t", header=None, names=cols, quoting=kwargs.pop('quoting', 3),
index_col=[0, 1, 2], engine='c', na_filter=False, **kwargs)
if v2 and not skip_morph:
df['m'] = df['m'].fillna('')
df['o'] = df['o'].fillna('')
if extra_fields == 'auto':
# evil line to get all possible keys in the final column
extra_fields = list(df['o'].str.extractall(r'(?:^|\|)([^=]+?)=')[0].unique())
cats = MORPH_ATTS + extra_fields
if 'SpaceAfter' not in cats:
cats.append('SpaceAfter')
cats = list(set(cats))
om = df['o'].str.cat(df['m'], sep='|').str.strip('|_')
# this is a very slow list comp, but i can't think of a better way to do it.
# the 'extractall' solution makes columns for not just the value, but the key...
extra = [om.str.extract('%s=([^|$]+)' % cat.title(), expand=True) for cat in cats]
extra = pd.concat(extra, axis=1)
extra.columns = cats
df = pd.concat([df, extra], axis=1)
# make and join the meta df
if not skip_meta:
metadata = {i: d for i, d in enumerate(metadata, start=1)}
metadata = pd.DataFrame(metadata).T
metadata.index.name = 's'
df = metadata.join(df, how='inner')
# we never want these to show up as a dataframe column
badcols = ['sent_id', 's', 'i', 'file']
# if we aren't parsing morph and extra columns, we should at least keep them
if not skip_morph:
badcols += ['o', 'm']
if drop:
badcols = badcols + drop
df = df.drop(badcols, axis=1, errors='ignore')
# some evil code to handle conll-u files where g col could be a string
if 'g' in df.columns:
df['g'] = df['g'].fillna(0)
if df['g'].dtype in [object, str]:
df['g'] = df['g'].str.replace('_', '0').astype(int)
df['g'] = df['g'].astype(int)
df = df.fillna('_')
# attempt to categorise data
if categories:
for c in list(df.columns):
if c in ['g', 'date']:
continue
try:
df[c] = df[c].astype('category')
except:
pass
if add_gov:
df = _add_governors_to_df(df)
if not file_index:
df.index = df.index.droplevel('file')
if drop_redundant:
empty_cols = []
for c in df.columns:
if len(df[c].unique()) == 1:
empty_cols.append(c)
df = df.drop(empty_cols, axis=1)
#reorder columns so that important things are first
firsts = CONLL_COLUMNS_V2 if v2 else CONLL_COLUMNS
firsts = [i for i in firsts if i in list(df.columns)]
lasts = [i for i in list(df.columns) if i not in firsts]
df = df[firsts + lasts]
return df
| en | 0.835032 | # UD 1.0 # UD 2.0 # possible morphological attributes #'gender', Take one CONLL-U sentence and add all metadata to each row Return: str (CSV data) and dict (sent level metadata) Add governor info to a DF. Increases memory usage quite a bit. # save the original index # add g # remove i Optimised CONLL-U reader for v2.0 data Args: path (str): the file to prepare Returns: pd.DataFrame: 2d array representation of file data # metadata that applies filewide # a little bonus for those with annual data # make the sent df # evil line to get all possible keys in the final column # this is a very slow list comp, but i can't think of a better way to do it. # the 'extractall' solution makes columns for not just the value, but the key... # make and join the meta df # we never want these to show up as a dataframe column # if we aren't parsing morph and extra columns, we should at least keep them # some evil code to handle conll-u files where g col could be a string # attempt to categorise data #reorder columns so that important things are first | 2.946643 | 3 |
scripts/postgres_to_lmdb_bars_60m.py | alexanu/atpy | 24 | 10168 | <filename>scripts/postgres_to_lmdb_bars_60m.py
#!/bin/python3
import argparse
import datetime
import functools
import logging
import os
import psycopg2
from dateutil.relativedelta import relativedelta
from atpy.data.cache.lmdb_cache import *
from atpy.data.cache.postgres_cache import BarsInPeriodProvider
from atpy.data.cache.postgres_cache import request_adjustments
from atpy.data.splits_dividends import adjust_df
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="PostgreSQL to LMDB configuration")
parser.add_argument('-lmdb_path', type=str, default=None, help="LMDB Path")
parser.add_argument('-delta_back', type=int, default=8, help="Default number of years to look back")
parser.add_argument('-adjust_splits', action='store_true', default=True, help="Adjust splits before saving")
parser.add_argument('-adjust_dividends', action='store_true', default=False, help="Adjust dividends before saving")
args = parser.parse_args()
lmdb_path = args.lmdb_path if args.lmdb_path is not None else os.environ['ATPY_LMDB_PATH']
con = psycopg2.connect(os.environ['POSTGRESQL_CACHE'])
adjustments = None
if args.adjust_splits and args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends')
elif args.adjust_splits:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='split')
elif args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='dividend')
now = datetime.datetime.now()
bgn_prd = datetime.datetime(now.year - args.delta_back, 1, 1)
bgn_prd = bgn_prd + relativedelta(days=7 - bgn_prd.weekday())
cache_read = functools.partial(read_pickle, lmdb_path=lmdb_path)
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table='bars_60m', bgn_prd=bgn_prd, delta=relativedelta(days=7),
overlap=relativedelta(microseconds=-1), cache=cache_read)
for i, df in enumerate(bars_in_period):
if cache_read(bars_in_period.current_cache_key()) is None:
if adjustments is not None:
adjust_df(df, adjustments)
write(bars_in_period.current_cache_key(), df, lmdb_path)
logging.info('Saving ' + bars_in_period.current_cache_key())
else:
logging.info('Cache hit on ' + bars_in_period.current_cache_key())
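    # --- Illustrative invocation (added; not part of the original script). ---
    # The script is driven by CLI flags plus two environment variables; the values
    # below are placeholders:
    #
    #   POSTGRESQL_CACHE='dbname=bars user=postgres' \
    #   ATPY_LMDB_PATH=/data/lmdb \
    #   python postgres_to_lmdb_bars_60m.py -delta_back 5 -adjust_splits
    #
    # Each weekly chunk of 60-minute bars is (optionally) split/dividend adjusted and
    # written to LMDB under the key returned by bars_in_period.current_cache_key().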
| <filename>scripts/postgres_to_lmdb_bars_60m.py
#!/bin/python3
import argparse
import datetime
import functools
import logging
import os
import psycopg2
from dateutil.relativedelta import relativedelta
from atpy.data.cache.lmdb_cache import *
from atpy.data.cache.postgres_cache import BarsInPeriodProvider
from atpy.data.cache.postgres_cache import request_adjustments
from atpy.data.splits_dividends import adjust_df
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="PostgreSQL to LMDB configuration")
parser.add_argument('-lmdb_path', type=str, default=None, help="LMDB Path")
parser.add_argument('-delta_back', type=int, default=8, help="Default number of years to look back")
parser.add_argument('-adjust_splits', action='store_true', default=True, help="Adjust splits before saving")
parser.add_argument('-adjust_dividends', action='store_true', default=False, help="Adjust dividends before saving")
args = parser.parse_args()
lmdb_path = args.lmdb_path if args.lmdb_path is not None else os.environ['ATPY_LMDB_PATH']
con = psycopg2.connect(os.environ['POSTGRESQL_CACHE'])
adjustments = None
if args.adjust_splits and args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends')
elif args.adjust_splits:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='split')
elif args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='dividend')
now = datetime.datetime.now()
bgn_prd = datetime.datetime(now.year - args.delta_back, 1, 1)
bgn_prd = bgn_prd + relativedelta(days=7 - bgn_prd.weekday())
cache_read = functools.partial(read_pickle, lmdb_path=lmdb_path)
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table='bars_60m', bgn_prd=bgn_prd, delta=relativedelta(days=7),
overlap=relativedelta(microseconds=-1), cache=cache_read)
for i, df in enumerate(bars_in_period):
if cache_read(bars_in_period.current_cache_key()) is None:
if adjustments is not None:
adjust_df(df, adjustments)
write(bars_in_period.current_cache_key(), df, lmdb_path)
logging.info('Saving ' + bars_in_period.current_cache_key())
else:
logging.info('Cache hit on ' + bars_in_period.current_cache_key())
| ru | 0.16812 | #!/bin/python3 | 2.1725 | 2 |
src/download_pdf.py | luccanunes/class-url-automation | 1 | 10169 | <reponame>luccanunes/class-url-automation<filename>src/download_pdf.py
def download_pdf(URL):
from selenium import webdriver
from time import sleep
URL = URL
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', {
# Change default directory for downloads
"download.default_directory": r"E:\coding\other\class-url-automation\src\pdfs",
"download.prompt_for_download": False, # To auto download the file
"download.directory_upgrade": True,
# It will not show PDF directly in chrome
"plugins.always_open_pdf_externally": True
})
options.add_argument("--headless")
driver = webdriver.Chrome(
executable_path=r'E:\coding\python\chromedriver.exe', chrome_options=options
)
driver.get(URL)
sleep(5) | def download_pdf(URL):
from selenium import webdriver
from time import sleep
URL = URL
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', {
# Change default directory for downloads
"download.default_directory": r"E:\coding\other\class-url-automation\src\pdfs",
"download.prompt_for_download": False, # To auto download the file
"download.directory_upgrade": True,
# It will not show PDF directly in chrome
"plugins.always_open_pdf_externally": True
})
options.add_argument("--headless")
driver = webdriver.Chrome(
executable_path=r'E:\coding\python\chromedriver.exe', chrome_options=options
)
driver.get(URL)
sleep(5) | en | 0.608473 | # Change default directory for downloads # To auto download the file # It will not show PDF directly in chrome | 3.155235 | 3 |
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/__init__.py | vbarbaresi/integrations-core | 1 | 10170 | <reponame>vbarbaresi/integrations-core
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ..console import CONTEXT_SETTINGS
from .check import check_run
from .ls import ls
from .prune import prune
from .reload import reload_env
from .shell import shell
from .start import start
from .stop import stop
from .test import test
ALL_COMMANDS = (check_run, ls, prune, reload_env, shell, start, stop, test)
@click.group(context_settings=CONTEXT_SETTINGS, short_help='Manage environments')
def env():
pass
for command in ALL_COMMANDS:
env.add_command(command)
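# --- Illustrative usage sketch (added; not part of the original file). ---
# The `env` group is normally mounted on the `ddev` CLI; in tests it can be driven
# directly with Click's test runner (the subcommand name `ls` is assumed here):
#
#   from click.testing import CliRunner
#   result = CliRunner().invoke(env, ['ls'])
#   assert result.exit_code == 0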
| # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ..console import CONTEXT_SETTINGS
from .check import check_run
from .ls import ls
from .prune import prune
from .reload import reload_env
from .shell import shell
from .start import start
from .stop import stop
from .test import test
ALL_COMMANDS = (check_run, ls, prune, reload_env, shell, start, stop, test)
@click.group(context_settings=CONTEXT_SETTINGS, short_help='Manage environments')
def env():
pass
for command in ALL_COMMANDS:
env.add_command(command) | en | 0.764575 | # (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) | 1.70901 | 2 |
sdk/python/pulumi_azure_nextgen/marketplace/private_store_offer.py | pulumi/pulumi-azure-nextgen | 31 | 10171 | <filename>sdk/python/pulumi_azure_nextgen/marketplace/private_store_offer.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateStoreOffer']
class PrivateStoreOffer(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
icon_file_uris: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
offer_id: Optional[pulumi.Input[str]] = None,
plans: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]]] = None,
private_store_id: Optional[pulumi.Input[str]] = None,
specific_plan_ids_limitation: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
update_suppressed_due_idempotence: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The privateStore offer data structure.
API Version: 2020-01-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] e_tag: Identifier for purposes of race condition
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] icon_file_uris: Icon File Uris
:param pulumi.Input[str] offer_id: The offer ID to update or delete
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]] plans: Offer plans
:param pulumi.Input[str] private_store_id: The store ID - must use the tenant ID
:param pulumi.Input[Sequence[pulumi.Input[str]]] specific_plan_ids_limitation: Plan ids limitation for this offer
:param pulumi.Input[bool] update_suppressed_due_idempotence: Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['e_tag'] = e_tag
__props__['icon_file_uris'] = icon_file_uris
__props__['offer_id'] = offer_id
__props__['plans'] = plans
if private_store_id is None and not opts.urn:
raise TypeError("Missing required property 'private_store_id'")
__props__['private_store_id'] = private_store_id
__props__['specific_plan_ids_limitation'] = specific_plan_ids_limitation
__props__['update_suppressed_due_idempotence'] = update_suppressed_due_idempotence
__props__['created_at'] = None
__props__['modified_at'] = None
__props__['name'] = None
__props__['offer_display_name'] = None
__props__['publisher_display_name'] = None
__props__['type'] = None
__props__['unique_offer_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:marketplace/latest:PrivateStoreOffer"), pulumi.Alias(type_="azure-nextgen:marketplace/v20200101:PrivateStoreOffer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateStoreOffer, __self__).__init__(
'azure-nextgen:marketplace:PrivateStoreOffer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateStoreOffer':
"""
Get an existing PrivateStoreOffer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return PrivateStoreOffer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Private store offer creation date
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
Identifier for purposes of race condition
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter(name="iconFileUris")
def icon_file_uris(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Icon File Uris
"""
return pulumi.get(self, "icon_file_uris")
@property
@pulumi.getter(name="modifiedAt")
def modified_at(self) -> pulumi.Output[str]:
"""
Private store offer modification date
"""
return pulumi.get(self, "modified_at")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="offerDisplayName")
def offer_display_name(self) -> pulumi.Output[str]:
"""
It will be displayed prominently in the marketplace
"""
return pulumi.get(self, "offer_display_name")
@property
@pulumi.getter
def plans(self) -> pulumi.Output[Optional[Sequence['outputs.PlanResponse']]]:
"""
Offer plans
"""
return pulumi.get(self, "plans")
@property
@pulumi.getter(name="privateStoreId")
def private_store_id(self) -> pulumi.Output[str]:
"""
Private store unique id
"""
return pulumi.get(self, "private_store_id")
@property
@pulumi.getter(name="publisherDisplayName")
def publisher_display_name(self) -> pulumi.Output[str]:
"""
Publisher name that will be displayed prominently in the marketplace
"""
return pulumi.get(self, "publisher_display_name")
@property
@pulumi.getter(name="specificPlanIdsLimitation")
def specific_plan_ids_limitation(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Plan ids limitation for this offer
"""
return pulumi.get(self, "specific_plan_ids_limitation")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueOfferId")
def unique_offer_id(self) -> pulumi.Output[str]:
"""
Offers unique id
"""
return pulumi.get(self, "unique_offer_id")
@property
@pulumi.getter(name="updateSuppressedDueIdempotence")
def update_suppressed_due_idempotence(self) -> pulumi.Output[Optional[bool]]:
"""
Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
"""
return pulumi.get(self, "update_suppressed_due_idempotence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
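# --- Illustrative usage sketch (added; not part of the generated SDK file). ---
# In a Pulumi program the resource is declared like any other; the ids below are
# placeholders, not real offer or store identifiers:
#
#   import pulumi
#   import pulumi_azure_nextgen.marketplace as marketplace
#   offer = marketplace.PrivateStoreOffer(
#       "exampleOffer",
#       private_store_id="00000000-0000-0000-0000-000000000000",
#       offer_id="publisher.offer-name",
#   )
#   pulumi.export("uniqueOfferId", offer.unique_offer_id)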
| <filename>sdk/python/pulumi_azure_nextgen/marketplace/private_store_offer.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateStoreOffer']
class PrivateStoreOffer(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
icon_file_uris: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
offer_id: Optional[pulumi.Input[str]] = None,
plans: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]]] = None,
private_store_id: Optional[pulumi.Input[str]] = None,
specific_plan_ids_limitation: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
update_suppressed_due_idempotence: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The privateStore offer data structure.
API Version: 2020-01-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] e_tag: Identifier for purposes of race condition
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] icon_file_uris: Icon File Uris
:param pulumi.Input[str] offer_id: The offer ID to update or delete
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]] plans: Offer plans
:param pulumi.Input[str] private_store_id: The store ID - must use the tenant ID
:param pulumi.Input[Sequence[pulumi.Input[str]]] specific_plan_ids_limitation: Plan ids limitation for this offer
:param pulumi.Input[bool] update_suppressed_due_idempotence: Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['e_tag'] = e_tag
__props__['icon_file_uris'] = icon_file_uris
__props__['offer_id'] = offer_id
__props__['plans'] = plans
if private_store_id is None and not opts.urn:
raise TypeError("Missing required property 'private_store_id'")
__props__['private_store_id'] = private_store_id
__props__['specific_plan_ids_limitation'] = specific_plan_ids_limitation
__props__['update_suppressed_due_idempotence'] = update_suppressed_due_idempotence
__props__['created_at'] = None
__props__['modified_at'] = None
__props__['name'] = None
__props__['offer_display_name'] = None
__props__['publisher_display_name'] = None
__props__['type'] = None
__props__['unique_offer_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:marketplace/latest:PrivateStoreOffer"), pulumi.Alias(type_="azure-nextgen:marketplace/v20200101:PrivateStoreOffer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateStoreOffer, __self__).__init__(
'azure-nextgen:marketplace:PrivateStoreOffer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateStoreOffer':
"""
Get an existing PrivateStoreOffer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return PrivateStoreOffer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Private store offer creation date
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
Identifier for purposes of race condition
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter(name="iconFileUris")
def icon_file_uris(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Icon File Uris
"""
return pulumi.get(self, "icon_file_uris")
@property
@pulumi.getter(name="modifiedAt")
def modified_at(self) -> pulumi.Output[str]:
"""
Private store offer modification date
"""
return pulumi.get(self, "modified_at")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="offerDisplayName")
def offer_display_name(self) -> pulumi.Output[str]:
"""
It will be displayed prominently in the marketplace
"""
return pulumi.get(self, "offer_display_name")
@property
@pulumi.getter
def plans(self) -> pulumi.Output[Optional[Sequence['outputs.PlanResponse']]]:
"""
Offer plans
"""
return pulumi.get(self, "plans")
@property
@pulumi.getter(name="privateStoreId")
def private_store_id(self) -> pulumi.Output[str]:
"""
Private store unique id
"""
return pulumi.get(self, "private_store_id")
@property
@pulumi.getter(name="publisherDisplayName")
def publisher_display_name(self) -> pulumi.Output[str]:
"""
Publisher name that will be displayed prominently in the marketplace
"""
return pulumi.get(self, "publisher_display_name")
@property
@pulumi.getter(name="specificPlanIdsLimitation")
def specific_plan_ids_limitation(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Plan ids limitation for this offer
"""
return pulumi.get(self, "specific_plan_ids_limitation")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueOfferId")
def unique_offer_id(self) -> pulumi.Output[str]:
"""
Offers unique id
"""
return pulumi.get(self, "unique_offer_id")
@property
@pulumi.getter(name="updateSuppressedDueIdempotence")
def update_suppressed_due_idempotence(self) -> pulumi.Output[Optional[bool]]:
"""
Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
"""
return pulumi.get(self, "update_suppressed_due_idempotence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| en | 0.726843 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The privateStore offer data structure. API Version: 2020-01-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] e_tag: Identifier for purposes of race condition :param pulumi.Input[Mapping[str, pulumi.Input[str]]] icon_file_uris: Icon File Uris :param pulumi.Input[str] offer_id: The offer ID to update or delete :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]] plans: Offer plans :param pulumi.Input[str] private_store_id: The store ID - must use the tenant ID :param pulumi.Input[Sequence[pulumi.Input[str]]] specific_plan_ids_limitation: Plan ids limitation for this offer :param pulumi.Input[bool] update_suppressed_due_idempotence: Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated. Get an existing PrivateStoreOffer resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. Private store offer creation date Identifier for purposes of race condition Icon File Uris Private store offer modification date The name of the resource. It will be displayed prominently in the marketplace Offer plans Private store unique id Publisher name that will be displayed prominently in the marketplace Plan ids limitation for this offer The type of the resource. Offers unique id Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated. | 1.719262 | 2 |
examples/custom_shape/stages.py | oksumoron/locust | 18,336 | 10172 | <reponame>oksumoron/locust<filename>examples/custom_shape/stages.py
from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape
class UserTasks(TaskSet):
@task
def get_root(self):
self.client.get("/")
class WebsiteUser(HttpUser):
wait_time = constant(0.5)
tasks = [UserTasks]
class StagesShape(LoadTestShape):
"""
    A simple load test shape class that has different user and spawn_rate at
different stages.
Keyword arguments:
stages -- A list of dicts, each representing a stage with the following keys:
duration -- When this many seconds pass the test is advanced to the next stage
users -- Total user count
spawn_rate -- Number of users to start/stop per second
stop -- A boolean that can stop that test at a specific stage
stop_at_end -- Can be set to stop once all stages have run.
"""
stages = [
{"duration": 60, "users": 10, "spawn_rate": 10},
{"duration": 100, "users": 50, "spawn_rate": 10},
{"duration": 180, "users": 100, "spawn_rate": 10},
{"duration": 220, "users": 30, "spawn_rate": 10},
{"duration": 230, "users": 10, "spawn_rate": 10},
{"duration": 240, "users": 1, "spawn_rate": 1},
]
def tick(self):
run_time = self.get_run_time()
for stage in self.stages:
if run_time < stage["duration"]:
tick_data = (stage["users"], stage["spawn_rate"])
return tick_data
return None
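# --- Illustrative invocation (added; not part of the original example). ---
# When a LoadTestShape subclass is present in the locustfile, user count and spawn
# rate come from tick(), so -u/-r are not passed on the command line; the host is a
# placeholder:
#
#   locust -f examples/custom_shape/stages.py --headless --host https://target.example.com
#
# The run stops once tick() returns None (after 240 seconds with the stages above).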
| from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape
class UserTasks(TaskSet):
@task
def get_root(self):
self.client.get("/")
class WebsiteUser(HttpUser):
wait_time = constant(0.5)
tasks = [UserTasks]
class StagesShape(LoadTestShape):
"""
    A simple load test shape class that has different user and spawn_rate at
different stages.
Keyword arguments:
stages -- A list of dicts, each representing a stage with the following keys:
duration -- When this many seconds pass the test is advanced to the next stage
users -- Total user count
spawn_rate -- Number of users to start/stop per second
stop -- A boolean that can stop that test at a specific stage
stop_at_end -- Can be set to stop once all stages have run.
"""
stages = [
{"duration": 60, "users": 10, "spawn_rate": 10},
{"duration": 100, "users": 50, "spawn_rate": 10},
{"duration": 180, "users": 100, "spawn_rate": 10},
{"duration": 220, "users": 30, "spawn_rate": 10},
{"duration": 230, "users": 10, "spawn_rate": 10},
{"duration": 240, "users": 1, "spawn_rate": 1},
]
def tick(self):
run_time = self.get_run_time()
for stage in self.stages:
if run_time < stage["duration"]:
tick_data = (stage["users"], stage["spawn_rate"])
return tick_data
return None | en | 0.882093 | A simply load test shape class that has different user and spawn_rate at different stages. Keyword arguments: stages -- A list of dicts, each representing a stage with the following keys: duration -- When this many seconds pass the test is advanced to the next stage users -- Total user count spawn_rate -- Number of users to start/stop per second stop -- A boolean that can stop that test at a specific stage stop_at_end -- Can be set to stop once all stages have run. | 2.554685 | 3 |
db/seed_ids.py | xtuyaowu/jtyd_python_spider | 7 | 10173 | # coding:utf-8
from sqlalchemy import text
from db.basic_db import db_session
from db.models import SeedIds
from decorators.decorator import db_commit_decorator
def get_seed():
"""
Get all user id to be crawled
:return: user ids
"""
return db_session.query(SeedIds).filter(text('status=0')).all()
def get_seed_ids():
"""
Get all user id to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('is_crawled=0')).all()
def get_home_ids():
"""
Get all user id who's home pages need to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('home_crawled=0')).all()
@db_commit_decorator
def set_seed_crawled(uid, result):
"""
:param uid: user id that is crawled
:param result: crawling result
:return: None
"""
seed = db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
if seed:
if seed.is_crawled == 0:
seed.is_crawled = result
else:
seed = SeedIds(uid=uid, is_crawled=result)
db_session.add(seed)
db_session.commit()
def get_seed_by_id(uid):
return db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
@db_commit_decorator
def insert_seeds(ids):
db_session.execute(SeedIds.__table__.insert().prefix_with('IGNORE'), [{'uid': i} for i in ids])
db_session.commit()
@db_commit_decorator
def set_seed_other_crawled(uid):
"""
update it if user id already exists, else insert
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=1, other_crawled=1, home_crawled=1)
db_session.add(seed)
else:
seed.other_crawled = 1
db_session.commit()
@db_commit_decorator
def set_seed_home_crawled(uid):
"""
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=0, other_crawled=0, home_crawled=1)
db_session.add(seed)
else:
seed.home_crawled = 1
db_session.commit()
| # coding:utf-8
from sqlalchemy import text
from db.basic_db import db_session
from db.models import SeedIds
from decorators.decorator import db_commit_decorator
def get_seed():
"""
Get all user id to be crawled
:return: user ids
"""
return db_session.query(SeedIds).filter(text('status=0')).all()
def get_seed_ids():
"""
Get all user id to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('is_crawled=0')).all()
def get_home_ids():
"""
Get all user id who's home pages need to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('home_crawled=0')).all()
@db_commit_decorator
def set_seed_crawled(uid, result):
"""
:param uid: user id that is crawled
:param result: crawling result
:return: None
"""
seed = db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
if seed:
if seed.is_crawled == 0:
seed.is_crawled = result
else:
seed = SeedIds(uid=uid, is_crawled=result)
db_session.add(seed)
db_session.commit()
def get_seed_by_id(uid):
return db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
@db_commit_decorator
def insert_seeds(ids):
db_session.execute(SeedIds.__table__.insert().prefix_with('IGNORE'), [{'uid': i} for i in ids])
db_session.commit()
@db_commit_decorator
def set_seed_other_crawled(uid):
"""
update it if user id already exists, else insert
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=1, other_crawled=1, home_crawled=1)
db_session.add(seed)
else:
seed.other_crawled = 1
db_session.commit()
@db_commit_decorator
def set_seed_home_crawled(uid):
"""
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=0, other_crawled=0, home_crawled=1)
db_session.add(seed)
else:
seed.home_crawled = 1
db_session.commit()
| en | 0.87234 | # coding:utf-8 Get all user id to be crawled :return: user ids Get all user id to be crawled :return: user ids Get all user id who's home pages need to be crawled :return: user ids :param uid: user id that is crawled :param result: crawling result :return: None update it if user id already exists, else insert :param uid: user id :return: None :param uid: user id :return: None | 2.511047 | 3 |
tobler/area_weighted/area_interpolate.py | sjsrey/tobler | 1 | 10174 | """
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFramee
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
target tables. It currently support the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
source_df = source_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
allocation of area from source to target tables. It currently support
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = _area_tables_binning(source_df, target_df, spatial_index)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = _area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_tables_raster(
source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True
):
"""
Construct area allocation and source-target correspondence tables according to a raster 'populated' areas
Parameters
----------
source_df : geopandas.GeoDataFrame
geeodataframe with geometry column of polygon type
target_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
raster_path : str
the path to the associated raster image.
codes : list
list of integer code values that should be considered as 'populated'.
Since this draw inspiration using the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).
The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html
Only taken into consideration for harmonization raster based.
force_crs_match : bool (default is True)
Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
It is recommended to let this argument as True.
Returns
-------
tables: tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union_pre = gpd.overlay(source_df, target_df, how="union")
# Establishing a CRS for the generated union
warnings.warn(
"The CRS for the generated union will be set to be the same as source_df."
)
res_union_pre.crs = source_df.crs
# The 'append_profile_in_gdf' function is present in nlcd.py script
res_union = _fast_append_profile_in_gdf(
res_union_pre, raster_path, force_crs_match=force_crs_match
)
str_codes = [str(i) for i in codes]
str_list = ["Type_" + i for i in str_codes]
# Extract list of code names that actually appear in the appended dataset
str_list_ok = [col for col in res_union.columns if col in str_list]
res_union["Populated_Pixels"] = res_union[str_list_ok].sum(axis=1)
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["Populated_Pixels"]
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
| """
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFramee
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
target tables. It currently support the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
source_df = source_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
allocation of area from source to target tables. It currently support
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = _area_tables_binning(source_df, target_df, spatial_index)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = _area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
def _area_tables_raster(
source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True
):
"""
Construct area allocation and source-target correspondence tables according to a raster 'populated' areas
Parameters
----------
source_df : geopandas.GeoDataFrame
geeodataframe with geometry column of polygon type
target_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
raster_path : str
the path to the associated raster image.
codes : list
list of integer code values that should be considered as 'populated'.
Since this draw inspiration using the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).
The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html
Only taken into consideration for harmonization raster based.
force_crs_match : bool (default is True)
Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
It is recommended to let this argument as True.
Returns
-------
tables: tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union_pre = gpd.overlay(source_df, target_df, how="union")
# Establishing a CRS for the generated union
warnings.warn(
"The CRS for the generated union will be set to be the same as source_df."
)
res_union_pre.crs = source_df.crs
# The 'append_profile_in_gdf' function is present in nlcd.py script
res_union = _fast_append_profile_in_gdf(
res_union_pre, raster_path, force_crs_match=force_crs_match
)
str_codes = [str(i) for i in codes]
str_list = ["Type_" + i for i in str_codes]
# Extract list of code names that actually appear in the appended dataset
str_list_ok = [col for col in res_union.columns if col in str_list]
res_union["Populated_Pixels"] = res_union[str_list_ok].sum(axis=1)
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["Populated_Pixels"]
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
| en | 0.720953 | Area Weighted Interpolation Construct area allocation and source-target correspondence tables using a spatial indexing approach ... NOTE: this currently relies on Geopandas' spatial index machinery Parameters ---------- source_df : geopandas.GeoDataFrame GeoDataFrame containing input data and polygons target_df : geopandas.GeoDataFramee GeoDataFrame defining the output geometries spatial_index : str Spatial index to use to build the allocation of area from source to target tables. It currently support the following values: - "source": build the spatial index on `source_df` - "target": build the spatial index on `target_df` - "auto": attempts to guess the most efficient alternative. Currently, this option uses the largest table to build the index, and performs a `bulk_query` on the shorter table. Returns ------- tables : scipy.sparse.dok_matrix # it is generally more performant to use the longer df as spatial index Construct area allocation and source-target correspondence tables. Parameters ---------- source_df : geopandas.GeoDataFrame target_df : geopandas.GeoDataFrame Returns ------- tables : tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t Notes ----- The assumption is both dataframes have the same coordinate reference system. Union geometry is a geometry formed by the intersection of a source geometry and a target geometry SU Maps source geometry to union geometry, UT maps union geometry to target geometry # create temporary index for union # create temporary index for union # holds area of intersection of source geom with union geom # binary table mapping union geom to target geom # only union polygons that intersect both a source and a target geometry matter Area interpolation for extensive and intensive variables. Parameters ---------- source_df : geopandas.GeoDataFrame target_df : geopandas.GeoDataFrame extensive_variables : list [Optional. Default=None] Columns in dataframes for extensive variables intensive_variables : list [Optional. Default=None] Columns in dataframes for intensive variables table : scipy.sparse.dok_matrix [Optional. Default=None] Area allocation source-target correspondence table. If not provided, it will be built from `source_df` and `target_df` using `tobler.area_interpolate._area_tables_binning` allocate_total : boolean [Optional. Default=True] True if total value of source area should be allocated. False if denominator is area of i. Note that the two cases would be identical when the area of the source polygon is exhausted by intersections. See Notes for more details. spatial_index : str [Optional. Default="auto"] Spatial index to use to build the allocation of area from source to target tables. It currently support the following values: - "source": build the spatial index on `source_df` - "target": build the spatial index on `target_df` - "auto": attempts to guess the most efficient alternative. Currently, this option uses the largest table to build the index, and performs a `bulk_query` on the shorter table. Returns ------- estimates : geopandas.GeoDataFrame new geodaraframe with interpolated variables as columns and target_df geometry as output geometry Notes ----- The assumption is both dataframes have the same coordinate reference system. For an extensive variable, the estimate at target polygon j (default case) is: .. 
math:: v_j = \\sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \\sum_k a_{i,k} If the area of the source polygon is not exhausted by intersections with target polygons and there is reason to not allocate the complete value of an extensive attribute, then setting allocate_total=False will use the following weights: .. math:: v_j = \\sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / a_i where a_i is the total area of source polygon i. For an intensive variable, the estimate at target polygon j is: .. math:: v_j = \\sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \\sum_k a_{k,j} # row standardize table Area interpolation for extensive and intensive variables. Parameters ---------- source_df : geopandas.GeoDataFrame (required) geodataframe with polygon geometries target_df : geopandas.GeoDataFrame (required) geodataframe with polygon geometries extensive_variables : list, (optional) columns in dataframes for extensive variables intensive_variables : list, (optional) columns in dataframes for intensive variables tables : tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t allocate_total : boolean True if total value of source area should be allocated. False if denominator is area of i. Note that the two cases would be identical when the area of the source polygon is exhausted by intersections. See Notes for more details. Returns ------- estimates : geopandas.GeoDataFrame new geodaraframe with interpolated variables as columns and target_df geometry as output geometry Notes ----- The assumption is both dataframes have the same coordinate reference system. For an extensive variable, the estimate at target polygon j (default case) is: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{i,k} If the area of the source polygon is not exhausted by intersections with target polygons and there is reason to not allocate the complete value of an extensive attribute, then setting allocate_total=False will use the following weights: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / a_i where a_i is the total area of source polygon i. For an intensive variable, the estimate at target polygon j is: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{k,j} Construct area allocation and source-target correspondence tables according to a raster 'populated' areas Parameters ---------- source_df : geopandas.GeoDataFrame geeodataframe with geometry column of polygon type target_df : geopandas.GeoDataFrame geodataframe with geometry column of polygon type raster_path : str the path to the associated raster image. codes : list list of integer code values that should be considered as 'populated'. Since this draw inspiration using the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity). The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html Only taken into consideration for harmonization raster based. force_crs_match : bool (default is True) Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file. It is recommended to let this argument as True. 
Returns ------- tables: tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t Notes ----- The assumption is both dataframes have the same coordinate reference system. Union geometry is a geometry formed by the intersection of a source geometry and a target geometry SU Maps source geometry to union geometry, UT maps union geometry to target geometry # create temporary index for union # create temporary index for union # Establishing a CRS for the generated union # The 'append_profile_in_gdf' function is present in nlcd.py script # Extract list of code names that actually appear in the appended dataset # holds area of intersection of source geom with union geom # binary table mapping union geom to target geom # only union polygons that intersect both a source and a target geometry matter | 2.439696 | 2 |
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 0 | 10175 | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Headlines Timing
#
# Author:
# ----------------------------------------------------------------------------
# set up to test area names and parts of states
# without locationName defined
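# These triple-quoted fragments contain Python source that overrides entries
# in the AreaDictionary; individual test cases apply them through their
# "fileChanges" entries (areaT1 is swapped in by the location tests at the end
# of this file, and the other fragments are presumably used the same way by
# later cases), so product phrasing can be checked against controlled
# fullStateName/partOfState/locationName values.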
areaT1 = """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
"""
# with locationName defined
areaT2= """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ050']['locationName'] = 'Clearfield'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ057']['locationName'] = 'Clearfield'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ160']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ151']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ043']['locationName'] = 'Adams'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ162']['locationName'] = 'Adams'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
#AreaDictionary['FLZ165']['locationName'] = 'western'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ056']['locationName'] = 'Tampa'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ052']['locationName'] = 'Tampa'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ155']['locationName'] = 'Atlanta'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ061']['locationName'] = 'Beach'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['locationName'] = 'Beach'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ142']['locationName'] = 'South Park'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
AreaDictionary['FLZ043']['locationName'] = 'South Park'
"""
# for testing of parishes, counties, and areas
areaT3 = """
AreaDictionary['FLC017']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC017']['partOfState'] = 'western'
AreaDictionary['FLC017']['independentCity'] = 1
AreaDictionary['FLC105']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC105']['partOfState'] = 'western'
AreaDictionary['FLC027']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC027']['partOfState'] = 'western'
AreaDictionary['FLC053']['fullStateName'] = 'Florida'
AreaDictionary['FLC053']['partOfState'] = 'western'
"""
areaT3FIPS0 = '#Definition["areaType"] = "FIPS"'
areaT3FIPS1 = 'Definition["areaType"] = "FIPS"'
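# Each entry in the scripts list below drives one autotest step. Keys used in
# this file:
#   commentary   - human-readable description of the step
#   name         - unique test name
#   drtTime      - simulated run time, formatted YYYYMMDD_HHMM
#   productType  - formatter to run (None for setup/teardown steps)
#   cmdLineVars  - command-line variable overrides (e.g. the flood reason)
#   createGrids  - Hazards grids to create before the formatter runs
#   checkStrings - substrings expected in the generated product
#   clearHazardsTable, decodeVTEC, vtecMode, fileChanges - test-harness
#                  controls whose exact semantics come from the autotest
#                  framework; the descriptions here are a working summary.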
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "Hazard_FFA_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "NEW FFA",
"name": "Hazard_FFA_1",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Coastal Pasco-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CON FFA",
"name": "Hazard_FFA_2",
"drtTime": "20100101_0530",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "EXA FFA",
"name": "Hazard_FFA_3",
"drtTime": "20100101_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149","FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXA.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has expanded the",
"* Flood Watch to include a portion of south central Florida, including the following area, Highlands.",
"* Until 3 AM EST early this morning",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CAN FFA, NEW FFA",
"name": "Hazard_FFA_4",
"drtTime": "20100101_0720",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IJ '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 8, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/X.NEW.KTBW.FF.A.0001.100101T0720Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLOOD WATCH IS CANCELLED...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The Flood Watch for a portion of south central Florida has been cancelled.",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled."
],
},
{
"commentary": "EXP FFA, 2 NEW FFA",
"name": "Hazard_FFA_5",
"drtTime": "20100101_1300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'FS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0001.000000T0000Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"...FLASH FLOOD WATCH HAS EXPIRED...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"The Flash Flood Watch for a portion of south central Florida has expired.",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "CON test of multiple events",
"name": "Hazard_FFA_6",
"drtTime": "20100102_0300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'RS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST SATURDAY...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST Saturday",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "middle of 1st event",
"name": "Hazard_FFA_7",
"drtTime": "20100102_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From this evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "joining two events",
"name": "Hazard_FFA_8",
"drtTime": "20100102_1200",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IC '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.EXT.KTBW.FF.A.0003.100102T1200Z-100103T1900Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH NOW IN EFFECT THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch is now in effect for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "into the tail end of the events",
"name": "Hazard_FFA_9",
"drtTime": "20100103_1100",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 2 PM EST this afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT THROUGH THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Through this evening",
],
},
{
"commentary": "exp 1st event, continue 2nd event",
"name": "Hazard_FFA_10",
"drtTime": "20100103_1855",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DR '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH WILL EXPIRE AT 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch for a portion of south central Florida will expire at 2 PM EST this afternoon.",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 8 PM EST THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 8 PM EST this evening",
],
},
{
"commentary": "cancel 2nd event",
"name": "Hazard_FFA_11",
"drtTime": "20100104_0000",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'GO '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.GO.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled.",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_FFA_12",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# Begin detailed phrasing of location tests
{
"commentary": "one state, single area, w/o location",
"name": "Hazard_FFA_13a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, single area, w location",
"name": "Hazard_FFA_13b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w/o location",
"name": "Hazard_FFA_14a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
# "Including the cities of St. Petersburg, Clearwater, Largo, ",
# "Lakeland, Winter Haven, Bradenton, Bayshore Gardens, ",
# "Palmetto, Sebring, Avon Park, Placid Lakes",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Highlands and Pinellas. In western Georgia, Coastal Manatee and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w location",
"name": "Hazard_FFA_14b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Clearfield. In western Georgia, Atlanta and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w/o location",
"name": "Hazard_FFA_15a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Coastal Hillsborough and Coastal Sarasota. In southern Florida, Hardee. In western Florida, Highlands and Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w location",
"name": "Hazard_FFA_15b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Aunt Ruby. In southern Florida, Tampa. In western Florida, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w/o location",
"name": "Hazard_FFA_16a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Pinellas. In Georgia, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w location",
"name": "Hazard_FFA_16b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w/o location",
"name": "Hazard_FFA_17a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Coastal Hillsborough, Coastal Sarasota, Highlands, and Pinellas. In Georgia, Coastal Hernando, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w location",
"name": "Hazard_FFA_17b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Aunt Ruby and Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 1, independent 1, counties 1",
"name": "Hazard_FFA_18a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parish, in western Florida, Hernando. In western Louisiana, Citrus and DeSoto.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 2, independent 1, counties 1",
"name": "Hazard_FFA_18b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053","FLC105"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-105-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-Polk-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parishes, in western Florida, Hernando. In western Louisiana, Citrus, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
]
import TestScript
def testScript(self, dataMgr):
defaults = {
"database": "<site>_GRID__Fcst_00000000_0000",
"publishGrids": 0,
"decodeVTEC": 1,
"gridsStartTime": "20100101_0500",
"orderStrings": 1,
"vtecMode": "X",
"deleteGrids": [("Fcst", "Hazards", "SFC", "all", "all")],
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
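# Note on how these cases are driven: each entry in `scripts` supplies
# per-case settings (drtTime, createGrids, cmdLineVars, checkStrings, etc.)
# on top of the shared `defaults` above, and TestScript.generalTestScript is
# expected to run the named productType and verify that every checkStrings
# line appears in the generated product. A minimal sketch of that assumed
# pattern (runProduct is a hypothetical helper, not part of TestScript):
#
#   for entry in scripts:
#       cfg = dict(defaults)
#       cfg.update(entry)                 # per-case keys override defaults
#       output = runProduct(cfg)          # hypothetical: format the product
#       for expected in cfg.get("checkStrings", []):
#           assert expected in output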
| ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Headlines Timing
#
# Author:
# ----------------------------------------------------------------------------
# Set up to test area names and part of states
# without locationName defined
areaT1 = """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
"""
# with location name defined
areaT2 = """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ050']['locationName'] = 'Clearfield'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ057']['locationName'] = 'Clearfield'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ160']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ151']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ043']['locationName'] = 'Adams'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ162']['locationName'] = 'Adams'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
#AreaDictionary['FLZ165']['locationName'] = 'western'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ056']['locationName'] = 'Tampa'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ052']['locationName'] = 'Tampa'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ155']['locationName'] = 'Atlanta'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ061']['locationName'] = 'Beach'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['locationName'] = 'Beach'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ142']['locationName'] = 'South Park'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
AreaDictionary['FLZ043']['locationName'] = 'South Park'
"""
# for testing of parishes, counties, and independent cities
areaT3 = """
AreaDictionary['FLC017']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC017']['partOfState'] = 'western'
AreaDictionary['FLC017']['independentCity'] = 1
AreaDictionary['FLC105']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC105']['partOfState'] = 'western'
AreaDictionary['FLC027']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC027']['partOfState'] = 'western'
AreaDictionary['FLC053']['fullStateName'] = 'Florida'
AreaDictionary['FLC053']['partOfState'] = 'western'
"""
areaT3FIPS0 = '#Definition["areaType"] = "FIPS"'
areaT3FIPS1 = 'Definition["areaType"] = "FIPS"'
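# areaT3 switches the test areas to county FIPS codes (FLCxxx) and flags
# FLC017 as an independent city, which drives the "county, independent city,
# and parish(es)" phrasing checked by Hazard_FFA_18a/18b. The two strings
# above feed the "replace" fileChanges entry for Hazard_FFA_Local, which
# presumably swaps the commented-out Definition line for the active one so
# the product runs with Definition["areaType"] = "FIPS"; roughly:
#
#   productSource = productSource.replace(areaT3FIPS0, areaT3FIPS1)  # assumed substitution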
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "Hazard_FFA_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "NEW FFA",
"name": "Hazard_FFA_1",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Coastal Pasco-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CON FFA",
"name": "Hazard_FFA_2",
"drtTime": "20100101_0530",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "EXA FFA",
"name": "Hazard_FFA_3",
"drtTime": "20100101_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149","FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXA.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has expanded the",
"* Flood Watch to include a portion of south central Florida, including the following area, Highlands.",
"* Until 3 AM EST early this morning",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CAN FFA, NEW FFA",
"name": "Hazard_FFA_4",
"drtTime": "20100101_0720",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IJ '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 8, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/X.NEW.KTBW.FF.A.0001.100101T0720Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLOOD WATCH IS CANCELLED...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The Flood Watch for a portion of south central Florida has been cancelled.",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled."
],
},
{
"commentary": "EXP FFA, 2 NEW FFA",
"name": "Hazard_FFA_5",
"drtTime": "20100101_1300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'FS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0001.000000T0000Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"...FLASH FLOOD WATCH HAS EXPIRED...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"The Flash Flood Watch for a portion of south central Florida has expired.",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "CON test of multiple events",
"name": "Hazard_FFA_6",
"drtTime": "20100102_0300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'RS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST SATURDAY...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST Saturday",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "middle of 1st event",
"name": "Hazard_FFA_7",
"drtTime": "20100102_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From this evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "joining two events",
"name": "Hazard_FFA_8",
"drtTime": "20100102_1200",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IC '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.EXT.KTBW.FF.A.0003.100102T1200Z-100103T1900Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH NOW IN EFFECT THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch is now in effect for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "into the tail end of the events",
"name": "Hazard_FFA_9",
"drtTime": "20100103_1100",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 2 PM EST this afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT THROUGH THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Through this evening",
],
},
{
"commentary": "exp 1st event, continue 2nd event",
"name": "Hazard_FFA_10",
"drtTime": "20100103_1855",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DR '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH WILL EXPIRE AT 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch for a portion of south central Florida will expire at 2 PM EST this afternoon.",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 8 PM EST THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 8 PM EST this evening",
],
},
{
"commentary": "cancel 2nd event",
"name": "Hazard_FFA_11",
"drtTime": "20100104_0000",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'GO '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.GO.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled.",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_FFA_12",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# Begin detailed phrasing of location tests
{
"commentary": "one state, single area, w/o location",
"name": "Hazard_FFA_13a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, single area, w location",
"name": "Hazard_FFA_13b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w/o location",
"name": "Hazard_FFA_14a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
# "Including the cities of St. Petersburg, Clearwater, Largo, ",
# "Lakeland, Winter Haven, Bradenton, Bayshore Gardens, ",
# "Palmetto, Sebring, Avon Park, Placid Lakes",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Highlands and Pinellas. In western Georgia, Coastal Manatee and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w location",
"name": "Hazard_FFA_14b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Clearfield. In western Georgia, Atlanta and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w/o location",
"name": "Hazard_FFA_15a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Coastal Hillsborough and Coastal Sarasota. In southern Florida, Hardee. In western Florida, Highlands and Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w location",
"name": "Hazard_FFA_15b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Aunt Ruby. In southern Florida, Tampa. In western Florida, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w/o location",
"name": "Hazard_FFA_16a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Pinellas. In Georgia, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w location",
"name": "Hazard_FFA_16b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w/o location",
"name": "Hazard_FFA_17a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Coastal Hillsborough, Coastal Sarasota, Highlands, and Pinellas. In Georgia, Coastal Hernando, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w location",
"name": "Hazard_FFA_17b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Aunt Ruby and Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 1, independent 1, counties 1",
"name": "Hazard_FFA_18a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parish, in western Florida, Hernando. In western Louisiana, Citrus and DeSoto.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 2, independent 1, counties 1",
"name": "Hazard_FFA_18b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053","FLC105"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-105-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-Polk-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parishes, in western Florida, Hernando. In western Louisiana, Citrus, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
]
import TestScript
def testScript(self, dataMgr):
defaults = {
"database": "<site>_GRID__Fcst_00000000_0000",
"publishGrids": 0,
"decodeVTEC": 1,
"gridsStartTime": "20100101_0500",
"orderStrings": 1,
"vtecMode": "X",
"deleteGrids": [("Fcst", "Hazards", "SFC", "all", "all")],
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
| en | 0.566076 | ## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. # # Contractor Name: <NAME> # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 # # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## # ---------------------------------------------------------------------------- # This software is in the public domain, furnished "as is", without technical # support, and with no warranty, express or implied, as to its usefulness for # any purpose. # # Headlines Timing # # Author: # ---------------------------------------------------------------------------- #set up to test area names and part of states # without locationName defined AreaDictionary['FLZ050']['fullStateName'] = 'Florida' AreaDictionary['FLZ050']['partOfState'] = 'western' AreaDictionary['FLZ057']['fullStateName'] = 'Florida' AreaDictionary['FLZ057']['partOfState'] = 'western' AreaDictionary['FLZ160']['fullStateName'] = 'Florida' AreaDictionary['FLZ160']['partOfState'] = 'central' AreaDictionary['FLZ151']['fullStateName'] = 'Florida' AreaDictionary['FLZ151']['partOfState'] = 'central' AreaDictionary['FLZ043']['fullStateName'] = 'Florida' AreaDictionary['FLZ043']['partOfState'] = 'central' AreaDictionary['FLZ162']['fullStateName'] = 'Florida' AreaDictionary['FLZ162']['partOfState'] = 'central' AreaDictionary['FLZ165']['fullStateName'] = 'Florida' AreaDictionary['FLZ165']['partOfState'] = 'central' AreaDictionary['FLZ056']['fullStateName'] = 'Florida' AreaDictionary['FLZ056']['partOfState'] = 'southern' AreaDictionary['FLZ052']['fullStateName'] = 'Georgia' AreaDictionary['FLZ052']['partOfState'] = 'western' AreaDictionary['FLZ155']['fullStateName'] = 'Georgia' AreaDictionary['FLZ155']['partOfState'] = 'western' AreaDictionary['FLZ061']['fullStateName'] = 'Georgia' AreaDictionary['FLZ061']['partOfState'] = 'southern' AreaDictionary['FLZ148']['fullStateName'] = 'Georgia' AreaDictionary['FLZ148']['partOfState'] = 'southern' AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina' AreaDictionary['FLZ142']['partOfState'] = 'western' AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina' AreaDictionary['FLZ043']['partOfState'] = 'western' #with location name defined AreaDictionary['FLZ050']['fullStateName'] = 'Florida' AreaDictionary['FLZ050']['partOfState'] = 'western' AreaDictionary['FLZ050']['locationName'] = 'Clearfield' AreaDictionary['FLZ057']['fullStateName'] = 'Florida' AreaDictionary['FLZ057']['partOfState'] = 'western' AreaDictionary['FLZ057']['locationName'] = 'Clearfield' AreaDictionary['FLZ160']['fullStateName'] = 'Florida' AreaDictionary['FLZ160']['partOfState'] = 'central' AreaDictionary['FLZ160']['locationName'] = 'Aunt Ruby' AreaDictionary['FLZ151']['fullStateName'] = 'Florida' AreaDictionary['FLZ151']['partOfState'] = 'central' AreaDictionary['FLZ151']['locationName'] = 'Aunt Ruby' AreaDictionary['FLZ043']['fullStateName'] = 'Florida' AreaDictionary['FLZ043']['partOfState'] = 'central' AreaDictionary['FLZ043']['locationName'] = 'Adams' AreaDictionary['FLZ162']['fullStateName'] = 'Florida' AreaDictionary['FLZ162']['partOfState'] = 'central' 
AreaDictionary['FLZ162']['locationName'] = 'Adams' AreaDictionary['FLZ165']['fullStateName'] = 'Florida' AreaDictionary['FLZ165']['partOfState'] = 'central' #AreaDictionary['FLZ165']['locationName'] = 'western' AreaDictionary['FLZ056']['fullStateName'] = 'Florida' AreaDictionary['FLZ056']['partOfState'] = 'southern' AreaDictionary['FLZ056']['locationName'] = 'Tampa' AreaDictionary['FLZ052']['fullStateName'] = 'Georgia' AreaDictionary['FLZ052']['partOfState'] = 'western' AreaDictionary['FLZ052']['locationName'] = 'Tampa' AreaDictionary['FLZ155']['fullStateName'] = 'Georgia' AreaDictionary['FLZ155']['partOfState'] = 'western' AreaDictionary['FLZ155']['locationName'] = 'Atlanta' AreaDictionary['FLZ061']['fullStateName'] = 'Georgia' AreaDictionary['FLZ061']['partOfState'] = 'southern' AreaDictionary['FLZ061']['locationName'] = 'Beach' AreaDictionary['FLZ148']['fullStateName'] = 'Georgia' AreaDictionary['FLZ148']['partOfState'] = 'southern' AreaDictionary['FLZ148']['locationName'] = 'Beach' AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina' AreaDictionary['FLZ142']['partOfState'] = 'western' AreaDictionary['FLZ142']['locationName'] = 'South Park' AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina' AreaDictionary['FLZ043']['partOfState'] = 'western' AreaDictionary['FLZ043']['locationName'] = 'South Park' #for testing of parishes, counties, and areas AreaDictionary['FLC017']['fullStateName'] = 'Louisiana' AreaDictionary['FLC017']['partOfState'] = 'western' AreaDictionary['FLC017']['independentCity'] = 1 AreaDictionary['FLC105']['fullStateName'] = 'Louisiana' AreaDictionary['FLC105']['partOfState'] = 'western' AreaDictionary['FLC027']['fullStateName'] = 'Louisiana' AreaDictionary['FLC027']['partOfState'] = 'western' AreaDictionary['FLC053']['fullStateName'] = 'Florida' AreaDictionary['FLC053']['partOfState'] = 'western' # Begin detailed phrasing of location tests # "Including the cities of St. Petersburg, Clearwater, Largo, ", # "Lakeland, Winter Haven, Bradenton, Bayshore Gardens, ", # "Palmetto, Sebring, Avon Park, Placid Lakes", | 0.807086 | 1 |
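Each Hazard_FFA test entry above pairs a createGrids setup with the checkStrings it expects in the generated product. A minimal, self-contained sketch of that kind of ordered-substring check is shown below; it is an illustration only, not the verification actually performed by TestScript.generalTestScript.

def check_strings_in_order(product_text, check_strings):
    """Return True if every expected string occurs in the product text,
    in the same order as listed (simplified stand-in for the real harness)."""
    position = 0
    for expected in check_strings:
        found = product_text.find(expected, position)
        if found < 0:
            return False
        position = found + len(expected)
    return True

# Example with two of the expected lines from Hazard_FFA_16a:
sample_product = "WGUS62 KTBW 010510\nFFATBW\nURGENT - IMMEDIATE BROADCAST REQUESTED\n"
print(check_strings_in_order(sample_product, ["WGUS62 KTBW 010510", "FFATBW"]))  # True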
main.py | hasanzadeh99/mapna_test_2021 | 0 | 10176 | import time
old_input_value = False
flag_falling_edge = None
start = None
flag_output_mask = False
DELAY_CONST = 10  # delay time in seconds, applied after a falling edge
output = None
def response_function():
global old_input_value, flag_falling_edge, start, flag_output_mask, output
if flag_falling_edge:
output = True
end = time.perf_counter()
if end - start > DELAY_CONST:
output = 0
flag_falling_edge = 0
flag_output_mask = False
input_value = bool(int(input('Please Enter your Input Value: ')))
if old_input_value == False and input_value == True:
if not flag_output_mask: output = input_value
old_input_value = input_value
print('Input Rising Edge detected ... ')
print(f'output is: {output}')
elif old_input_value == False and input_value == False:
if not flag_output_mask: output = input_value
old_input_value = input_value
print(f'output is: {output}')
elif old_input_value == True and input_value == True:
old_input_value = input_value
if not flag_output_mask: output = input_value
print(f'output is: {output}')
elif old_input_value == True and input_value == False:
start = time.perf_counter()
print('Input Falling Edge detected ... ')
flag_falling_edge = True
flag_output_mask = True
old_input_value = input_value
print(f'output is: {output}')
if __name__ == '__main__':
DELAY_CONST=int(input("Hello \nPlease Enter Your delay value here :"))
while True:
response_function()
| import time
old_input_value = False
flag_falling_edge = None
start = None
flag_output_mask = False
DELAY_CONST = 10  # delay time in seconds, applied after a falling edge
output = None
def response_function():
global old_input_value, flag_falling_edge, start, flag_output_mask, output
if flag_falling_edge:
output = True
end = time.perf_counter()
if end - start > DELAY_CONST:
output = 0
flag_falling_edge = 0
flag_output_mask = False
input_value = bool(int(input('Please Enter your Input Value: ')))
if old_input_value == False and input_value == True:
if not flag_output_mask: output = input_value
old_input_value = input_value
print('Input Rising Edge detected ... ')
print(f'output is: {output}')
elif old_input_value == False and input_value == False:
if not flag_output_mask: output = input_value
old_input_value = input_value
print(f'output is: {output}')
elif old_input_value == True and input_value == True:
old_input_value = input_value
if not flag_output_mask: output = input_value
print(f'output is: {output}')
elif old_input_value == True and input_value == False:
start = time.perf_counter()
print('Input Falling Edge detected ... ')
flag_falling_edge = True
flag_output_mask = True
old_input_value = input_value
print(f'output is: {output}')
if __name__ == '__main__':
DELAY_CONST=int(input("Hello \nPlease Enter Your delay value here :"))
while True:
response_function()
| en | 0.504065 | # delay time from falling edge ... . | 3.404546 | 3 |
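The script above implements an off-delay: the output tracks the input, but after a falling edge it is held True for DELAY_CONST seconds before dropping. A non-interactive sketch of the same behaviour, with names chosen here for illustration rather than taken from the original file:

import time

class OffDelay:
    """Hold the output True for `delay` seconds after a falling edge of the input."""
    def __init__(self, delay):
        self.delay = delay
        self._prev = False
        self._fall_time = None

    def update(self, value):
        if self._prev and not value:
            # falling edge: start the hold timer
            self._fall_time = time.perf_counter()
        self._prev = value
        if value:
            self._fall_time = None
            return True
        if self._fall_time is not None and time.perf_counter() - self._fall_time < self.delay:
            return True  # still inside the hold window
        return False

tof = OffDelay(delay=10)
print(tof.update(True))   # True while the input is high
print(tof.update(False))  # stays True until 10 seconds have elapsed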
ansiblemetrics/playbook/num_deprecated_modules.py | radon-h2020/AnsibleMetrics | 1 | 10177 | <filename>ansiblemetrics/playbook/num_deprecated_modules.py
from ansiblemetrics.ansible_modules import DEPRECATED_MODULES_LIST
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumDeprecatedModules(AnsibleMetric):
""" This class measures the number of times tasks use deprecated modules."""
def count(self):
"""Return the deprecated modules occurrence.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_deprecated_modules import NumDeprecatedModules
playbook = '''
- name: Include unique username from register.yml
include_vars: # non deprecated module
file: username_info.yml
- name: Create a service
oc: # deprecated module
state: present
name: myservice
namespace: mynamespace
kind: Service
'''
NumDeprecatedModules(playbook).count()
>> 1
Returns
-------
int
            number of deprecated module occurrences
"""
modules = []
for task in self.tasks:
if not task:
continue
for key in task:
if key in DEPRECATED_MODULES_LIST:
modules.append(key)
return len(modules)
| <filename>ansiblemetrics/playbook/num_deprecated_modules.py
from ansiblemetrics.ansible_modules import DEPRECATED_MODULES_LIST
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumDeprecatedModules(AnsibleMetric):
""" This class measures the number of times tasks use deprecated modules."""
def count(self):
"""Return the deprecated modules occurrence.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_deprecated_modules import NumDeprecatedModules
playbook = '''
- name: Include unique username from register.yml
include_vars: # non deprecated module
file: username_info.yml
- name: Create a service
oc: # deprecated module
state: present
name: myservice
namespace: mynamespace
kind: Service
'''
NumDeprecatedModules(playbook).count()
>> 1
Returns
-------
int
            number of deprecated module occurrences
"""
modules = []
for task in self.tasks:
if not task:
continue
for key in task:
if key in DEPRECATED_MODULES_LIST:
modules.append(key)
return len(modules)
| en | 0.453836 | This class measures the number of times tasks use deprecated modules. Return the deprecated modules occurrence. Example ------- .. highlight:: python .. code-block:: python from ansiblemetrics.general.num_deprecated_modules import NumDeprecatedModules playbook = ''' - name: Include unique username from register.yml include_vars: # non deprecated module file: username_info.yml - name: Create a service oc: # deprecated module state: present name: myservice namespace: mynamespace kind: Service ''' NumDeprecatedModules(playbook).count() >> 1 Returns ------- int deprecated modules occurrence | 2.534623 | 3 |
app/api/v1/validators/validators.py | GraceKiarie/iReporter | 1 | 10178 | """ This module does validation for data input in incidents """
import re
class Validate():
"""
    methods for validating incident input data
"""
def valid_email(self, email):
self.vemail = re.match(
r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if not self.vemail:
return None
return True
def valid_password(self, password):
self.password = re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', password)
if self.password is None:
return None
return True
def valid_string(self, value):
"""
        checks that the given value is a string
"""
self.value = value
if not isinstance(self.value, str):
return None
return True
| """ This module does validation for data input in incidents """
import re
class Validate():
"""
    methods for validating incident input data
"""
def valid_email(self, email):
self.vemail = re.match(
r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if not self.vemail:
return None
return True
def valid_password(self, password):
self.password = re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', password)
if self.password is None:
return None
return True
def valid_string(self, value):
"""
        checks that the given value is a string
"""
self.value = value
if not isinstance(self.value, str):
return None
return True
| en | 0.547579 | This module does validation for data input in incidents methods for validatin incidents input data #$%^&+=]{8,}', password) checks if value in data is empty | 3.571932 | 4 |
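A short usage sketch for the Validate class above; the import path is taken from the repository layout shown in this row and may differ in practice.

from app.api.v1.validators.validators import Validate

validator = Validate()
payload = {"email": "user@example.com",
           "password": "Str0ngPass1",
           "comment": "Pothole on 5th Avenue"}

if not validator.valid_email(payload["email"]):
    print("invalid email address")
if not validator.valid_password(payload["password"]):
    print("password must be at least 8 characters from the allowed set")
if not validator.valid_string(payload["comment"]):
    print("comment must be a string")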
bin/find_latest_versions.py | ebreton/ghost-in-a-shell | 2 | 10179 | <reponame>ebreton/ghost-in-a-shell
#!/usr/bin/python
from distutils.version import LooseVersion
import argparse
import logging
import requests
import re
session = requests.Session()
# authorization token
TOKEN_URL = "https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull"
# find all tags
TAGS_URL = "https://index.docker.io/v2/%s/tags/list"
TAG_RE = re.compile("^[\d]+(\.[\d]+)*$")
# get image digest for target
TARGET_DIGEST = "https://index.docker.io/v2/%(repository)s/manifests/%(tag)s"
class Fetcher:
DIGEST_HEADER = {}
def __init__(self, repository):
self.repository = repository
self.token = self.get_token()
self.headers = {"Authorization": "Bearer %s"% self.token}
self.headers_for_tags = {
"Authorization": "Bearer %s"% self.token,
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
}
logging.debug("initialized fetcher for %s", self.repository)
def get_token(self):
response = session.get(TOKEN_URL % self.repository)
response.raise_for_status()
token = response.json().get("token")
logging.debug("got token: %s", token)
return token
def get_versions(self):
response = session.get(TAGS_URL % self.repository, headers=self.headers_for_tags)
response.raise_for_status()
all_tags = response.json().get("tags")
numbered_tags = filter(lambda x: TAG_RE.match(x), all_tags)
versions = map(LooseVersion, numbered_tags)
logging.debug("got tags: %s", versions)
return versions
def find_latest(repository):
fetcher = Fetcher(repository)
all_tags = fetcher.get_versions()
return max(all_tags)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage="""Version checker script
This file retrieves the latest version of the ghost container image from Docker Hub.
It can be run with both python 2.7 and 3.6""")
parser.add_argument("repository", nargs='?',
help="repository name [default:library/ghost]",
default="library/ghost")
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-q', '--quiet', action='store_true')
args = parser.parse_args()
# set up level of logging
level = logging.INFO
if args.quiet:
level = logging.WARNING
elif args.debug:
level = logging.DEBUG
# set up logging to console
logging.basicConfig(format='%(levelname)s - %(funcName)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(level)
logging.debug(args)
# version needs to be print to output in order to be retrieved by Makefile
print(find_latest(args.repository))
| #!/usr/bin/python
from distutils.version import LooseVersion
import argparse
import logging
import requests
import re
session = requests.Session()
# authorization token
TOKEN_URL = "https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull"
# find all tags
TAGS_URL = "https://index.docker.io/v2/%s/tags/list"
TAG_RE = re.compile("^[\d]+(\.[\d]+)*$")
# get image digest for target
TARGET_DIGEST = "https://index.docker.io/v2/%(repository)s/manifests/%(tag)s"
class Fetcher:
DIGEST_HEADER = {}
def __init__(self, repository):
self.repository = repository
self.token = self.get_token()
self.headers = {"Authorization": "Bearer %s"% self.token}
self.headers_for_tags = {
"Authorization": "Bearer %s"% self.token,
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
}
logging.debug("initialized fetcher for %s", self.repository)
def get_token(self):
response = session.get(TOKEN_URL % self.repository)
response.raise_for_status()
token = response.json().get("token")
logging.debug("got token: %s", token)
return token
def get_versions(self):
response = session.get(TAGS_URL % self.repository, headers=self.headers_for_tags)
response.raise_for_status()
all_tags = response.json().get("tags")
numbered_tags = filter(lambda x: TAG_RE.match(x), all_tags)
versions = map(LooseVersion, numbered_tags)
logging.debug("got tags: %s", versions)
return versions
def find_latest(repository):
fetcher = Fetcher(repository)
all_tags = fetcher.get_versions()
return max(all_tags)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage="""Version checker script
This file retrieves the latest version of the ghost container image from Docker Hub.
It can be run with both python 2.7 and 3.6""")
parser.add_argument("repository", nargs='?',
help="repository name [default:library/ghost]",
default="library/ghost")
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-q', '--quiet', action='store_true')
args = parser.parse_args()
# set up level of logging
level = logging.INFO
if args.quiet:
level = logging.WARNING
elif args.debug:
level = logging.DEBUG
# set up logging to console
logging.basicConfig(format='%(levelname)s - %(funcName)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(level)
logging.debug(args)
# version needs to be print to output in order to be retrieved by Makefile
print(find_latest(args.repository)) | en | 0.784932 | #!/usr/bin/python # authorization token # find all tags # get image digest for target Version checker script This file retreives the latest version of ghost container image from docker hub It can be run with both python 2.7 and 3.6 # set up level of logging # set up logging to console # version needs to be print to output in order to be retrieved by Makefile | 2.415279 | 2 |
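The version checker above can be used from the command line or imported; a brief usage sketch, assuming network access to Docker Hub:

# Command line:
#   python bin/find_latest_versions.py library/ghost
#   python bin/find_latest_versions.py --debug library/nginx

# Programmatic use, with the functions defined above in scope:
latest = find_latest("library/ghost")       # highest numeric tag as a LooseVersion
print("latest ghost tag:", latest)

fetcher = Fetcher("library/nginx")          # the helper works for any public repository
print(sorted(fetcher.get_versions())[-3:])  # three most recent numeric tags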
tests/conftest.py | badarsebard/terraform-pytest | 0 | 10180 | from .terraform import TerraformManager
import pytest
from _pytest.tmpdir import TempPathFactory
@pytest.fixture(scope='session')
def tfenv(tmp_path_factory: TempPathFactory):
env_vars = {
}
with TerraformManager(path_factory=tmp_path_factory, env_vars=env_vars) as deployment:
yield deployment
| from .terraform import TerraformManager
import pytest
from _pytest.tmpdir import TempPathFactory
@pytest.fixture(scope='session')
def tfenv(tmp_path_factory: TempPathFactory):
env_vars = {
}
with TerraformManager(path_factory=tmp_path_factory, env_vars=env_vars) as deployment:
yield deployment
| none | 1 | 1.708254 | 2 |
|
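The session-scoped fixture above yields a live Terraform deployment for the whole test run. A hedged example of a test module consuming it; the attributes of the yielded object depend on TerraformManager, which is not shown here, so the assertions are deliberately minimal.

import pytest

def test_deployment_created(tfenv):
    # `tfenv` is whatever TerraformManager yields; its real API is defined elsewhere.
    assert tfenv is not None

@pytest.mark.usefixtures("tfenv")
def test_against_deployed_stack():
    ...  # real assertions would exercise the deployed infrastructure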
pelicanconf.py | myrle-krantz/treasurer-site | 1 | 10181 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# vim: encoding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from datetime import date
# import os
# import sys
PATH = 'content'
TIMEZONE = 'UTC'
DEFAULT_LANG = u'en'
AUTHOR = u'Treasurer Team'
SITENAME = u'Apache Treasurer'
SITEDOMAIN = 'treasurer.apache.org'
SITEURL = 'https://treasurer.apache.org'
# SITELOGO = 'https://treasurer.apache.org/images/logo.png'
# SITEDESC = u'<blank>'
SITEREPOSITORY = 'https://github.com/apache/treasurer-site/blob/main/content/pages/'
TRADEMARKS = u'Apache and the Apache feather logo are trademarks or registered trademarks'
CURRENTYEAR = date.today().year
# Save pages using full directory preservation
PAGES_PATHS = ['content']
# PATH_METADATA= '(?P<path_no_ext>.*)\..*'
# PAGE_SAVE_AS= '{path_no_ext}.html'
PAGE_URL = '{slug}.html'
SLUGIFY_SOURCE = 'basename'
PAGE_SAVE_AS = '{slug}.html'
# We want to serve any images
STATIC_PATHS = ['.htaccess', 'images']
# We don't use articles, but we don't want pelican to think
# that content/ contains articles.
ARTICLE_PATHS = ['articles']
# Disable these pages
ARCHIVES_SAVE_AS = ''
ARTICLE_SAVE_AS = ''
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
INDEX_SAVE_AS = ''
TAGS_SAVE_AS = ''
# Enable ATOM feed and Disable other feeds
FEED_DOMAIN = SITEURL
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Pelican Plugins
# The provided location. If the buildbot does not have a new plugin then look into requirements.txt
PLUGIN_PATHS = ['./theme/plugins']
PLUGINS = ['toc', 'pelican-gfm', 'sitemap']
# TOC Generator
TOC_HEADERS = r"h[1-6]"
# Sitemap Generator
SITEMAP = {
"exclude": ["tag/", "category/"],
"format": "xml",
"priorities": {
"articles": 0.1,
"indexes": 0.1,
"pages": 0.8
},
"changefreqs": {
"articles": "never",
"indexes": "never",
"pages": "monthly"
}
}
# Unused links
LINKS = ( )
SOCIAL = ( )
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# vim: encoding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from datetime import date
# import os
# import sys
PATH = 'content'
TIMEZONE = 'UTC'
DEFAULT_LANG = u'en'
AUTHOR = u'Treasurer Team'
SITENAME = u'Apache Treasurer'
SITEDOMAIN = 'treasurer.apache.org'
SITEURL = 'https://treasurer.apache.org'
# SITELOGO = 'https://treasurer.apache.org/images/logo.png'
# SITEDESC = u'<blank>'
SITEREPOSITORY = 'https://github.com/apache/treasurer-site/blob/main/content/pages/'
TRADEMARKS = u'Apache and the Apache feather logo are trademarks or registered trademarks'
CURRENTYEAR = date.today().year
# Save pages using full directory preservation
PAGES_PATHS = ['content']
# PATH_METADATA= '(?P<path_no_ext>.*)\..*'
# PAGE_SAVE_AS= '{path_no_ext}.html'
PAGE_URL = '{slug}.html'
SLUGIFY_SOURCE = 'basename'
PAGE_SAVE_AS = '{slug}.html'
# We want to serve any images
STATIC_PATHS = ['.htaccess', 'images']
# We don't use articles, but we don't want pelican to think
# that content/ contains articles.
ARTICLE_PATHS = ['articles']
# Disable these pages
ARCHIVES_SAVE_AS = ''
ARTICLE_SAVE_AS = ''
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
INDEX_SAVE_AS = ''
TAGS_SAVE_AS = ''
# Enable ATOM feed and Disable other feeds
FEED_DOMAIN = SITEURL
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Pelican Plugins
# The provided location. If the buildbot does not have a new plugin then look into requirements.txt
PLUGIN_PATHS = ['./theme/plugins']
PLUGINS = ['toc', 'pelican-gfm', 'sitemap']
# TOC Generator
TOC_HEADERS = r"h[1-6]"
# Sitemap Generator
SITEMAP = {
"exclude": ["tag/", "category/"],
"format": "xml",
"priorities": {
"articles": 0.1,
"indexes": 0.1,
"pages": 0.8
},
"changefreqs": {
"articles": "never",
"indexes": "never",
"pages": "monthly"
}
}
# Unused links
LINKS = ( )
SOCIAL = ( )
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| en | 0.830465 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # vim: encoding=utf-8 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import os # import sys # SITELOGO = 'https://treasurer.apache.org/images/logo.png' # SITEDESC = u'<blank>' # Save pages using full directory preservation # PATH_METADATA= '(?P<path_no_ext>.*)\..*' # PAGE_SAVE_AS= '{path_no_ext}.html' # We want to serve any images # We don't use articles, but we don't want pelican to think # that content/ contains articles. # Disable these pages # Enable ATOM feed and Disable other feeds # Pelican Plugins # The provided location. If the buildbot does not have a new plugin then look into requirements.txt # TOC Generator # Sitemap Generator # Unused links # Uncomment following line if you want document-relative URLs when developing # RELATIVE_URLS = True | 1.53358 | 2 |
TurtleArt/taturtle.py | sugar-activities/4585-activity | 0 | 10182 | # -*- coding: utf-8 -*-
#Copyright (c) 2010,12 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from random import uniform
from math import sin, cos, pi, sqrt
from gettext import gettext as _
import gtk
import cairo
from taconstants import TURTLE_LAYER, DEFAULT_TURTLE_COLORS
from tasprite_factory import SVG, svg_str_to_pixbuf
from tacanvas import wrap100, COLOR_TABLE
from sprites import Sprite
from tautils import debug_output
SHAPES = 36
def generate_turtle_pixbufs(colors):
""" Generate pixbufs for generic turtles """
shapes = []
svg = SVG()
svg.set_scale(1.0)
for i in range(SHAPES):
svg.set_orientation(i * 10)
shapes.append(svg_str_to_pixbuf(svg.turtle(colors)))
return shapes
class Turtles:
def __init__(self, sprite_list):
""" Class to hold turtles """
self.dict = dict()
self.sprite_list = sprite_list
self.default_pixbufs = []
def get_turtle(self, k, append=False, colors=None):
""" Find a turtle """
if k in self.dict:
return self.dict[k]
elif not append:
return None
else:
if colors == None:
Turtle(self, k)
elif type(colors) in [list, tuple]:
Turtle(self, k, colors)
else:
Turtle(self, k, colors.split(','))
return self.dict[k]
def get_turtle_key(self, turtle):
""" Find a turtle's name """
for k in iter(self.dict):
if self.dict[k] == turtle:
return k
return None
def turtle_count(self):
""" How many turtles are there? """
return(len(self.dict))
def add_to_dict(self, k, turtle):
""" Add a new turtle """
self.dict[k] = turtle
def remove_from_dict(self, k):
""" Delete a turtle """
if k in self.dict:
del(self.dict[k])
def show_all(self):
""" Make all turtles visible """
for k in iter(self.dict):
self.dict[k].show()
def spr_to_turtle(self, spr):
""" Find the turtle that corresponds to sprite spr. """
for k in iter(self.dict):
if spr == self.dict[k].spr:
return self.dict[k]
return None
def get_pixbufs(self):
""" Get the pixbufs for the default turtle shapes. """
if self.default_pixbufs == []:
self.default_pixbufs = generate_turtle_pixbufs(
["#008000", "#00A000"])
return(self.default_pixbufs)
class Turtle:
def __init__(self, turtles, key, turtle_colors=None):
""" The turtle is not a block, just a sprite with an orientation """
self.x = 0
self.y = 0
self.hidden = False
self.shapes = []
self.custom_shapes = False
self.type = 'turtle'
self.name = key
self.heading = 0
self.pen_shade = 50
self.pen_color = 0
self.pen_gray = 100
self.pen_size = 5
self.pen_state = True
self.label_block = None
self._prep_shapes(key, turtles, turtle_colors)
# Choose a random angle from which to attach the turtle label.
if turtles.sprite_list is not None:
self.spr = Sprite(turtles.sprite_list, 0, 0, self.shapes[0])
angle = uniform(0, pi * 4 / 3.0) # 240 degrees
w = self.shapes[0].get_width()
r = w * 0.67
            # Restrict the angle to the sides: 30-150 or 210-330
if angle > pi * 2 / 3.0:
angle += pi / 2.0 # + 90
self.label_xy = [int(r * sin(angle)),
int(r * cos(angle) + w / 2.0)]
else:
angle += pi / 6.0 # + 30
self.label_xy = [int(r * sin(angle) + w / 2.0),
int(r * cos(angle) + w / 2.0)]
else:
self.spr = None
turtles.add_to_dict(key, self)
def _prep_shapes(self, name, turtles=None, turtle_colors=None):
# If the turtle name is an int, we'll use a palette color as the
# turtle color
try:
int_key = int(name)
use_color_table = True
except ValueError:
use_color_table = False
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
elif use_color_table:
fill = wrap100(int_key)
stroke = wrap100(fill + 10)
self.colors = ['#%06x' % (COLOR_TABLE[fill]),
'#%06x' % (COLOR_TABLE[stroke])]
self.shapes = generate_turtle_pixbufs(self.colors)
else:
if turtles is not None:
self.colors = DEFAULT_TURTLE_COLORS
self.shapes = turtles.get_pixbufs()
def set_turtle_colors(self, turtle_colors):
''' reset the colors of a preloaded turtle '''
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
self.set_heading(self.heading)
def set_shapes(self, shapes, i=0):
""" Reskin the turtle """
n = len(shapes)
if n == 1 and i > 0: # set shape[i]
if i < len(self.shapes):
self.shapes[i] = shapes[0]
elif n == SHAPES: # all shapes have been precomputed
self.shapes = shapes[:]
else: # rotate shapes
if n != 1:
debug_output("%d images passed to set_shapes: ignoring" % (n),
self.tw.running_sugar)
if self.heading == 0: # rotate the shapes
images = []
w, h = shapes[0].get_width(), shapes[0].get_height()
nw = nh = int(sqrt(w * w + h * h))
for i in range(SHAPES):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, nw, nh)
context = cairo.Context(surface)
context = gtk.gdk.CairoContext(context)
context.translate(nw / 2., nh / 2.)
context.rotate(i * 10 * pi / 180.)
context.translate(-nw / 2., -nh / 2.)
context.set_source_pixbuf(shapes[0], (nw - w) / 2.,
(nh - h) / 2.)
context.rectangle(0, 0, nw, nh)
context.fill()
images.append(surface)
self.shapes = images[:]
else: # associate shape with image at current heading
j = int(self.heading + 5) % 360 / (360 / SHAPES)
self.shapes[j] = shapes[0]
self.custom_shapes = True
self.show()
def reset_shapes(self):
""" Reset the shapes to the standard turtle """
if self.custom_shapes:
self.shapes = generate_turtle_pixbufs(self.colors)
self.custom_shapes = False
def set_heading(self, heading):
""" Set the turtle heading (one shape per 360/SHAPES degrees) """
self.heading = heading
i = (int(self.heading + 5) % 360) / (360 / SHAPES)
if not self.hidden and self.spr is not None:
try:
self.spr.set_shape(self.shapes[i])
except IndexError:
self.spr.set_shape(self.shapes[0])
def set_color(self, color):
""" Set the pen color for this turtle. """
self.pen_color = color
def set_gray(self, gray):
""" Set the pen gray level for this turtle. """
self.pen_gray = gray
def set_shade(self, shade):
""" Set the pen shade for this turtle. """
self.pen_shade = shade
def set_pen_size(self, pen_size):
""" Set the pen size for this turtle. """
self.pen_size = pen_size
def set_pen_state(self, pen_state):
""" Set the pen state (down==True) for this turtle. """
self.pen_state = pen_state
def hide(self):
""" Hide the turtle. """
if self.spr is not None:
self.spr.hide()
if self.label_block is not None:
self.label_block.spr.hide()
self.hidden = True
def show(self):
""" Show the turtle. """
if self.spr is not None:
self.spr.set_layer(TURTLE_LAYER)
self.hidden = False
self.move((self.x, self.y))
self.set_heading(self.heading)
if self.label_block is not None:
self.label_block.spr.move((self.x + self.label_xy[0],
self.y + self.label_xy[1]))
self.label_block.spr.set_layer(TURTLE_LAYER + 1)
def move(self, pos):
""" Move the turtle. """
self.x, self.y = int(pos[0]), int(pos[1])
if not self.hidden and self.spr is not None:
self.spr.move(pos)
if self.label_block is not None:
self.label_block.spr.move((pos[0] + self.label_xy[0],
pos[1] + self.label_xy[1]))
return(self.x, self.y)
def get_name(self):
''' return turtle name (key) '''
return self.name
def get_xy(self):
""" Return the turtle's x, y coordinates. """
return(self.x, self.y)
def get_heading(self):
""" Return the turtle's heading. """
return(self.heading)
def get_color(self):
""" Return the turtle's color. """
return(self.pen_color)
def get_gray(self):
""" Return the turtle's gray level. """
return(self.pen_gray)
def get_shade(self):
""" Return the turtle's shade. """
return(self.pen_shade)
def get_pen_size(self):
""" Return the turtle's pen size. """
return(self.pen_size)
def get_pen_state(self):
""" Return the turtle's pen state. """
return(self.pen_state)
| # -*- coding: utf-8 -*-
#Copyright (c) 2010,12 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from random import uniform
from math import sin, cos, pi, sqrt
from gettext import gettext as _
import gtk
import cairo
from taconstants import TURTLE_LAYER, DEFAULT_TURTLE_COLORS
from tasprite_factory import SVG, svg_str_to_pixbuf
from tacanvas import wrap100, COLOR_TABLE
from sprites import Sprite
from tautils import debug_output
SHAPES = 36
def generate_turtle_pixbufs(colors):
""" Generate pixbufs for generic turtles """
shapes = []
svg = SVG()
svg.set_scale(1.0)
for i in range(SHAPES):
svg.set_orientation(i * 10)
shapes.append(svg_str_to_pixbuf(svg.turtle(colors)))
return shapes
class Turtles:
def __init__(self, sprite_list):
""" Class to hold turtles """
self.dict = dict()
self.sprite_list = sprite_list
self.default_pixbufs = []
def get_turtle(self, k, append=False, colors=None):
""" Find a turtle """
if k in self.dict:
return self.dict[k]
elif not append:
return None
else:
if colors == None:
Turtle(self, k)
elif type(colors) in [list, tuple]:
Turtle(self, k, colors)
else:
Turtle(self, k, colors.split(','))
return self.dict[k]
def get_turtle_key(self, turtle):
""" Find a turtle's name """
for k in iter(self.dict):
if self.dict[k] == turtle:
return k
return None
def turtle_count(self):
""" How many turtles are there? """
return(len(self.dict))
def add_to_dict(self, k, turtle):
""" Add a new turtle """
self.dict[k] = turtle
def remove_from_dict(self, k):
""" Delete a turtle """
if k in self.dict:
del(self.dict[k])
def show_all(self):
""" Make all turtles visible """
for k in iter(self.dict):
self.dict[k].show()
def spr_to_turtle(self, spr):
""" Find the turtle that corresponds to sprite spr. """
for k in iter(self.dict):
if spr == self.dict[k].spr:
return self.dict[k]
return None
def get_pixbufs(self):
""" Get the pixbufs for the default turtle shapes. """
if self.default_pixbufs == []:
self.default_pixbufs = generate_turtle_pixbufs(
["#008000", "#00A000"])
return(self.default_pixbufs)
class Turtle:
def __init__(self, turtles, key, turtle_colors=None):
""" The turtle is not a block, just a sprite with an orientation """
self.x = 0
self.y = 0
self.hidden = False
self.shapes = []
self.custom_shapes = False
self.type = 'turtle'
self.name = key
self.heading = 0
self.pen_shade = 50
self.pen_color = 0
self.pen_gray = 100
self.pen_size = 5
self.pen_state = True
self.label_block = None
self._prep_shapes(key, turtles, turtle_colors)
# Choose a random angle from which to attach the turtle label.
if turtles.sprite_list is not None:
self.spr = Sprite(turtles.sprite_list, 0, 0, self.shapes[0])
angle = uniform(0, pi * 4 / 3.0) # 240 degrees
w = self.shapes[0].get_width()
r = w * 0.67
            # Restrict the angle to the sides: 30-150 or 210-330
if angle > pi * 2 / 3.0:
angle += pi / 2.0 # + 90
self.label_xy = [int(r * sin(angle)),
int(r * cos(angle) + w / 2.0)]
else:
angle += pi / 6.0 # + 30
self.label_xy = [int(r * sin(angle) + w / 2.0),
int(r * cos(angle) + w / 2.0)]
else:
self.spr = None
turtles.add_to_dict(key, self)
def _prep_shapes(self, name, turtles=None, turtle_colors=None):
# If the turtle name is an int, we'll use a palette color as the
# turtle color
try:
int_key = int(name)
use_color_table = True
except ValueError:
use_color_table = False
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
elif use_color_table:
fill = wrap100(int_key)
stroke = wrap100(fill + 10)
self.colors = ['#%06x' % (COLOR_TABLE[fill]),
'#%06x' % (COLOR_TABLE[stroke])]
self.shapes = generate_turtle_pixbufs(self.colors)
else:
if turtles is not None:
self.colors = DEFAULT_TURTLE_COLORS
self.shapes = turtles.get_pixbufs()
def set_turtle_colors(self, turtle_colors):
''' reset the colors of a preloaded turtle '''
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
self.set_heading(self.heading)
def set_shapes(self, shapes, i=0):
""" Reskin the turtle """
n = len(shapes)
if n == 1 and i > 0: # set shape[i]
if i < len(self.shapes):
self.shapes[i] = shapes[0]
elif n == SHAPES: # all shapes have been precomputed
self.shapes = shapes[:]
else: # rotate shapes
if n != 1:
debug_output("%d images passed to set_shapes: ignoring" % (n),
self.tw.running_sugar)
if self.heading == 0: # rotate the shapes
images = []
w, h = shapes[0].get_width(), shapes[0].get_height()
nw = nh = int(sqrt(w * w + h * h))
for i in range(SHAPES):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, nw, nh)
context = cairo.Context(surface)
context = gtk.gdk.CairoContext(context)
context.translate(nw / 2., nh / 2.)
context.rotate(i * 10 * pi / 180.)
context.translate(-nw / 2., -nh / 2.)
context.set_source_pixbuf(shapes[0], (nw - w) / 2.,
(nh - h) / 2.)
context.rectangle(0, 0, nw, nh)
context.fill()
images.append(surface)
self.shapes = images[:]
else: # associate shape with image at current heading
j = int(self.heading + 5) % 360 / (360 / SHAPES)
self.shapes[j] = shapes[0]
self.custom_shapes = True
self.show()
def reset_shapes(self):
""" Reset the shapes to the standard turtle """
if self.custom_shapes:
self.shapes = generate_turtle_pixbufs(self.colors)
self.custom_shapes = False
def set_heading(self, heading):
""" Set the turtle heading (one shape per 360/SHAPES degrees) """
self.heading = heading
i = (int(self.heading + 5) % 360) / (360 / SHAPES)
if not self.hidden and self.spr is not None:
try:
self.spr.set_shape(self.shapes[i])
except IndexError:
self.spr.set_shape(self.shapes[0])
def set_color(self, color):
""" Set the pen color for this turtle. """
self.pen_color = color
def set_gray(self, gray):
""" Set the pen gray level for this turtle. """
self.pen_gray = gray
def set_shade(self, shade):
""" Set the pen shade for this turtle. """
self.pen_shade = shade
def set_pen_size(self, pen_size):
""" Set the pen size for this turtle. """
self.pen_size = pen_size
def set_pen_state(self, pen_state):
""" Set the pen state (down==True) for this turtle. """
self.pen_state = pen_state
def hide(self):
""" Hide the turtle. """
if self.spr is not None:
self.spr.hide()
if self.label_block is not None:
self.label_block.spr.hide()
self.hidden = True
def show(self):
""" Show the turtle. """
if self.spr is not None:
self.spr.set_layer(TURTLE_LAYER)
self.hidden = False
self.move((self.x, self.y))
self.set_heading(self.heading)
if self.label_block is not None:
self.label_block.spr.move((self.x + self.label_xy[0],
self.y + self.label_xy[1]))
self.label_block.spr.set_layer(TURTLE_LAYER + 1)
def move(self, pos):
""" Move the turtle. """
self.x, self.y = int(pos[0]), int(pos[1])
if not self.hidden and self.spr is not None:
self.spr.move(pos)
if self.label_block is not None:
self.label_block.spr.move((pos[0] + self.label_xy[0],
pos[1] + self.label_xy[1]))
return(self.x, self.y)
def get_name(self):
''' return turtle name (key) '''
return self.name
def get_xy(self):
""" Return the turtle's x, y coordinates. """
return(self.x, self.y)
def get_heading(self):
""" Return the turtle's heading. """
return(self.heading)
def get_color(self):
""" Return the turtle's color. """
return(self.pen_color)
def get_gray(self):
""" Return the turtle's gray level. """
return(self.pen_gray)
def get_shade(self):
""" Return the turtle's shade. """
return(self.pen_shade)
def get_pen_size(self):
""" Return the turtle's pen size. """
return(self.pen_size)
def get_pen_state(self):
""" Return the turtle's pen state. """
return(self.pen_state)
| en | 0.792446 | # -*- coding: utf-8 -*- #Copyright (c) 2010,12 <NAME> #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. Generate pixbufs for generic turtles Class to hold turtles Find a turtle Find a turtle's name How many turtles are there? Add a new turtle Delete a turtle Make all turtles visible Find the turtle that corresponds to sprite spr. Get the pixbufs for the default turtle shapes. The turtle is not a block, just a sprite with an orientation # Choose a random angle from which to attach the turtle label. # 240 degrees # Restrict angle the the sides 30-150; 210-330 # + 90 # + 30 # If the turtle name is an int, we'll use a palette color as the # turtle color reset the colors of a preloaded turtle Reskin the turtle # set shape[i] # all shapes have been precomputed # rotate shapes # rotate the shapes # associate shape with image at current heading Reset the shapes to the standard turtle Set the turtle heading (one shape per 360/SHAPES degrees) Set the pen color for this turtle. Set the pen gray level for this turtle. Set the pen shade for this turtle. Set the pen size for this turtle. Set the pen state (down==True) for this turtle. Hide the turtle. Show the turtle. Move the turtle. return turtle name (key) Return the turtle's x, y coordinates. Return the turtle's heading. Return the turtle's color. Return the turtle's gray level. Return the turtle's shade. Return the turtle's pen size. Return the turtle's pen state. | 2.184829 | 2 |
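In the turtle code above, the image shown for a turtle is picked from 36 pre-rotated pixbufs, one per 10 degrees of heading. A small, GTK-free sketch of that index calculation (the original runs under Python 2, where `/` on ints already floors; `//` is used here to make that explicit):

SHAPES = 36

def shape_index(heading):
    """Map a heading in degrees to one of the 36 pre-rotated images,
    rounding to the nearest 10-degree step."""
    return (int(heading + 5) % 360) // (360 // SHAPES)

print(shape_index(0))    # 0
print(shape_index(94))   # 9  (94 degrees rounds to 90)
print(shape_index(359))  # 0  (wraps back to the 0-degree image)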
django_backend/group.py | holg/django_backend | 0 | 10183 | try:
from django.forms.utils import pretty_name
except ImportError:
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string
from .compat import context_flatten
class Group(list):
"""
A simplistic representation of backends that are related and should be
displayed as one "group" in the backend (e.g. as one box in the sidebar).
"""
template_name = 'django_backend/_group.html'
def __init__(self, id, name=None, position=0, template_name=None):
self.id = id
if name is None:
name = pretty_name(id)
self.template_name = template_name or self.template_name
self.name = name
self.position = position
super(Group, self).__init__()
@property
def backends(self):
return list(self)
def get_context_data(self, context, **kwargs):
data = {
'group': self,
}
data.update(kwargs)
return data
def get_template_name(self):
return self.template_name
def render(self, context):
context_data = {}
if isinstance(context, Context):
context_data.update(context_flatten(context))
context_data = self.get_context_data(context, **context_data)
return render_to_string(self.get_template_name(), context_data)
| try:
from django.forms.utils import pretty_name
except ImportError:
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string
from .compat import context_flatten
class Group(list):
"""
A simplistic representation of backends that are related and should be
displayed as one "group" in the backend (e.g. as one box in the sidebar).
"""
template_name = 'django_backend/_group.html'
def __init__(self, id, name=None, position=0, template_name=None):
self.id = id
if name is None:
name = pretty_name(id)
self.template_name = template_name or self.template_name
self.name = name
self.position = position
super(Group, self).__init__()
@property
def backends(self):
return list(self)
def get_context_data(self, context, **kwargs):
data = {
'group': self,
}
data.update(kwargs)
return data
def get_template_name(self):
return self.template_name
def render(self, context):
context_data = {}
if isinstance(context, Context):
context_data.update(context_flatten(context))
context_data = self.get_context_data(context, **context_data)
return render_to_string(self.get_template_name(), context_data)
| en | 0.924972 | A simplistic representation of backends that are related and should be displayed as one "group" in the backend (e.g. as one box in the sidebar). | 2.072056 | 2 |
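A minimal sketch of how the Group container above might be used to organise backends for a sidebar; the backend entries are placeholder strings (real code would append backend instances), and render() is omitted because it needs a Django template context.

from django_backend.group import Group  # import path assumed from the module above

content = Group('content', position=10)
content.append('PageBackend')
content.append('ArticleBackend')

media = Group('media_library')  # name defaults to pretty_name('media_library'), i.e. 'Media library'
media.append('ImageBackend')

for group in sorted([content, media], key=lambda g: g.position):
    print(group.name, group.backends)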
src/eodc_openeo_bindings/map_comparison_processes.py | eodcgmbh/eodc-openeo-bindings | 0 | 10184 | <filename>src/eodc_openeo_bindings/map_comparison_processes.py
"""
"""
from eodc_openeo_bindings.map_utils import map_default
def map_lt(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'lt', 'apply', param_dict)
def map_lte(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'lte', 'apply', param_dict)
def map_gt(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'gt', 'apply', param_dict)
def map_gte(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'gte', 'apply', param_dict)
def map_eq(process):
"""
"""
param_dict = {'y': 'numpy.array'}
# NOTE: how to map type dynamically to support strings?
if 'delta' in process['arguments']:
param_dict['delta'] = 'int'
if 'case_sensitive' in process['arguments']:
param_dict['case_sensitive'] = 'bool'
return map_default(process, 'eq', 'apply', param_dict)
| <filename>src/eodc_openeo_bindings/map_comparison_processes.py
"""
"""
from eodc_openeo_bindings.map_utils import map_default
def map_lt(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'lt', 'apply', param_dict)
def map_lte(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'lte', 'apply', param_dict)
def map_gt(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'gt', 'apply', param_dict)
def map_gte(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'gte', 'apply', param_dict)
def map_eq(process):
"""
"""
param_dict = {'y': 'numpy.array'}
# NOTE: how to map type dynamically to support strings?
if 'delta' in process['arguments']:
param_dict['delta'] = 'int'
if 'case_sensitive' in process['arguments']:
param_dict['case_sensitive'] = 'bool'
return map_default(process, 'eq', 'apply', param_dict)
| en | 0.845223 | # NOTE: how to map type dynamically to support strings? | 2.566149 | 3 |
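For orientation, these mappers receive openEO process-graph nodes whose 'arguments' dict drives which parameters get typed; a hedged sketch of such a node is shown below (the node content is hypothetical, and what map_default ultimately returns is internal to eodc_openeo_bindings and not shown here):
# Hypothetical openEO process node of the kind these mappers receive; only the keys in
# 'arguments' that the code above checks ('delta', 'case_sensitive') change the mapping.
process = {
    "process_id": "eq",
    "arguments": {"x": {"from_node": "load_data"}, "y": 0.5, "delta": 1},
}
# map_eq(process) would extend param_dict with {'delta': 'int'} before calling map_default.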
scripts/flow_tests/__init__.py | rombie/contrail-test | 5 | 10185 | <reponame>rombie/contrail-test<gh_stars>1-10
"""FLOW RELATED SYSTEM TEST CASES."""
| """FLOW RELATED SYSTEM TEST CASES.""" | en | 0.741739 | FLOW RELATED SYSTEM TEST CASES. | 0.997169 | 1 |
server/main.py | KejiaQiang/Spicy_pot_search | 1 | 10186 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, request, abort, render_template
from datetime import timedelta
import pymysql
from search import start_search, decorate
page_dir = "E:/WEBPAGES_RAW"
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
connection = pymysql.connect(host="localhost",port=3306,user="root",db="spicy_pot")
cursor = connection.cursor()
@app.route('/')
def homepage():
return render_template("root.html")
@app.route('/search')
def search():
word = request.args.get('s')
page = int(request.args.get('p'))
all_res = start_search(word,cursor)
if len(all_res) == 0:
return render_template("result.html",result={"word":word,"pages":-1,"currentPage":1,"res":[]})
pages = ((len(all_res)-1)//10) + 1
res = decorate(all_res[(page-1)*10:page*10])
content = {"word":word,"pages":pages,"currentPage":page,"res":res}
return render_template("result.html",result=content)
@app.route('/cache')
def cache():
p = request.args.get('p')
c = request.args.get('c')
read = open(page_dir+"/"+p+"/"+c,'r',encoding="utf-8")
save = open("templates/temp.html",'w',encoding="utf-8")
for line in read:
save.write(line)
read.close()
save.close()
return render_template("temp.html")
app.run(host='0.0.0.0',port=80,debug=True)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, request, abort, render_template
from datetime import timedelta
import pymysql
from search import start_search, decorate
page_dir = "E:/WEBPAGES_RAW"
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
connection = pymysql.connect(host="localhost",port=3306,user="root",db="spicy_pot")
cursor = connection.cursor()
@app.route('/')
def homepage():
return render_template("root.html")
@app.route('/search')
def search():
word = request.args.get('s')
page = int(request.args.get('p'))
all_res = start_search(word,cursor)
if len(all_res) == 0:
return render_template("result.html",result={"word":word,"pages":-1,"currentPage":1,"res":[]})
pages = ((len(all_res)-1)//10) + 1
res = decorate(all_res[(page-1)*10:page*10])
content = {"word":word,"pages":pages,"currentPage":page,"res":res}
return render_template("result.html",result=content)
@app.route('/cache')
def cache():
p = request.args.get('p')
c = request.args.get('c')
read = open(page_dir+"/"+p+"/"+c,'r',encoding="utf-8")
save = open("templates/temp.html",'w',encoding="utf-8")
for line in read:
save.write(line)
read.close()
save.close()
return render_template("temp.html")
app.run(host='0.0.0.0',port=80,debug=True)
| en | 0.44423 | #!/usr/bin/python # -*- coding: utf-8 -*- | 2.331873 | 2 |
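Once this Flask app is running, the two routes above can be exercised roughly as follows (host, query values and cache path components are illustrative):
# http://localhost/search?s=hotpot&p=1    -> page 1 of results for the query "hotpot"
# http://localhost/cache?p=<dir>&c=<file> -> copies E:/WEBPAGES_RAW/<dir>/<file> into
#                                            templates/temp.html and renders it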
examples/3d/subduction/viz/plot_dispwarp.py | cehanagan/pylith | 93 | 10187 | <filename>examples/3d/subduction/viz/plot_dispwarp.py
#!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the deformed
# domain, colored by the value of the x-displacement.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step02",
"WARP_SCALE": 10.0e+3,
"FIELD": "displacement",
"FIELD_COMPONENT": "Magnitude",
"TIMESTEP": 0, # Use 0 for first, -1 for last.
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read data
filename = os.path.join(parameters.output_dir, "%s-domain.xmf" % parameters.sim)
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
dataDomain = XDMFReader(FileNames=[filename])
RenameSource("%s-domain" % parameters.sim, dataDomain)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
if parameters.timestep == -1:
scene.GoToLast()
view = GetActiveViewOrCreate('RenderView')
# Gray wireframe for undeformed domain.
domainDisplay = Show(dataDomain, view)
domainDisplay.Representation = 'Wireframe'
domainDisplay.AmbientColor = [0.5, 0.5, 0.5]
# Warp domain to show deformation
warp = WarpByVector(Input=dataDomain)
warp.Vectors = ['POINTS', 'displacement']
warp.ScaleFactor = parameters.warp_scale
warpDisplay = Show(warp, view)
ColorBy(warpDisplay, ('POINTS', parameters.field, parameters.field_component))
warpDisplay.RescaleTransferFunctionToDataRange(True)
warpDisplay.SetScalarBarVisibility(view, True)
warpDisplay.SetRepresentationType('Surface With Edges')
# Rescale color bar to exactly fit the current data range
warpDisplay.RescaleTransferFunctionToDataRange(False, False)
# Customize colorbar
displacementLUT = GetColorTransferFunction(parameters.field)
colorbar = GetScalarBar(displacementLUT, view)
if parameters.field_component.lower() == "magnitude":
colorbar.Title = "Displacement Mag. (m)"
else:
colorbar.Title = "%s-displacement (m)" % parameters.field_component.lower()
colorbar.ComponentTitle = ""
# Annotate time
tstamp = AnnotateTimeFilter(warp)
tstamp.Format = 'Time: %2.0f yr'
tstamp.Scale = 3.168808781402895e-08 # seconds to years
tstampDisplay = Show(tstamp, view)
tstampDisplay.FontFamily = "Courier"
tstampDisplay.FontSize = 14
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "WARP_SCALE", "FIELD", "FIELD_COMPONENT", "TIMESTEP")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output-dir", action="store", dest="output_dir", default=DEFAULTS["OUTPUT_DIR"])
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--warp-scale", action="store", type=float, dest="warp_scale", default=DEFAULTS["WARP_SCALE"])
parser.add_argument("--field", action="store", dest="field", default=DEFAULTS["FIELD"])
parser.add_argument("--component", action="store", dest="field_component", default=DEFAULTS["FIELD_COMPONENT"])
parser.add_argument("--timestep", action="store", dest="timestep", default=-1)
parser.add_argument("--screenshot", action="store", dest="screenshot")
args = parser.parse_args()
visualize(args)
view = GetRenderView()
view.CameraPosition = [78002.89373974672, -1531813.1739094853, 595774.2094961794]
view.CameraFocalPoint = [-45014.6313325238, 149523.68421156122, -335271.271063906]
view.CameraViewUp = [0.0, 0.0, 1.0]
view.ViewSize = [960, 540]
view.Update()
if args.screenshot:
WriteImage(args.screenshot)
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
| <filename>examples/3d/subduction/viz/plot_dispwarp.py
#!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the deformed
# domain, colored by the value of the x-displacement.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step02",
"WARP_SCALE": 10.0e+3,
"FIELD": "displacement",
"FIELD_COMPONENT": "Magnitude",
"TIMESTEP": 0, # Use 0 for first, -1 for last.
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read data
filename = os.path.join(parameters.output_dir, "%s-domain.xmf" % parameters.sim)
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
dataDomain = XDMFReader(FileNames=[filename])
RenameSource("%s-domain" % parameters.sim, dataDomain)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
if parameters.timestep == -1:
scene.GoToLast()
view = GetActiveViewOrCreate('RenderView')
# Gray wireframe for undeformed domain.
domainDisplay = Show(dataDomain, view)
domainDisplay.Representation = 'Wireframe'
domainDisplay.AmbientColor = [0.5, 0.5, 0.5]
# Warp domain to show deformation
warp = WarpByVector(Input=dataDomain)
warp.Vectors = ['POINTS', 'displacement']
warp.ScaleFactor = parameters.warp_scale
warpDisplay = Show(warp, view)
ColorBy(warpDisplay, ('POINTS', parameters.field, parameters.field_component))
warpDisplay.RescaleTransferFunctionToDataRange(True)
warpDisplay.SetScalarBarVisibility(view, True)
warpDisplay.SetRepresentationType('Surface With Edges')
# Rescale color bar to exactly fit the current data range
warpDisplay.RescaleTransferFunctionToDataRange(False, False)
# Customize colorbar
displacementLUT = GetColorTransferFunction(parameters.field)
colorbar = GetScalarBar(displacementLUT, view)
if parameters.field_component.lower() == "magnitude":
colorbar.Title = "Displacement Mag. (m)"
else:
colorbar.Title = "%s-displacement (m)" % parameters.field_component.lower()
colorbar.ComponentTitle = ""
# Annotate time
tstamp = AnnotateTimeFilter(warp)
tstamp.Format = 'Time: %2.0f yr'
tstamp.Scale = 3.168808781402895e-08 # seconds to years
tstampDisplay = Show(tstamp, view)
tstampDisplay.FontFamily = "Courier"
tstampDisplay.FontSize = 14
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "WARP_SCALE", "FIELD", "FIELD_COMPONENT", "TIMESTEP")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output-dir", action="store", dest="output_dir", default=DEFAULTS["OUTPUT_DIR"])
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--warp-scale", action="store", type=float, dest="warp_scale", default=DEFAULTS["WARP_SCALE"])
parser.add_argument("--field", action="store", dest="field", default=DEFAULTS["FIELD"])
parser.add_argument("--component", action="store", dest="field_component", default=DEFAULTS["FIELD_COMPONENT"])
parser.add_argument("--timestep", action="store", dest="timestep", default=-1)
parser.add_argument("--screenshot", action="store", dest="screenshot")
args = parser.parse_args()
visualize(args)
view = GetRenderView()
view.CameraPosition = [78002.89373974672, -1531813.1739094853, 595774.2094961794]
view.CameraFocalPoint = [-45014.6313325238, 149523.68421156122, -335271.271063906]
view.CameraViewUp = [0.0, 0.0, 1.0]
view.ViewSize = [960, 540]
view.Update()
if args.screenshot:
WriteImage(args.screenshot)
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
| en | 0.59386 | #!/usr/bin/env pvpython # -*- Python -*- (syntax highlighting) # ---------------------------------------------------------------------- # # <NAME>, U.S. Geological Survey # <NAME>, GNS Science # <NAME>, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md.md for license information. # # ---------------------------------------------------------------------- # Plot the undeformed domain as a gray wireframe and then the deformed # domain, colored by the value of the x-displacemenet. # User-specified parameters. # # Default values for parameters. To use different values, overwrite # them in the ParaView Python shell or on the command line. For # example, set OUTPUT_DIR to the absolute path if not starting # ParaView from the terminal shell where you ran PyLith: # # import os # OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output") # Use 0 for first, -1 for last. # ---------------------------------------------------------------------- # Disable automatic camera reset on "Show" # Read data # Gray wireframe for undeformed domain. # Warp domain to show deformation # Rescale color bar to exactly fit the current data range # Customize colorbar # Annotate time # seconds to years # ---------------------------------------------------------------------- # Running from outside the ParaView GUI via pvpython # Running inside the ParaView GUI # End of file | 2.264544 | 2 |
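A typical invocation of the script above from a terminal, per its own argparse options (the values shown are illustrative):
#   pvpython plot_dispwarp.py --sim step02 --output-dir output --warp-scale 10000 \
#            --field displacement --component Magnitude --screenshot step02_disp.png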
src/spaceone/inventory/manager/rds_manager.py | jean1042/plugin-aws-cloud-services | 4 | 10188 | from spaceone.inventory.libs.manager import AWSManager
# todo: change this so that everything can be declared at once in __init__
# for now, an error is raised if this separate file is missing, because the locator logic fetches values from globals
class RDSConnectorManager(AWSManager):
connector_name = 'RDSConnector'
| from spaceone.inventory.libs.manager import AWSManager
# todo: change this so that everything can be declared at once in __init__
# for now, an error is raised if this separate file is missing, because the locator logic fetches values from globals
class RDSConnectorManager(AWSManager):
connector_name = 'RDSConnector'
| ko | 1.000069 | # todo: change this so that everything can be declared at once in __init__ # for now, an error is raised if this separate file is missing, because the locator logic fetches values from globals | 1.483464 | 1
script/upload-checksums.py | fireball-x/atom-shell | 4 | 10189 | <filename>script/upload-checksums.py<gh_stars>1-10
#!/usr/bin/env python
import argparse
import hashlib
import os
import tempfile
from lib.config import s3_config
from lib.util import download, rm_rf, s3put
DIST_URL = 'https://atom.io/download/atom-shell/'
def main():
args = parse_args()
url = DIST_URL + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, directory,
'atom-shell/dist/{0}'.format(args.version), checksums)
rm_rf(directory)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def get_files_list(version):
return [
'node-{0}.tar.gz'.format(version),
'iojs-{0}.tar.gz'.format(version),
'node.lib',
'x64/node.lib',
'win-x86/iojs.lib',
'win-x64/iojs.lib',
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='electron-tmp')
return directory, [
download(f, url + f, os.path.join(directory, f))
for f in files
]
def create_checksum(algorithm, directory, filename, files):
lines = []
for path in files:
h = hashlib.new(algorithm)
with open(path, 'r') as f:
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
if __name__ == '__main__':
import sys
sys.exit(main())
| <filename>script/upload-checksums.py<gh_stars>1-10
#!/usr/bin/env python
import argparse
import hashlib
import os
import tempfile
from lib.config import s3_config
from lib.util import download, rm_rf, s3put
DIST_URL = 'https://atom.io/download/atom-shell/'
def main():
args = parse_args()
url = DIST_URL + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, directory,
'atom-shell/dist/{0}'.format(args.version), checksums)
rm_rf(directory)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def get_files_list(version):
return [
'node-{0}.tar.gz'.format(version),
'iojs-{0}.tar.gz'.format(version),
'node.lib',
'x64/node.lib',
'win-x86/iojs.lib',
'win-x64/iojs.lib',
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='electron-tmp')
return directory, [
download(f, url + f, os.path.join(directory, f))
for f in files
]
def create_checksum(algorithm, directory, filename, files):
lines = []
for path in files:
h = hashlib.new(algorithm)
with open(path, 'r') as f:
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
if __name__ == '__main__':
import sys
sys.exit(main())
| ru | 0.26433 | #!/usr/bin/env python | 2.620468 | 3 |
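The checksum uploader above is driven entirely by its --version flag; a sketch of how it would be run (the version string is illustrative, and S3 credentials must be resolvable via lib.config.s3_config):
#   python script/upload-checksums.py -v 0.22.1
# downloads the listed release artifacts, writes SHASUMS.txt / SHASUMS256.txt, and uploads them with s3put.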
pythoncode/kmeansimage.py | loganpadon/PokemonOneShot | 0 | 10190 | <gh_stars>0
# import the necessary packages
from sklearn.cluster import KMeans
import skimage
import matplotlib.pyplot as plt
import argparse
import cv2
def mean_image(image,clt):
image2=image
for x in range(len(image2)):
classes=clt.predict(image2[x])
for y in range(len(classes)):
image2[x,y]=clt.cluster_centers_[classes[y]]
image2=skimage.color.lab2rgb(image2)
return image2
def plot_colors(hist, centroids):
# initialize the bar chart representing the relative frequency
# of each of the colors
bar = np.zeros((50, 300, 3), dtype = "uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for (percent, color) in zip(hist, centroids):
print color
c = skimage.color.lab2rgb([[color]])
print c*255
# plot the relative percentage of each cluster
endX = startX + (percent * 300)
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
c[0][0]*255, -1)
startX = endX
# return the bar chart
return bar
# import the necessary packages
import numpy as np
import cv2
def centroid_histogram(clt):
# grab the number of different clusters and create a histogram
# based on the number of pixels assigned to each cluster
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins = numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
# return the histogram
return hist
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
ap.add_argument("-c", "--clusters", required = True, type = int,
help = "# of clusters")
args = vars(ap.parse_args())
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread(args["image"])
image2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = skimage.color.rgb2lab(image2)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image2)
# reshape the image to be a list of pixels
imagedata = image.reshape((image.shape[0] * image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters = args["clusters"])
clt.fit(imagedata)
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bar
plt.figure()
plt.axis("off")
plt.imshow(bar)
imagek=mean_image(image,clt)
plt.figure()
plt.axis("off")
plt.imshow(imagek)
plt.show()
| # import the necessary packages
from sklearn.cluster import KMeans
import skimage
import matplotlib.pyplot as plt
import argparse
import cv2
def mean_image(image,clt):
image2=image
for x in range(len(image2)):
classes=clt.predict(image2[x])
for y in range(len(classes)):
image2[x,y]=clt.cluster_centers_[classes[y]]
image2=skimage.color.lab2rgb(image2)
return image2
def plot_colors(hist, centroids):
# initialize the bar chart representing the relative frequency
# of each of the colors
bar = np.zeros((50, 300, 3), dtype = "uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for (percent, color) in zip(hist, centroids):
print color
c = skimage.color.lab2rgb([[color]])
print c*255
# plot the relative percentage of each cluster
endX = startX + (percent * 300)
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
c[0][0]*255, -1)
startX = endX
# return the bar chart
return bar
# import the necessary packages
import numpy as np
import cv2
def centroid_histogram(clt):
# grab the number of different clusters and create a histogram
# based on the number of pixels assigned to each cluster
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins = numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
# return the histogram
return hist
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
ap.add_argument("-c", "--clusters", required = True, type = int,
help = "# of clusters")
args = vars(ap.parse_args())
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread(args["image"])
image2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = skimage.color.rgb2lab(image2)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image2)
# reshape the image to be a list of pixels
imagedata = image.reshape((image.shape[0] * image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters = args["clusters"])
clt.fit(imagedata)
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bar
plt.figure()
plt.axis("off")
plt.imshow(bar)
imagek=mean_image(image,clt)
plt.figure()
plt.axis("off")
plt.imshow(imagek)
plt.show() | en | 0.760657 | # import the necessary packages # initialize the bar chart representing the relative frequency # of each of the colors # loop over the percentage of each cluster and the color of # each cluster # plot the relative percentage of each cluster # return the bar chart # import the necessary packages # grab the number of different clusters and create a histogram # based on the number of pixels assigned to each cluster # normalize the histogram, such that it sums to one # return the histogram # construct the argument parser and parse the arguments # load the image and convert it from BGR to RGB so that # we can dispaly it with matplotlib # show our image # reshape the image to be a list of pixels # cluster the pixel intensities # show our color bar | 3.111655 | 3 |
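An example invocation of the clustering script above (image path and cluster count are illustrative; the bare print statements make this Python 2 code):
#   python2 kmeansimage.py --image sample.png --clusters 3
# Shows the input image, a bar of the dominant LAB cluster colours, and the image
# repainted with each pixel replaced by its cluster centre.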
tests/encode.py | EddieBreeg/C_b64 | 0 | 10191 | <gh_stars>0
from sys import argv
from base64 import b64encode
with open("data", 'rb') as fIn:
b = fIn.read()
print(b64encode(b).decode()) | from sys import argv
from base64 import b64encode
with open("data", 'rb') as fIn:
b = fIn.read()
print(b64encode(b).decode()) | none | 1 | 2.459261 | 2 |
|
src/json_sort/lib.py | cdumay/json-sort | 3 | 10192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import logging
import sys, os, json
from cdumay_rest_client.client import RESTClient
from cdumay_rest_client.exceptions import NotFound, HTTPException
class NoSuchFile(NotFound):
"""NoSuchFile"""
def oncritical(exc):
"""description of oncritical"""
if isinstance(exc, HTTPException):
logging.critical(exc.message)
else:
logging.critical(str(exc))
sys.exit(1)
def file_exists(filename):
"""description of file_exists"""
filename = os.path.realpath(filename)
logging.debug("Checking file: {}".format(filename))
if not os.path.exists(filename):
raise NoSuchFile(
message="No such file '{}'".format(filename),
extra=dict(filename=filename)
)
return filename
def file_write(dst, data):
"""description of file_write"""
if dst:
dst = os.path.realpath(dst)
logging.debug("Saving to: {}".format(dst))
out = open(dst, "w")
else:
logging.debug("Current std will be used")
out = sys.stdout
json.dump(
data, out, ensure_ascii=False, sort_keys=True, indent=2,
separators=(',', ': ')
)
def from_local(src, dst=None):
"""description of from_local"""
try:
file_write(dst, json.load(open(file_exists(src), "r")))
except Exception as exc:
oncritical(exc)
def from_remote(src, dst=None):
"""description of fromurl"""
try:
file_write(
dst, RESTClient(server=src).do_request(method="GET", path="")
)
except Exception as exc:
oncritical(exc)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import logging
import sys, os, json
from cdumay_rest_client.client import RESTClient
from cdumay_rest_client.exceptions import NotFound, HTTPException
class NoSuchFile(NotFound):
"""NoSuchFile"""
def oncritical(exc):
"""description of oncritical"""
if isinstance(exc, HTTPException):
logging.critical(exc.message)
else:
logging.critical(str(exc))
sys.exit(1)
def file_exists(filename):
"""description of file_exists"""
filename = os.path.realpath(filename)
logging.debug("Checking file: {}".format(filename))
if not os.path.exists(filename):
raise NoSuchFile(
message="No such file '{}'".format(filename),
extra=dict(filename=filename)
)
return filename
def file_write(dst, data):
"""description of file_write"""
if dst:
dst = os.path.realpath(dst)
logging.debug("Saving to: {}".format(dst))
out = open(dst, "w")
else:
logging.debug("Current std will be used")
out = sys.stdout
json.dump(
data, out, ensure_ascii=False, sort_keys=True, indent=2,
separators=(',', ': ')
)
def from_local(src, dst=None):
"""description of from_local"""
try:
file_write(dst, json.load(open(file_exists(src), "r")))
except Exception as exc:
oncritical(exc)
def from_remote(src, dst=None):
"""description of fromurl"""
try:
file_write(
dst, RESTClient(server=src).do_request(method="GET", path="")
)
except Exception as exc:
oncritical(exc)
| en | 0.533282 | #!/usr/bin/env python # -*- coding: utf-8 -*- .. codeauthor:: <NAME> <<EMAIL>> NoSuchFile description of oncritical description of file_exists description of file_write description of from_local description of fromurl | 2.410436 | 2 |
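A minimal usage sketch for the two entry points above (file names and the URL are illustrative):
# Pretty-print and key-sort a local JSON file, or fetch one over HTTP first.
from_local("unsorted.json", "sorted.json")                  # writes the sorted output to sorted.json
from_local("unsorted.json")                                 # dst=None -> result goes to stdout
from_remote("https://example.org/data.json", "sorted.json")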
test/test_create_dataset.py | gregstarr/ttools | 0 | 10193 | import numpy as np
import pytest
import apexpy
import tempfile
import os
import h5py
from ttools import create_dataset, config, io, utils
map_periods = [np.timedelta64(10, 'm'), np.timedelta64(30, 'm'), np.timedelta64(1, 'h'), np.timedelta64(2, 'h')]
@pytest.fixture
def times():
yield np.datetime64('2010-01-01T00:00:00') + np.arange(100) * np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_assemble_args(times, map_period):
mlat = np.arange(10)
mlt = np.arange(10)
ssmlon = np.random.rand(times.shape[0])
mlt, mlat = np.meshgrid(mlt, mlat)
mlat = mlat[None, :, :] * np.ones((times.shape[0], 1, 1))
mlt = mlt[None, :, :] * np.ones((times.shape[0], 1, 1))
tec = np.random.rand(*mlat.shape)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges, bin_edges]
args = create_dataset.assemble_binning_args(mlat, mlt, tec, times, ssmlon, bins, map_period)
assert len(args) == np.ceil((times[-1] - times[0]) / map_period)
assert args[0][3][0] == times[0]
assert args[-1][3][0] + map_period >= times[-1]
assert args[-1][3][0] < times[-1]
assert args[-1][3][-1] == times[-1]
for i in range(len(args) - 1):
assert args[i][3][-1] == args[i + 1][3][0] - np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_process_file(madrigal_data_dir, map_period):
"""not that good of a test: wait for bugs and add asserts
"""
start_date = np.datetime64('2012-06-08')
end_date = np.datetime64('2012-06-13')
converter = apexpy.Apex()
mlat, mlon = create_dataset.get_mag_grid(config.madrigal_lat, config.madrigal_lon, converter)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges + 30, bin_edges]
times, tec, ssmlon, n, std = create_dataset.process_file(start_date, end_date, mlat, mlon, converter, bins,
map_period, madrigal_data_dir)
assert times.shape[0] == tec.shape[0] == n.shape[0] == std.shape[0] == ssmlon.shape[0]
assert np.isnan(tec[times < np.datetime64('2012-06-10')]).all()
assert np.isnan(tec[times >= np.datetime64('2012-06-11')]).all()
assert np.isfinite(tec[(times >= np.datetime64('2012-06-10')) * (times < np.datetime64('2012-06-11'))]).any()
assert not np.isnan(tec).all(axis=(0, 1)).any()
assert not np.isnan(tec).all(axis=(0, 2)).any()
def test_calculate_bins():
mlat = np.arange(10)[None, :, None] * np.ones((1, 1, 10))
mlt = np.arange(10)[None, None, :] * np.ones((1, 10, 1))
tec = np.zeros((1, 10, 10))
tec[0, 0, 0] = 10
tec[0, 0, -1] = 20
tec[0, -1, 0] = 30
times = ssmlon = np.ones(1) * np.nan
be = np.array([-.5, 4.5, 9.5])
bins = [be, be]
out_t, out_tec, out_ssm, out_n, out_std = create_dataset.calculate_bins(mlat.ravel(), mlt.ravel(), tec.ravel(),
times, ssmlon, bins)
assert np.isnan(out_t)
assert np.isnan(out_ssm)
assert out_tec.shape == (2, 2)
assert out_tec[0, 0] == 10 / 25
assert out_tec[0, 1] == 20 / 25
assert out_tec[1, 0] == 30 / 25
assert out_tec[1, 1] == 0
assert np.all(out_n == 25)
def test_process_dataset():
start_date = np.datetime64("2012-03-07")
end_date = np.datetime64("2012-03-08")
file_dt = np.timedelta64(12, 'h')
mlat_bins = np.array([35, 45, 55, 65])
mlt_bins = np.array([-1.5, -.5, .5, 1.5])
def fn_pattern(date):
return f"{date.astype('datetime64[h]')}.h5"
dates = np.arange(start_date, end_date, file_dt)
with tempfile.TemporaryDirectory() as tempdir:
files = [os.path.join(tempdir, fn_pattern(d)) for d in dates]
create_dataset.process_dataset(start_date, end_date, mlat_bins, mlt_bins, apex_dt=np.timedelta64(365, 'D'),
file_dt=file_dt, output_dir=tempdir, file_name_pattern=fn_pattern)
grid_fn = os.path.join(tempdir, 'grid.h5')
assert os.path.exists(grid_fn)
with h5py.File(grid_fn, 'r') as f:
mlt_vals = f['mlt'][()]
mlat_vals = f['mlat'][()]
assert np.all(mlt_vals == [-1, 0, 1])
assert np.all(mlat_vals == [40, 50, 60])
for f, d in zip(files, dates):
assert os.path.exists(f)
tec, times, ssmlon, n, std = io.open_tec_file(f)
assert tec.shape == (12, 3, 3)
assert utils.datetime64_to_timestamp(d) == times[0]
| import numpy as np
import pytest
import apexpy
import tempfile
import os
import h5py
from ttools import create_dataset, config, io, utils
map_periods = [np.timedelta64(10, 'm'), np.timedelta64(30, 'm'), np.timedelta64(1, 'h'), np.timedelta64(2, 'h')]
@pytest.fixture
def times():
yield np.datetime64('2010-01-01T00:00:00') + np.arange(100) * np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_assemble_args(times, map_period):
mlat = np.arange(10)
mlt = np.arange(10)
ssmlon = np.random.rand(times.shape[0])
mlt, mlat = np.meshgrid(mlt, mlat)
mlat = mlat[None, :, :] * np.ones((times.shape[0], 1, 1))
mlt = mlt[None, :, :] * np.ones((times.shape[0], 1, 1))
tec = np.random.rand(*mlat.shape)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges, bin_edges]
args = create_dataset.assemble_binning_args(mlat, mlt, tec, times, ssmlon, bins, map_period)
assert len(args) == np.ceil((times[-1] - times[0]) / map_period)
assert args[0][3][0] == times[0]
assert args[-1][3][0] + map_period >= times[-1]
assert args[-1][3][0] < times[-1]
assert args[-1][3][-1] == times[-1]
for i in range(len(args) - 1):
assert args[i][3][-1] == args[i + 1][3][0] - np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_process_file(madrigal_data_dir, map_period):
"""not that good of a test: wait for bugs and add asserts
"""
start_date = np.datetime64('2012-06-08')
end_date = np.datetime64('2012-06-13')
converter = apexpy.Apex()
mlat, mlon = create_dataset.get_mag_grid(config.madrigal_lat, config.madrigal_lon, converter)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges + 30, bin_edges]
times, tec, ssmlon, n, std = create_dataset.process_file(start_date, end_date, mlat, mlon, converter, bins,
map_period, madrigal_data_dir)
assert times.shape[0] == tec.shape[0] == n.shape[0] == std.shape[0] == ssmlon.shape[0]
assert np.isnan(tec[times < np.datetime64('2012-06-10')]).all()
assert np.isnan(tec[times >= np.datetime64('2012-06-11')]).all()
assert np.isfinite(tec[(times >= np.datetime64('2012-06-10')) * (times < np.datetime64('2012-06-11'))]).any()
assert not np.isnan(tec).all(axis=(0, 1)).any()
assert not np.isnan(tec).all(axis=(0, 2)).any()
def test_calculate_bins():
mlat = np.arange(10)[None, :, None] * np.ones((1, 1, 10))
mlt = np.arange(10)[None, None, :] * np.ones((1, 10, 1))
tec = np.zeros((1, 10, 10))
tec[0, 0, 0] = 10
tec[0, 0, -1] = 20
tec[0, -1, 0] = 30
times = ssmlon = np.ones(1) * np.nan
be = np.array([-.5, 4.5, 9.5])
bins = [be, be]
out_t, out_tec, out_ssm, out_n, out_std = create_dataset.calculate_bins(mlat.ravel(), mlt.ravel(), tec.ravel(),
times, ssmlon, bins)
assert np.isnan(out_t)
assert np.isnan(out_ssm)
assert out_tec.shape == (2, 2)
assert out_tec[0, 0] == 10 / 25
assert out_tec[0, 1] == 20 / 25
assert out_tec[1, 0] == 30 / 25
assert out_tec[1, 1] == 0
assert np.all(out_n == 25)
def test_process_dataset():
start_date = np.datetime64("2012-03-07")
end_date = np.datetime64("2012-03-08")
file_dt = np.timedelta64(12, 'h')
mlat_bins = np.array([35, 45, 55, 65])
mlt_bins = np.array([-1.5, -.5, .5, 1.5])
def fn_pattern(date):
return f"{date.astype('datetime64[h]')}.h5"
dates = np.arange(start_date, end_date, file_dt)
with tempfile.TemporaryDirectory() as tempdir:
files = [os.path.join(tempdir, fn_pattern(d)) for d in dates]
create_dataset.process_dataset(start_date, end_date, mlat_bins, mlt_bins, apex_dt=np.timedelta64(365, 'D'),
file_dt=file_dt, output_dir=tempdir, file_name_pattern=fn_pattern)
grid_fn = os.path.join(tempdir, 'grid.h5')
assert os.path.exists(grid_fn)
with h5py.File(grid_fn, 'r') as f:
mlt_vals = f['mlt'][()]
mlat_vals = f['mlat'][()]
assert np.all(mlt_vals == [-1, 0, 1])
assert np.all(mlat_vals == [40, 50, 60])
for f, d in zip(files, dates):
assert os.path.exists(f)
tec, times, ssmlon, n, std = io.open_tec_file(f)
assert tec.shape == (12, 3, 3)
assert utils.datetime64_to_timestamp(d) == times[0]
| en | 0.94663 | not that good of a test: wait for bugs and add asserts | 1.863567 | 2 |
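These tests are meant to be collected by pytest; note that test_process_file depends on a madrigal_data_dir fixture defined elsewhere (e.g. in a conftest.py), so a typical selective run looks like:
#   pytest test/test_create_dataset.py -k "calculate_bins or process_dataset"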
docs_src/options/callback/tutorial001.py | madkinsz/typer | 7,615 | 10194 | import typer
def name_callback(value: str):
if value != "Camila":
raise typer.BadParameter("Only Camila is allowed")
return value
def main(name: str = typer.Option(..., callback=name_callback)):
typer.echo(f"Hello {name}")
if __name__ == "__main__":
typer.run(main)
| import typer
def name_callback(value: str):
if value != "Camila":
raise typer.BadParameter("Only Camila is allowed")
return value
def main(name: str = typer.Option(..., callback=name_callback)):
typer.echo(f"Hello {name}")
if __name__ == "__main__":
typer.run(main)
| none | 1 | 2.794564 | 3 |
|
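Behaviour sketch for the typer callback example above when saved as tutorial001.py and run from a shell:
#   python tutorial001.py --name Camila   -> prints "Hello Camila"
#   python tutorial001.py --name Rick     -> aborts with "Only Camila is allowed"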
qft-client-py2.py | bocajspear1/qft | 0 | 10195 | <reponame>bocajspear1/qft<gh_stars>0
import socket
import threading
from time import sleep
from threading import Thread
import json
import sys
def display_test(address, port,text_result, test):
if (text_result == "QFT_SUCCESS" and test == True) or (text_result != "QFT_SUCCESS" and test == False):
# Test is correct
print "PASSED: Test for " + str(address) + ":" + str(port) + " resulted in " + str(test)
else:
print "FAILED: Test for " + str(address) + ":" + str(port) + " did not result in " + str(test)
def TCPTest(address, port, test):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_socket.settimeout(2)
my_socket.connect((address, port))
fileobj = my_socket.makefile("rw")
fileobj.write('QFT_REQUEST\n')
fileobj.flush()
result = fileobj.readline().strip()
display_test(address, port, result, test)
except socket.error as e:
#print(e)
display_test(address, port, "FAILED", test)
except socket.timeout as e:
display_test(address, port, "FAILED", test)
my_socket.close()
def UDPTest(address, port, test):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_socket.settimeout(2)
my_socket.sendto("QFT_REQUEST".encode('utf-8'), (address, port))
# receive data from client (data, addr)
d = my_socket.recvfrom(1024)
reply = d[0]
addr = d[1]
result = d[0].decode('utf-8').strip()
display_test(address, port, result, test)
except socket.timeout as e:
display_test(address, port, "FAILED", test)
try:
timeout = 5
if len(sys.argv) > 1:
if (len(sys.argv) -1 ) % 2 != 0:
print "\nInvalid number of arguments\n\n-t Time between tests in seconds\n"
sys.exit()
else:
if sys.argv[1] == "-t" and sys.argv[2].isdigit() and int(sys.argv[2]) > 2:
timeout = int(sys.argv[2])
else:
print "\nInvalid arguments\n\n-t Time between tests in seconds\n"
sys.exit()
print "\nqft-client.py v1.s\n\n"
json_cfg = json.loads(open("client.cfg").read())
print "Config loaded. Starting tests in 1 second...\n\n"
sleep(1)
while True:
for item in json_cfg:
if item["type"] == "tcp":
t = Thread(target=TCPTest, args=( item["remote_address"], item["port"], item["test_for"]))
elif item["type"] == "udp":
t = Thread(target=UDPTest, args=( item["remote_address"], item["port"], item["test_for"]))
else:
print "Invalid Type!"
t.start()
sleep(timeout)
print "\n=======================================================\n"
except IOError as e:
print("Config file, client.cfg, not found")
sys.exit()
except ValueError as e:
print("Error in config JSON")
sys.exit()
| import socket
import threading
from time import sleep
from threading import Thread
import json
import sys
def display_test(address, port,text_result, test):
if (text_result == "QFT_SUCCESS" and test == True) or (text_result != "QFT_SUCCESS" and test == False):
# Test is correct
print "PASSED: Test for " + str(address) + ":" + str(port) + " resulted in " + str(test)
else:
print "FAILED: Test for " + str(address) + ":" + str(port) + " did not result in " + str(test)
def TCPTest(address, port, test):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_socket.settimeout(2)
my_socket.connect((address, port))
fileobj = my_socket.makefile("rw")
fileobj.write('QFT_REQUEST\n')
fileobj.flush()
result = fileobj.readline().strip()
display_test(address, port, result, test)
except socket.error as e:
#print(e)
display_test(address, port, "FAILED", test)
except socket.timeout as e:
display_test(address, port, "FAILED", test)
my_socket.close()
def UDPTest(address, port, test):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_socket.settimeout(2)
my_socket.sendto("QFT_REQUEST".encode('utf-8'), (address, port))
# receive data from client (data, addr)
d = my_socket.recvfrom(1024)
reply = d[0]
addr = d[1]
result = d[0].decode('utf-8').strip()
display_test(address, port, result, test)
except socket.timeout as e:
display_test(address, port, "FAILED", test)
try:
timeout = 5
if len(sys.argv) > 1:
if (len(sys.argv) -1 ) % 2 != 0:
print "\nInvalid number of arguments\n\n-t Time between tests in seconds\n"
sys.exit()
else:
if sys.argv[1] == "-t" and sys.argv[2].isdigit() and int(sys.argv[2]) > 2:
timeout = int(sys.argv[2])
else:
print "\nInvalid arguments\n\n-t Time between tests in seconds\n"
sys.exit()
print "\nqft-client.py v1.s\n\n"
json_cfg = json.loads(open("client.cfg").read())
print "Config loaded. Starting tests in 1 second...\n\n"
sleep(1)
while True:
for item in json_cfg:
if item["type"] == "tcp":
t = Thread(target=TCPTest, args=( item["remote_address"], item["port"], item["test_for"]))
elif item["type"] == "udp":
t = Thread(target=UDPTest, args=( item["remote_address"], item["port"], item["test_for"]))
else:
print "Invalid Type!"
t.start()
sleep(timeout)
print "\n=======================================================\n"
except IOError as e:
print("Config file, client.cfg, not found")
sys.exit()
except ValueError as e:
print("Error in config JSON")
sys.exit() | en | 0.865445 | # Test is correct #print(e) # receive data from client (data, addr) | 2.979587 | 3 |
mdemanipulation/src/mdeoperation.py | modelia/ai-for-model-manipulation | 0 | 10196 | <gh_stars>0
#!/usr/bin/env python2
import math
import os
import random
import sys
import time
import logging
import argparse
import numpy as np
from six.moves import xrange
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch import cuda
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
import data_utils
import network
import cPickle as pickle
import datetime
def create_model(source_vocab_size, target_vocab_size, source_vocab_list, target_vocab_list, dropout_rate,
max_source_len, max_target_len):
model = network.Tree2TreeModel(
source_vocab_size,
target_vocab_size,
source_vocab_list,
target_vocab_list,
args.max_depth,
args.embedding_size,
args.hidden_size,
args.num_layers,
args.max_gradient_norm,
args.batch_size,
args.learning_rate,
dropout_rate,
args.no_pf,
args.no_attention)
if cuda.is_available():
model.cuda()
if args.load_model:
print("Reading model parameters from %s" % args.load_model)
pretrained_model = torch.load(args.load_model)
model.load_state_dict(pretrained_model)
else:
print("Created model with fresh parameters.")
model.init_weights(args.param_init)
return model
def step_tree2tree(model, encoder_inputs, init_decoder_inputs, feed_previous=False):
if feed_previous == False:
model.dropout_rate = args.dropout_rate
else:
model.dropout_rate = 0.0
predictions_per_batch, prediction_managers = model(encoder_inputs, init_decoder_inputs, feed_previous=feed_previous)
total_loss = None
for (predictions, target) in predictions_per_batch:
loss = model.loss_function(predictions, target)
if total_loss is None:
total_loss = loss
else:
total_loss += loss
total_loss /= len(encoder_inputs)
if feed_previous:
output_predictions = []
for prediction_manager in prediction_managers:
output_predictions.append(model.tree2seq(prediction_manager, 1))
if feed_previous == False:
model.optimizer.zero_grad()
total_loss.backward()
if args.max_gradient_norm > 0:
clip_grad_norm(model.parameters(), args.max_gradient_norm)
model.optimizer.step()
for idx in range(len(encoder_inputs)):
encoder_inputs[idx].clear_states()
if feed_previous:
return total_loss.data[0], output_predictions
else:
return total_loss.data[0]
def evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
test_loss = 0
acc_tokens = 0
tot_tokens = 0
acc_programs = 0
tot_programs = len(test_set)
res = []
for idx in xrange(0, len(test_set), args.batch_size):
encoder_inputs, decoder_inputs = model.get_batch(test_set, start_idx=idx)
eval_loss, raw_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
test_loss += len(encoder_inputs) * eval_loss
for i in xrange(len(encoder_inputs)):
if idx + i >= len(test_set):
break
current_output = []
for j in xrange(len(raw_outputs[i])):
current_output.append(raw_outputs[i][j])
current_source, current_target, current_source_manager, current_target_manager = test_set[idx + i]
current_target_print = data_utils.serialize_tree_with_vocabulary(current_target, target_vocab)
current_target = data_utils.serialize_tree(current_target)
current_source_print = data_utils.serialize_tree_with_vocabulary(current_source, source_vocab)
current_source = data_utils.serialize_tree(current_source)
# print("Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
# print((datetime.datetime.now() - start_evaluation_datetime))
res.append((current_source, current_target, current_output))
current_output_print = data_utils.serialize_seq_with_vocabulary(current_output, target_vocab)
# print("--Current source / Current target / Current output--")
print(current_source_print)
print(current_target_print)
print(current_output_print)
# print(source_vocab)
print("---")
tot_tokens += len(current_target)
all_correct = 1
wrong_tokens = 0
for j in xrange(len(current_output)):
if j >= len(current_target):
break
if current_output[j] == current_target[j]:
acc_tokens += 1
else:
all_correct = 0
wrong_tokens += 1
acc_programs += all_correct
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
test_loss /= tot_programs
print(" eval: loss %.2f" % test_loss)
print(" eval: accuracy of tokens %.2f" % (acc_tokens * 1.0 / tot_tokens))
print(" eval: accuracy of programs %.2f" % (acc_programs * 1.0 / tot_programs))
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
def train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, no_train):
train_model = not no_train;
time_training = 0;
# build_from_scratch = True;
# pretrained_model_path = "/home/lola/nn/neuralnetwork.pth";
if (train_model):
print ("Reading training and val data :")
train_set = data_utils.prepare_data(training_dataset, source_vocab, target_vocab)
val_set = data_utils.prepare_data(validation_dataset, source_vocab, target_vocab)
if not os.path.isdir(args.train_dir_checkpoints):
os.makedirs(args.train_dir_checkpoints)
start_time = time.time()
start_datetime = datetime.datetime.now()
# if (build_from_scratch):
print("Creating %d layers of %d units." % (args.num_layers, args.hidden_size))
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, args.dropout_rate,
args.max_source_len, args.max_target_len)
# else:
# print("Loading pretrained model")
# pretrained_model = torch.load(pretrained_model_path)
# model.load_state_dict(pretrained_model)
print("Training model")
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
training_dataset_size = len(train_set)
for epoch in range(args.num_epochs):
print("epoch: %s/%s" % (epoch+1, args.num_epochs))
batch = 0
random.shuffle(train_set)
for batch_idx in range(0, training_dataset_size, args.batch_size):
batch += 1
start_time = time.time()
encoder_inputs, decoder_inputs = model.get_batch(train_set, start_idx=batch_idx)
step_loss = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=False)
step_time += (time.time() - start_time) / args.steps_per_checkpoint
loss += step_loss / args.steps_per_checkpoint
current_step += 1
print(" batch: %s/%s" % (batch, training_dataset_size/args.batch_size))
if current_step % args.learning_rate_decay_steps == 0 and model.learning_rate > 0.0001:
model.decay_learning_rate(args.learning_rate_decay_factor)
if current_step % args.steps_per_checkpoint == 0:
print ("learning rate %.4f step-time %.2f loss "
"%.2f" % (model.learning_rate, step_time, loss))
previous_losses.append(loss)
ckpt_path = os.path.join(args.train_dir_checkpoints, "translate_" + str(current_step) + ".ckpt")
ckpt = model.state_dict()
torch.save(ckpt, ckpt_path)
step_time, loss = 0.0, 0.0
encoder_inputs, decoder_inputs = model.get_batch(val_set, start_idx=0)
eval_loss, decoder_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
print(" eval: loss %.2f" % eval_loss)
sys.stdout.flush()
time_training = (datetime.datetime.now() - start_datetime)
print("Saving model")
torch.save(model.state_dict(), "/home/lola/nn/neuralnetwork.pth")
else : # not train_model
print("Loading the pretrained model")
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list,
args.dropout_rate,
args.max_source_len, args.max_target_len)
print("Evaluating model")
start_evaluation_datetime = datetime.datetime.now()
test_dataset = json.load(open(args.test_dataset, 'r'))
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
if (train_model):
print("Training time: %s seconds" % time_training)
print("Total Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
def test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, 0.0,
args.max_source_len, args.max_target_len)
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
parser = argparse.ArgumentParser()
parser.add_argument('--param_init', type=float, default=0.1,
help='Parameters are initialized over uniform distribution in (-param_init, param_init)')
parser.add_argument('--num_epochs', type=int, default=30, help='number of training epochs') #default 30
parser.add_argument('--learning_rate', type=float, default=0.005, # default 0.005
help='learning rate')
parser.add_argument('--learning_rate_decay_factor', type=float, default=0.8,
help='learning rate decays by this much')
parser.add_argument('--learning_rate_decay_steps', type=int, default=2000, # default=2000
help='decay the learning rate after certain steps')
parser.add_argument('--max_gradient_norm', type=float, default=5.0,
help='clip gradients to this norm')
parser.add_argument('--batch_size', type=int, default=64, #default 100
help='batch size')
parser.add_argument('--max_depth', type=int, default=100,
help='max depth for tree models')
parser.add_argument('--hidden_size', type=int, default=256,
help='size of each model layer')
parser.add_argument('--embedding_size', type=int, default=256,
help='size of the embedding')
parser.add_argument('--dropout_rate', type=float, default=0.75, # default=0.5
help='dropout rate')
parser.add_argument('--num_layers', type=int, default=1, # default=1,
help='number of layers in the model')
parser.add_argument('--source_vocab_size', type=int, default=0,
help='source vocabulary size (0: no limit)')
parser.add_argument('--target_vocab_size', type=int, default=0,
help='target vocabulary size (0: no limit)')
parser.add_argument('--train_dir_checkpoints', type=str, default='/home/lola/nn/checkpoints', # default='../model_ckpts/tree2tree/',
help='training directory - checkpoints')
parser.add_argument('--training_dataset', type=str, default='/home/lola/nn/models_train.json', # default='../data/CS-JS/BL/preprocessed_progs_train.json',
help='training dataset path')
parser.add_argument('--validation_dataset', type=str, default='/home/lola/nn/models_valid.json', #default='../data/CS-JS/BL/preprocessed_progs_valid.json',
help='validation dataset path')
parser.add_argument('--test_dataset', type=str, default='/home/lola/nn/models_test.json', #default='../data/CS-JS/BL/preprocessed_progs_test.json',
help='test dataset path')
parser.add_argument('--load_model', type=str, default='/home/lola/nn/neuralnetwork.pth', # default=None
help='path to the pretrained model')
parser.add_argument('--vocab_filename', type=str, default=None,
help='filename for the vocabularies')
parser.add_argument('--steps_per_checkpoint', type=int, default=500,
help='number of training steps per checkpoint')
parser.add_argument('--max_source_len', type=int, default=115,
help='max length for input')
parser.add_argument('--max_target_len', type=int, default=315,
help='max length for output')
parser.add_argument('--test', action='store_true', help='set to true for testing')
parser.add_argument('--no_attention', action='store_true', help='set to true to disable attention')
parser.add_argument('--no_pf', action='store_true', help='set to true to disable parent attention feeding')
parser.add_argument('--no_train', help='set to true to prevent the network from training', action='store_true')
args = parser.parse_args()
def main():
if args.no_attention:
args.no_pf = True
training_dataset = json.load(open(args.training_dataset, 'r'))
source_vocab, target_vocab, source_vocab_list, target_vocab_list = data_utils.build_vocab(training_dataset, args.vocab_filename)
if args.test:
test_dataset = json.load(open(args.test_dataset, 'r'))
test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
else:
validation_dataset = json.load(open(args.validation_dataset, 'r'))
# print("Val data %s" % validation_dataset)
train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, args.no_train)
main()
| #!/usr/bin/env python2
import math
import os
import random
import sys
import time
import logging
import argparse
import numpy as np
from six.moves import xrange
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch import cuda
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
import data_utils
import network
import cPickle as pickle
import datetime
def create_model(source_vocab_size, target_vocab_size, source_vocab_list, target_vocab_list, dropout_rate,
max_source_len, max_target_len):
model = network.Tree2TreeModel(
source_vocab_size,
target_vocab_size,
source_vocab_list,
target_vocab_list,
args.max_depth,
args.embedding_size,
args.hidden_size,
args.num_layers,
args.max_gradient_norm,
args.batch_size,
args.learning_rate,
dropout_rate,
args.no_pf,
args.no_attention)
if cuda.is_available():
model.cuda()
if args.load_model:
print("Reading model parameters from %s" % args.load_model)
pretrained_model = torch.load(args.load_model)
model.load_state_dict(pretrained_model)
else:
print("Created model with fresh parameters.")
model.init_weights(args.param_init)
return model
def step_tree2tree(model, encoder_inputs, init_decoder_inputs, feed_previous=False):
if feed_previous == False:
model.dropout_rate = args.dropout_rate
else:
model.dropout_rate = 0.0
predictions_per_batch, prediction_managers = model(encoder_inputs, init_decoder_inputs, feed_previous=feed_previous)
total_loss = None
for (predictions, target) in predictions_per_batch:
loss = model.loss_function(predictions, target)
if total_loss is None:
total_loss = loss
else:
total_loss += loss
total_loss /= len(encoder_inputs)
if feed_previous:
output_predictions = []
for prediction_manager in prediction_managers:
output_predictions.append(model.tree2seq(prediction_manager, 1))
if feed_previous == False:
model.optimizer.zero_grad()
total_loss.backward()
if args.max_gradient_norm > 0:
clip_grad_norm(model.parameters(), args.max_gradient_norm)
model.optimizer.step()
for idx in range(len(encoder_inputs)):
encoder_inputs[idx].clear_states()
if feed_previous:
return total_loss.data[0], output_predictions
else:
return total_loss.data[0]
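# Descriptive note on step_tree2tree(): feed_previous=False is a training
# step (dropout enabled, backward pass, optional gradient clipping, optimizer
# update); feed_previous=True is an evaluation step that also decodes each
# prediction tree via model.tree2seq(). clip_grad_norm and total_loss.data[0]
# belong to the old PyTorch API this Python 2 script targets; on current
# PyTorch the equivalents are torch.nn.utils.clip_grad_norm_ and
# total_loss.item(), noted here only as context, not as a change.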
def evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
test_loss = 0
acc_tokens = 0
tot_tokens = 0
acc_programs = 0
tot_programs = len(test_set)
res = []
for idx in xrange(0, len(test_set), args.batch_size):
encoder_inputs, decoder_inputs = model.get_batch(test_set, start_idx=idx)
eval_loss, raw_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
test_loss += len(encoder_inputs) * eval_loss
for i in xrange(len(encoder_inputs)):
if idx + i >= len(test_set):
break
current_output = []
for j in xrange(len(raw_outputs[i])):
current_output.append(raw_outputs[i][j])
current_source, current_target, current_source_manager, current_target_manager = test_set[idx + i]
current_target_print = data_utils.serialize_tree_with_vocabulary(current_target, target_vocab)
current_target = data_utils.serialize_tree(current_target)
current_source_print = data_utils.serialize_tree_with_vocabulary(current_source, source_vocab)
current_source = data_utils.serialize_tree(current_source)
# print("Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
# print((datetime.datetime.now() - start_evaluation_datetime))
res.append((current_source, current_target, current_output))
current_output_print = data_utils.serialize_seq_with_vocabulary(current_output, target_vocab)
# print("--Current source / Current target / Current output--")
print(current_source_print)
print(current_target_print)
print(current_output_print)
# print(source_vocab)
print("---")
tot_tokens += len(current_target)
all_correct = 1
wrong_tokens = 0
for j in xrange(len(current_output)):
if j >= len(current_target):
break
if current_output[j] == current_target[j]:
acc_tokens += 1
else:
all_correct = 0
wrong_tokens += 1
acc_programs += all_correct
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
test_loss /= tot_programs
print(" eval: loss %.2f" % test_loss)
print(" eval: accuracy of tokens %.2f" % (acc_tokens * 1.0 / tot_tokens))
print(" eval: accuracy of programs %.2f" % (acc_programs * 1.0 / tot_programs))
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
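# The accuracy bookkeeping inside evaluate() is interleaved with printing,
# which makes it easy to misread. The helper below restates that logic for a
# single (output, target) pair; it is an illustrative sketch only -- the name
# is hypothetical and nothing in the original script calls it.
def _token_and_program_accuracy(output_tokens, target_tokens):
    """Mirror evaluate()'s per-program counters.

    Returns (correct_tokens, program_is_exact). A token is correct when it
    equals the target token at the same position; the program is "exact"
    only if none of the compared tokens differ, matching the all_correct
    flag used above (an output shorter than the target is not penalised,
    exactly as in evaluate()).
    """
    correct = 0
    exact = 1
    for j in range(min(len(output_tokens), len(target_tokens))):
        if output_tokens[j] == target_tokens[j]:
            correct += 1
        else:
            exact = 0
    return correct, exact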
def train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, no_train):
    train_model = not no_train
    time_training = 0
# build_from_scratch = True;
# pretrained_model_path = "/home/lola/nn/neuralnetwork.pth";
    if train_model:
        print("Reading training and validation data:")
train_set = data_utils.prepare_data(training_dataset, source_vocab, target_vocab)
val_set = data_utils.prepare_data(validation_dataset, source_vocab, target_vocab)
if not os.path.isdir(args.train_dir_checkpoints):
os.makedirs(args.train_dir_checkpoints)
start_time = time.time()
start_datetime = datetime.datetime.now()
# if (build_from_scratch):
print("Creating %d layers of %d units." % (args.num_layers, args.hidden_size))
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, args.dropout_rate,
args.max_source_len, args.max_target_len)
# else:
# print("Loading pretrained model")
# pretrained_model = torch.load(pretrained_model_path)
# model.load_state_dict(pretrained_model)
print("Training model")
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
training_dataset_size = len(train_set)
for epoch in range(args.num_epochs):
print("epoch: %s/%s" % (epoch+1, args.num_epochs))
batch = 0
random.shuffle(train_set)
for batch_idx in range(0, training_dataset_size, args.batch_size):
batch += 1
start_time = time.time()
encoder_inputs, decoder_inputs = model.get_batch(train_set, start_idx=batch_idx)
step_loss = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=False)
step_time += (time.time() - start_time) / args.steps_per_checkpoint
loss += step_loss / args.steps_per_checkpoint
current_step += 1
print(" batch: %s/%s" % (batch, training_dataset_size/args.batch_size))
if current_step % args.learning_rate_decay_steps == 0 and model.learning_rate > 0.0001:
model.decay_learning_rate(args.learning_rate_decay_factor)
if current_step % args.steps_per_checkpoint == 0:
print ("learning rate %.4f step-time %.2f loss "
"%.2f" % (model.learning_rate, step_time, loss))
previous_losses.append(loss)
ckpt_path = os.path.join(args.train_dir_checkpoints, "translate_" + str(current_step) + ".ckpt")
ckpt = model.state_dict()
torch.save(ckpt, ckpt_path)
step_time, loss = 0.0, 0.0
encoder_inputs, decoder_inputs = model.get_batch(val_set, start_idx=0)
eval_loss, decoder_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
print(" eval: loss %.2f" % eval_loss)
sys.stdout.flush()
time_training = (datetime.datetime.now() - start_datetime)
print("Saving model")
torch.save(model.state_dict(), "/home/lola/nn/neuralnetwork.pth")
    else:  # not train_model
print("Loading the pretrained model")
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list,
args.dropout_rate,
args.max_source_len, args.max_target_len)
print("Evaluating model")
start_evaluation_datetime = datetime.datetime.now()
test_dataset = json.load(open(args.test_dataset, 'r'))
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
if (train_model):
print("Training time: %s seconds" % time_training)
print("Total Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
def test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, 0.0,
args.max_source_len, args.max_target_len)
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
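# Hypothetical command lines (the script name and paths are placeholders, not
# taken from the original project; the real defaults are wired in below):
#   python tree2tree.py --vocab_filename vocab.pkl
#       -> builds vocabularies, trains, and periodically evaluates on the
#          validation set
#   python tree2tree.py --test --load_model model.pth --vocab_filename vocab.pkl
#       -> skips training and evaluates the saved model on --test_dataset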
parser = argparse.ArgumentParser()
parser.add_argument('--param_init', type=float, default=0.1,
help='Parameters are initialized over uniform distribution in (-param_init, param_init)')
parser.add_argument('--num_epochs', type=int, default=30, help='number of training epochs') #default 30
parser.add_argument('--learning_rate', type=float, default=0.005, # default 0.005
help='learning rate')
parser.add_argument('--learning_rate_decay_factor', type=float, default=0.8,
help='learning rate decays by this much')
parser.add_argument('--learning_rate_decay_steps', type=int, default=2000, # default=2000
help='decay the learning rate after certain steps')
parser.add_argument('--max_gradient_norm', type=float, default=5.0,
help='clip gradients to this norm')
parser.add_argument('--batch_size', type=int, default=64, #default 100
help='batch size')
parser.add_argument('--max_depth', type=int, default=100,
help='max depth for tree models')
parser.add_argument('--hidden_size', type=int, default=256,
help='size of each model layer')
parser.add_argument('--embedding_size', type=int, default=256,
help='size of the embedding')
parser.add_argument('--dropout_rate', type=float, default=0.75, # default=0.5
help='dropout rate')
parser.add_argument('--num_layers', type=int, default=1, # default=1,
help='number of layers in the model')
parser.add_argument('--source_vocab_size', type=int, default=0,
help='source vocabulary size (0: no limit)')
parser.add_argument('--target_vocab_size', type=int, default=0,
help='target vocabulary size (0: no limit)')
parser.add_argument('--train_dir_checkpoints', type=str, default='/home/lola/nn/checkpoints', # default='../model_ckpts/tree2tree/',
help='training directory - checkpoints')
parser.add_argument('--training_dataset', type=str, default='/home/lola/nn/models_train.json', # default='../data/CS-JS/BL/preprocessed_progs_train.json',
help='training dataset path')
parser.add_argument('--validation_dataset', type=str, default='/home/lola/nn/models_valid.json', #default='../data/CS-JS/BL/preprocessed_progs_valid.json',
help='validation dataset path')
parser.add_argument('--test_dataset', type=str, default='/home/lola/nn/models_test.json', #default='../data/CS-JS/BL/preprocessed_progs_test.json',
help='test dataset path')
parser.add_argument('--load_model', type=str, default='/home/lola/nn/neuralnetwork.pth', # default=None
help='path to the pretrained model')
parser.add_argument('--vocab_filename', type=str, default=None,
help='filename for the vocabularies')
parser.add_argument('--steps_per_checkpoint', type=int, default=500,
help='number of training steps per checkpoint')
parser.add_argument('--max_source_len', type=int, default=115,
help='max length for input')
parser.add_argument('--max_target_len', type=int, default=315,
help='max length for output')
parser.add_argument('--test', action='store_true', help='set to true for testing')
parser.add_argument('--no_attention', action='store_true', help='set to true to disable attention')
parser.add_argument('--no_pf', action='store_true', help='set to true to disable parent attention feeding')
parser.add_argument('--no_train', help='set to true to prevent the network from training', action='store_true')
args = parser.parse_args()
def main():
if args.no_attention:
args.no_pf = True
training_dataset = json.load(open(args.training_dataset, 'r'))
source_vocab, target_vocab, source_vocab_list, target_vocab_list = data_utils.build_vocab(training_dataset, args.vocab_filename)
if args.test:
test_dataset = json.load(open(args.test_dataset, 'r'))
test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
else:
validation_dataset = json.load(open(args.validation_dataset, 'r'))
# print("Val data %s" % validation_dataset)
train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, args.no_train)
main() | en | 0.197644 | #!/usr/bin/env python2 # print("Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime)) # print((datetime.datetime.now() - start_evaluation_datetime)) # print("--Current source / Current target / Current output--") # print(source_vocab) # build_from_scratch = True; # pretrained_model_path = "/home/lola/nn/neuralnetwork.pth"; # if (build_from_scratch): # else: # print("Loading pretrained model") # pretrained_model = torch.load(pretrained_model_path) # model.load_state_dict(pretrained_model) # not train_model #default 30 # default 0.005 # default=2000 #default 100 # default=0.5 # default=1, # default='../model_ckpts/tree2tree/', # default='../data/CS-JS/BL/preprocessed_progs_train.json', #default='../data/CS-JS/BL/preprocessed_progs_valid.json', #default='../data/CS-JS/BL/preprocessed_progs_test.json', # default=None # print("Val data %s" % validation_dataset) | 2.111134 | 2 |
barbican/common/resources.py | stanzikratel/barbican-2 | 0 | 10197 | <gh_stars>0
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared business logic.
"""
from barbican.common import exception
from barbican.common import utils
from barbican.common import validators
from barbican.model import models
LOG = utils.getLogger(__name__)
def get_or_create_tenant(keystone_id, tenant_repo):
"""Returns tenant with matching keystone_id.
Creates it if it does not exist.
:param keystone_id: The external-to-Barbican ID for this tenant.
:param tenant_repo: Tenant repository.
:return: Tenant model instance
"""
tenant = tenant_repo.find_by_keystone_id(keystone_id,
suppress_exception=True)
if not tenant:
LOG.debug('Creating tenant for {0}'.format(keystone_id))
tenant = models.Tenant()
tenant.keystone_id = keystone_id
tenant.status = models.States.ACTIVE
tenant_repo.create_from(tenant)
return tenant
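# Descriptive note: get_or_create_tenant() is a read-then-create helper.
# find_by_keystone_id() is called with suppress_exception=True so a missing
# tenant yields None instead of raising, and the Tenant row is then created
# lazily. The check-and-create is not atomic as written, so two concurrent
# first requests for the same keystone_id could race; whether that matters
# depends on constraints enforced outside this module.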
def create_secret(data, tenant, crypto_manager,
secret_repo, tenant_secret_repo, datum_repo, kek_repo,
ok_to_generate=False):
"""Common business logic to create a secret."""
time_keeper = utils.TimeKeeper('Create Secret Resource')
new_secret = models.Secret(data)
time_keeper.mark('after Secret model create')
new_datum = None
content_type = data.get('payload_content_type',
'application/octet-stream')
if 'payload' in data:
payload = data.get('payload')
content_encoding = data.get('payload_content_encoding')
LOG.debug('Encrypting payload...')
new_datum = crypto_manager.encrypt(payload,
content_type,
content_encoding,
new_secret,
tenant,
kek_repo,
enforce_text_only=True)
time_keeper.mark('after encrypt')
elif ok_to_generate:
LOG.debug('Generating new secret...')
# TODO(atiwari): With new typed Order API proposal
# we need to translate new_secret to meta
# currently it is working as meta will have same attributes
new_datum = crypto_manager. \
generate_symmetric_encryption_key(new_secret,
content_type,
tenant,
kek_repo)
time_keeper.mark('after secret generate')
else:
LOG.debug('Creating metadata only for the new secret. '
'A subsequent PUT is required')
# Create Secret entities in datastore.
secret_repo.create_from(new_secret)
time_keeper.mark('after Secret datastore create')
new_assoc = models.TenantSecret()
time_keeper.mark('after TenantSecret model create')
new_assoc.tenant_id = tenant.id
new_assoc.secret_id = new_secret.id
new_assoc.role = "admin"
new_assoc.status = models.States.ACTIVE
tenant_secret_repo.create_from(new_assoc)
time_keeper.mark('after TenantSecret datastore create')
if new_datum:
new_datum.secret_id = new_secret.id
datum_repo.create_from(new_datum)
time_keeper.mark('after Datum datastore create')
time_keeper.dump()
return new_secret
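# Descriptive note on create_secret(): three cases are handled.
#   1. 'payload' present          -> encrypt it now via the crypto manager.
#   2. no payload, ok_to_generate -> generate a symmetric encryption key.
#   3. neither                    -> store metadata only; a later PUT supplies
#                                    the secret data.
# In all three cases the Secret and its TenantSecret association are written;
# an EncryptedDatum row is written only when case 1 or 2 produced one.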
def create_encrypted_datum(secret, payload,
content_type, content_encoding,
tenant, crypto_manager, datum_repo, kek_repo):
"""Modifies the secret to add the plain_text secret information.
:param secret: the secret entity to associate the secret data to
:param payload: secret data to store
:param content_type: payload content mime type
:param content_encoding: payload content encoding
:param tenant: the tenant (entity) who owns the secret
:param crypto_manager: the crypto plugin manager
:param datum_repo: the encrypted datum repository
:param kek_repo: the KEK metadata repository
:retval The response body, None if N/A
"""
if not payload:
raise exception.NoDataToProcess()
if validators.secret_too_big(payload):
raise exception.LimitExceeded()
if secret.encrypted_data:
raise ValueError('Secret already has encrypted data stored for it.')
# Encrypt payload
LOG.debug('Encrypting secret payload...')
new_datum = crypto_manager.encrypt(payload,
content_type,
content_encoding,
secret,
tenant,
kek_repo)
datum_repo.create_from(new_datum)
return new_datum
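# Descriptive note on create_encrypted_datum(): validation runs before any
# crypto or database work -- an empty payload raises NoDataToProcess, an
# oversized payload raises LimitExceeded, and a secret that already holds
# encrypted data raises ValueError -- so nothing is persisted unless
# encryption succeeds and the new datum is saved at the end.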
| # Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared business logic.
"""
from barbican.common import exception
from barbican.common import utils
from barbican.common import validators
from barbican.model import models
LOG = utils.getLogger(__name__)
def get_or_create_tenant(keystone_id, tenant_repo):
"""Returns tenant with matching keystone_id.
Creates it if it does not exist.
:param keystone_id: The external-to-Barbican ID for this tenant.
:param tenant_repo: Tenant repository.
:return: Tenant model instance
"""
tenant = tenant_repo.find_by_keystone_id(keystone_id,
suppress_exception=True)
if not tenant:
LOG.debug('Creating tenant for {0}'.format(keystone_id))
tenant = models.Tenant()
tenant.keystone_id = keystone_id
tenant.status = models.States.ACTIVE
tenant_repo.create_from(tenant)
return tenant
def create_secret(data, tenant, crypto_manager,
secret_repo, tenant_secret_repo, datum_repo, kek_repo,
ok_to_generate=False):
"""Common business logic to create a secret."""
time_keeper = utils.TimeKeeper('Create Secret Resource')
new_secret = models.Secret(data)
time_keeper.mark('after Secret model create')
new_datum = None
content_type = data.get('payload_content_type',
'application/octet-stream')
if 'payload' in data:
payload = data.get('payload')
content_encoding = data.get('payload_content_encoding')
LOG.debug('Encrypting payload...')
new_datum = crypto_manager.encrypt(payload,
content_type,
content_encoding,
new_secret,
tenant,
kek_repo,
enforce_text_only=True)
time_keeper.mark('after encrypt')
elif ok_to_generate:
LOG.debug('Generating new secret...')
# TODO(atiwari): With new typed Order API proposal
# we need to translate new_secret to meta
# currently it is working as meta will have same attributes
new_datum = crypto_manager. \
generate_symmetric_encryption_key(new_secret,
content_type,
tenant,
kek_repo)
time_keeper.mark('after secret generate')
else:
LOG.debug('Creating metadata only for the new secret. '
'A subsequent PUT is required')
# Create Secret entities in datastore.
secret_repo.create_from(new_secret)
time_keeper.mark('after Secret datastore create')
new_assoc = models.TenantSecret()
time_keeper.mark('after TenantSecret model create')
new_assoc.tenant_id = tenant.id
new_assoc.secret_id = new_secret.id
new_assoc.role = "admin"
new_assoc.status = models.States.ACTIVE
tenant_secret_repo.create_from(new_assoc)
time_keeper.mark('after TenantSecret datastore create')
if new_datum:
new_datum.secret_id = new_secret.id
datum_repo.create_from(new_datum)
time_keeper.mark('after Datum datastore create')
time_keeper.dump()
return new_secret
def create_encrypted_datum(secret, payload,
content_type, content_encoding,
tenant, crypto_manager, datum_repo, kek_repo):
"""Modifies the secret to add the plain_text secret information.
:param secret: the secret entity to associate the secret data to
:param payload: secret data to store
:param content_type: payload content mime type
:param content_encoding: payload content encoding
:param tenant: the tenant (entity) who owns the secret
:param crypto_manager: the crypto plugin manager
:param datum_repo: the encrypted datum repository
:param kek_repo: the KEK metadata repository
:retval The response body, None if N/A
"""
if not payload:
raise exception.NoDataToProcess()
if validators.secret_too_big(payload):
raise exception.LimitExceeded()
if secret.encrypted_data:
raise ValueError('Secret already has encrypted data stored for it.')
# Encrypt payload
LOG.debug('Encrypting secret payload...')
new_datum = crypto_manager.encrypt(payload,
content_type,
content_encoding,
secret,
tenant,
kek_repo)
datum_repo.create_from(new_datum)
return new_datum | en | 0.733914 | # Copyright (c) 2013-2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Shared business logic. Returns tenant with matching keystone_id. Creates it if it does not exist. :param keystone_id: The external-to-Barbican ID for this tenant. :param tenant_repo: Tenant repository. :return: Tenant model instance Common business logic to create a secret. # TODO(atiwari): With new typed Order API proposal # we need to translate new_secret to meta # currently it is working as meta will have same attributes # Create Secret entities in datastore. Modifies the secret to add the plain_text secret information. :param secret: the secret entity to associate the secret data to :param payload: secret data to store :param content_type: payload content mime type :param content_encoding: payload content encoding :param tenant: the tenant (entity) who owns the secret :param crypto_manager: the crypto plugin manager :param datum_repo: the encrypted datum repository :param kek_repo: the KEK metadata repository :retval The response body, None if N/A # Encrypt payload | 1.924893 | 2 |
7/prime.py | redfast00/euler | 0 | 10198 | from math import sqrt
def stream_primes(num):
primes = []
candidate = 2
for i in range(num):
prime = next_prime(primes, candidate)
primes.append(prime)
candidate = prime + 1
yield prime
def next_prime(primes, candidate):
while True:
for prime in primes:
if candidate % prime == 0:
break
elif prime > sqrt(candidate):
return candidate
else:
return candidate
candidate += 1
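# Descriptive note: next_prime() leans on Python's for/else -- the else branch
# runs only when the loop over known primes finishes without `break`, meaning
# no stored prime divided the candidate. The `prime > sqrt(candidate)` early
# return is the usual trial-division cutoff (a composite must have a factor no
# larger than its square root). stream_primes(10001) therefore yields the
# first 10001 primes in order; the last value printed answers the Project
# Euler problem this 7/ directory refers to.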
for prime in stream_primes(10001):
print(prime)
| from math import sqrt
def stream_primes(num):
primes = []
candidate = 2
for i in range(num):
prime = next_prime(primes, candidate)
primes.append(prime)
candidate = prime + 1
yield prime
def next_prime(primes, candidate):
while True:
for prime in primes:
if candidate % prime == 0:
break
elif prime > sqrt(candidate):
return candidate
else:
return candidate
candidate += 1
for prime in stream_primes(10001):
print(prime)
| none | 1 | 3.69698 | 4 |
|
app/utils.py | HealYouDown/flo-league | 0 | 10199 | <gh_stars>0
import datetime
from typing import Optional
from app.models import Log
from flask_login import current_user
from app.extensions import db
# https://stackoverflow.com/questions/6558535/find-the-date-for-the-first-monday-after-a-given-date
def next_weekday(
    d: Optional[datetime.datetime] = None,
    weekday: int = 0,
) -> datetime.datetime:
    # "now" is evaluated at call time; putting datetime.datetime.utcnow() in
    # the signature would freeze it at import time.
    if d is None:
        d = datetime.datetime.utcnow()
    days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
# Flatten the current time to just the date
date = datetime.datetime(d.year, d.month, d.day)
return date + datetime.timedelta(days_ahead)
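# Worked example (dates chosen purely for illustration):
#   next_weekday(datetime.datetime(2021, 1, 1), weekday=0) -> 2021-01-04 00:00
#       1 Jan 2021 is a Friday, so the following Monday is 4 Jan.
#   next_weekday(datetime.datetime(2021, 1, 4), weekday=0) -> 2021-01-11 00:00
#       when the date already falls on the target weekday, the *next* one is
#       returned, and the time of day is flattened to midnight.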
def add_moderator_log(log_text: str) -> None:
db.session.add(Log(
moderator_id=current_user.id,
message=log_text,
))
db.session.commit()
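# Descriptive note: add_moderator_log() both stages and commits the Log row
# and reads flask_login.current_user, so it assumes an authenticated request
# context with an active application context; calling it from a background
# job without one would likely fail or attribute the log to an anonymous user.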
| import datetime
from app.models import Log
from flask_login import current_user
from app.extensions import db
# https://stackoverflow.com/questions/6558535/find-the-date-for-the-first-monday-after-a-given-date
def next_weekday(
d: datetime.datetime = datetime.datetime.utcnow(),
weekday: int = 0,
) -> datetime.datetime:
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
# Flatten the current time to just the date
date = datetime.datetime(d.year, d.month, d.day)
return date + datetime.timedelta(days_ahead)
def add_moderator_log(log_text: str) -> None:
db.session.add(Log(
moderator_id=current_user.id,
message=log_text,
))
db.session.commit() | en | 0.72963 | # https://stackoverflow.com/questions/6558535/find-the-date-for-the-first-monday-after-a-given-date # Target day already happened this week # Flatten the current time to just the date | 2.465444 | 2 |