import os
import re
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
from donfig import Config
from jira import JIRA
from loguru import logger
from db import dataframe_to_db
from tqdm import tqdm
config = Config("jira_reporting")
class Report:
def __init__(self, jql_filter: str):
self.jira_connection = self.get_jira_connection()
self.issues = self.load_jira_issue(filt=jql_filter)
@staticmethod
def get_jira_connection():
user = config.get("USER", None) or os.environ.get("JIRA_USER", None)
token = config.get("TOKEN", None) or os.environ.get("JIRA_TOKEN", None)
server = config.get("SERVER", None) or os.environ.get("JIRA_SERVER", None)
options = {"server": server}
return JIRA(options=options, basic_auth=(user, token))
def load_jira_issue(self, filt):
return self.jira_connection.search_issues(filt, maxResults=1000)
@staticmethod
def get_issue_field_val(issue, field_name, field_data_key=None):
"""Tries to find field in issue and returns value"""
issue_fields = list(issue.raw["fields"].keys())
assert field_name in issue_fields
if field_data_key:
return issue.raw["fields"][field_name][field_data_key]
return issue.raw["fields"][field_name]
def get_issue_data(self, issue, fields: dict) -> dict:
extracted_issue_data = {}
for field_name, field_key in fields.items():
try:
val = self.get_issue_field_val(issue, field_name, field_key)
if field_name == "resolution":
extracted_issue_data[field_name] = 1
else:
extracted_issue_data[field_name] = (
None if str(val).lower() == "nan" else val
)
except TypeError:
if field_name == "resolution":
extracted_issue_data[field_name] = 0
else:
extracted_issue_data[field_name] = None
return extracted_issue_data
@staticmethod
def get_last_sprint(sprint_data: list) -> str:
"""Looks for latest sprint label"""
sequences = [sprint_val.get("id") for sprint_val in sprint_data]
sprint_names = [sprint_val.get("name", None) for sprint_val in sprint_data]
return sprint_names[sequences.index(max(sequences))]
@staticmethod
def get_all_sprint_data(sprint_data: list) -> list:
sprint_names = [sprint_val.get("name", None) for sprint_val in sprint_data]
return ", ".join([str(z) for z in sprint_names])
def get_sprint_counts(self, issues, sprint_col) -> dict:
sprint_data = [self.get_issue_field_val(issue, sprint_col) for issue in issues]
all_sprint_data = sum(sprint_data, [])
sprint_names = [
re.findall(r"name=[^,]*", str(sprint_val))[0].replace("name=", "")
for sprint_val in all_sprint_data
]
return {k: sprint_names.count(k) for k in set(sprint_names)}
@staticmethod
def get_issues_completed(jira_data):
assert "sprint" in jira_data.columns
completed_cards = list(
jira_data[jira_data["status"].str.lower() == "done"]["sprint"]
)
return {k: completed_cards.count(k) for k in set(completed_cards)}
@staticmethod
def sprint_summary(sprint_counts, completed_issues):
sprint_data = {"sprint": [], "completed": [], "total": []}
for k, v in sprint_counts.items():
sprint_data["sprint"].append(k)
sprint_data["total"].append(v)
try:
sprint_data["completed"].append(completed_issues[k])
except KeyError:
sprint_data["completed"].append(0)
return pd.DataFrame(sprint_data).sort_values("sprint")
class ProjectData:
def __init__(self, jira_connection: JIRA, project_key: str):
self.jira_connection = jira_connection
self.project_key = project_key
self.issues = self.get_project_issues()
self.populated_issues = []
self.all_issue_data = []
self.all_sprint_data = []
self.project_data = pd.DataFrame()
def get_project_issues(self) -> list:
"""
Makes a jira JQL search for the project and issue types.
Gets the latest 20 issues (maxResults=20).
"""
logger.info(f"Fetching issues for {self.project_key}")
query = (
f"project={self.project_key} "
+ "and issuetype in (Task, Bug, Subtask, Sub-task) "
+ "ORDER BY updated DESC"
)
return self.jira_connection.search_issues(query, maxResults=20)
def get_issues_from_search_results(self) -> None:
"""
Search results don't contain all data needed,
so we need to convert to JIRA.issue objects using
the issue ids
"""
logger.info(f"Collecting data for {len(self.issues)} issues")
with tqdm(total=len(self.issues)) as pbar:
for iss in self.issues:
issue_with_data = self.jira_connection.issue(iss.id)
# Filter issues that don't have a sprint assigned to them
if issue_with_data.fields.customfield_10020:
self.populated_issues.append(issue_with_data)
pbar.update(1)
@staticmethod
def get_issue_sprint_data(iss: JIRA.issue) -> list:
issue_sprints = iss.fields.customfield_10020
return [
{
"sprint_name": sprint.name,
"start_date": sprint.startDate if getattr(sprint, 'startDate', None) else None,
"end_date": sprint.endDate if getattr(sprint, 'endDate', None) else None,
"board_id": sprint.boardId,
"sprint_state": sprint.state,
"sprint_number": float(sprint.name.split(" ")[-1]),
}
for sprint in issue_sprints
]
@staticmethod
def extract_issue_data(iss: JIRA.issue) -> dict:
"""Extract key issue field data"""
return {
"key": iss.key,
"id": iss.id,
"project": iss.fields.project.key,
"issue_type": (iss.fields.issuetype.name if iss.fields.issuetype else None),
"summary": iss.fields.summary,
"assignee": (
iss.fields.assignee.displayName if iss.fields.assignee else None
),
"reporter": (
iss.fields.reporter.displayName if iss.fields.reporter else None
),
"priority": (iss.fields.priority.name if iss.fields.priority else None),
"status": (iss.fields.status.name if iss.fields.status else None),
"resolution": (
iss.fields.resolution.name if iss.fields.resolution else None
),
"resolved": (
1
if iss.fields.resolution
and iss.fields.resolution.name in ("Done", "DONE")
else 0
),
"created": iss.fields.created,
"updated": iss.fields.updated,
"due_date": iss.fields.duedate,
"total_time_spent": iss.fields.timespent,
"total_time_estimate": iss.fields.timeestimate,
"original_time_estimate": iss.fields.timeoriginalestimate,
"remaining_time_estimate": (
iss.fields.timetracking.remainingEstimateSeconds
if iss.fields.timetracking.raw
else None
),
}
@staticmethod
def get_issue_worklogs(iss: JIRA.issue) -> list:
issue_worklogs = iss.fields.worklog.worklogs
return [
{
"time_spent": wl.timeSpentSeconds,
"started": wl.started,
"updated": wl.updated,
"worklog_author": wl.author.displayName,
}
for wl in issue_worklogs
]
@staticmethod
def worklog_within_sprint(worklog: dict, sprint_data: dict) -> bool:
try:
utc = pytz.utc
worklog_started = datetime.strptime(
worklog.get("started"), "%Y-%m-%dT%H:%M:%S.%f%z"
)
sprint_start_date = datetime.strptime(
sprint_data.get("start_date"), "%Y-%m-%dT%H:%M:%S.%fZ"
).replace(tzinfo=utc)
sprint_end_date = datetime.strptime(
sprint_data.get("end_date"), "%Y-%m-%dT%H:%M:%S.%fZ"
).replace(tzinfo=utc)
return sprint_start_date <= worklog_started <= sprint_end_date
except TypeError:
return False
def get_sprint_time_spent(self, iss: JIRA.issue, sprint_data: dict) -> dict:
issue_worklogs = self.get_issue_worklogs(iss)
sprint_time = 0
for worklog in issue_worklogs:
if self.worklog_within_sprint(worklog, sprint_data):
sprint_time += worklog.get("time_spent", 0)
return {"issue_key": iss.key, **sprint_data, "sprint_time_spent": sprint_time}
def get_issue_sprint_data_with_time_spent(self, iss: JIRA.issue) -> list:
issue_sprint_data = self.get_issue_sprint_data(iss)
return [self.get_sprint_time_spent(iss, sd) for sd in issue_sprint_data]
def get_issue_and_sprint_data(self) -> None:
issues = self.populated_issues
all_issue_level_data = [self.extract_issue_data(_issue) for _issue in issues]
all_sprint_level_data = [
self.get_issue_sprint_data_with_time_spent(_issue) for _issue in issues
]
self.all_issue_data = all_issue_level_data
self.all_sprint_data = sum(all_sprint_level_data, [])
def merge_issue_and_sprint_data(self):
issue_df = pd.DataFrame(self.all_issue_data)
sprint_df = pd.DataFrame(self.all_sprint_data)
if len(issue_df) > 0 and len(sprint_df) > 0:
project_data = sprint_df.merge(issue_df, left_on="issue_key", right_on="key")
project_data.drop("issue_key", inplace=True, axis=1)
project_data.rename({"id": "issue_id"}, inplace=True, axis=1)
project_data[
[
"total_time_spent",
"total_time_estimate",
"original_time_estimate",
"remaining_time_estimate",
]
] = (
project_data[
[
"total_time_spent",
"total_time_estimate",
"original_time_estimate",
"remaining_time_estimate",
]
]
.copy()
.replace(np.nan, 0)
)
self.project_data = project_data
def save_to_db(self):
if len(self.project_data) > 0:
cols = list(self.project_data.columns)
conflicts = ("key", "sprint_name")
dataframe_to_db(
data=self.project_data,
table_name=config.get("DB_TABLE_NAME", None) or os.environ.get("DB_TABLE_NAME"),
conflicts=conflicts,
cols=cols,
)
else:
logger.info(f"No new data for {self.project_key}")
def refresh_sprint_data(self):
logger.info(f"Refreshing sprint data for {self.project_key}")
self.get_issues_from_search_results()
self.get_issue_and_sprint_data()
self.merge_issue_and_sprint_data()
self.save_to_db()
def list_to_df(dict_list: list) -> pd.DataFrame:
if dict_list:
d = {key: [] for key in dict_list[0].keys()}
for row in dict_list:
for k, v in row.items():
d[k].append(v)
return pd.DataFrame(d)
else:
logger.warning("No data found for this query")
raise Exception("No data found for this query")
def get_jira_connection():
user = config.get("JIRA_USER", None) or os.environ.get("JIRA_USER", None)
token = config.get("JIRA_TOKEN", None) or os.environ.get("JIRA_TOKEN", None)
server = config.get("JIRA_SERVER", None) or os.environ.get("JIRA_SERVER", None)
options = {"server": server}
return JIRA(options=options, basic_auth=(user, token))
def get_project_keys(jira_connection: JIRA) -> list:
projects = jira_connection.projects()
logger.info(f"Found {len(projects)} projects for {jira_connection.client_info()}")
return [project.key for project in projects]
__all__ = [
"Report",
"config",
"list_to_df",
"get_project_keys",
"ProjectData",
"get_jira_connection",
]
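# --- Usage sketch (illustrative, not part of the module API) ---------------
# Assumptions: JIRA_USER / JIRA_TOKEN / JIRA_SERVER and DB_TABLE_NAME are
# configured, "MYPROJ" is a made-up project key, and customfield_10020 is the
# sprint field (as used elsewhere in this module); adjust for your instance.
#
# def refresh_all_projects():
#     jira = get_jira_connection()
#     for project_key in get_project_keys(jira):
#         ProjectData(jira, project_key).refresh_sprint_data()
#
# def sprint_report_example():
#     report = Report(jql_filter="project=MYPROJ and issuetype in (Task, Bug)")
#     return report.get_sprint_counts(report.issues, "customfield_10020")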
|
# script for testing water level reading
# generates a self calibrated percentage of fullness.
#
# bar chart ref - https://alexwlchan.net/2018/05/ascii-bar-charts/
import machine
import time
from micropython import const
# define pins
CAP_PIN = const(14)
# define starting values
min_value = 500
max_value = 800
bar_size = 25
# setup inputs and outputs
t = machine.TouchPad(machine.Pin(CAP_PIN))
# main
def run():
global min_value, max_value  # module-level calibration values updated below
print("** Water Level Test **")
try:
print("press ctrl-c to stop")
print("")
while True:
value = t.read()
# adjust if new min/max value
min_value = min(min_value, value)
max_value = max(max_value, value)
percentage = (max_value - value)/(max_value - min_value)
# The ASCII block elements come in chunks of 8, so we work out how
# many fractions of 8 we need.
bar_chunks, remainder = divmod(int(percentage * bar_size * 8), 8)
# draw full width chunks
bar = '█' * bar_chunks
# add chunk fraction
if remainder > 0:
bar += chr(ord('█') + (8 - remainder))
print("Water level: {0:>5.1f}% |{1:<25s}".format(percentage*100, bar))
print(" |{0:<25s}".format(bar))
print("min value: {0:<6d} max value: {1:<6d} raw value: {2:<6d}".format(min_value, max_value, value))
print('\033[A'*4)
time.sleep(.5)
except KeyboardInterrupt:
print("Program stopped by user")
except:
print("Something went wrong")
finally:
print("Goodbye")
if __name__ == '__main__':
run()
|
def get_secret(key):
with open("secrets.txt", "r") as file:
secrets = file.read().splitlines()
for i in range(len(secrets)):
if secrets[i] == "# " + key:
return secrets[i + 1]
print("Invalid key!!")
def get_account_sid():
return get_secret("account_sid")
def get_auth_token():
return get_secret("auth_token")
def get_Twilio_number():
return get_secret("twilio_number")
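# Expected secrets.txt layout (inferred from the lookup above): a "# <key>"
# marker line immediately followed by the value line. Values below are
# placeholders only.
#
#   # account_sid
#   ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   # auth_token
#   <your auth token>
#   # twilio_number
#   +15551234567
#
# Usage sketch:
#   sid = get_account_sid()
#   token = get_auth_token()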
|
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import platform
import sys
from neutron.agent.l2.extensions import qos as qos_extension
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import topics
from neutron.conf.agent import common as neutron_config
from os_win import exceptions
from os_win import utilsfactory
from oslo_log import log as logging
import oslo_messaging
from networking_hyperv.common.i18n import _, _LI, _LW, _LE # noqa
from networking_hyperv.neutron import _common_utils as c_util
from networking_hyperv.neutron.agent import layer2 as hyperv_base
from networking_hyperv.neutron import config
from networking_hyperv.neutron import constants as h_constant
from networking_hyperv.neutron import exception
from networking_hyperv.neutron import nvgre_ops
from networking_hyperv.neutron import trunk_driver
CONF = config.CONF
LOG = logging.getLogger(__name__)
_port_synchronized = c_util.get_port_synchronized_decorator('n-hv-agent-')
class HyperVSecurityAgent(sg_rpc.SecurityGroupAgentRpc):
def __init__(self, context, plugin_rpc):
super(HyperVSecurityAgent, self).__init__(context, plugin_rpc)
if sg_rpc.is_firewall_enabled():
self._setup_rpc()
@property
def use_enhanced_rpc(self):
return True
def _setup_rpc(self):
self.topic = topics.AGENT
self.endpoints = [HyperVSecurityCallbackMixin(self)]
consumers = [[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
class HyperVSecurityCallbackMixin(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
target = oslo_messaging.Target(version='1.3')
def __init__(self, sg_agent):
super(HyperVSecurityCallbackMixin, self).__init__()
self.sg_agent = sg_agent
class HyperVNeutronAgent(hyperv_base.Layer2Agent):
_AGENT_BINARY = "neutron-hyperv-agent"
_AGENT_TYPE = h_constant.AGENT_TYPE_HYPERV
def __init__(self):
super(HyperVNeutronAgent, self).__init__()
self._agent_id = 'hyperv_%s' % platform.node()
self._qos_ext = None
self._nvgre_enabled = False
self._metricsutils = utilsfactory.get_metricsutils()
self._port_metric_retries = {}
agent_conf = CONF.get('AGENT', {})
security_conf = CONF.get('SECURITYGROUP', {})
self._enable_metrics_collection = agent_conf.get(
'enable_metrics_collection', False)
self._metrics_max_retries = agent_conf.get('metrics_max_retries', 100)
self._enable_security_groups = security_conf.get(
'enable_security_group', False)
self._init_nvgre()
def _get_agent_configurations(self):
configurations = {'vswitch_mappings': self._physical_network_mappings}
if CONF.NVGRE.enable_support:
configurations['arp_responder_enabled'] = False
configurations['tunneling_ip'] = CONF.NVGRE.provider_tunnel_ip
configurations['devices'] = 1
configurations['l2_population'] = False
configurations['tunnel_types'] = [h_constant.TYPE_NVGRE]
configurations['enable_distributed_routing'] = False
configurations['bridge_mappings'] = {}
return configurations
def _setup(self):
"""Setup the layer two agent."""
super(HyperVNeutronAgent, self)._setup()
self._sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self._sec_groups_agent = HyperVSecurityAgent(self._context,
self._sg_plugin_rpc)
self._vlan_driver = trunk_driver.HyperVTrunkDriver(self._context)
if CONF.NVGRE.enable_support:
self._consumers.append([h_constant.TUNNEL, topics.UPDATE])
self._consumers.append([h_constant.LOOKUP, h_constant.UPDATE])
def _setup_qos_extension(self):
"""Setup the QOS extension if it is required."""
if not CONF.AGENT.enable_qos_extension:
return
self._qos_ext = qos_extension.QosAgentExtension()
self._qos_ext.consume_api(self)
self._qos_ext.initialize(self._connection, 'hyperv')
def _init_nvgre(self):
# if NVGRE is enabled, self._nvgre_ops is required in order to properly
# set the agent state (see the _get_agent_configurations method).
if not CONF.NVGRE.enable_support:
return
if not CONF.NVGRE.provider_tunnel_ip:
err_msg = _('enable_nvgre_support is set to True, but '
'provider tunnel IP is not configured. '
'Check neutron.conf config file.')
LOG.error(err_msg)
raise exception.NetworkingHyperVException(err_msg)
self._nvgre_enabled = True
self._nvgre_ops = nvgre_ops.HyperVNvgreOps(
list(self._physical_network_mappings.values()))
self._nvgre_ops.init_notifier(self._context, self._client)
self._nvgre_ops.tunnel_update(self._context,
CONF.NVGRE.provider_tunnel_ip,
h_constant.TYPE_NVGRE)
def _provision_network(self, port_id, net_uuid, network_type,
physical_network, segmentation_id):
"""Provision the network with the received information."""
LOG.info("Provisioning network %s", net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == h_constant.TYPE_VLAN:
# Nothing to do
pass
elif network_type == h_constant.TYPE_FLAT:
# Nothing to do
pass
elif network_type == h_constant.TYPE_LOCAL:
# TODO(alexpilotti): Check that the switch type is private
# or create it if not existing.
pass
elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_network(segmentation_id, net_uuid,
vswitch_name)
else:
raise exception.NetworkingHyperVException(
(_("Cannot provision unknown network type "
"%(network_type)s for network %(net_uuid)s") %
dict(network_type=network_type, net_uuid=net_uuid)))
vswitch_map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = vswitch_map
def _port_bound(self, port_id, network_id, network_type, physical_network,
segmentation_id):
"""Bind the port to the received network."""
super(HyperVNeutronAgent, self)._port_bound(
port_id, network_id, network_type, physical_network,
segmentation_id
)
vswitch_map = self._network_vswitch_map[network_id]
if network_type == h_constant.TYPE_VLAN:
self._vlan_driver.bind_vlan_port(port_id, segmentation_id)
elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_port(
segmentation_id, vswitch_map['vswitch_name'], port_id)
elif network_type == h_constant.TYPE_FLAT:
pass # Nothing to do
elif network_type == h_constant.TYPE_LOCAL:
pass # Nothing to do
else:
LOG.error('Unsupported network type %s', network_type)
if self._enable_metrics_collection:
self._utils.add_metrics_collection_acls(port_id)
self._port_metric_retries[port_id] = self._metrics_max_retries
def _port_enable_control_metrics(self):
if not self._enable_metrics_collection:
return
for port_id in list(self._port_metric_retries.keys()):
try:
if self._utils.is_metrics_collection_allowed(port_id):
self._metricsutils.enable_port_metrics_collection(port_id)
LOG.info('Port metrics enabled for port: %s', port_id)
del self._port_metric_retries[port_id]
elif self._port_metric_retries[port_id] < 1:
self._metricsutils.enable_port_metrics_collection(port_id)
LOG.error('Port metrics raw enabling for port: %s',
port_id)
del self._port_metric_retries[port_id]
else:
self._port_metric_retries[port_id] -= 1
except exceptions.NotFound:
# the vNIC no longer exists. it might have been removed or
# the VM it was attached to was destroyed.
LOG.warning("Port %s no longer exists. Cannot enable "
"metrics.", port_id)
del self._port_metric_retries[port_id]
@_port_synchronized
def _treat_vif_port(self, port_id, network_id, network_type,
physical_network, segmentation_id,
admin_state_up):
if admin_state_up:
self._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id)
# check if security groups are enabled.
# if not, tear down the security group rules
if self._enable_security_groups:
self._sec_groups_agent.refresh_firewall([port_id])
else:
self._utils.remove_all_security_rules(port_id)
else:
self._port_unbound(port_id)
self._sec_groups_agent.remove_devices_filter([port_id])
def _process_added_port(self, device_details):
super(HyperVNeutronAgent, self)._process_added_port(
device_details)
if CONF.AGENT.enable_qos_extension:
self._qos_ext.handle_port(self._context, device_details)
def _process_removed_port(self, device):
super(HyperVNeutronAgent, self)._process_removed_port(device)
try:
self._sec_groups_agent.remove_devices_filter([device])
except Exception:
LOG.exception("Exception encountered while processing"
" port %s.", device)
# Re-add the port as "removed", so it can be reprocessed.
self._removed_ports.add(device)
raise
def _work(self):
"""Process the information regarding the available ports."""
super(HyperVNeutronAgent, self)._work()
if self._nvgre_enabled:
self._nvgre_ops.refresh_nvgre_records()
self._port_enable_control_metrics()
def tunnel_update(self, context, **kwargs):
LOG.info('tunnel_update received: kwargs: %s', kwargs)
tunnel_ip = kwargs.get('tunnel_ip')
if tunnel_ip == CONF.NVGRE.provider_tunnel_ip:
# the notification should be ignored if it originates from this
# node.
return
tunnel_type = kwargs.get('tunnel_type')
self._nvgre_ops.tunnel_update(context, tunnel_ip, tunnel_type)
def lookup_update(self, context, **kwargs):
self._nvgre_ops.lookup_update(kwargs)
def main():
"""The entry point for the Hyper-V Neutron Agent."""
neutron_config.register_agent_state_opts_helper(CONF)
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
hyperv_agent = HyperVNeutronAgent()
# Start everything.
LOG.info("Agent initialized successfully, now running... ")
hyperv_agent.daemon_loop()
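# Note (assumption): the packaged agent normally exposes main() through a
# console_scripts entry point; running the module directly would only need the
# usual guard below.
if __name__ == '__main__':
    main()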
|
from setuptools import setup
from setuptools import find_packages
setup(
name="l2a",
version="1.0.0",
description="Machine learning sample for integer addition",
author="Philippe Trempe",
author_email="[email protected]",
url="https://github.com/PhTrempe/l2a",
license="MIT",
install_requires=[
"numpy",
"scipy",
"tensorflow",
"keras",
"pyyaml",
"h5py"
],
packages=find_packages()
)
|
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
###############################################################################
# file: __init__.py
# Purpose: Main init for the pyside_pysideviewer module.
#
# Programmer: Cyrus Harrison
# Creation: Tue Apr 3 13:40:18 PDT
#
#
# Modifications:
#
#
###############################################################################
from pyside_gui import *
|
# -*- coding: utf-8 -*-
import random, re
from bot.core import Answer
def substitute(text, factsheet):
t = text
if factsheet is not None and len(factsheet) > 0:
for i in factsheet.get_facts():
t = t.replace('{%s}' % i.get_label(), i.get_value())
return t
def make_answer(templates, context):
random.shuffle(templates)
for i in templates:
try:
t = substitute(i, context)
if re.search(r"{[a-zA-Z0-9_\-]+}", t) is None:
return Answer(message=t)
except:
pass
return None
class BaseActions():
def __init__(self):
pass
def goodbye(self, w, subfacts, conclusions, context):
return Answer(message=random.choice([
"Thank you for talking with me.",
"Good-bye.",
"Thank you, that will be $150. Have a good day!",
"Goodbye. It was nice talking to you.",
"Goodbye. This was really a nice talk.",
"Goodbye. I'm looking forward to our next session.",
]), stop=True)
def bot_name(self, w, subfacts, conclusions, context):
return Answer(message="Yes?")
def what_is_your_name(self, w, subfacts, conclusions, context):
return make_answer([
"You can call me {bot_name}.",
"Call me {bot_name}.",
"My name is {bot_name}.",
], context)
def what_you_can_do(self, w, subfacts, conclusions, context):
return make_answer([
"I am programmed to make small talk. I've also read all of Wikipedia. Try asking me 'What is RL3?' or 'Who is Alan Turing?'.",
], context)
def who_are_you(self, w, subfacts, conclusions, context):
return make_answer(["I am {bot_name} - a computer program designed to simulate conversation with human users."], context)
def what_is_rl3(self, w, subfacts, conclusions, context):
return make_answer(["RL3 is a rule-based information extraction, named-entity recognition and categorization engine. RL3 is also a programming language intended to simplify implementation, use and support of large libraries of rules and patterns in computational linguistics projects. Read more at <a href=\"https://rl3.zorallabs.com/wiki/Main_Page\">RL3 Information Extraction Engine</a>"], context)
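# A tiny illustration of substitute() above, using stand-in fact objects
# (assumption: the real bot.core facts API differs; these stubs exist only to
# show the {label} -> value replacement).
class _Fact:
    def __init__(self, label, value):
        self._label, self._value = label, value
    def get_label(self):
        return self._label
    def get_value(self):
        return self._value

class _Factsheet(list):
    def get_facts(self):
        return self

# substitute("My name is {bot_name}.", _Factsheet([_Fact("bot_name", "RL3")]))
# -> "My name is RL3."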
|
from __future__ import absolute_import
from __future__ import print_function
# Inspecting the numpy namespace and classifying numpy's functions
import numpy as np
from collections import defaultdict
import types
import inspect
import six
heading = lambda x : "-"*20 + str(x) + "-"*20
np_types = defaultdict(list)
for name, obj in six.iteritems(np.__dict__):
np_types[type(obj)].append(name)
print("Objects in numpy namespace by type:")
for t, vals in six.iteritems(np_types):
print(heading(t))
print(vals)
print("="*80)
all_ufuncs = np_types[np.ufunc]
unary_ufuncs = []
binary_ufuncs = []
other_ufuncs = []
for ufunc in all_ufuncs:
f = np.__dict__[ufunc]
if f.nin == 1:
unary_ufuncs.append(ufunc)
elif f.nin == 2:
binary_ufuncs.append(ufunc)
else:
other_ufuncs.append(ufunc)
print(heading("Unary ufuncs:"))
print(sorted(unary_ufuncs))
print(heading("Binary ufuncs:"))
print(sorted(binary_ufuncs))
if other_ufuncs:
print(heading("Other ufuncs:"))
print(sorted(other_ufuncs))
all_regular_funcs = np_types[types.FunctionType] + np_types[types.BuiltinFunctionType]
print(heading("Stat functions with keepdims kwarg and ndarray method"))
keepdims_funcs = []
all_other_funcs = []
for func in all_regular_funcs:
try:
f = np.__dict__[func]
keepdims = "keepdims" in inspect.getargspec(f).args
axis = "axis" in inspect.getargspec(f).args
ndarray_method = hasattr(np.ndarray, func)
if keepdims and axis and ndarray_method:
keepdims_funcs.append(func)
else:
all_other_funcs.append(func)
except TypeError:
pass
print(sorted(keepdims_funcs))
print(heading("All other functions"))
print(sorted(all_other_funcs))
|
# Generated by Django 3.1.3 on 2021-09-02 01:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('aao_vender', '0011_auto_20210902_0413'),
]
operations = [
migrations.CreateModel(
name='Aoo_User_Order_Details',
fields=[
('auod_id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('auod_subsc_package', models.PositiveIntegerField(default=1, null=True)),
('auod_start_date', models.DateTimeField(blank=True)),
('auod_end_date', models.DateTimeField(blank=True)),
('auod_created_at', models.DateTimeField(auto_now_add=True)),
('auod_last_modified_on', models.DateTimeField(auto_now=True)),
('auod_is_active', models.BooleanField(default=True)),
('auod_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aoo_user_order_details_user', to='aao_vender.aoo_user_details')),
('auod_vender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aoo_user_order_details_vender', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='vender_details',
name='vd_mob_number',
field=models.TextField(default=None, max_length=20),
),
migrations.CreateModel(
name='Vender_Transection_Details',
fields=[
('vtd_id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('vtd_debit', models.PositiveIntegerField(default=0)),
('vtd_total_remaining', models.PositiveIntegerField(default=0)),
('vtd_created_at', models.DateTimeField(auto_now_add=True)),
('vtd_last_modified_on', models.DateTimeField(auto_now=True)),
('vtd_is_active', models.BooleanField(default=True)),
('vtd_trans_for', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vender_transection_details_trans_for', to='aao_vender.aoo_user_order_details')),
('vtd_vender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vender_transection_details_vender', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Admin_Ading_Credit',
fields=[
('adc_id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('adc_credit_amount', models.PositiveIntegerField(default=0, null=True)),
('adc_created_at', models.DateTimeField(auto_now_add=True)),
('adc_last_modified_on', models.DateTimeField(auto_now=True)),
('adc_is_active', models.BooleanField(default=True)),
('adc_vender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='admin_ading_credit_vender', to=settings.AUTH_USER_MODEL)),
],
),
]
|
from setuptools import setup
setup(name='aaron',
version='0.1',
description='python utils packages by aaron',
url='https://github.com/AaronTao1990/aaron.git',
author='Aaron',
author_email='[email protected]',
license='MIT',
packages=['aaron'],
zip_safe=False)
|
from __future__ import annotations
import sys
import re as regex
from typing import List, Pattern, Dict
class Node:
"""
Decision tree node
"""
def __init__(self, depth: int, name: str = "") -> None:
"""
Creates a new decision tree node
:param depth: Depth of the tree from this node
:param name: Name from the node, T and F will be appended dynamically
"""
self._name = name
self._value = False
# Generate nodes recursively
if depth > 0:
self._true: Node = Node(depth - 1, name + "T")
self._false: Node = Node(depth - 1, name + "F")
def _get_node(self, path: List[bool]) -> Node:
"""
Gets the node from the given decisions from this node
:param path: Decision path through the tree from this node
:return: The node at the given decision path
"""
current: Node = self
# Loop to the depth of the decision path
for i in range(len(path)):
# Get correct node
if path[i]:
current = current._true
else:
current = current._false
return current
def set_value(self, path: List[bool], value: bool) -> None:
"""
Sets the value of the decision node at this given path
:param path: Path of the decision node
:param value: Value to set
"""
self._get_node(path)._value = value
def get_value(self, path: List[bool]) -> bool:
"""
Gets the value at the given decision path in the tree
:param path: Decision path to the node
:return: The value of the node
"""
return self._get_node(path)._value
def __str__(self) -> str:
"""
String representation of the node
:return: The name given to the node, plus its decision path
"""
return self._name
def main(args: List[str]) -> None:
"""
Application entry point
:param args: Argument list, should contain the file to load at index 1
"""
# Create the decision tree and compile the pattern
tree: Node = Node(5)
pattern: Pattern = regex.compile("([#.]{5}) => ([#.])")
# File read
with open(args[1], "r") as f:
# Create the base state with some padding, you might want to increase the offset if your state goes left
offset: int = 5
pots: List[bool] = as_bool(("." * offset) + regex.search("[#.]+", f.readline()).group() + "....")
# Loop through non empty lines
for line in filter(lambda l: len(l.strip()) > 0, f):
decision: List[bool]
value: List[bool]
decision, value = map(as_bool, pattern.search(line).groups())
# If a true path, set the decision tree as such
if value[0]:
tree.set_value(decision, True)
# Setup some stuff
gen20: int
generations: int = 200
prev: int = sum_pots(pots, offset)
diffs: Dict[int, int] = {}
# Loop through a fixed amount of generations
for gen in range(1, generations + 1):
temp: List[bool] = list(pots)
for i in range(2, len(temp) - 2):
value: bool = tree.get_value(temp[i - 2:i + 3])
pots[i] = value
# Add to the right side if needed
add: int = sum(1 for i in range(-4, -2) if pots[i])
for _ in range(add):
pots.append(False)
# Get diff
curr: int = sum_pots(pots, offset)
diff: int = curr - prev
# print(f"Generation: {gen}, Current: {curr}, Diff: {curr - prev}")
prev = curr
# Get generation 20 score
if gen == 20:
gen20 = curr
# Setup diff frequency
if diff not in diffs:
diffs[diff] = 1
else:
diffs[diff] += 1
# print(as_str(pots))
# Print generation 20 score
print("Part one score:", gen20)
# Assume that the diff that appears the most often is gonna be constant
diff = max(diffs, key=lambda d: diffs[d])
print("Part two score:", prev + ((50000000000 - generations) * diff))
def as_bool(data: str) -> List[bool]:
"""
Parses a string into a list of bools, where '#' are true, and everything else is false
:param data: String to parse
:return: The generated list of bools
"""
return list(map(lambda c: c == "#", data))
def as_str(data: List[bool]) -> str:
"""
Parses a list of bools as a string, where True becomes '#', and False '.'
:param data: List of bools to parse
:return: The resulting string
"""
return "".join(map(lambda b: "#" if b else ".", data))
def sum_pots(pots: List[bool], offset: int) -> int:
"""
Sums the pots by their index
:param pots: Pots and their values (alive/dead)
:param offset: The offset from the start to the zero index
:return: The sum of all indices with alive plants
"""
return sum(i - offset for i in range(len(pots)) if pots[i])
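# Quick sanity checks for the helpers above (values verified by hand):
#   as_bool("#.#")                   -> [True, False, True]
#   as_str([True, False, True])      -> "#.#"
#   sum_pots([False, True, True], 1) -> (1 - 1) + (2 - 1) == 1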
# Only run if entry point
if __name__ == "__main__":
main(sys.argv)
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, [email protected]
# Peter Bengtsson, [email protected]
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import datetime
def datetime_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DDTHH:MM:SS.S
and convert it into an instance of datetime.datetime
"""
try:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
except ValueError:
try:
return datetime.datetime.strptime(s, '%Y-%m-%d')
except ValueError:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
def date_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DD
and convert it into an instance of datetime.date
"""
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
def datetime_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DDTHH:MM:SS.S
"""
return aDate.isoformat()
def date_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DD
"""
return aDate.strftime('%Y-%m-%d')
def hours_str_to_timedelta(hoursAsString):
return datetime.timedelta(hours=int(hoursAsString))
def timedelta_to_seconds(td):
return td.days * 24 * 60 * 60 + td.seconds
def str_to_timedelta(input_str):
""" a string conversion function for timedelta for strings in the format
DD:HH:MM:SS
"""
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
days = int(details[-4])
if len(details) >= 3:
hours = int(details[-3])
if len(details) >= 2:
minutes = int(details[-2])
if len(details) >= 1:
seconds = int(details[-1])
return datetime.timedelta(days=days,
hours=hours,
minutes=minutes,
seconds=seconds)
def timedelta_to_str(aTimedelta):
""" a conversion function for time deltas to string in the form
DD:HH:MM:SS
"""
days = aTimedelta.days
temp_seconds = aTimedelta.seconds
hours = temp_seconds // 3600
minutes = (temp_seconds - hours * 3600) // 60
seconds = temp_seconds - hours * 3600 - minutes * 60
return '%d:%d:%d:%d' % (days, hours, minutes, seconds)
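# Usage sketch for the DD:HH:MM:SS helpers above:
#   str_to_timedelta('1:02:03:04') -> datetime.timedelta(days=1, seconds=7384)
#   timedelta_to_str(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)) -> '1:2:3:4'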
|
# This material was prepared as an account of work sponsored by an agency of the
# United States Government. Neither the United States Government nor the United
# States Department of Energy, nor Battelle, nor any of their employees, nor any
# jurisdiction or organization that has cooperated in the development of these
# materials, makes any warranty, express or implied, or assumes any legal
# liability or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights. Reference
# herein to any specific commercial product, process, or service by trade name,
# trademark, manufacturer, or otherwise does not necessarily constitute or imply
# its endorsement, recommendation, or favoring by the United States Government
# or any agency thereof, or Battelle Memorial Institute. The views and opinions
# of authors expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by
# BATTELLE
# for the
# UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Flatten
def build_model(self):
# Input: state
layers = []
state_input = Input(shape=(1, self.env.observation_space.shape[0]))
layers.append(state_input)
length = len(self.dense)
# for i, layer_width in enumerate(self.dense):
for i in range(length):
layer_width = self.dense[i]
layers.append(Dense(layer_width, activation=self.activation)(layers[-1]))
# output layer
layers.append(Dense(self.env.action_space.n, activation=self.out_activation)(layers[-1]))
layers.append(Flatten()(layers[-1]))
model = Model(inputs=layers[0], outputs=layers[-1])
# model.summary()
print('', flush=True)
return model
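# A minimal wiring sketch (assumption): build_model above is written as an
# unbound method, so it expects an agent-like object exposing env, dense,
# activation and out_activation attributes. The stub below is illustrative
# only; a real agent would supply a gym-style environment instead.
from types import SimpleNamespace

_stub_agent = SimpleNamespace(
    env=SimpleNamespace(
        observation_space=SimpleNamespace(shape=(4,)),
        action_space=SimpleNamespace(n=2),
    ),
    dense=[32, 32],
    activation="relu",
    out_activation="linear",
)
# model = build_model(_stub_agent)  # builds a (1, 4) input -> 2-output network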
|
#
# flp - Module to load fl forms from fd files
#
# Jack Jansen, December 1991
#
from warnings import warnpy3k
warnpy3k("the flp module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import string
import os
import sys
import FL
SPLITLINE = '--------------------'
FORMLINE = '=============== FORM ==============='
ENDLINE = '=============================='
class error(Exception):
pass
##################################################################
# Part 1 - The parsing routines #
##################################################################
#
# Externally visible function. Load form.
#
def parse_form(filename, formname):
forms = checkcache(filename)
if forms is None:
forms = parse_forms(filename)
if forms.has_key(formname):
return forms[formname]
else:
raise error, 'No such form in fd file'
#
# Externally visible function. Load all forms.
#
def parse_forms(filename):
forms = checkcache(filename)
if forms is not None: return forms
fp = _open_formfile(filename)
nforms = _parse_fd_header(fp)
forms = {}
for i in range(nforms):
form = _parse_fd_form(fp, None)
forms[form[0].Name] = form
writecache(filename, forms)
return forms
#
# Internal: see if a cached version of the file exists
#
MAGIC = '.fdc'
_internal_cache = {} # Used by frozen scripts only
def checkcache(filename):
if _internal_cache.has_key(filename):
altforms = _internal_cache[filename]
return _unpack_cache(altforms)
import marshal
fp, filename = _open_formfile2(filename)
fp.close()
cachename = filename + 'c'
try:
fp = open(cachename, 'r')
except IOError:
#print 'flp: no cache file', cachename
return None
try:
if fp.read(4) != MAGIC:
print 'flp: bad magic word in cache file', cachename
return None
cache_mtime = rdlong(fp)
file_mtime = getmtime(filename)
if cache_mtime != file_mtime:
#print 'flp: outdated cache file', cachename
return None
#print 'flp: valid cache file', cachename
altforms = marshal.load(fp)
return _unpack_cache(altforms)
finally:
fp.close()
def _unpack_cache(altforms):
forms = {}
for name in altforms.keys():
altobj, altlist = altforms[name]
obj = _newobj()
obj.make(altobj)
list = []
for altobj in altlist:
nobj = _newobj()
nobj.make(altobj)
list.append(nobj)
forms[name] = obj, list
return forms
def rdlong(fp):
s = fp.read(4)
if len(s) != 4: return None
a, b, c, d = s[0], s[1], s[2], s[3]
return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d)
def wrlong(fp, x):
a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff
fp.write(chr(a) + chr(b) + chr(c) + chr(d))
def getmtime(filename):
import os
from stat import ST_MTIME
try:
return os.stat(filename)[ST_MTIME]
except os.error:
return None
#
# Internal: write cached version of the form (parsing is too slow!)
#
def writecache(filename, forms):
import marshal
fp, filename = _open_formfile2(filename)
fp.close()
cachename = filename + 'c'
try:
fp = open(cachename, 'w')
except IOError:
print 'flp: can\'t create cache file', cachename
return # Never mind
fp.write('\0\0\0\0') # Seek back and write MAGIC when done
wrlong(fp, getmtime(filename))
altforms = _pack_cache(forms)
marshal.dump(altforms, fp)
fp.seek(0)
fp.write(MAGIC)
fp.close()
#print 'flp: wrote cache file', cachename
#
# External: print some statements that set up the internal cache.
# This is for use with the "freeze" script. You should call
# flp.freeze(filename) for all forms used by the script, and collect
# the output on a file in a module file named "frozenforms.py". Then
# in the main program of the script import frozenforms.
# (Don't forget to take this out when using the unfrozen version of
# the script!)
#
def freeze(filename):
forms = parse_forms(filename)
altforms = _pack_cache(forms)
print 'import flp'
print 'flp._internal_cache[', repr(filename), '] =', altforms
#
# Internal: create the data structure to be placed in the cache
#
def _pack_cache(forms):
altforms = {}
for name in forms.keys():
obj, list = forms[name]
altobj = obj.__dict__
altlist = []
for obj in list: altlist.append(obj.__dict__)
altforms[name] = altobj, altlist
return altforms
#
# Internal: Locate form file (using PYTHONPATH) and open file
#
def _open_formfile(filename):
return _open_formfile2(filename)[0]
def _open_formfile2(filename):
if filename[-3:] != '.fd':
filename = filename + '.fd'
if filename[0] == '/':
try:
fp = open(filename,'r')
except IOError:
fp = None
else:
for pc in sys.path:
pn = os.path.join(pc, filename)
try:
fp = open(pn, 'r')
filename = pn
break
except IOError:
fp = None
if fp is None:
raise error, 'Cannot find forms file ' + filename
return fp, filename
#
# Internal: parse the fd file header, return number of forms
#
def _parse_fd_header(file):
# First read the magic header line
datum = _parse_1_line(file)
if datum != ('Magic', 12321):
raise error, 'Not a forms definition file'
# Now skip until we know number of forms
while 1:
datum = _parse_1_line(file)
if type(datum) == type(()) and datum[0] == 'Numberofforms':
break
return datum[1]
#
# Internal: parse fd form, or skip if name doesn't match.
# the special value None means 'always parse it'.
#
def _parse_fd_form(file, name):
datum = _parse_1_line(file)
if datum != FORMLINE:
raise error, 'Missing === FORM === line'
form = _parse_object(file)
if form.Name == name or name is None:
objs = []
for j in range(form.Numberofobjects):
obj = _parse_object(file)
objs.append(obj)
return (form, objs)
else:
for j in range(form.Numberofobjects):
_skip_object(file)
return None
#
# Internal class: a convenient place to store object info fields
#
class _newobj:
def add(self, name, value):
self.__dict__[name] = value
def make(self, dict):
for name in dict.keys():
self.add(name, dict[name])
#
# Internal parsing routines.
#
def _parse_string(str):
if '\\' in str:
s = '\'' + str + '\''
try:
return eval(s)
except:
pass
return str
def _parse_num(str):
return eval(str)
def _parse_numlist(str):
slist = string.split(str)
nlist = []
for i in slist:
nlist.append(_parse_num(i))
return nlist
# This dictionary maps item names to parsing routines.
# If no routine is given '_parse_num' is default.
_parse_func = { \
'Name': _parse_string, \
'Box': _parse_numlist, \
'Colors': _parse_numlist, \
'Label': _parse_string, \
'Name': _parse_string, \
'Callback': _parse_string, \
'Argument': _parse_string }
# This function parses a line, and returns either
# a string or a tuple (name,value)
import re
prog = re.compile('^([^:]*): *(.*)')
def _parse_line(line):
match = prog.match(line)
if not match:
return line
name, value = match.group(1, 2)
if name[0] == 'N':
name = string.join(string.split(name),'')
name = string.lower(name)
name = string.capitalize(name)
try:
pf = _parse_func[name]
except KeyError:
pf = _parse_num
value = pf(value)
return (name, value)
def _readline(file):
line = file.readline()
if not line:
raise EOFError
return line[:-1]
def _parse_1_line(file):
line = _readline(file)
while line == '':
line = _readline(file)
return _parse_line(line)
def _skip_object(file):
line = ''
while not line in (SPLITLINE, FORMLINE, ENDLINE):
pos = file.tell()
line = _readline(file)
if line == FORMLINE:
file.seek(pos)
def _parse_object(file):
obj = _newobj()
while 1:
pos = file.tell()
datum = _parse_1_line(file)
if datum in (SPLITLINE, FORMLINE, ENDLINE):
if datum == FORMLINE:
file.seek(pos)
return obj
if type(datum) is not type(()) or len(datum) != 2:
raise error, 'Parse error, illegal line in object: '+datum
obj.add(datum[0], datum[1])
#################################################################
# Part 2 - High-level object/form creation routines #
#################################################################
#
# External - Create a form an link to an instance variable.
#
def create_full_form(inst, (fdata, odatalist)):
form = create_form(fdata)
exec 'inst.'+fdata.Name+' = form\n'
for odata in odatalist:
create_object_instance(inst, form, odata)
#
# External - Merge a form into an existing form in an instance
# variable.
#
def merge_full_form(inst, form, (fdata, odatalist)):
exec 'inst.'+fdata.Name+' = form\n'
if odatalist[0].Class != FL.BOX:
raise error, 'merge_full_form() expects FL.BOX as first obj'
for odata in odatalist[1:]:
create_object_instance(inst, form, odata)
#################################################################
# Part 3 - Low-level object/form creation routines #
#################################################################
#
# External Create_form - Create form from parameters
#
def create_form(fdata):
import fl
return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
#
# External create_object - Create an object. Make sure there are
# no callbacks. Returns the object created.
#
def create_object(form, odata):
obj = _create_object(form, odata)
if odata.Callback:
raise error, 'Creating free object with callback'
return obj
#
# External create_object_instance - Create object in an instance.
#
def create_object_instance(inst, form, odata):
obj = _create_object(form, odata)
if odata.Callback:
cbfunc = eval('inst.'+odata.Callback)
obj.set_call_back(cbfunc, odata.Argument)
if odata.Name:
exec 'inst.' + odata.Name + ' = obj\n'
#
# Internal _create_object: Create the object and fill options
#
def _create_object(form, odata):
crfunc = _select_crfunc(form, odata.Class)
obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \
odata.Box[3], odata.Label)
if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
obj.boxtype = odata.Boxtype
obj.col1 = odata.Colors[0]
obj.col2 = odata.Colors[1]
obj.align = odata.Alignment
obj.lstyle = odata.Style
obj.lsize = odata.Size
obj.lcol = odata.Lcol
return obj
#
# Internal crfunc: helper function that returns correct create function
#
def _select_crfunc(fm, cl):
if cl == FL.BEGIN_GROUP: return fm.bgn_group
elif cl == FL.END_GROUP: return fm.end_group
elif cl == FL.BITMAP: return fm.add_bitmap
elif cl == FL.BOX: return fm.add_box
elif cl == FL.BROWSER: return fm.add_browser
elif cl == FL.BUTTON: return fm.add_button
elif cl == FL.CHART: return fm.add_chart
elif cl == FL.CHOICE: return fm.add_choice
elif cl == FL.CLOCK: return fm.add_clock
elif cl == FL.COUNTER: return fm.add_counter
elif cl == FL.DIAL: return fm.add_dial
elif cl == FL.FREE: return fm.add_free
elif cl == FL.INPUT: return fm.add_input
elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
elif cl == FL.MENU: return fm.add_menu
elif cl == FL.POSITIONER: return fm.add_positioner
elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
elif cl == FL.SLIDER: return fm.add_slider
elif cl == FL.VALSLIDER: return fm.add_valslider
elif cl == FL.TEXT: return fm.add_text
elif cl == FL.TIMER: return fm.add_timer
else:
raise error, 'Unknown object type: %r' % (cl,)
def test():
import time
t0 = time.time()
if len(sys.argv) == 2:
forms = parse_forms(sys.argv[1])
t1 = time.time()
print 'parse time:', 0.001*(t1-t0), 'sec.'
keys = forms.keys()
keys.sort()
for i in keys:
_printform(forms[i])
elif len(sys.argv) == 3:
form = parse_form(sys.argv[1], sys.argv[2])
t1 = time.time()
print 'parse time:', round(t1-t0, 3), 'sec.'
_printform(form)
else:
print 'Usage: test fdfile [form]'
def _printform(form):
f = form[0]
objs = form[1]
print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
for i in objs:
print ' Obj ', i.Name, ' type ', i.Class, i.Type
print ' Box ', i.Box, ' btype ', i.Boxtype
print ' Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
print ' cols ', i.Colors
print ' cback ', i.Callback, i.Argument
|
# Generated by Django 2.0.2 on 2018-12-04 00:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='body',
),
migrations.RemoveField(
model_name='product',
name='icon',
),
migrations.RemoveField(
model_name='product',
name='votes_total',
),
]
|
from uliweb.core.template import BaseBlockNode
class PermissionNode(BaseBlockNode):
def generate(self, writer):
writer.write_line('if functions.has_permission(request.user, %s):' %
self.statement, self.line)
with writer.indent():
self.body.generate(writer)
writer.write_line("pass", self.line)
class RoleNode(BaseBlockNode):
def generate(self, writer):
writer.write_line('if functions.has_role(request.user, %s):' %
self.statement, self.line)
with writer.indent():
self.body.generate(writer)
writer.write_line("pass", self.line)
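# Illustration: for a block whose statement is "admin", PermissionNode.generate
# writes roughly the following into the compiled template function (RoleNode
# does the same with functions.has_role):
#
#   if functions.has_permission(request.user, "admin"):
#       ...block body...
#   pass
#
# The template-side syntax for invoking these block nodes is project-specific
# and not shown here.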
|
"""Add parameter to enable/disable support activity
Revision ID: e9e9adb7e801
Revises: b05231a3afda
Create Date: 2021-06-13 20:52:31.981823
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e9e9adb7e801"
down_revision = "b05231a3afda"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"company",
sa.Column("require_support_activity", sa.Boolean(), nullable=True),
)
op.execute("UPDATE company SET require_support_activity = false")
op.alter_column("company", "require_support_activity", nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("company", "require_support_activity")
# ### end Alembic commands ###
|
from . import category, common, group, junction
from . import junction_old
|
from http import HTTPStatus
import pytest
from app.model.recipes import Recipe
class TestRecipe:
@classmethod
def setup_class(cls):
Recipe.drop_collection()
@pytest.fixture()
def new_recipe(self):
return {"title": "ovo cozido", "ingredients": ["ovo", "água"], "howto": "cozinhe o ovo na água"}
@pytest.fixture()
def document_recipe(self):
recipe = Recipe(title="ovo frito", ingredients=["ovo", "óleo"], howto="frite o ovo na frigideira")
return recipe.save()
def test_should_post_recipe(self, client, new_recipe):
response = client.post(
"/api/v1/recipes",
json=new_recipe,
)
assert response.status_code == HTTPStatus.CREATED
info_recipe = response.json
assert info_recipe
assert "id" in info_recipe
assert info_recipe["id"] is not None
assert "title" in info_recipe
assert info_recipe["title"] == new_recipe["title"]
assert info_recipe["howto"] is not None
assert info_recipe["howto"] == new_recipe["howto"]
assert info_recipe["ingredients"] is not None
assert type(info_recipe["ingredients"]) is list
assert sorted(info_recipe["ingredients"]) == sorted(new_recipe["ingredients"])
new_recipe_document = Recipe.objects(id=info_recipe["id"]).first()
assert new_recipe_document is not None
assert new_recipe_document.title == new_recipe["title"]
def test_should_get_recipe_by_id(self, client, document_recipe):
response = client.get(f"/api/v1/recipes/{document_recipe.id}")
assert response.status_code == HTTPStatus.OK
info_recipe = response.json
assert info_recipe
assert "id" in info_recipe
assert info_recipe["id"] is not None
assert "title" in info_recipe
assert info_recipe["title"] == document_recipe.title
assert info_recipe["howto"] is not None
assert info_recipe["howto"] == document_recipe.howto
assert info_recipe["ingredients"] is not None
assert type(info_recipe["ingredients"]) is list
assert sorted(info_recipe["ingredients"]) == sorted(document_recipe.ingredients)
def test_should_not_get_recipe_with_nonexistent_id(self, client):
# fake id
response = client.get("/api/v1/recipes/5f95ca454ff087dd3e3eae91")
assert response.status_code == HTTPStatus.NOT_FOUND
def test_should_not_get_recipe_with_wrong_id(self, client):
# wrong id
response = client.get("/api/v1/recipes/wrong_id")
assert response.status_code == HTTPStatus.BAD_REQUEST
response_json = response.json
assert "code" in response_json
assert "message" in response_json
def test_should_update_recipe_by_id(self, client, document_recipe):
response = client.get(f"/api/v1/recipes/{document_recipe.id}")
assert response.status_code == HTTPStatus.OK
recipe = response.json
del recipe["id"]
recipe["ingredients"].append("sal")
recipe["howto"] = "frite o ovo na frigideira. sal a gosto"
response = client.put(f"/api/v1/recipes/{document_recipe.id}", json=recipe)
assert response.status_code == HTTPStatus.OK
info_recipe = response.json
assert info_recipe
assert "id" in info_recipe
assert info_recipe["id"] is not None
assert "title" in info_recipe
assert info_recipe["title"] == recipe["title"]
assert info_recipe["howto"] is not None
assert info_recipe["howto"] == recipe["howto"]
assert info_recipe["ingredients"] is not None
assert type(info_recipe["ingredients"]) is list
assert sorted(info_recipe["ingredients"]) == sorted(recipe["ingredients"])
def test_should_delete_recipe_by_id(self, client, document_recipe):
response = client.delete(f"/api/v1/recipes/{document_recipe.id}")
assert response.status_code == HTTPStatus.NO_CONTENT
def test_should_get_all_recipes(self, client, document_recipe):
response = client.get("/api/v1/recipes")
assert response.status_code == HTTPStatus.OK
assert "X-Pagination" in response.headers
info_recipes = response.json
assert info_recipes
assert type(info_recipes) is list
assert any(["id" in recipe for recipe in info_recipes])
assert any([recipe["id"] == str(document_recipe.id) for recipe in info_recipes])
assert any([recipe["title"] == document_recipe.title for recipe in info_recipes])
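# A possible conftest.py for the `client` fixture these tests rely on -- a
# sketch only (assumption: the app package exposes a create_app() factory and
# the test configuration points Recipe at a disposable MongoDB database).
#
# import pytest
# from app import create_app
#
# @pytest.fixture()
# def client():
#     app = create_app()
#     app.config["TESTING"] = True
#     with app.test_client() as test_client:
#         yield test_client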
|
import asyncio
import logging
from queue import Empty
from . import randomizer
logger = logging.getLogger(__name__)
class AnimaLiaison:
"""
Anima Liaison is responsible for the actual operation of our automaton
At its core is a state machine which queues and monitors jobs to be satisfied by the captain. Those jobs
are executed by calling upon the following components:
captain.angler
captain.eyes
captain.legs
captain.voice
"""
def __init__(self, captain):
self.captain = captain
def dispatch(self):
""" This is the application entry point where the `operate` coroutine is invoked """
try:
asyncio.run(self.operate())
except (KeyboardInterrupt, SystemExit):
self.captain.kill()
logger.debug('Exiting')
async def operate(self):
""" Main logic loop """
while self.captain.alive:
if not self.captain.ready:
await self.captain.train()
await self.captain.update()
self.captain.look()
try:
while True:
result = await self.captain.perform_next_action()
if result is not None:
await result()
except Empty:
logger.debug('No more actions in queue')
# Wait a random amount before proceeding
await asyncio.sleep(randomizer.random_wait())
__all__ = ['AnimaLiaison']
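# Usage sketch (assumption: `captain` is built elsewhere and provides the
# alive/ready attributes plus the train/update/look/perform_next_action/kill
# coroutines and methods used in operate()):
#
#   liaison = AnimaLiaison(captain)
#   liaison.dispatch()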
|
import os
from datetime import datetime
from string import Template
__version__ = "1.0.18"
__mode__ = "dev"
def read_kv_file(fileName):
myvars = {}
if os.path.isfile(fileName):
with open(fileName) as kvfile:
for line in kvfile:
name, var = line.partition("=")[::2]
myvars[name.strip()] = var.strip().replace('"', '')
return myvars
else:
return myvars
def files(path):
dir_files = []
for file in os.listdir(path):
"""
ignore directories and files starting with __
"""
if (os.path.isfile(os.path.join(path, file)) and not file.startswith("__")):
dir_files.append(file)
return dir_files
versions_folder = os.path.join("versions")
pkg_map = {"version" : __version__, "packages" : [] }
default_json = read_kv_file(os.path.join(versions_folder, "__default"))
pkg_map.update(default_json)
pkg_files = files(os.path.join("versions"))
version_folder = os.path.join("versions", __mode__, __version__)
# get release date
pkg_rel_date = read_kv_file(os.path.join(version_folder, "__default"))
pkg_map.update(pkg_rel_date)
# get individual package files from versions root folder
for pkg_file in pkg_files:
pkg = read_kv_file(os.path.join("versions", pkg_file))
pkg_map["packages"].append(pkg)
# get package information for version
for pkg_item in pkg_map["packages"]:
pkg_item_info = read_kv_file(os.path.join(version_folder, pkg_item["PkgName"]))
pkg_item.update(pkg_item_info)
pkg_map["PkgDownloadLink"] = pkg_map["PkgDlURLRoot"] + "/" + __mode__ + "/" + pkg_map["version"]
pkg_rel_date = datetime.strptime( pkg_map["PkgBundleReleaseDate"], "%d.%m.%Y")
pkg_map["PkgReleasedOn"] = pkg_rel_date.strftime("%d %B %Y")
HEADER_TEMPLATE = Template("""
**Current Version**
* $PkgBundleFriendlyname $version `download link <$PkgDownloadLink>`_ , released on: $PkgReleasedOn
""")
BODY_TEMPLATE = Template("""
- `$PkgName <$PkgGitURL>`_ : `$PkgVersion <$PkgGitURL/tree/$PkgGitHash>`_
""")
version_info_tmpl = []
version_info_tmpl.append(HEADER_TEMPLATE.substitute(pkg_map))
for pkg_item in pkg_map["packages"]:
try:
pkg_item_tmpl = BODY_TEMPLATE.substitute(pkg_item)
version_info_tmpl.append(pkg_item_tmpl)
    except KeyError:
        # skip packages that do not define all fields used by the template
        pass
print("".join(version_info_tmpl))
|
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, chi2, f_classif
from sklearn.model_selection import train_test_split  # was sklearn.cross_validation (removed in scikit-learn 0.20)
from sklearn.naive_bayes import BernoulliNB
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV  # was sklearn.grid_search (removed in scikit-learn 0.20)
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn import metrics
from old_hamshahri_reader import OldHamshahriReader
import config
tuned_params = [{'C': [1, 10, 100, 1000]}]
svc_tuned_params = [{'kernel': ['rbf'], 'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001]},]
if __name__ == '__main__':
rd = OldHamshahriReader(root=config.CORPORA_ROOT)
docs, labels = rd.sklearn_docs(config.TOT_DOCS)
#vectorizer = CountVectorizer(docs)
vectorizer = TfidfVectorizer(lowercase=False, max_df=0.8)
fs = vectorizer.fit_transform(docs)
#vectorizer.build_preprocessor()
selector = SelectPercentile(chi2, percentile=10)
selector.fit(fs, labels)
fs = selector.transform(fs)
fs_train, fs_test, labels_train, labels_test = train_test_split(
fs, labels, test_size=0.4, random_state=0
)
clf = None
pred = None
grid_search = False
if config.CLASSIFIER == 'NaiveBayes':
clf = BernoulliNB()
elif config.CLASSIFIER == 'LinearSVC':
if config.SELF_TRAINING:
clf = LinearSVC(C=1)
else:
clf = GridSearchCV(LinearSVC(), tuned_params, cv=5, scoring='accuracy')
grid_search = True
elif config.CLASSIFIER == 'SVC':
clf = GridSearchCV(SVC(), svc_tuned_params, cv=5, scoring='accuracy')
grid_search = True
elif config.CLASSIFIER == 'DecisionTree':
clf = DecisionTreeClassifier()
fs_train = fs_train.toarray()
fs_test = fs_test.toarray()
elif config.CLASSIFIER == 'Ensemble':
#clf = AdaBoostClassifier(n_estimators=100)
clf = GradientBoostingClassifier(n_estimators=5, random_state=0)
fs_train = fs_train.toarray()
fs_test = fs_test.toarray()
if config.SELF_TRAINING:
fl = fs_train.shape[0]
ll = labels_train.shape[0]
fsarr = fs_train.toarray()
        cur_fs = fsarr[:fl // 10]
        cur_labels = labels_train[:ll // 10]
        clf.fit(cur_fs, cur_labels)
        print(clf.classes_)
        for i in range(1, 10):
            new_fs = fsarr[(i * fl) // 10:((i + 1) * fl) // 10]
            confidence_scores = clf.decision_function(new_fs)
            most_confident_samples = confidence_scores.max(axis=1).argsort()[
                -1 * (confidence_scores.shape[0] // 10):]
most_confident_labels = confidence_scores[most_confident_samples].argmax(axis=1)
cur_fs = np.append(cur_fs, new_fs[most_confident_samples], axis=0)
cur_labels = np.append(cur_labels, clf.classes_[most_confident_labels])
clf.fit(cur_fs, cur_labels)
pred = clf.predict(fs_test)
else:
clf.fit(fs_train, labels_train)
pred = clf.predict(fs_test)
if grid_search:
        print(clf.best_estimator_)
    #print(metrics.classification_report(labels_test, pred))
    # multi-class scores need an explicit average in current scikit-learn
    print("%s ** Accuracy: %f\tPrecision: %f\tRecall: %f\tF1: %f" % (
        config.CLASSIFIER,
        metrics.accuracy_score(labels_test, pred),
        metrics.precision_score(labels_test, pred, average='weighted'),
        metrics.recall_score(labels_test, pred, average='weighted'),
        metrics.f1_score(labels_test, pred, average='weighted'),
    ))
|
# %% Only run this when testing inside this project: it makes the project root importable
import os
import sys
sys.path += [os.path.realpath('.')]
# %% Everything from here on is what normal usage should include
from pathlib import Path
import pickle
import warnings
import numpy as np
import pandas as pd
from pandas.core.common import SettingWithCopyWarning
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
from pytorch_forecasting import GroupNormalizer, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data.examples import get_stallion_data
from pytorch_forecasting.metrics import MAE, RMSE, SMAPE, PoissonLoss, QuantileLoss
from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
from pytorch_forecasting.utils import profile
warnings.simplefilter("error", category=SettingWithCopyWarning)
data = get_stallion_data()
data["month"] = data.date.dt.month.astype("str").astype("category")
data["log_volume"] = np.log(data.volume + 1e-8)
data["time_idx"] = data["date"].dt.year * 12 + data["date"].dt.month
data["time_idx"] -= data["time_idx"].min()
data["avg_volume_by_sku"] = data.groupby(["time_idx", "sku"], observed=True).volume.transform("mean")
data["avg_volume_by_agency"] = data.groupby(["time_idx", "agency"], observed=True).volume.transform("mean")
# data = data[lambda x: (x.sku == data.iloc[0]["sku"]) & (x.agency == data.iloc[0]["agency"])]
special_days = [
"easter_day",
"good_friday",
"new_year",
"christmas",
"labor_day",
"independence_day",
"revolution_day_memorial",
"regional_games",
"fifa_u_17_world_cup",
"football_gold_cup",
"beer_capital",
"music_fest",
]
data[special_days] = data[special_days].apply(lambda x: x.map({0: "", 1: x.name})).astype("category")
training_cutoff = data["time_idx"].max() - 6
max_encoder_length = 36
max_prediction_length = 6
training = TimeSeriesDataSet(
data[lambda x: x.time_idx <= training_cutoff],
time_idx="time_idx",
target="volume",
group_ids=["agency", "sku"],
    min_encoder_length=max_encoder_length // 2,  # allow encoder lengths down to half of max_encoder_length
max_encoder_length=max_encoder_length,
min_prediction_length=1,
max_prediction_length=max_prediction_length,
static_categoricals=["agency", "sku"],
static_reals=["avg_population_2017", "avg_yearly_household_income_2017"],
time_varying_known_categoricals=["special_days", "month"],
variable_groups={"special_days": special_days}, # group of categorical variables can be treated as one variable
time_varying_known_reals=["time_idx", "price_regular", "discount_in_percent"],
time_varying_unknown_categoricals=[],
time_varying_unknown_reals=[
"volume",
"log_volume",
"industry_volume",
"soda_volume",
"avg_max_temp",
"avg_volume_by_agency",
"avg_volume_by_sku",
],
target_normalizer=GroupNormalizer(
groups=["agency", "sku"], transformation="softplus", center=False
), # use softplus with beta=1.0 and normalize by group
add_relative_time_idx=True,
add_target_scales=True,
add_encoder_length=True,
)
validation = TimeSeriesDataSet.from_dataset(training, data, predict=True, stop_randomization=True)
batch_size = 64
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)
# save datasets
training.save("t raining.pkl")
validation.save("validation.pkl")
early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min")
lr_logger = LearningRateMonitor()
logger = TensorBoardLogger(log_graph=True)
trainer = pl.Trainer(
max_epochs=100,
gpus=0,
weights_summary="top",
gradient_clip_val=0.1,
limit_train_batches=30,
# val_check_interval=20,
# limit_val_batches=1,
# fast_dev_run=True,
logger=logger,
# profiler=True,
callbacks=[lr_logger, early_stop_callback],
)
tft = TemporalFusionTransformer.from_dataset(
training,
learning_rate=0.03,
hidden_size=16,
attention_head_size=1,
dropout=0.1,
hidden_continuous_size=8,
output_size=7,
loss=QuantileLoss(),
log_interval=10,
log_val_interval=1,
reduce_on_plateau_patience=3,
)
print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")
# # find optimal learning rate
# # remove logging and artificial epoch size
# tft.hparams.log_interval = -1
# tft.hparams.log_val_interval = -1
# trainer.limit_train_batches = 1.0
# # run learning rate finder
# res = trainer.tuner.lr_find(
# tft, train_dataloader=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2
# )
# print(f"suggested learning rate: {res.suggestion()}")
# fig = res.plot(show=True, suggest=True)
# fig.show()
# tft.hparams.learning_rate = res.suggestion()
# trainer.fit(
# tft,
# train_dataloader=train_dataloader,
# val_dataloaders=val_dataloader,
# )
# # make a prediction on entire validation set
# preds, index = tft.predict(val_dataloader, return_index=True, fast_dev_run=True)
# tune
study = optimize_hyperparameters(
train_dataloader,
val_dataloader,
model_path="optuna_test",
n_trials=200,
max_epochs=50,
    gradient_clip_val_range=(0.01, 1.0),  # TODO: review what the ranges below control
    hidden_size_range=(8, 128),
hidden_continuous_size_range=(8, 128),
attention_head_size_range=(1, 4),
learning_rate_range=(0.001, 0.1),
dropout_range=(0.1, 0.3),
trainer_kwargs=dict(limit_train_batches=30),
reduce_on_plateau_patience=4,
use_learning_rate_finder=False,
)
with open("test_study.pkl", "wb") as fout:
pickle.dump(study, fout)
# profile speed
# profile(
# trainer.fit,
# profile_fname="profile.prof",
# model=tft,
# period=0.001,
# filter="pytorch_forecasting",
# train_dataloader=train_dataloader,
# val_dataloaders=val_dataloader,
# )
|
import random
import math
import datetime
def four_gamete_test(marker1,marker2,missing_char='N'):
if len(marker1)!=len(marker2):
        raise ValueError('unequal number of genotypes')
gametes=[]
    for i in range(len(marker1)):
if marker1[i]==missing_char or marker2[i]==missing_char:
continue
if (marker1[i],marker2[i]) not in gametes:
gametes.append((marker1[i],marker2[i]))
if len(gametes)<=3:
return True
else:
return False
def assign_phenotype_quant(marker,variance_proportions=[0.7],epistasis=0,epi_effect=0.0,maf_range=[0,1],epi_hapl=1,causal_index_pre=0,diploid=0,dominant=0,raw=[]):
if epistasis and len(variance_proportions)!=2:
        raise ValueError('Epistasis only for two QTNs defined')
    if causal_index_pre and len(variance_proportions)!=1:
        raise ValueError('Only one QTN with predefined position possible')
maf=0.0
if causal_index_pre:
causal_index=causal_index_pre
mafs=[]
causal_indices=[]
for geno in marker:
if geno[causal_index]=='1':
maf+=1.0/len(marker)
mafs.append(maf)
causal_indices.append(causal_index)
i=0
while i<len(variance_proportions) and not causal_index_pre:
if i==0:
causal_indices=[]
mafs=[]
i+=1
maf=0.0
count=0
while maf<=maf_range[0] or maf>=maf_range[1]:
count+=1
if count>=2*len(marker[0]):
                raise RuntimeError('No SNP in MAF range found')
maf=0.0
epi_freq=0.0
if epistasis and i>1 and epi_hapl:
snp1=[]
if diploid:
marker2=raw
else:
marker2=marker
                for k in range(len(marker2)):
                    snp1.append(marker2[k][causal_indices[0]])
                end_haploblock=causal_indices[0]+1
                for j in range(causal_indices[0]+1,int(0.9*len(marker2[0]))):
                    snp2=[]
                    for k in range(len(marker2)):
snp2.append(marker2[k][j])
#print j, four_gamete_test(snp1,snp2)
if not four_gamete_test(snp1,snp2):
#print j
end_haploblock=j-1
break
else:
end_haploblock=j
if causal_indices[0]+2>=end_haploblock:
i=0
break
causal_index=random.randint(causal_indices[0]+1,end_haploblock)
epi_freq=0.0
for geno in marker:
if diploid:
if geno[causal_indices[0]]!='0' and geno[causal_index]!='0':
epi_freq+=1.0/len(marker)
else:
if geno[causal_indices[0]]=='1' and geno[causal_index]=='1':
epi_freq+=1.0/len(marker)
if epi_freq<=0.000001 or epi_freq>=0.999999:
i=0
break
elif epistasis and i>1:
#causal_index=random.randint(int(0.1*len(marker[0])),int(0.9*len(marker[0])))
causal_index=random.randint(int((i-1)/float(len(variance_proportions))*len(marker[0])),int((i)/float(len(variance_proportions))*len(marker[0])-1))
epi_freq=0.0
for geno in marker:
if diploid:
if geno[causal_indices[0]]!='0' and geno[causal_index]!='0':
epi_freq+=1.0/len(marker)
else:
if geno[causal_indices[0]]=='1' and geno[causal_index]=='1':
epi_freq+=1.0/len(marker)
if epi_freq<=0.000001 or epi_freq>=0.999999:
i=0
break
else:
#causal_index=random.randint(int(0.1*len(marker[0])),int(0.9*len(marker[0])))
causal_index=random.randint(int((i-1)/float(len(variance_proportions))*len(marker[0])),int((i)/float(len(variance_proportions))*len(marker[0])-1))
                print(causal_index, len(marker[0]), i)
if causal_index in causal_indices:
continue
for geno in marker:
if diploid:
if geno[causal_index]=='1':
maf+=0.5/len(marker)
elif geno[causal_index]=='2':
maf+=1.0/len(marker)
else:
if geno[causal_index]=='1':
maf+=1.0/len(marker)
mafs.append(maf)
causal_indices.append(causal_index)
#print causal_indices
phenotypes=[]
if (math.sqrt(1-sum(map(abs,variance_proportions))-epi_effect))<0:
        raise ValueError('Illegal Variance Proportions')
for geno in marker:
alleles=[]
for i in causal_indices:
alleles.append(geno[i])
        alleles=list(map(int,alleles))
        if diploid:
            alleles=list(map(lambda x:x-1,alleles))
        phen=math.sqrt(1-sum(map(abs,variance_proportions)))*random.gauss(0,1)
        for i in range(len(alleles)):
if diploid and not dominant:
root=variance_proportions[i]/(2*mafs[i]*(1-mafs[i]))
phen+=alleles[i]*math.sqrt(root)
else:
root=variance_proportions[i]/(mafs[i]*(1-mafs[i]))
if alleles[i]>0:
phen+=1*math.sqrt(root)
else:
phen+=0*math.sqrt(root)
if epistasis:
if diploid:
if alleles[0]>0 and alleles[1]>0:
root=epi_effect/(epi_freq*(1-epi_freq))
phen+=1*math.sqrt(root)
else:
if alleles[0] and alleles[1]:
root=epi_effect/(epi_freq*(1-epi_freq))
phen+=1*math.sqrt(root)
phenotypes.append(phen)
return causal_indices,phenotypes,mafs
def assign_phenotype_qual(marker,wt_affect,mut_affect,het_affect=0,maf_range=[0,1],causal_index_pre=0,diploid=0):
maf=0.0
if causal_index_pre:
maf=0.0
causal_index=causal_index_pre
for geno in marker:
if diploid:
if geno[causal_index]=='1':
maf+=0.5/len(marker)
elif geno[causal_index]=='2':
maf+=1.0/len(marker)
elif geno[causal_index]=='1':
maf+=1.0/len(marker)
count=0
while (maf<=maf_range[0] or maf>=maf_range[1]) and not causal_index_pre :
count+=1
if count>=len(marker[0]):
            raise RuntimeError('No SNP in MAF range found')
maf=0.0
causal_index=random.randint(int(0.1*len(marker[0])),int(0.9*len(marker[0])))
for geno in marker:
if diploid:
if geno[causal_index]=='1':
maf+=0.5/len(marker)
elif geno[causal_index]=='2':
maf+=1.0/len(marker)
elif geno[causal_index]=='1':
maf+=1.0/len(marker)
phenotypes=[]
for geno in marker:
if diploid:
if geno[causal_index]=='0' and random.random()<wt_affect:
phen='1'
elif geno[causal_index]=='1' and random.random()<het_affect:
phen='1'
elif geno[causal_index]=='2' and random.random()<mut_affect:
phen='1'
else:
phen='0'
else:
if geno[causal_index]=='0' and random.random()<wt_affect:
phen='1'
elif geno[causal_index]=='1' and random.random()<mut_affect:
phen='1'
else:
phen='0'
phenotypes.append(phen)
return [causal_index],phenotypes,[maf]
def assign_phenotype_qual_2locus(marker,penetrances,diploid=0,maf_range=[0,1],epi_hapl=0,raw=[]):
causal_indices=[]
mafs=[]
for i in range(2):
maf=0.0
count=0
while (maf<=maf_range[0] or maf>=maf_range[1]):
if epi_hapl and i==1:
snp1=[]
if diploid:
marker2=raw
else:
marker2=marker
                for k in range(len(marker2)):
                    snp1.append(marker2[k][causal_indices[0]])
                end_haploblock=causal_indices[0]+1
                for j in range(causal_indices[0]+1,int(0.9*len(marker2[0]))):
                    snp2=[]
                    for k in range(len(marker2)):
snp2.append(marker2[k][j])
#print j, four_gamete_test(snp1,snp2)
if not four_gamete_test(snp1,snp2):
#print j
end_haploblock=j-1
break
else:
end_haploblock=j
if causal_indices[0]+2>=end_haploblock:
i=0
break
causal_index=random.randint(causal_indices[0]+1,end_haploblock)
else:
count+=1
if count>=len(marker[0]):
                    raise RuntimeError('No SNP in MAF range found')
causal_index=random.randint(int(0.1*len(marker[0])),int(0.9*len(marker[0])))
if causal_index in causal_indices:
continue
maf=0.0
for geno in marker:
if diploid:
if geno[causal_index]=='1':
maf+=0.5/len(marker)
elif geno[causal_index]=='2':
maf+=1.0/len(marker)
elif geno[causal_index]=='1':
maf+=1.0/len(marker)
causal_indices.append(causal_index)
mafs.append(maf)
phenotypes=[]
for geno in marker:
pen=penetrances[(geno[causal_indices[0]],geno[causal_indices[1]])]
if random.random()<pen:
phen='1'
else:
phen='0'
phenotypes.append(phen)
return causal_indices,phenotypes,mafs
|
import os
import numpy as np
import soundfile as sf
import glob
import librosa
from urllib.request import urlretrieve
EPS = np.finfo(float).eps
COEFS_SIG = np.array([9.651228012789436761e-01, 6.592637550310214145e-01,
7.572372955623894730e-02])
COEFS_BAK = np.array([-3.733460011101781717e+00,2.700114234092929166e+00,
-1.721332907340922813e-01])
COEFS_OVR = np.array([8.924546794696789354e-01, 6.609981731940616223e-01,
7.600269530243179694e-02])
def is_clipped(audio, clipping_threshold=0.99):
return any(abs(audio) > clipping_threshold)
def normalize(audio, target_level=-25):
'''Normalize the signal to the target level'''
rms = (audio ** 2).mean() ** 0.5
scalar = 10 ** (target_level / 20) / (rms + EPS)
audio = audio * scalar
return audio
def normalize_segmental_rms(audio, rms, target_level=-25):
'''Normalize the signal to the target level
based on segmental RMS'''
scalar = 10 ** (target_level / 20) / (rms + EPS)
audio = audio * scalar
return audio
def audioread(path, norm=False, start=0, stop=None, target_level=-25):
'''Function to read audio'''
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError("[{}] does not exist!".format(path))
try:
audio, sample_rate = sf.read(path, start=start, stop=stop)
    except RuntimeError:  # fix for sph pcm-embedded shortened v2
        print('WARNING: Audio type not supported')
        raise  # re-raise so the caller does not hit an undefined `audio` below
if len(audio.shape) == 1: # mono
if norm:
rms = (audio ** 2).mean() ** 0.5
scalar = 10 ** (target_level / 20) / (rms + EPS)
audio = audio * scalar
else: # multi-channel
audio = audio.T
audio = audio.sum(axis=0) / audio.shape[0]
if norm:
audio = normalize(audio, target_level)
return audio, sample_rate
def audiowrite(destpath, audio, sample_rate=16000, norm=False, target_level=-25, \
clipping_threshold=0.99, clip_test=False):
'''Function to write audio'''
if clip_test:
if is_clipped(audio, clipping_threshold=clipping_threshold):
raise ValueError("Clipping detected in audiowrite()! " + \
destpath + " file not written to disk.")
if norm:
audio = normalize(audio, target_level)
max_amp = max(abs(audio))
if max_amp >= clipping_threshold:
audio = audio / max_amp * (clipping_threshold - EPS)
destpath = os.path.abspath(destpath)
destdir = os.path.dirname(destpath)
if not os.path.exists(destdir):
os.makedirs(destdir)
sf.write(destpath, audio, sample_rate)
return
def add_clipping(audio, max_thresh_perc=0.8):
'''Function to add clipping'''
threshold = max(abs(audio)) * max_thresh_perc
audioclipped = np.clip(audio, -threshold, threshold)
return audioclipped
def snr_mixer(params, clean, noise, snr, target_level=-25, clipping_threshold=0.99):
'''Function to mix clean speech and noise at various SNR levels'''
cfg = params['cfg']
if len(clean) > len(noise):
noise = np.append(noise, np.zeros(len(clean) - len(noise)))
else:
clean = np.append(clean, np.zeros(len(noise) - len(clean)))
# Normalizing to -25 dB FS
clean = clean / (max(abs(clean)) + EPS)
clean = normalize(clean, target_level)
rmsclean = (clean ** 2).mean() ** 0.5
noise = noise / (max(abs(noise)) + EPS)
noise = normalize(noise, target_level)
rmsnoise = (noise ** 2).mean() ** 0.5
# Set the noise level for a given SNR
noisescalar = rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)
noisenewlevel = noise * noisescalar
# Mix noise and clean speech
noisyspeech = clean + noisenewlevel
# Randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
    # There is a small chance of clipping here, which is not a major issue.
noisy_rms_level = np.random.randint(params['target_level_lower'], params['target_level_upper'])
rmsnoisy = (noisyspeech ** 2).mean() ** 0.5
scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)
noisyspeech = noisyspeech * scalarnoisy
clean = clean * scalarnoisy
noisenewlevel = noisenewlevel * scalarnoisy
# Final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
if is_clipped(noisyspeech):
noisyspeech_maxamplevel = max(abs(noisyspeech)) / (clipping_threshold - EPS)
noisyspeech = noisyspeech / noisyspeech_maxamplevel
clean = clean / noisyspeech_maxamplevel
noisenewlevel = noisenewlevel / noisyspeech_maxamplevel
noisy_rms_level = int(20 * np.log10(scalarnoisy / noisyspeech_maxamplevel * (rmsnoisy + EPS)))
return clean, noisenewlevel, noisyspeech, noisy_rms_level
def segmental_snr_mixer(clean, noise, snr, target_level=-25, clipping_threshold=0.99, target_lower=-35, target_upper=-15):
'''Function to mix clean speech and noise at various segmental SNR levels'''
if len(clean) > len(noise):
noise = np.append(noise, np.zeros(len(clean) - len(noise)))
else:
clean = np.append(clean, np.zeros(len(noise) - len(clean)))
clean = clean / (max(abs(clean)) + EPS)
noise = noise / (max(abs(noise)) + EPS)
rmsclean, rmsnoise = active_rms(clean=clean, noise=noise)
clean = normalize_segmental_rms(clean, rms=rmsclean, target_level=target_level)
noise = normalize_segmental_rms(noise, rms=rmsnoise, target_level=target_level)
# Set the noise level for a given SNR
noisescalar = rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)
noisenewlevel = noise * noisescalar
# Mix noise and clean speech
noisyspeech = clean + noisenewlevel
noisy_rms_level = np.random.randint(target_lower, target_upper)
rmsnoisy = (noisyspeech ** 2).mean() ** 0.5
scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)
noisyspeech = noisyspeech * scalarnoisy
clean = clean * scalarnoisy
noisenewlevel = noisenewlevel * scalarnoisy
# Final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
if is_clipped(noisyspeech):
noisyspeech_maxamplevel = max(abs(noisyspeech)) / (clipping_threshold - EPS)
noisyspeech = noisyspeech / noisyspeech_maxamplevel
clean = clean / noisyspeech_maxamplevel
noisenewlevel = noisenewlevel / noisyspeech_maxamplevel
noisy_rms_level = int(20 * np.log10(scalarnoisy / noisyspeech_maxamplevel * (rmsnoisy + EPS)))
return clean, noisenewlevel, noisyspeech, noisy_rms_level
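# Illustrative sketch (not part of the original toolkit): mix a synthetic tone with
# white noise at a 5 dB segmental SNR. The 440 Hz tone and the noise floor are
# arbitrary stand-ins for clean speech and background noise.
def _example_segmental_snr_mix():
    rng = np.random.RandomState(0)
    fs = 16000
    t = np.arange(fs) / fs                          # one second of audio
    clean = 0.1 * np.sin(2 * np.pi * 440.0 * t)     # synthetic "clean" signal
    noise = 0.05 * rng.randn(fs)                    # synthetic background noise
    clean_out, noise_out, noisy, rms_level = segmental_snr_mixer(clean, noise, snr=5)
    print('mixture level: {} dBFS, clipped: {}'.format(rms_level, is_clipped(noisy)))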
def active_rms(clean, noise, fs=16000, energy_thresh=-50):
'''Returns the clean and noise RMS of the noise calculated only in the active portions'''
window_size = 100 # in ms
window_samples = int(fs * window_size / 1000)
sample_start = 0
noise_active_segs = []
clean_active_segs = []
while sample_start < len(noise):
sample_end = min(sample_start + window_samples, len(noise))
noise_win = noise[sample_start:sample_end]
clean_win = clean[sample_start:sample_end]
noise_seg_rms = 20 * np.log10((noise_win ** 2).mean() + EPS)
# Considering frames with energy
if noise_seg_rms > energy_thresh:
noise_active_segs = np.append(noise_active_segs, noise_win)
clean_active_segs = np.append(clean_active_segs, clean_win)
sample_start += window_samples
if len(noise_active_segs) != 0:
noise_rms = (noise_active_segs ** 2).mean() ** 0.5
else:
noise_rms = EPS
if len(clean_active_segs) != 0:
clean_rms = (clean_active_segs ** 2).mean() ** 0.5
else:
clean_rms = EPS
return clean_rms, noise_rms
def activitydetector(audio, fs=16000, energy_thresh=0.13, target_level=-25):
'''Return the percentage of the time the audio signal is above an energy threshold'''
audio = normalize(audio, target_level)
window_size = 50 # in ms
window_samples = int(fs * window_size / 1000)
sample_start = 0
cnt = 0
prev_energy_prob = 0
active_frames = 0
a = -1
b = 0.2
alpha_rel = 0.05
alpha_att = 0.8
while sample_start < len(audio):
sample_end = min(sample_start + window_samples, len(audio))
audio_win = audio[sample_start:sample_end]
frame_rms = 20 * np.log10(sum(audio_win ** 2) + EPS)
frame_energy_prob = 1. / (1 + np.exp(-(a + b * frame_rms)))
if frame_energy_prob > prev_energy_prob:
smoothed_energy_prob = frame_energy_prob * alpha_att + prev_energy_prob * (1 - alpha_att)
else:
smoothed_energy_prob = frame_energy_prob * alpha_rel + prev_energy_prob * (1 - alpha_rel)
if smoothed_energy_prob > energy_thresh:
active_frames += 1
prev_energy_prob = frame_energy_prob
sample_start += window_samples
cnt += 1
perc_active = active_frames / cnt
return perc_active
def resampler(input_dir, target_sr=16000, ext='*.wav'):
'''Resamples the audio files in input_dir to target_sr'''
files = glob.glob(f"{input_dir}/" + ext)
for pathname in files:
print(pathname)
        try:
            audio, fs = audioread(pathname)
            # keyword arguments keep this compatible with current librosa releases
            audio_resampled = librosa.resample(audio, orig_sr=fs, target_sr=target_sr)
            audiowrite(pathname, audio_resampled, target_sr)
        except Exception:  # skip files that cannot be read or resampled
            continue
def audio_segmenter(input_dir, dest_dir, segment_len=10, ext='*.wav'):
'''Segments the audio clips in dir to segment_len in secs'''
files = glob.glob(f"{input_dir}/" + ext)
for i in range(len(files)):
audio, fs = audioread(files[i])
if len(audio) > (segment_len * fs) and len(audio) % (segment_len * fs) != 0:
audio = np.append(audio, audio[0: segment_len * fs - (len(audio) % (segment_len * fs))])
if len(audio) < (segment_len * fs):
while len(audio) < (segment_len * fs):
audio = np.append(audio, audio)
audio = audio[:segment_len * fs]
num_segments = int(len(audio) / (segment_len * fs))
audio_segments = np.split(audio, num_segments)
basefilename = os.path.basename(files[i])
basename, ext = os.path.splitext(basefilename)
for j in range(len(audio_segments)):
newname = basename + '_' + str(j) + ext
destpath = os.path.join(dest_dir, newname)
audiowrite(destpath, audio_segments[j], fs)
def standardize_audio_size(audio, fs, input_len):
"""
Adjust audio size to be of size fs * self.input_len_second
If len(audio) > fs * self.input_len_second, sample a sub audio clip of size fs * self.input_len_second
If len(audio) < fs * self.input_len_second, pad the audio clip with itself
:param audio: np.array
:param fs: int, sampling rate
:param input_len: input len in second
:return:
"""
audio = np.tile(audio, np.ceil(fs * input_len / audio.shape[0]).astype('int32'))
    start_idx = np.random.randint(0, int(len(audio) - input_len * fs) + 1)
end_idx = start_idx + int(input_len * fs)
return audio[start_idx:end_idx]
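# Quick check of the behaviour described above (synthetic data, illustrative only):
# both a short and a long clip come back with exactly fs * input_len samples.
def _example_standardize_audio_size():
    fs = 16000
    short_clip = np.random.randn(int(1.5 * fs))
    long_clip = np.random.randn(20 * fs)
    for clip in (short_clip, long_clip):
        assert len(standardize_audio_size(clip, fs, input_len=9.0)) == int(9.0 * fs)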
def audio_logpowspec(audio, nfft=320, hop_length=160, sr=16000):
"""
Log-power specturm for each time window
:param audio: audio numpy array
:param nfft:
:param hop_length: int, window hop
:param sr: int, sample rate
:return: (time, freq) spectogram of size (nframes , 1 + nfft/2)
"""
powspec = (np.abs(librosa.core.stft(audio, n_fft=nfft, hop_length=hop_length)))**2
logpowspec = np.log10(np.maximum(powspec, 10**(-12)))
return logpowspec.T
def load_audio_file(clip_url, temp_folder='./temp', input_length=9.0, remove=True, standardize=True):
"""
:param clip_url: path to audio clip
:return: np.array, int sample rate
"""
os.makedirs(temp_folder, exist_ok=True)
local = True
if clip_url.startswith('https:'):
local = False
local_url = os.path.basename(clip_url)
local_url = os.path.join(temp_folder, local_url)
try:
local_name, _ = urlretrieve(clip_url, local_url)
print(f'Loading file {clip_url}')
        except Exception:
print(f'Error when reading file {clip_url}')
return None, None
else:
local_name = clip_url
audio, fs = sf.read(local_name)
if standardize:
audio = standardize_audio_size(audio, fs, input_length)
    if remove and not local:
os.remove(local_name)
return audio, fs
def audio_melspec(audio, n_mels=64, window_len=400, hop_length=160, sr=16000, center=True, window='hann'):
"""
MelLog-power specturm for each time window
:param audio: audio numpy array
:param window_len:
:param hop_length: int, window hop
:param sr: int, sample rate
:return: (time, freq) spectogram of size (nframes , 1 + nfft/2)
"""
n_fft = 2 ** int(np.ceil(np.log(window_len) / np.log(2.0)))
mel_spec = librosa.feature.melspectrogram(y=audio, sr=sr, n_fft=n_fft,
hop_length=hop_length,
n_mels=n_mels,
center=center,
win_length=window_len,
window=window
)
log_mel_spec = librosa.power_to_db(mel_spec, ref=1, amin=1e-10)
log_mel_spec = log_mel_spec.astype(np.float32)
return log_mel_spec.T
def get_one_zero_label(label_codes, tag_mapping, num_labels=521):
"""
Convert labels code into one-hot vector
:param label_codes:
:param tag_mapping: dictionary with key equal to label codes
    :param num_labels: int, length of the one-hot vector
:return:
"""
label_codes = label_codes.split(',')
labels_numerical = [int(tag_mapping[lab]['tag_numerical']) for lab in label_codes if lab in tag_mapping]
label_one_hot = np.zeros(num_labels)
for lab in labels_numerical:
label_one_hot[lab] = 1
return label_one_hot
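# Illustrative example: the tag code and mapping below are hypothetical, but show how
# a comma-separated code string becomes a one-hot vector (unknown codes are ignored).
def _example_get_one_zero_label():
    tag_mapping = {"/m/09x0r": {"tag_numerical": "0"}}    # hypothetical code -> index map
    one_hot = get_one_zero_label("/m/09x0r,/m/unknown", tag_mapping, num_labels=5)
    assert one_hot.tolist() == [1.0, 0.0, 0.0, 0.0, 0.0]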
def infer_mos(audio, fs, input_length, session_sig, session_bak_ovr):
"""
Compute mos_sig, mos_bak, mos_ovr predicted by models in session_sig, session_bak_ovr
:param audio:
:param fs:
:param input_length:
:param session_sig:
:param session_bak_ovr:
:return:
"""
num_hops = int(np.floor(len(audio) / fs) - input_length) + 1
hop_len_samples = fs
predicted_mos_sig_seg = []
predicted_mos_bak_seg = []
predicted_mos_ovr_seg = []
for idx in range(num_hops):
audio_seg = audio[int(idx * hop_len_samples): int((idx + input_length) * hop_len_samples)]
input_features = np.array(audio_logpowspec(audio=audio_seg, sr=fs)).astype('float32')[np.newaxis, :, :]
# sig predictions
onnx_inputs_sig = {inp.name: input_features for inp in session_sig.get_inputs()}
mos_sig = np.polynomial.polynomial.polyval(session_sig.run(None, onnx_inputs_sig), COEFS_SIG)
# bak_mos predicitions
onnx_inputs_bak_ovr = {inp.name: input_features[:, :-1, :] for inp in session_bak_ovr.get_inputs()}
mos_bak_ovr = session_bak_ovr.run(None, onnx_inputs_bak_ovr)
mos_bak = np.polynomial.polynomial.polyval(mos_bak_ovr[0][0][1], COEFS_BAK)
mos_ovr = np.polynomial.polynomial.polyval(mos_bak_ovr[0][0][2], COEFS_OVR)
predicted_mos_sig_seg.append(mos_sig)
predicted_mos_bak_seg.append(mos_bak)
predicted_mos_ovr_seg.append(mos_ovr)
return np.mean(predicted_mos_sig_seg), np.mean(predicted_mos_bak_seg), np.mean(predicted_mos_ovr_seg) |
import pandas as pd
# Kind of data handled by pandas
df = pd.DataFrame({
"Name": ["Jenny", "Little", "Owens"],
"Age": [23, 45, 67],
"Favouratie_color": ["Red", "Blue", "Black"]
})
# getting data from a specific column
name_data = df["Name"]
# getting the maximum number in the column
biggest_value = df["Age"].max()
# a single column of values can be defined directly as a pandas Series
person = pd.Series(["Tall", "beautiful", "Brave"], name="Name")
# reading and writing tabular data
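# For example (the file name here is just an illustration), a DataFrame can be
# written to CSV and read back:
df.to_csv("people.csv", index=False)
people = pd.read_csv("people.csv")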
|
# Imports -----------------------------------------------------------
import os
os.environ["PREFECT__FLOWS__CHECKPOINTING"] = "true"
from prefect import Flow, Parameter, task
from xpersist.prefect.result import XpersistResult
from cmip6_downscaling.config.config import ( # dask_executor,; kubernetes_run_config,; storage,
intermediate_cache_store,
results_cache_store,
serializer,
)
from cmip6_downscaling.methods.bcsd import (
fit_and_predict,
get_coarse_obs,
get_spatial_anomalies,
make_flow_paths,
postprocess_bcsd,
return_obs,
return_x_predict_rechunked,
return_x_train_full_time,
return_y_full_time,
)
# Transform Functions into Tasks -----------------------------------------------------------
target_naming_str = "{gcm}-{scenario}-{train_period_start}-{train_period_end}-{predict_period_start}-{predict_period_end}-{variable}.zarr"
make_flow_paths_task = task(make_flow_paths, log_stdout=True, nout=4)
return_obs_task = task(
return_obs,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="obs-ds",
)
get_coarse_obs_task = task(
get_coarse_obs,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="coarse-obs-ds",
)
get_spatial_anomalies_task = task(
get_spatial_anomalies,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="spatial-anomalies-ds-" + target_naming_str,
)
return_y_full_time_task = task(
return_y_full_time,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="y-full-time-" + target_naming_str,
)
return_x_train_full_time_task = task(
return_x_train_full_time,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="x-train-full-time-" + target_naming_str,
)
return_x_predict_rechunked_task = task(
return_x_predict_rechunked,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="x-predict-rechunked-" + target_naming_str,
)
fit_and_predict_task = task(
fit_and_predict,
result=XpersistResult(intermediate_cache_store, serializer=serializer),
target="fit-and-predict-" + target_naming_str,
)
postprocess_bcsd_task = task(
postprocess_bcsd,
log_stdout=True,
result=XpersistResult(results_cache_store, serializer=serializer),
target="postprocess-results-" + target_naming_str,
)
# Main Flow -----------------------------------------------------------
# with Flow(name="bcsd-testing", storage=storage, run_config=run_config) as flow:
# with Flow(name="bcsd-testing", storage=storage, run_config=kubernetes_run_config, executor=dask_executor) as flow:
with Flow(name="bcsd-testing") as flow:
gcm = Parameter("GCM")
scenario = Parameter("SCENARIO")
train_period_start = Parameter("TRAIN_PERIOD_START")
train_period_end = Parameter("TRAIN_PERIOD_END")
predict_period_start = Parameter("PREDICT_PERIOD_START")
predict_period_end = Parameter("PREDICT_PERIOD_END")
variable = Parameter("VARIABLE")
(
coarse_obs_path,
spatial_anomalies_path,
bias_corrected_path,
final_out_path,
) = make_flow_paths_task(
GCM=gcm,
SCENARIO=scenario,
TRAIN_PERIOD_START=train_period_start,
TRAIN_PERIOD_END=train_period_end,
PREDICT_PERIOD_START=predict_period_start,
PREDICT_PERIOD_END=predict_period_end,
VARIABLE=variable,
)
# preprocess_bcsd_tasks(s):
obs_ds = return_obs_task(train_period_start, train_period_end, variable)
coarse_obs_ds = get_coarse_obs_task(obs_ds, variable)
spatial_anomalies_ds = get_spatial_anomalies_task(
coarse_obs_ds,
obs_ds,
gcm,
scenario,
train_period_start,
train_period_end,
predict_period_start,
predict_period_end,
variable,
)
# prep_bcsd_inputs_task(s):
y_full_time_ds = return_y_full_time_task(
coarse_obs_ds,
gcm,
scenario,
train_period_start,
train_period_end,
predict_period_start,
predict_period_end,
variable,
)
x_train_full_time_ds = return_x_train_full_time_task(
y_full_time_ds,
gcm,
scenario,
train_period_start,
train_period_end,
predict_period_start,
predict_period_end,
variable,
)
x_predict_rechunked_ds = return_x_predict_rechunked_task(
x_train_full_time_ds,
gcm,
scenario,
train_period_start,
train_period_end,
predict_period_start,
predict_period_end,
variable,
)
# fit and predict tasks(s):
bias_corrected_ds = fit_and_predict_task(
x_train_full_time_ds,
y_full_time_ds,
x_predict_rechunked_ds,
gcm,
scenario,
train_period_start,
train_period_end,
predict_period_start,
predict_period_end,
variable,
)
# postprocess_bcsd_task(s):
postprocess_bcsd_ds = postprocess_bcsd_task(
bias_corrected_ds,
spatial_anomalies_ds,
gcm,
scenario,
train_period_start,
train_period_end,
predict_period_start,
predict_period_end,
variable,
)
|
import responses
from binance.spot import Spot as Client
from tests.util import mock_http_response
from tests.util import random_str
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
params = {"asset": ["LTC", "EOS"]}
def test_transfer_dust_without_asset():
    """Tests the API endpoint to transfer dust without an asset"""
client = Client(key, secret)
client.transfer_dust.when.called_with("").should.throw(ParameterRequiredError)
@mock_http_response(
responses.POST, "/sapi/v1/asset/dust\\?asset=LTC&asset=EOS", mock_item, 200
)
def test_transfer_dust():
"""Tests the API endpoint to transfer dust"""
client = Client(key, secret)
response = client.transfer_dust(**params)
response.should.equal(mock_item)
|
# -*- coding: utf-8 -*-
"""
Return/control aspects of the grains data
Grains set or altered with this module are stored in the 'grains'
file on the minions. By default, this file is located at: ``/etc/salt/grains``
.. Note::
This does **NOT** override any grains set in the minion config file.
"""
import collections
import logging
import math
import hubblestack.utils.data
import hubblestack.utils.json
from hubblestack.defaults import ( # pylint: disable=3rd-party-module-not-gated
DEFAULT_TARGET_DELIM,
)
__proxyenabled__ = ["*"]
# Seed the grains dict so cython will build
__grains__ = {}
# Change the default outputter to make it more readable
__outputter__ = {
"items": "nested",
"item": "nested",
"setval": "nested",
}
# http://stackoverflow.com/a/12414913/127816
_infinitedict = lambda: collections.defaultdict(_infinitedict)
_non_existent_key = "NonExistentValueMagicNumberSpK3hnufdHfeBUXCfqVK"
log = logging.getLogger(__name__)
def _serial_sanitizer(instr):
"""Replaces the last 1/4 of a string with X's"""
length = len(instr)
index = int(math.floor(length * 0.75))
return "{0}{1}".format(instr[:index], "X" * (length - index))
_FQDN_SANITIZER = lambda x: "MINION.DOMAINNAME"
_HOSTNAME_SANITIZER = lambda x: "MINION"
_DOMAINNAME_SANITIZER = lambda x: "DOMAINNAME"
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
"serialnumber": _serial_sanitizer,
"domain": _DOMAINNAME_SANITIZER,
"fqdn": _FQDN_SANITIZER,
"id": _FQDN_SANITIZER,
"host": _HOSTNAME_SANITIZER,
"localhost": _HOSTNAME_SANITIZER,
"nodename": _HOSTNAME_SANITIZER,
}
def get(key, default="", delimiter=DEFAULT_TARGET_DELIM, ordered=True):
"""
Attempt to retrieve the named value from grains, if the named value is not
available return the passed default. The default return is an empty string.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict in grains looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
:param delimiter:
Specify an alternate delimiter to use when traversing a nested dict.
This is useful for when the desired key contains a colon. See CLI
example below for usage.
.. versionadded:: 2014.7.0
:param ordered:
Outputs an ordered dict if applicable (default: True)
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' grains.get pkg:apache
salt '*' grains.get abc::def|ghi delimiter='|'
"""
if ordered is True:
grains = __grains__
else:
grains = hubblestack.utils.json.loads(hubblestack.utils.json.dumps(__grains__))
return hubblestack.utils.data.traverse_dict_and_list(grains, key, default, delimiter)
def has_value(key):
"""
Determine whether a key exists in the grains dictionary.
Given a grains dictionary that contains the following structure::
{'pkg': {'apache': 'httpd'}}
One would determine if the apache key in the pkg dict exists by::
pkg:apache
CLI Example:
.. code-block:: bash
salt '*' grains.has_value pkg:apache
"""
return (
hubblestack.utils.data.traverse_dict_and_list(__grains__, key, KeyError)
is not KeyError
)
def items(sanitize=False):
"""
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
"""
if hubblestack.utils.data.is_true(sanitize):
out = dict(__grains__)
for key, func in _SANITIZERS.items():
if key in out:
out[key] = func(out[key])
return out
else:
return __grains__
def item(*args, **kwargs):
"""
Return one or more grains
CLI Example:
.. code-block:: bash
salt '*' grains.item os
salt '*' grains.item os osrelease oscodename
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.item host sanitize=True
"""
ret = {}
default = kwargs.get("default", "")
delimiter = kwargs.get("delimiter", DEFAULT_TARGET_DELIM)
try:
for arg in args:
ret[arg] = hubblestack.utils.data.traverse_dict_and_list(
__grains__, arg, default, delimiter
)
except KeyError:
pass
if hubblestack.utils.data.is_true(kwargs.get("sanitize")):
for arg, func in _SANITIZERS.items():
if arg in ret:
ret[arg] = func(ret[arg])
return ret
def ls(): # pylint: disable=C0103
"""
Return a list of all available grains
CLI Example:
.. code-block:: bash
salt '*' grains.ls
"""
return sorted(__grains__)
def equals(key, value):
"""
Used to make sure the minion's grain key/value matches.
Returns ``True`` if matches otherwise ``False``.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' grains.equals fqdn <expected_fqdn>
salt '*' grains.equals systemd:version 219
"""
return str(value) == str(get(key))
# Provide a jinja function call compatible get aliased as fetch
fetch = get
|
#############################################################################
# Copyright (c) 2007-2016 Balabit
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# As an additional exemption you are allowed to compile & link against the
# OpenSSL libraries as published by the OpenSSL project. See the file
# COPYING for details.
#
#############################################################################
import os
def is_running_in_build_tree():
return 'SYSLOG_NG_BINARY' not in os.environ
def get_module_path_from_binary():
module_path = os.popen("%s --version | grep \"Module-Path:\" | cut -d ' ' -f 2" % get_syslog_ng_binary(), 'r').read().strip()
return module_path
def format_module_path_for_intree_modules():
module_path = ''
for (root, dirs, files) in os.walk(os.path.abspath(os.path.join(os.environ['top_builddir'], 'modules'))):
module_path += ':'.join(map(lambda x: root + '/' + x + '/.libs', dirs))
break
return module_path
def get_module_path():
if is_running_in_build_tree():
module_path = format_module_path_for_intree_modules()
else:
module_path = get_module_path_from_binary()
return module_path
def get_syslog_ng_binary():
return os.getenv('SYSLOG_NG_BINARY', '../../syslog-ng/syslog-ng')
def is_premium():
version = os.popen('%s -V' % get_syslog_ng_binary(), 'r').read()
if version.find('premium-edition') != -1:
return True
return False
def has_module(module):
avail_mods = os.popen('%s -V | grep ^Available-Modules: ' % get_syslog_ng_binary(), 'r').read()
if avail_mods.find(module) != -1:
return True
return False
is_premium_edition = is_premium()
if is_premium_edition:
logstore_store_supported = True
wildcard_file_source_supported = True
else:
logstore_store_supported = False
wildcard_file_source_supported = False
port_number = os.getpid() % 30000 + 33000
ssl_port_number = port_number + 1
port_number_syslog = port_number + 2
port_number_network = port_number + 3
current_dir = os.getcwd()
try:
src_dir = os.environ["srcdir"]
except KeyError:
src_dir = current_dir
|
from disnake.appinfo import *
from disnake.appinfo import __dict__ as __original_dict__
locals().update(__original_dict__)
|
# This code will get all the odd birthdays and print it
birthdays = [12, 4, 21, 11, 24] # O(1)
odd_birthdays = [] # O(1)
for birthday in birthdays: # O(n)
if birthday % 2 == 1: # O(1)*O(n) = O(n)
odd_birthdays.append(birthday) # O(1)*O(n) = O(n)
print(odd_birthdays) # O(1)
# Sum = O(1) + O(1) + O(n) + O(n) + O(n) + O(1)
# Sum = 3*O(1) + 3*O(n)
# Final Running Time = O(n)
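# The same filter written as a list comprehension still makes a single pass over
# the list, so the overall running time is unchanged: O(n).
odd_birthdays_comprehension = [b for b in birthdays if b % 2 == 1]  # O(n)
print(odd_birthdays_comprehension)  # O(1)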
|
# https://leetcode.com/problems/two-sum/
# Given an array of integers nums and an integer target,
# return indices of the two numbers such that they add up to target.
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
# You can return the answer in any order.
import pytest
class TwoSum(object):
def twoSum(self, nums: list[int], target: int) -> list[int]:
for i, val_i in enumerate(nums):
for j in range(i + 1, len(nums)):
if val_i + nums[j] == target:
return [i, j]
return []
def twoSum_with_O_N(self, nums: list[int], target: int) -> list[int]:
result = []
dict_nums = {}
dict_index = {}
num = nums[0]
for i, val in enumerate(nums):
try:
dict_nums[val] += 1
dict_index[val].append(i)
except KeyError:
dict_nums[val] = 1
dict_index[val] = [i]
for i in dict_nums:
if target - i == i:
if dict_nums[i] == 2:
num = i
return dict_index[num]
else:
continue
if target - i in dict_nums:
num = i
num2 = target - num
return [dict_index[num][0], dict_index[num2][0]]
return result
print(TwoSum().twoSum_with_O_N([1, 2, 2, 5], 4))
print(TwoSum().twoSum_with_O_N([3, 3], 6))
print(TwoSum().twoSum_with_O_N([2, 7, 11, 15], 9))
print(TwoSum().twoSum_with_O_N([3, 2, 4], 6))
@pytest.mark.parametrize(
("nums", "target", "expected"),
[([1, 2, 2, 5], 4, [1, 2]), ([1, 2, 3, 4], 7, [2, 3])],
)
def test_basic(nums: list, target: int, expected: list):
assert expected == TwoSum().twoSum(nums, target)
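# A common alternative (not part of the class above): a single pass that stores each
# value's index in a dict and looks up the complement, also O(n) time.
def two_sum_one_pass(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for i, val in enumerate(nums):
        if target - val in seen:
            return [seen[target - val], i]
        seen[val] = i
    return []

@pytest.mark.parametrize(
    ("nums", "target", "expected"),
    [([2, 7, 11, 15], 9, [0, 1]), ([3, 3], 6, [0, 1])],
)
def test_two_sum_one_pass(nums: list, target: int, expected: list):
    assert expected == two_sum_one_pass(nums, target)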
|
from __future__ import absolute_import, division, print_function
import sys
import os
import shutil
from libtbx import subversion
from libtbx.option_parser import option_parser
import libtbx.file_clutter
def clean_clutter_in(files, tabsize=8):
if not files: return
for fname in files:
tmpname = fname + '.bak'
if os.path.isfile(tmpname):
print("found temporary file {temp}, ignoring {original}.".format(temp=tmpname, original=fname))
continue
if os.path.isfile(fname):
try:
print(fname)
with open(fname, 'rb') as ifh, open(tmpname, 'wb') as ofh:
# explicitly convert Windows linebreaks into Unix linebreaks
lines = ifh.read().replace(b'\r\n', b'\n').split(b'\n')
n_empty = 0
for line in lines:
clean_line = line.expandtabs(tabsize).rstrip()
if clean_line:
ofh.write(b"\n" * n_empty + clean_line + b"\n")
n_empty = 0
else:
n_empty += 1
shutil.move(tmpname, fname)
except: # intentional
# to trap KeyboardInterrupt, too
os.remove(tmpname)
raise
def isort(path):
# Potential ImportErrors are caught upstream
import mock
from isort.main import main
return # Disable isort pending resolution of https://github.com/timothycrosley/isort/issues/606
with mock.patch.object(sys, 'argv', ['isort', '-y', '-ac', '-vb']):
oldcwd = os.getcwd()
try:
os.chdir(path)
main()
finally:
os.chdir(oldcwd)
def run():
opt_parser = (option_parser(
usage="""
clean_clutter [-t n | --tabsize=n] file1 file2 ...
clean_clutter [-t n | --tabsize=n] [directory]
clean_clutter [-t n | --tabsize=n] [--committing|-c]""",
description="""The first form cleans the specified files whereas the second
form cleans all files in the hierarchy rooted in the given directory or
the current directory if none is given.
The -c option restricts cleaning to those files which would be committed
by running svn commit.""")
.option("-t", "--tabsize",
action="store",
type="int",
default=8,
help="the number of spaces a tab is to be replaced by",
metavar="INT")
.option("-c", "--committing",
action="store_true",
default=False,
help="whether to clean the files which are to be committed")
)
command_line = opt_parser.process(args=sys.argv[1:])
co = command_line.options
files = command_line.args
if co.committing and files:
opt_parser.show_help()
exit(1)
run_isort_in_path = False
if co.committing:
try:
files = list(subversion.marked_for_commit())
except RuntimeError as err:
print(err)
exit(1)
else:
if len(files) <= 1:
if not files: dir = '.'
else: dir = files[0]
files = [ c.path for c in libtbx.file_clutter.gather([dir])
if c.is_cluttered(flag_x=False) ]
if os.path.exists(os.path.join(dir, '.isort.cfg')):
run_isort_in_path = dir
clean_clutter_in(files, tabsize=co.tabsize)
if run_isort_in_path:
try:
isort(run_isort_in_path)
except Exception as e:
print("Did not run isort (%s)" % str(e))
if __name__ == "__main__":
run()
|
from django.conf.urls import url, include
from rest_framework import routers
from .views import PlayerViewSet, TeamViewSet, CoachViewSet
router = routers.DefaultRouter()
router.register(r'players', PlayerViewSet)
router.register(r'teams', TeamViewSet)
router.register(r'coach', CoachViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 19:28:17 2019
@author: Wanling Song
"""
import mdtraj as md
import numpy as np
import pandas as pd
import argparse
import sys
from collections import defaultdict
import pickle
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sns
from matplotlib.ticker import MultipleLocator
from scipy.optimize import curve_fit
from scipy.sparse import coo_matrix
from scipy import sparse
from statsmodels.nonparametric.kernel_density import KDEMultivariate
import community
import warnings
from shutil import copyfile
import datetime
from itertools import product
import logomaker
import re
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')
np.seterr(all='ignore')
###################################
###### Parameter settings #######
###################################
parser = argparse.ArgumentParser()
parser.add_argument("-f", nargs="+", metavar="./run/md.xtc", help="List of trajectories, seperated by space, \
Supports xtc, gro format. Used by mdtraj.load()")
parser.add_argument("-c", nargs="+", metavar="./run/system.gro", \
help="List of coordinates of trajectory, in the same order as -f, required when inputs of -f are xtc trajectories, \
Supported format: gro, pdb, etc., Used by mdtraj.load()")
parser.add_argument("-stride", default=1, metavar=1, help="Striding through trajectories. Only every stride-th will be analized." )
parser.add_argument("-dt", default=None, metavar="None", help="The time interval between two adjacent frames in the trajectories. \
If not specified, the mdtraj will deduce from the trajectories. This works for trajectories in format of e.g. xtc which \
include timestep information. For trajectories in dcd format, users have to provide the time interval manually, \
in a time unit consistent with -tu")
parser.add_argument("-tu", default="us", choices=["ns", "us"], metavar="us", \
help="Time unit for interaction duration calculation. Available options: ns, us. This will affect the unit of koff as well.")
parser.add_argument("-save_dir", default=None, metavar="None", help="The directory where all the generated results will be put in. \
The directory will be created if not existing. Using the current working directory if not specified.")
parser.add_argument("-cutoffs", nargs=2, default=(0.55, 1.0), metavar=(0.55, 1.0), \
help="Double cutoff seperated by space. In unit of nm. Default is 0.55 1.0. The double cutoffs are used to define lipid \
interactions. A continuous lipid contact with a given residue starts when the lipid moves to the given residue \
closer than the smaller cutoff; and ends when the lipid moves farther than the larger cutoff. The standard single \
cutoff can be achieved by setting the same value for both cutoffs.")
parser.add_argument("-lipids", nargs="+", metavar="POPC", default="POPC CHOL POP2", \
help="Lipid species to check, seperated by space. Should be consistent with residue names in your trajectories.")
parser.add_argument("-lipid_atoms", nargs="+", metavar="PO4", default=None, \
help="Lipid atoms to check, seperated by space. Should be consistent with the atom names in your trajectories.")
parser.add_argument("-radii", nargs="+", default=None, metavar="BB:0.26 SC1:0.23", help="Change/Define the radius of atoms/beads \
that is used for the calculation of binding site surface area. Values need to be in the unit of nm. Supported syntax is \
BB:0.26, which defines the radius of bead BB as 0.26 nm, or CA:0.12 which defines the radius of atom CA as 0.12 nm. For \
atomistic simulations, the default radii are taken from \
mdtraj https://github.com/mdtraj/mdtraj/blob/master/mdtraj/geometry/sasa.py#L56. For coarse-grained \
simulations, this script defines the radius of the MARTINI 2 beads of BB as 0.26 nm and SC1/2/3 as 0.23 nm.")
parser.add_argument("-nprot", default=1, metavar="1", \
help="num. of proteins (or chains) in the simulation system. The calculated results will be averaged among these proteins \
(or chains). The proteins (or chains) need to be identical, otherwise the averaging will fail.")
parser.add_argument("-resi_offset", default=0, metavar="0", help="Shifting the residue index. It is useful if you need to change the residue \
index in your trajectories. For example, to change the residue indices from 5,6,7,..., to 10,11,12,..., use -resi_offset 4. \
All the outputs, including plotted figures and saved coordinates, will be changed by this.")
parser.add_argument("-resi_list", nargs="+", default=[], metavar="1-10 20-30", help="The indices of residues on which the calculations are done. \
This option is useful for those proteins with large regions that don't require calculation. Skipping those calculations could \
save time and memory. Accepted syntax includes 1/ defining a range, like 1-10 (both ends included); 2/ single residue index, \
like 25 26 17. All the selections are separated by space. For example, -resi_list 1-10 20-30 40 45 46 means selecting \
residues 1-10, 20-30, 40, 45 and 46 for calculation. The residue indices are not affected by -resi_offset, i.e. they \
should be consistent with the indices in your trajectories.")
parser.add_argument("-chain_breaks", nargs="+", default=[], metavar="100 281 420", help="Start a new chain at the X-th residue (starting at 1) in \
the trajectory topology. This identifier is independent of the residue index but checks the residue order in the topology. \
Multiple chain breaks are supported. This option is useful when the simulation system contains \
multiple different chains, or users want to see the difference between chains even if these chains are identical. Using this flag \
will generate seperate figures for each of the chains. But the binding site detection will still treat the proteins in the \
system collectively, i.e. those binding sites that cover multiple chains will be identified.")
parser.add_argument("-nbootstrap", default=10, metavar=10, help="The number of samples for bootstrapping the calcultion of koff. \
The default is 10. The larger the number, the more time-consuming the calculation will be. The closer the bootstrapped \
residence time/koffs are to the original values, the more reliable those original values are. The bootstrapped results \
are plotted in each of the koff plots and plotted alongside the original values in the figure showing residence time.")
parser.add_argument("-save_dataset", nargs="?", default=True, const=True, metavar="True", help="Save dataset in Pickle. Default is True")
parser.add_argument("-gen_binding_poses", default=5, metavar=5, help="The num. of top-scored lipid binding poses to be generated for each binding \
site. The default is 5. A scoring function is generated for each binding site based on the sum of the probability density function of each atom/bead \
of the lipid molecule. Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule. The weight function Weight(atom_i) \
is specified by the flag -score_weights.")
parser.add_argument("-save_pose_format", default="gro", metavar="gro", help="The format the generated lipid binding poses are written into. This function \
is carried out by mdtraj.save(), hence it supports the formats available in mdtraj.")
parser.add_argument("-score_weights", nargs="+", default=None, metavar="PO4:1 C1:1", help="The weight of each of the lipid atom/bead contributes to the scoring function. \
Top-rated lipid binding poses can be generated based on users' specification. The bounds poses of each binding site are scored based \
on the scoring function Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule.")
parser.add_argument("-letter_map", nargs="+", default=None, metavar="ARG:K GLY:G", help="Map the three-letter amino acids to one letter. This map is \
used in making logomaker figures (https://logomaker.readthedocs.io/en/latest/). The common 20 amino acids are defined \
by this script. Users need to use this flag to define maps for uncommon amino acids in their systems.")
parser.add_argument("-pdb", default=None, metavar="None", help="Provide a PDB structure onto which the binding site information will be mapped. \
Using this flag will generate a 'show_binding_sites_info.py' file in the -save_dir directory, which allows users to check the \
mapped binding site information in PyMol. Users can run the generated script by 'python show_binding_sites_info.py' \
to open such a PyMol session.")
parser.add_argument("-pymol_gui", nargs="?", default=True, const=True, metavar="True", help="Show the PyMol session of binding site information \
at the end of the calculation. Needs to be used in conjunction with -pdb.")
args = parser.parse_args(sys.argv[1:])
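##########################################################################
# Example invocation (a sketch only): the script and file names below are
# placeholders, and the flag spellings are inferred from the args.<name>
# attributes used later in this script.
#   python pylipid_script.py -f run1.xtc run2.xtc -c run1.gro run2.gro \
#       -lipids POPC CHOL -cutoffs 0.55 1.0 -nprot 1 -tu us \
#       -save_dir Interaction_results -nbootstrap 10 -resi_offset 0
##########################################################################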
##########################################
########## assisting functions ###########
##########################################
def get_atom_index_for_lipid(lipid, traj, part=None):
whole_atom_index = traj.top.select("resname {}".format(lipid))
if part != None:
parts_atom_index = [traj.topology.atom(idx).index for idx in whole_atom_index if traj.topology.atom(idx).name in part]
return parts_atom_index
else:
return whole_atom_index
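# Minimal usage sketch for get_atom_index_for_lipid(); not called anywhere in this
# script, and the file names and the "PO4" bead name are hypothetical examples.
def _example_headgroup_selection(trajfile="md.xtc", topfile="md.gro"):
    """Select only the PO4 beads of POPC, e.g. to restrict contacts to lipid head groups."""
    traj = md.load(trajfile, top=topfile)
    return get_atom_index_for_lipid("POPC", traj, part=["PO4"])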
class Durations():
def __init__(self, contact_residues_low, contact_residue_high, dt):
self.contact_low = contact_residues_low
self.contact_high = contact_residue_high
self.dt = dt
def cal_duration(self):
self.pointer = [np.zeros_like(self.contact_high[idx], dtype=int) for idx in range(len(self.contact_high))]  # np.int was removed from newer numpy; use the builtin int
durations = []
for i in range(len(self.contact_low)):
for j in range(len(self.contact_low[i])):
pos = np.where(self.contact_high[i] == self.contact_low[i][j])[0][0]
if self.pointer[i][pos] == 0:
durations.append(self.get_duration(i, pos))
if len(durations) == 0:
return [0]
else:
return durations
def get_duration(self, i, j):
count = 1
self.pointer[i][j] = 1
lipid_to_search = self.contact_high[i][j]
for k in range(i+1, len(self.contact_high)):
locations = np.where(self.contact_high[k] == lipid_to_search)[0]
if len(locations) == 0:
return count * self.dt
else:
pos = locations[0]
self.pointer[k][pos] = 1
count +=1
return (count - 1) * self.dt
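# Toy illustration of the dual-cutoff scheme implemented by Durations (not used by the
# actual calculation): a lipid is counted as bound from the frame it enters the lower
# cutoff until the frame it leaves the upper cutoff.
def _example_duration_scheme():
    contact_low = [np.array([0]), np.array([], dtype=int), np.array([], dtype=int)]   # lipid 0 inside the lower cutoff in frame 0 only
    contact_high = [np.array([0]), np.array([0]), np.array([], dtype=int)]            # ... but inside the upper cutoff in frames 0 and 1
    return Durations(contact_low, contact_high, dt=1.0).cal_duration()                # -> [2.0]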
def cal_interaction_intensity(contact_residues_high):
"""
The probablily of finding the lipids around the selected residue plus the number of
lipids found around the selected residue, the average number of lipid per contact
"""
contact_counts = [len(item) for item in contact_residues_high]
mask = np.array(contact_counts) > 0
contact_counts_nonzero = np.array(contact_counts)[mask]
return 100 * len(contact_counts_nonzero)/len(contact_residues_high), np.nan_to_num(contact_counts_nonzero.mean())
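# Quick sanity check for cal_interaction_intensity() with toy data: 4 frames, lipids
# within the upper cutoff in 2 of them (occupancy 50%), with 1 and 3 lipids respectively.
def _example_interaction_intensity():
    contact_high = [[5], [], [1, 2, 7], []]
    return cal_interaction_intensity(contact_high)   # -> (50.0, 2.0)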
def cal_sigma(durations, num_of_lipids, T_total, delta_t_range):
sigma = {}
for delta_t in delta_t_range:
if delta_t == 0:
sigma[delta_t] = 1
# the delta_t = 0 entry defines the normalisation factor sigma0, so delta_t_range must start at 0
sigma0 = float(sum([restime - delta_t for restime in durations if restime >= delta_t])) / ((T_total - delta_t) * num_of_lipids)
else:
try:
sigma[delta_t] = float(sum([restime - delta_t for restime in durations if restime >= delta_t])) / ((T_total - delta_t) * num_of_lipids * sigma0)
except ZeroDivisionError:
sigma[delta_t] = 0
return sigma
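# cal_sigma() builds a normalised survival function sigma(delta_t) from the collected
# contact durations; the delta_t = 0 entry is pinned to 1. Toy numbers for illustration:
def _example_sigma():
    durations = [2.0, 6.0]                       # two binding events in a 10-time-unit trajectory
    return cal_sigma(durations, num_of_lipids=1, T_total=10.0, delta_t_range=[0, 2.0, 4.0])
    # -> {0: 1, 2.0: 0.625, 4.0: 0.41666...}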
def cal_restime_koff(sigma, initial_guess):
"""
fit the exponential curve y=A*e^(-k1*x)+B*e^(-k2*x)
"""
delta_t_range = list(sigma.keys())
delta_t_range.sort() # x
hist_values = np.nan_to_num([sigma[delta_t] for delta_t in delta_t_range]) # y
try:
popt, pcov = curve_fit(bi_expo, np.array(delta_t_range, dtype=np.float128), np.array(hist_values, dtype=np.float128), p0=initial_guess, maxfev=100000)
n_fitted = bi_expo(np.array(delta_t_range, dtype=np.float128), *popt)
r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2)
ks = [abs(k) for k in popt[:2]]
koff = np.min(ks)
restime = 1/koff
except RuntimeError:
koff = 0
restime = 0
r_squared = 0
popt = [0, 0, 0, 0]
return restime, koff, r_squared, popt
def bi_expo(x, k1, k2, A, B):
return A*np.exp(-k1*x) + B*np.exp(-k2*x)
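# Sketch of how cal_sigma(), cal_restime_koff() and bi_expo() chain together, using
# synthetic exponentially distributed durations (arbitrary seed and parameters; with a
# single-exponential input the bi-exponential fit usually, but not always, recovers the
# input rate as the smaller fitted k).
def _example_koff_fit(koff_true=2.0, n_events=500, T_total=100.0):
    rng = np.random.RandomState(0)
    durations = rng.exponential(1.0 / koff_true, size=n_events)
    delta_t_range = np.arange(0, 5.0, 0.05)
    sigma = cal_sigma(durations, num_of_lipids=n_events, T_total=T_total, delta_t_range=delta_t_range)
    restime, koff, r_squared, params = cal_restime_koff(sigma, initial_guess=(1., 1., 1., 1.))
    return restime, koff, r_squared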
def check_dir(save_dir, suffix=None):
if save_dir == None:
save_dir = os.getcwd()
else:
save_dir = os.path.abspath(save_dir)
if suffix != None:
save_dir = os.path.join(save_dir, suffix)
if not os.path.isdir(save_dir):
print("Creating new director: {}".format(save_dir))
os.makedirs(save_dir)
return save_dir
def sparse_corrcoef(A, B=None):
if B is not None:
A = sparse.vstack((A, B), format='csr')
A = A.astype(np.float64)
n = A.shape[1]
# Compute the covariance matrix
rowsum = A.sum(1)
centering = rowsum.dot(rowsum.T.conjugate()) / n
C = (A.dot(A.T.conjugate()) - centering) / (n - 1)
# The correlation coefficients are given by
# C_{i,j} / sqrt(C_{i} * C_{j})
d = np.diag(C)
coeffs = C / np.sqrt(np.outer(d, d))
return coeffs
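# Self-check for sparse_corrcoef(): on a small matrix it should agree with np.corrcoef
# (rows are variables, columns are observations). `sparse` is the scipy.sparse module
# already imported for sparse_corrcoef() itself.
def _example_sparse_corrcoef():
    dense = np.array([[1., 2., 3., 4.],
                      [2., 4., 6., 8.],
                      [4., 3., 2., 1.]])
    return np.allclose(sparse_corrcoef(sparse.csr_matrix(dense)), np.corrcoef(dense))   # -> True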
#####################################
####### Main Class object ###########
#####################################
class LipidInteraction():
def __init__(self, trajfile_list, grofile_list=None, stride=1, dt=None, cutoff=[0.55, 1.0], \
lipid="POPC", lipid_atoms=None, nprot=1, resi_list=[], resi_offset=0, save_dir=None, timeunit="us"):
if grofile_list != None:
assert len(trajfile_list) == len(grofile_list), \
"List of coordinates should be in the same order and length of list of trajectories!"
self.save_dir = check_dir(save_dir)
self.trajfile_list = trajfile_list
self.grofile_list = grofile_list
self.dt = dt
self.nrepeats = len(self.trajfile_list)
self.cutoff = np.sort(np.array(cutoff, dtype=float))
self.lipid = lipid
self.lipid_atoms = lipid_atoms
self.nprot = int(nprot)
self.timeunit = timeunit
self.koff = {}
self.sigmas = {}
self.params = {}
self.r_squared = {}
self.res_time = {}
self.koff_b = {}
self.koff_b_cv = {}
self.res_time_b = {}
self.res_time_b_cv = {}
self.r_squared_b = {}
self.interaction_duration = defaultdict(list)
self.interaction_occupancy = defaultdict(list)
self.lipid_count = defaultdict(list)
self.contact_residues_high = defaultdict(list)
self.contact_residues_low = defaultdict(list)
self.stride = int(stride)
self.resi_offset = resi_offset
self.resi_list = resi_list
self.residue_set = []
self._protein_ref = None
self._lipid_ref = None
return
def _get_traj_stats(self, traj, lipid, lipid_atoms):
lipid_atom_indices = traj.top.select("resn {}".format(self.lipid))
lipid_resi_indices = set()
for atom in lipid_atom_indices:
lipid_resi_indices.add(traj.top.atom(atom).residue.index)
num_of_lipids = len(lipid_resi_indices)
lipid_resi_indices = list(lipid_resi_indices)
lipid_resi_indices.sort()
lipid_resi_indices_original = lipid_resi_indices
if self._lipid_ref == None:
one_lipid_indices = []
for lipid_id in np.sort(traj.top.select("resn {}".format(self.lipid))):
if len(one_lipid_indices) == 0:
one_lipid_indices.append(lipid_id)
elif traj.top.atom(lipid_id).residue.index != traj.top.atom(one_lipid_indices[-1]).residue.index:
break
else:
one_lipid_indices.append(lipid_id)
self._lipid_ref = traj[0].atom_slice(np.unique(one_lipid_indices))
if lipid_atoms != None:
lipid_haystack = get_atom_index_for_lipid(lipid, traj, part=lipid_atoms)
selected_atom_indices = np.hstack([traj.top.select("protein"), lipid_haystack])
new_xyz = [frame[selected_atom_indices] for frame in traj.xyz]
reduced_frame = traj[0].atom_slice(selected_atom_indices)
reduced_top = reduced_frame.top
new_traj = md.Trajectory(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.unitcell_lengths, \
unitcell_angles=traj.unitcell_angles)
lipid_resi_indices = [new_traj.top.atom(new_traj.top.select("protein")[-1]).residue.index+1+idx \
for idx in np.arange(num_of_lipids)]
else:
new_traj = traj
all_protein_atom_indices = new_traj.top.select("protein")
natoms_per_protein = int(len(all_protein_atom_indices)/self.nprot)
prot_atom_indices = all_protein_atom_indices[:natoms_per_protein]
nresi_per_protein = new_traj.top.atom(prot_atom_indices[-1]).residue.index - \
new_traj.top.atom(prot_atom_indices[0]).residue.index + 1
selected_protein_resi_set = []
if len(self.resi_list) == 0:
residue_set = ["{}{}".format(new_traj.top.residue(resi).resSeq+self.resi_offset, new_traj.top.residue(resi).name) \
for resi in np.arange(new_traj.top.atom(prot_atom_indices[0]).residue.index, \
new_traj.top.atom(prot_atom_indices[-1]).residue.index + 1)]
residue_set = np.array(residue_set, dtype=str) # residue id in structure instead of builtin index in mdtraj
for protein_idx in range(self.nprot):
selected_protein_resi_set.append(np.unique([new_traj.top.atom(atom_idx).residue.index \
for atom_idx in \
all_protein_atom_indices[protein_idx*natoms_per_protein:(protein_idx+1)*natoms_per_protein]]))
elif len(self.resi_list) > 0:
resi_list = np.sort(np.array(np.hstack(self.resi_list), dtype=int))
for protein_idx in range(self.nprot):
selected_protein_resi_set.append(np.unique([new_traj.top.atom(atom_idx).residue.index \
for atom_idx in \
all_protein_atom_indices[protein_idx*natoms_per_protein:(protein_idx+1)*natoms_per_protein] \
if new_traj.top.atom(atom_idx).residue.resSeq in resi_list]))
residue_set = ["{}{}".format(new_traj.top.residue(resi).resSeq+self.resi_offset, new_traj.top.residue(resi).name) \
for resi in selected_protein_resi_set[0]]
residue_set = np.array(residue_set, dtype=str)
if self._protein_ref == None:
self._protein_ref = new_traj[0].atom_slice(prot_atom_indices)
self._selected_residue_indices = selected_protein_resi_set[0]
return new_traj, {"natoms_per_protein": natoms_per_protein, "nresi_per_protein": nresi_per_protein,
"selected_protein_resi_set": selected_protein_resi_set,
"residue_set": residue_set, "num_of_lipids": num_of_lipids,
"lipid_resi_indices": lipid_resi_indices, "lipid_resi_indices_original": lipid_resi_indices_original}
def cal_interactions(self, save_dir=None, save_dataset=True, nbootstrap=10):
if save_dir == None:
self.save_dir = check_dir(self.save_dir, "Interaction_{}".format(self.lipid))
else:
self.save_dir = check_dir(save_dir, "Interaction_{}".format(self.lipid))
with open("{}/calculation_log_{}.txt".format(self.save_dir, self.lipid), "w") as f:
f.write("###### Lipid: {}\n".format(self.lipid))
f.write("###### Lipid Atoms: {}\n".format(self.lipid_atoms))
f.write("###### Cutoffs: {}\n".format(self.cutoff))
f.write("###### nprot: {}\n".format(self.nprot))
f.write("###### Trajectories:\n")
for traj_fn in self.trajfile_list:
f.write(" {}\n".format(traj_fn))
f.write("###### Coordinates:\n")
for gro_fn in self.grofile_list:
f.write(" {}\n".format(gro_fn))
f.write("\n")
row = []
col = []
data = []
self.num_of_lipids = []
self.lipid_resi_set = []
self.T_total = []
self.timesteps = []
self.nresi_per_protein = []
ncol_start = 0
for traj_idx, trajfile in enumerate(self.trajfile_list):
print("\n########## Start calculation of {} interaction in \n########## {} \n".format(self.lipid, self.trajfile_list[traj_idx]))
f.write("\n###### Start calculation of {} interaction in \n###### {} \n".format(self.lipid, self.trajfile_list[traj_idx]))
traj = md.load(trajfile, top=self.grofile_list[traj_idx], stride=self.stride)
if self.dt == None:
timestep = traj.timestep/1000000.0 if self.timeunit == "us" else traj.timestep/1000.0
else:
timestep = float(self.dt * self.stride)
self.T_total.append((traj.n_frames - 1) * timestep)
self.timesteps.append(timestep)
new_traj, traj_stats = self._get_traj_stats(traj, self.lipid, self.lipid_atoms)
self.num_of_lipids.append(traj_stats["num_of_lipids"])
self.lipid_resi_set.append(traj_stats["lipid_resi_indices_original"])
self.nresi_per_protein.append(len(traj_stats["residue_set"]))
self.residue_set = traj_stats["residue_set"] if len(traj_stats["residue_set"]) > len(self.residue_set) else self.residue_set
ncol_per_protein = traj_stats["num_of_lipids"] * new_traj.n_frames
for idx_protein in np.arange(self.nprot):
for resid, (residue_index, residue) in enumerate(zip(traj_stats["selected_protein_resi_set"][idx_protein], traj_stats["residue_set"])):
pairs = list(product([residue_index], traj_stats["lipid_resi_indices"]))
dist_matrix_resi, _ = md.compute_contacts(new_traj, pairs, scheme="closest", periodic=True)
contact_residues_low = [[] for dummy in np.arange(new_traj.n_frames)]
contact_residues_high = [[] for dummy in np.arange(new_traj.n_frames)]
frame_id_set_low, lipid_id_set_low = np.where(dist_matrix_resi <= self.cutoff[0])
frame_id_set_high, lipid_id_set_high = np.where(dist_matrix_resi <= self.cutoff[1])
for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low):
contact_residues_low[frame_id].append(int(lipid_id))
for frame_id, lipid_id in zip(frame_id_set_high, lipid_id_set_high):
contact_residues_high[frame_id].append(int(lipid_id))
col.append([ncol_start + ncol_per_protein*idx_protein + lipid_id*new_traj.n_frames + \
frame_id for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low)])
contact_low = [np.array(contact, dtype=int) for contact in contact_residues_low]
contact_high = [np.array(contact, dtype=int) for contact in contact_residues_high]
row.append([resid for dummy in np.arange(len(frame_id_set_low))])
data.append(dist_matrix_resi[frame_id_set_low, lipid_id_set_low])
self.contact_residues_high[resid].append(contact_high)
self.contact_residues_low[resid].append(contact_low)
self.interaction_duration[residue].append(Durations(contact_low, contact_high, timestep).cal_duration())
occupancy, lipidcount = cal_interaction_intensity(contact_high)
self.interaction_occupancy[residue].append(occupancy)
self.lipid_count[residue].append(lipidcount)
ncol_start += ncol_per_protein * self.nprot
###############################################
###### get some statistics for this traj ######
###############################################
durations = np.array([np.concatenate(self.interaction_duration[residue][-self.nprot:]).mean() for residue in traj_stats["residue_set"]])
duration_arg_idx = np.argsort(durations)[::-1]
occupancies = np.array([np.mean(self.interaction_occupancy[residue][-self.nprot:]) for residue in traj_stats["residue_set"]])
occupancy_arg_idx = np.argsort(occupancies)[::-1]
lipidcounts = np.array([np.mean(self.lipid_count[residue][-self.nprot:]) for residue in traj_stats["residue_set"]])
lipidcount_arg_idx = np.argsort(lipidcounts)[::-1]
log_text = "10 residues that showed longest average interaction durations ({}):\n".format(self.timeunit)
for residue, duration in zip(traj_stats["residue_set"][duration_arg_idx][:10], durations[duration_arg_idx][:10]):
log_text += "{:^8s} -- {:^8.3f}\n".format(residue, duration)
log_text += "10 residues that showed highest lipid occupancy (100%):\n"
for residue, occupancy in zip(traj_stats["residue_set"][occupancy_arg_idx][:10], occupancies[occupancy_arg_idx][:10]):
log_text += "{:^8s} -- {:^8.2f}\n".format(residue, occupancy)
log_text += "10 residues that have the largest number of surrounding lipids (count):\n"
for residue, lipidcount in zip(traj_stats["residue_set"][lipidcount_arg_idx][:10], lipidcounts[lipidcount_arg_idx][:10]):
log_text += "{:^8s} -- {:^8.2f}\n".format(residue, lipidcount)
print(log_text)
f.write(log_text)
row = np.concatenate(row)
col = np.concatenate(col)
data = np.concatenate(data)
contact_info = coo_matrix((data, (row, col)), shape=(max(self.nresi_per_protein), ncol_start))
self.interaction_covariance = sparse_corrcoef(contact_info)
###################################################
############ calculate and plot koffs #############
###################################################
koff_dir = check_dir(self.save_dir, "Koffs_{}".format(self.lipid))
for residue in self.residue_set:
duration_raw = np.concatenate(self.interaction_duration[residue])
if np.sum(duration_raw) > 0:
bootstrap_results = self.bootstrap(duration_raw, residue, "{}/{}_{}.pdf".format(koff_dir, self.lipid, residue), \
nbootstrap=nbootstrap)
self.sigmas[residue] = bootstrap_results["sigma"]
self.koff[residue] = bootstrap_results["koff"]
self.res_time[residue] = bootstrap_results["restime"]
self.params[residue] = bootstrap_results["params"]
self.r_squared[residue] = bootstrap_results["r_squared"]
self.koff_b[residue] = bootstrap_results["koff_b_avg"]
self.koff_b_cv[residue] = bootstrap_results["koff_b_cv"]
self.res_time_b[residue] = bootstrap_results["res_time_b_avg"]
self.res_time_b_cv[residue] = bootstrap_results["res_time_b_cv"]
self.r_squared_b[residue] = bootstrap_results["r_squared_b_avg"]
else:
delta_t_range = np.arange(0, self.T_total[traj_idx], np.min(self.timesteps))
self.sigmas[residue] = {key:value for key, value in zip(delta_t_range, np.zeros(len(delta_t_range)))}
self.koff[residue] = 0
self.res_time[residue] = 0
self.params[residue] = [0, 0, 0, 0]
self.r_squared[residue] = 0.0
self.koff_b[residue] = 0
self.koff_b_cv[residue] = 0
self.res_time_b[residue] = 0
self.res_time_b_cv[residue] = 0
self.r_squared_b[residue] = 0.0
##############################################
########## wrapping up dataset ###############
##############################################
T_max = np.max(self.T_total)
Res_Time = np.array([self.res_time[residue] for residue in self.residue_set])
Capped = Res_Time > T_max
Res_Time[Capped] = T_max
Res_Time_B = np.array([self.res_time_b[residue] for residue in self.residue_set])
Capped = Res_Time_B > T_max
Res_Time_B[Capped] = T_max
dataset = pd.DataFrame({"Residue": [residue for residue in self.residue_set],
"Residue idx": self._selected_residue_indices,
"Occupancy": np.array([np.mean(self.interaction_occupancy[residue]) \
for residue in self.residue_set]),
"Occupancy_std": np.array([np.std(self.interaction_occupancy[residue]) \
for residue in self.residue_set]),
"Duration": np.array([np.mean(np.concatenate(self.interaction_duration[residue])) \
for residue in self.residue_set]),
"Duration_std": np.array([np.std(np.concatenate(self.interaction_duration[residue])) \
for residue in self.residue_set]),
"Residence Time": Res_Time,
"Capped": Capped,
"R squared": np.array([self.r_squared[residue] for residue in self.residue_set]),
"Koff": np.array([self.koff[residue] for residue in self.residue_set]),
"Residence Time_boot": Res_Time_B,
"Residence Time_boot_cv": np.array([self.res_time_b_cv[residue] for residue in self.residue_set]),
"Koff_boot": np.array([self.koff_b[residue] for residue in self.residue_set]),
"Koff_boot_cv": np.array([self.koff_b_cv[residue] for residue in self.residue_set]),
"R squared_boot": np.array([self.r_squared_b[residue] for residue in self.residue_set]),
"LipidCount": np.array([np.mean(self.lipid_count[residue]) \
for residue in self.residue_set]),
"LipidCount_std": np.array([np.std(self.lipid_count[residue]) \
for residue in self.residue_set])})
dataset.to_csv("{}/Interactions_{}.csv".format(self.save_dir, self.lipid), index=False)
self.dataset = dataset
reminder = """
NOTE:
Occupancy: percentage of frames where lipid is in contact
with the given residue (0-100%);
Duration: Average length of a continuous interaction of lipid
with the given residue (in unit of {timeunit});
LipidCount: Average number of lipids surrounding the given residue within the longer cutoff;
Koff: Koff of lipid with the given residue (in unit of ({timeunit})^(-1));
""".format(**{"timeunit": self.timeunit})
print(reminder)
print()
if save_dataset:
dataset_dir = check_dir(self.save_dir, "Dataset")
with open("{}/interaction_durations_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.interaction_duration, f, 2)
with open("{}/sigmas_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.sigmas, f, 2)
with open("{}/curve_fitting_params_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.params, f, 2)
with open("{}/interaction_covariance_matrix_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.interaction_covariance, f, 2)
return
def bootstrap(self, durations, label, fig_fn, nbootstrap=10):
"""
bootstrap durations to calculate koffs, return bootstrapped values
"""
initial_guess = (1., 1., 1., 1.)
##### prep for plotting ######
plt.rcParams["font.size"] = 10
plt.rcParams["font.weight"] = "bold"
if self.timeunit == "ns":
xlabel = "Duration (ns)"
elif self.timeunit == "us":
xlabel = r"Duration ($\mu s$)"
fig = plt.figure(1, figsize=(8.2, 3.5))
left, width = 0.0975, 0.23
bottom, height = 0.17, 0.75
left_h = left + width + 0.0375
rect_scatter = [left, bottom, width, height]
rect_histy = [left_h, bottom, width, height]
axScatter = fig.add_axes(rect_scatter)
axHisty = fig.add_axes(rect_histy)
######## start bootstrapping ######
delta_t_range = np.arange(0, np.min(self.T_total), np.min(self.timesteps))
duration_sampled_set = [np.random.choice(durations, size=len(durations)) for dummy in range(nbootstrap)]
koff1_sampled_set = []
koff2_sampled_set = []
restime_sampled_set = []
r_squared_sampled_set = []
for duration_sampled in duration_sampled_set:
sigma_sampled = cal_sigma(duration_sampled, len(duration_sampled), np.max(self.T_total), delta_t_range)
hist_values_sampled = np.array([sigma_sampled[delta_t] for delta_t in delta_t_range])
axHisty.plot(delta_t_range, hist_values_sampled, color="gray", alpha=0.5)
restime_sampled, koff_sampled, r_squared_sampled, params_sampled = cal_restime_koff(sigma_sampled, initial_guess)
n_fitted = bi_expo(np.array(delta_t_range), *params_sampled)
r_squared_sampled = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values_sampled))**2)/np.sum((hist_values_sampled - np.mean(hist_values_sampled))**2)
ks_sampled = [abs(k) for k in params_sampled[:2]]
ks_sampled.sort()
koff1_sampled_set.append(ks_sampled[0])
koff2_sampled_set.append(ks_sampled[1])
restime_sampled_set.append(restime_sampled)
r_squared_sampled_set.append(r_squared_sampled)
######## plot original data #########
sigma = cal_sigma(durations, len(durations), np.max(self.T_total), delta_t_range)
x = np.sort(durations)
y = np.arange(len(x)) + 1
axScatter.scatter(x[::-1], y, label=label, s=10)
axScatter.set_xlim(0, x[-1] * 1.1)
axScatter.legend(loc="upper right", prop={"size": 10}, frameon=False)
axScatter.set_ylabel("Sorted Index", fontsize=10, weight="bold")
axScatter.set_xlabel(xlabel, fontsize=10, weight="bold")
hist_values = np.array([sigma[delta_t] for delta_t in delta_t_range])
axHisty.scatter(delta_t_range, hist_values, zorder=8, s=3, label="sigma func.")
axHisty.yaxis.set_label_position("right")
axHisty.yaxis.tick_right()
axHisty.set_xlabel(r"$\Delta t$", fontsize=10, weight="bold")
axHisty.set_ylabel("Probability", fontsize=10, weight="bold")
axHisty.set_yticks([0, 0.25, 0.5, 0.75, 1.0])
axHisty.set_ylim(-0.1, 1.1)
restime, koff, r_squared, params = cal_restime_koff(sigma, initial_guess)
n_fitted = bi_expo(np.array(delta_t_range), *params)
r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2)
ks = [abs(k) for k in params[:2]]
ks.sort()
axHisty.plot(delta_t_range, n_fitted, 'r--', linewidth=3, zorder=10, label="Fitted biexpo.")
axHisty.legend(loc="upper right", prop={"size": 10}, frameon=False)
######### labels ############
if self.timeunit == "ns":
text = "{:18s} = {:.3f} ns$^{{-1}} $\n".format("$k_{{off1}}$", ks[0])
text += "{:18s} = {:.3f} ns$^{{-1}} $\n".format("$k_{{off2}}$", ks[1])
text += "{:14s} = {:.4f}\n".format("$R^2$", r_squared)
text += "{:18s} = {:.3f} ns$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", np.mean(koff1_sampled_set), 100*np.std(koff1_sampled_set)/np.mean(koff1_sampled_set))
text += "{:18s} = {:.3f} ns$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", np.mean(koff2_sampled_set), 100*np.std(koff2_sampled_set)/np.mean(koff2_sampled_set))
text += "{:18s} = {:.4f}\n".format("$R^2$$_{{boot, avg}}$", np.mean(r_squared_sampled_set))
elif self.timeunit == "us":
text = "{:18s} = {:.3f} $\mu s^{{-1}} $\n".format("$k_{{off1}}$", ks[0])
text += "{:18s} = {:.3f} $\mu s^{{-1}} $\n".format("$k_{{off2}}$", ks[1])
text += "{:14s} = {:.4f}\n".format("$R^2$", r_squared)
text += "{:18s} = {:.3f} $\mu s^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", np.mean(koff1_sampled_set), 100*np.std(koff1_sampled_set)/np.mean(koff1_sampled_set))
text += "{:18s} = {:.3f} $\mu s^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", np.mean(koff2_sampled_set), 100*np.std(koff2_sampled_set)/np.mean(koff2_sampled_set))
text += "{:18s} = {:.4f}\n".format("$R^2$$_{{boot, avg}}$", np.mean(r_squared_sampled_set))
axHisty.text(1.4, 1.0, text, verticalalignment='top', horizontalalignment='left', transform=axHisty.transAxes, \
fontdict={"size": 8, "weight": "bold"})
plt.savefig(fig_fn, dpi=300)
plt.close()
return {"koff": koff, "restime": restime, "sigma": sigma, "params": params, "r_squared": r_squared,
"koff_b_avg": np.mean(koff1_sampled_set), "koff_b_cv": np.std(koff1_sampled_set)/np.mean(koff1_sampled_set),
"res_time_b_avg": np.mean(restime_sampled_set), "res_time_b_cv": np.std(restime_sampled_set)/np.mean(restime_sampled_set),
"r_squared_b_avg": np.mean(r_squared_sampled_set)}
def cal_interaction_network(self, save_dir=None, pdb=None, pymol_gui=True, save_dataset=True, nbootstrap=10, \
radii=None, gen_binding_poses=5, score_weights=None, save_pose_format="pdb", kde_bw=0.15):
Residue_property_book = {"ARG": "Pos. Charge", "HIS": "Pos. Charge", "LYS": "Pos. Charge",
"ASP": "Neg. Charge", "GLU": "Neg. Charge",
"SER": "Polar", "THR": "Polar", "ASN": "Polar", "GLN": "Polar",
"CYS": "Special", "SEC": "Special", "GLY": "Special", "PRO": "Special",
"ALA": "Hydrophobic", "VAL": "Hydrophobic", "ILE": "Hydrophobic", "LEU": "Hydrophobic",
"MET": "Hydrophobic", "PHE": "Hydrophobic", "TYR": "Hydrophobic", "TRP": "Hydrophobic"}
MARTINI_CG_radii = {"BB": 0.26, "SC1": 0.23, "SC2": 0.23, "SC3": 0.23}
if radii == None:
radii_book = MARTINI_CG_radii
else:
radii_book = {**MARTINI_CG_radii, **radii}
if save_dir == None:
save_dir = check_dir(self.save_dir, "Binding_Sites_{}".format(self.lipid))
else:
save_dir = check_dir(save_dir, "Binding_Sites_{}".format(self.lipid))
interaction_covariance = np.nan_to_num(self.interaction_covariance)
f = open("{}/BindingSites_Info_{}.txt".format(save_dir, self.lipid), "w")
##### write out info ######
reminder = """
# Occupancy: percentage of frames where lipid is in contact with the given residue (0-100%);
# Duration/Residence Time: average length of a continuous interaction of lipid with the given residue (in unit of {timeunit});
# Koff: Koff of lipid with the given residue/binding site (in unit of ({timeunit})^(-1));
# Pos. Charge: ARG, HIS, LYS;
# Neg. Charge: ASP, GLU;
# Polar: SER, THR, ASN, GLN;
# Hydrophobic: ALA, VAL, ILE, LEU, MET, PHE, TYR, TRP;
# Special: CYS, SEC, GLY, PRO.
""".format(**{"timeunit": self.timeunit})
f.write(reminder)
f.write("\n")
binding_site_id = 0
covariance_network = np.copy(interaction_covariance)
covariance_network[covariance_network < 0.0] = 0.0
residue_network_raw = nx.Graph(covariance_network)
part = community.best_partition(residue_network_raw, weight='weight')
values = [part.get(node) for node in residue_network_raw.nodes()]
binding_site_identifiers = np.ones(len(self.residue_set), dtype=int) * 999
self.interaction_duration_BS = defaultdict(list)
self.interaction_occupancy_BS = defaultdict(list)
self.lipid_count_BS = defaultdict(list)
self.sigmas_BS = {}
self.params_BS = {}
BS_restime = np.zeros(len(self.residue_set))
BS_koff = np.zeros(len(self.residue_set))
BS_rsquared = np.zeros(len(self.residue_set))
BS_duration = np.zeros(len(self.residue_set))
BS_lipidcount = np.zeros(len(self.residue_set))
BS_occupancy = np.zeros(len(self.residue_set))
BS_koff_b = np.zeros(len(self.residue_set))
BS_koff_b_cv = np.zeros(len(self.residue_set))
BS_restime_b = np.zeros(len(self.residue_set))
BS_restime_b_cv = np.zeros(len(self.residue_set))
BS_rsquared_b = np.zeros(len(self.residue_set))
BS_surface_area = np.zeros(len(self.residue_set))
t_total_max = np.max(self.T_total)
node_list_set = []
for value in range(max(values) + 1):  # include the highest-numbered community
node_list = [k for k,v in part.items() if v == value]
if len(node_list) >= 3:
binding_site_identifiers[node_list] = binding_site_id
node_list_set.append(node_list)
binding_site_id += 1
########### cal site koff and surface area ############
if len(node_list_set) > 0:
surface_area_all = defaultdict(list)
self._coordinate_pool = [[] for dummy in np.arange(len(node_list_set))]
for traj_idx, trajfile in enumerate(self.trajfile_list):
traj = md.load(trajfile, top=self.grofile_list[traj_idx], stride=self.stride)
if self.dt == None:
timestep = traj.timestep/1000000.0 if self.timeunit == "us" else traj.timestep/1000.0
else:
timestep = float(self.dt * self.stride)  # keep the effective timestep consistent with cal_interactions()
protein_indices_all = traj.top.select("protein")
natoms_per_protein = int(len(protein_indices_all)/self.nprot)
for idx_protein in np.arange(self.nprot):
protein_indices = protein_indices_all[idx_protein*natoms_per_protein:(idx_protein+1)*natoms_per_protein]
for binding_site_id, node_list in enumerate(node_list_set):
contact_BS_low = []
contact_BS_high = []
list_to_take = traj_idx*self.nprot+idx_protein
for frame_idx in range(len(self.contact_residues_high[node_list[0]][list_to_take])):
contact_high_frame = np.unique(np.concatenate([self.contact_residues_high[node][list_to_take][frame_idx] for node in node_list]))
contact_low_frame = np.unique(np.concatenate([self.contact_residues_low[node][list_to_take][frame_idx] for node in node_list]))
contact_BS_high.append(contact_high_frame)
contact_BS_low.append(contact_low_frame)
self.interaction_duration_BS[binding_site_id].append(Durations(contact_BS_low, contact_BS_high, timestep).cal_duration())
occupancy, lipidcount = cal_interaction_intensity(contact_BS_high)
self.interaction_occupancy_BS[binding_site_id].append(occupancy)
self.lipid_count_BS[binding_site_id].append(lipidcount)
########### store lipid binding poses ############
for frame_id in range(len(contact_BS_low)):
for lipid_id in contact_BS_low[frame_id]:
lipid_index = self.lipid_resi_set[traj_idx][lipid_id]
lipid_indices = np.sort([atom.index for atom in traj.top.residue(lipid_index).atoms])
self._coordinate_pool[binding_site_id].append([np.copy(traj.xyz[frame_id, np.hstack([protein_indices, lipid_indices])]), \
np.copy(traj.unitcell_angles[frame_id]), \
np.copy(traj.unitcell_lengths[frame_id])])
### calculate area ###
new_xyz = []
for frame in traj.xyz:
new_frame = frame[protein_indices]
new_xyz.append(new_frame)
reduced_frame = traj[0].atom_slice(protein_indices)
reduced_top = reduced_frame.top
if reduced_top.residue(0).index != 0:
starting_index = reduced_top.residue(0).index
for residue in reduced_top.residues:
residue.index -= starting_index
new_traj = md.Trajectory(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.unitcell_lengths, unitcell_angles=traj.unitcell_angles)
areas = md.shrake_rupley(new_traj, mode='residue', change_radii=radii_book)
for binding_site_id, node_list in enumerate(node_list_set):
surface_area_all[binding_site_id].append(areas[:, node_list].sum(axis=1))
########### write and plot results ###########
for binding_site_id in np.arange(len(node_list_set)):
duration_raw = np.concatenate(self.interaction_duration_BS[binding_site_id])
mask = (binding_site_identifiers == binding_site_id)
bootstrap_results = self.bootstrap(duration_raw, "BS id: {}".format(binding_site_id), "{}/BS_koff_id{}.pdf".format(save_dir, binding_site_id), nbootstrap=nbootstrap)
self.sigmas_BS[binding_site_id] = bootstrap_results["sigma"]
self.params_BS[binding_site_id] = bootstrap_results["params"]
BS_restime[mask] = bootstrap_results["restime"]
BS_koff[mask] = bootstrap_results["koff"]
BS_rsquared[mask] = bootstrap_results["r_squared"]
BS_koff_b[mask] = bootstrap_results["koff_b_avg"]
BS_koff_b_cv[mask] = bootstrap_results["koff_b_cv"]
BS_restime_b[mask] = bootstrap_results["res_time_b_avg"]
BS_restime_b_cv[mask] = bootstrap_results["res_time_b_cv"]
BS_rsquared_b[mask] = bootstrap_results["r_squared_b_avg"]
bs_area = np.concatenate(surface_area_all[binding_site_id]).mean()
BS_surface_area[mask] = bs_area
############# write results ###############
f.write("# Binding site {}\n".format(binding_site_id))
BS_restime[mask] = bootstrap_results["restime"] if bootstrap_results["restime"] <= t_total_max else t_total_max
if bootstrap_results["restime"] <= t_total_max:
f.write("{:20s} {:10.3f} {:5s} R squared: {:7.4f}\n".format(" BS Residence Time:", bootstrap_results["restime"], self.timeunit, bootstrap_results["r_squared"]))
else:
f.write("{:20s} {:10.3f} {:5s}** R squared: {:7.4f}\n".format(" BS Residence Time:", t_total_max, self.timeunit, bootstrap_results["r_squared"]))
f.write("{:20s} {:10.3f}\n".format(" BS koff:", bootstrap_results["koff"]))
f.write("{:20s} {:10.3f} +- {:10.3f}\n".format(" BS koff Bootstrap:", bootstrap_results["koff_b_avg"], bootstrap_results["koff_b_cv"]))
duration = np.mean(np.concatenate(self.interaction_duration_BS[binding_site_id]))
BS_duration[mask] = duration
f.write("{:20s} {:10.3f} {:5s}\n".format(" BS Duration:", duration, self.timeunit))
occupancy = np.mean(self.interaction_occupancy_BS[binding_site_id])
BS_occupancy[mask] = occupancy
f.write("{:20s} {:10.3f} %\n".format(" BS Lipid Occupancy:", occupancy))
lipidcount = np.mean(self.lipid_count_BS[binding_site_id])
BS_lipidcount[mask] = lipidcount
f.write("{:20s} {:10.3f}\n".format(" BS Lipid Count:", lipidcount))
f.write("{:20s} {:10.3f} nm^2 +- {:10.3f}\n".format(" BS Surface Area:", bs_area, np.concatenate(surface_area_all[binding_site_id]).std()))
res_stats = {"Pos. Charge": 0, "Neg. Charge": 0, "Polar": 0, "Special": 0, "Hydrophobic": 0}
for residue in self.residue_set[mask]:
res_stats[Residue_property_book[re.findall("[a-zA-Z]+$", residue)[0]]] += 1
BS_num_resi = np.sum(mask)
f.write("{:20s} {:10s}\n".format(" Pos. Charge:", "/".join([str(res_stats["Pos. Charge"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Neg. Charge:", "/".join([str(res_stats["Neg. Charge"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Polar:", "/".join([str(res_stats["Polar"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Hydrophobic:", "/".join([str(res_stats["Hydrophobic"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Special:", "/".join([str(res_stats["Special"]), str(BS_num_resi)])))
f.write("{:^9s}{:^9s}{:^13s}{:^11s}{:^10s}{:^10s}{:^10s}{:^13s}{:^10s}{:^10s}\n".format("Residue", "Duration", "Duration std", \
"Res. Time", "R squared", "Occupancy", "Occu. std", "Lipid Count", "L. C. std", "Koff"))
for residue in self.residue_set[mask]:
f.write("{Residue:^9s}{Duration:^9.3f}{Duration_std:^13.3f}{Residence Time:^11.3f}{R squared:^10.4f}{Occupancy:^10.3f}{Occupancy_std:^10.3f}{LipidCount:^13.3f}{LipidCount_std:^10.3f}{Koff:^10.4f}\n".format(\
**self.dataset[self.dataset["Residue"]==residue].to_dict("records")[0] ))
f.write("\n")
f.write("\n")
f.close()
######################## plot area stats ##########################
bs_id_set = []
bs_area_set = []
for binding_site_id in surface_area_all.keys():
bs_area_set.append(np.concatenate(surface_area_all[binding_site_id]))
bs_id_set.append([binding_site_id for dummy in np.arange(len(np.concatenate(surface_area_all[binding_site_id])))])
d_area = pd.DataFrame({"BS id": np.concatenate(bs_id_set), "Area (nm^2)": np.concatenate(bs_area_set)})
plt.rcParams["font.size"] = 8
plt.rcParams["font.weight"] = "bold"
if len(surface_area_all.keys()) <= 8:
fig, ax = plt.subplots(figsize=(4.5, 2.8))
elif len(surface_area_all.keys()) > 8 and len(surface_area_all.keys()) <= 15:
fig, ax = plt.subplots(figsize=(6.5, 2.8))
else:
fig, ax = plt.subplots(figsize=(9.5, 3))
sns.violinplot(x="BS id", y="Area (nm^2)", data=d_area, palette="Set3", bw=.2, cut=1, linewidth=1, ax=ax)
ax.set_xlabel("BS id", fontsize=8, weight="bold")
ax.set_ylabel(r"Surface Area (nm$^2$)", fontsize=8, weight="bold")
ax.set_title("{} Binding Site Surface Area".format(self.lipid), fontsize=8, weight="bold")
plt.tight_layout()
plt.savefig("{}/BS_surface_area.pdf".format(save_dir), dpi=300)
plt.close()
################ update dataset ########################
self.dataset["Binding site"] = binding_site_identifiers
self.dataset["BS Residence Time"] = BS_restime
self.dataset["BS koff"] = BS_koff
self.dataset["BS Duration"] = BS_duration
self.dataset["BS Occupancy"] = BS_occupancy
self.dataset["BS LipidCount"] = BS_lipidcount
self.dataset["BS R squared"] = BS_rsquared
self.dataset["BS Residence Time_boot"] = BS_restime_b
self.dataset["BS Residence Time_boot_cv"] = BS_restime_b_cv
self.dataset["BS koff_boot"] = BS_koff_b
self.dataset["BS koff_boot_cv"] = BS_koff_b_cv
self.dataset["BS R squared_boot"] = BS_rsquared_b
self.dataset["BS Surface Area"] = BS_surface_area
self.dataset.to_csv("{}/Interactions_{}.csv".format(self.save_dir, self.lipid), index=False)
################ save dataset ###################
if save_dataset:
dataset_dir = check_dir(self.save_dir, "Dataset")
with open("{}/BS_interaction_duration_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.interaction_duration_BS, f, 2)
with open("{}/BS_sigmas_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.sigmas_BS, f, 2)
with open("{}/BS_curve_fitting_params_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.params_BS, f, 2)
with open("{}/BS_surface_area_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(surface_area_all, f, 2)
################## generate binding poses ################
if gen_binding_poses > 0 and len(node_list_set) > 0:
coords_save_dir = check_dir(save_dir, "Binding_Poses")
lipid_atom_map = {atom.index:atom.name for atom in self._lipid_ref.top.atoms}
weights = {name:1 for index, name in lipid_atom_map.items()}
if score_weights != None:
weights.update(score_weights)
binding_site_id_set = np.arange(len(self._coordinate_pool))
if len(self.resi_list) == 0:
selected_protein_atoms = [[atom.index for atom in residue.atoms] for residue in self._protein_ref.top.residues]
else:
selected_protein_atoms = [[atom.index for atom in residue.atoms] for residue in self._protein_ref.top.residues \
if residue.resSeq in self.resi_list]
lipid_atoms = [self._protein_ref.n_atoms + atom_idx for atom_idx in np.arange(self._lipid_ref.n_atoms)]
joined_top = self._protein_ref.top.join(self._lipid_ref.top)
for binding_site_id in binding_site_id_set:
num_of_poses = gen_binding_poses if gen_binding_poses <= len(self._coordinate_pool[binding_site_id]) \
else len(self._coordinate_pool[binding_site_id])
node_list = node_list_set[binding_site_id]
new_traj = md.Trajectory([frame[0] for frame in self._coordinate_pool[binding_site_id]], joined_top, \
time=np.arange(len(self._coordinate_pool[binding_site_id])), \
unitcell_angles=[frame[1] for frame in self._coordinate_pool[binding_site_id]], \
unitcell_lengths=[frame[2] for frame in self._coordinate_pool[binding_site_id]])
dist_per_atom = [[md.compute_distances(new_traj, list(product([lipid_atoms[idx]], selected_protein_atoms[resi])), periodic=True).min(axis=1) \
for resi in node_list] for idx in np.arange(self._lipid_ref.n_atoms)]
kde_funcs = {}
var_type = ""
bw = []
for idx in range(len(dist_per_atom[0])):
var_type += "c"
bw.append(kde_bw)
try:
for atom_idx in np.arange(self._lipid_ref.n_atoms):
kde_funcs[atom_idx] = KDEMultivariate(data=np.array(dist_per_atom[atom_idx]).T, \
var_type=var_type, bw=bw)
### evaluate binding poses ###
scores = np.sum([weights[lipid_atom_map[idx]] * kde_funcs[idx].pdf() \
for idx in np.arange(self._lipid_ref.n_atoms)], axis=0)
selected_indices = np.argsort(scores)[::-1][:num_of_poses]
###############################
for pose_id in np.arange(num_of_poses, dtype=int):
new_traj[selected_indices[pose_id]].save("{}/BSid{}_No{}.{}".format(coords_save_dir, \
binding_site_id, pose_id, save_pose_format))
except ValueError:
with open("{}/Error.txt".format(coords_save_dir), "a+") as error_file:
error_file.write("BSid {}: Pose generation error -- possibly due to insufficient number of binding event.\n".format(binding_site_id))
######################################################################
###### show binding site residues with scaled spheres in pymol #######
######################################################################
if pdb != None:
############ check if pdb has a path to it ##########
pdb_new_loc = os.path.join(self.save_dir, os.path.basename(pdb))
copyfile(pdb, pdb_new_loc)
struct_ref = md.load(pdb_new_loc)
########### write out a pymol pml file ###############
binding_site_id += 1
text = """
import pandas as pd
import numpy as np
import mdtraj as md
import pymol
from pymol import cmd
pymol.finish_launching()
dataset = pd.read_csv("{HOME_DIR}/Interactions_{LIPID}.csv")
residue_set = np.array(dataset["Residue"].tolist())
binding_site_id = {BINDING_SITE_ID}
binding_site_identifiers = np.array(dataset["Binding site"].tolist())
struct_ref = md.load("{PDB}")
######### calculate scale ###############
residue_idx_set = dataset["Residue idx"]
interactions = np.zeros(residue_idx_set.max()+1)
values_to_check = dataset["Residence Time"]
interactions[residue_idx_set] = values_to_check
MID = values_to_check.quantile(0.5)
SCALES = 1.5 / (5 + np.exp(-30 * (interactions - MID)))
######################################
######## some pymol settings #########
cmd.set("retain_order", 1)
cmd.set("cartoon_oval_length", 1.0)
cmd.set("cartoon_oval_width", 0.3)
cmd.set("cartoon_color", "white")
cmd.set("stick_radius", 0.35)
##################################
cmd.load("{PDB}", "Prot_{LIPID}")
prefix = "Prot_{LIPID}"
cmd.hide("everything")
cmd.show("cartoon", prefix)
cmd.center(prefix)
cmd.orient(prefix)
colors = np.array([np.random.choice(np.arange(256, dtype=float), size=3) for dummy in range(binding_site_id)])
colors /= 255.0
""".format(**{"HOME_DIR": self.save_dir, "LIPID": self.lipid, "BINDING_SITE_ID": binding_site_id, "PDB": pdb_new_loc})
text += r"""
for bs_id in np.arange(binding_site_id):
cmd.set_color("tmp_{}".format(bs_id), list(colors[bs_id]))
for selected_residue in np.where(binding_site_identifiers == bs_id)[0]:
selected_residue_index = residue_idx_set[selected_residue]
selected_atom_indices = np.array([atom.index for atom in struct_ref.top.residue(selected_residue_index).atoms], dtype=str)
selected_resid = struct_ref.top.residue(selected_residue_index).resSeq
selected_resn = struct_ref.top.residue(selected_residue_index).name
cmd.select("BS{}_{}{}".format(bs_id, selected_resid, selected_resn), "rank {} and (not name C+O+N)".format("+".join(selected_atom_indices)))
cmd.show("spheres", "BS{}_{}{}".format(bs_id, selected_resid, selected_resn))
cmd.set("sphere_scale", SCALES[selected_residue_index], selection="BS{}_{}{}".format(bs_id, selected_resid, selected_resn))
cmd.color("tmp_{}".format(bs_id), "BS{}_{}{}".format(bs_id, selected_resid, selected_resn))
cmd.group("BS{}".format(bs_id), "BS{}_*".format(bs_id))
"""
with open("{}/show_binding_sites_info.py".format(self.save_dir), "w") as f:
f.write(text)
################## Launch a pymol session #######################
if pymol_gui:
import pymol
from pymol import cmd
pymol.finish_launching(['pymol', '-q'])
##### do some pymol settings #####
residue_idx_set = self.dataset["Residue idx"]
interactions = np.zeros(residue_idx_set.max()+1)
values_to_check = self.dataset["Residence Time"]
interactions[residue_idx_set] = values_to_check
MID = values_to_check.quantile(0.5)
SCALES = 1.5 / (5 + np.exp(-30 * (interactions - MID)))
##### do some pymol settings #####
cmd.set("retain_order", 1)
cmd.set("cartoon_oval_length", 1.0)
cmd.set("cartoon_oval_width", 0.3)
cmd.set("cartoon_color", "white")
cmd.set("stick_radius", 0.35)
##################################
cmd.load(pdb_new_loc, "Prot_{}".format(self.lipid))
prefix = "Prot_{}".format(self.lipid)
cmd.hide("everything")
cmd.show("cartoon", prefix)
cmd.center(prefix)
cmd.orient(prefix)
colors = np.array([np.random.choice(np.arange(256, dtype=float), size=3) for dummy in range(binding_site_id)])
colors /= 255.0
for bs_id in np.arange(binding_site_id):
cmd.set_color("tmp_{}".format(bs_id), list(colors[bs_id]))
for selected_residue in np.where(binding_site_identifiers == bs_id)[0]:
selected_residue_index = residue_idx_set[selected_residue]
selected_atom_indices = np.array([atom.index for atom in struct_ref.top.residue(selected_residue_index).atoms], dtype=str)
selected_resid = struct_ref.top.residue(selected_residue_index).resSeq
selected_resn = struct_ref.top.residue(selected_residue_index).name
cmd.select("{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn), \
"rank {} and (not name C+O+N)".format("+".join(selected_atom_indices)))
cmd.show("spheres", "{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn))
cmd.set("sphere_scale", SCALES[selected_residue_index], selection="{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn))
cmd.color("tmp_{}".format(bs_id), "{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn))
cmd.group("{}_BS{}".format(self.lipid, bs_id), "{}_BS{}_*".format(self.lipid, bs_id))
return
def plot_interactions(self, item="Duration", save_dir=None, letter_map=None, chain_breaks=[]):
if save_dir == None:
save_dir = check_dir(self.save_dir, "Figures_{}".format(self.lipid))
else:
save_dir = check_dir(save_dir, "Figures_{}".format(self.lipid))
### single-letter dictionary ###
single_letter = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
if letter_map != None:
single_letter.update(letter_map)
if len(chain_breaks) == 0:
chain_break_points = [0, len(self.dataset)]
no_break = True
else:
chain_break_points = [0]
for points in chain_breaks:
chain_break_points.append(points)
chain_break_points.append(len(self.dataset))
no_break = False
plt.rcParams["font.size"] = 8
plt.rcParams["font.weight"] = "bold"
for point_idx in np.arange(1, len(chain_break_points), dtype=int):
dataset = self.dataset[chain_break_points[point_idx-1]:chain_break_points[point_idx]]
data = dataset[item]
if len(data) == 0:
continue
resi = np.array([int(re.findall("^[0-9]+", residue)[0]) for residue in self.residue_set])[chain_break_points[point_idx-1]:chain_break_points[point_idx]]
SL_resn = [single_letter[re.findall("[a-zA-Z]+$", residue)[0]] for residue in self.residue_set][chain_break_points[point_idx-1]:chain_break_points[point_idx]]
width = 1
sns.set_style("ticks", {'xtick.major.size': 5.0, 'ytick.major.size': 5.0})
if item == "Residence Time":
if len(data) <= 500:
fig = plt.figure(figsize=(5.5, 5))
elif len(data) > 500 and len(data) <= 1500:
fig = plt.figure(figsize=(7.5, 5))
else:
fig = plt.figure(figsize=(9, 6))
ax_R2 = fig.add_axes([0.18, 0.79, 0.75, 0.10])
ax_capped = fig.add_axes([0.18, 0.71, 0.75, 0.05])
ax_data = fig.add_axes([0.18, 0.50, 0.75, 0.18])
ax_boot = fig.add_axes([0.18, 0.22, 0.75, 0.18])
ax_boot_cv = fig.add_axes([0.18, 0.08, 0.75, 0.10])
ax_boot.xaxis.tick_top()
ax_boot.invert_yaxis()
ax_boot_cv.invert_yaxis()
for ax in [ax_data, ax_capped, ax_R2, ax_boot, ax_boot_cv]:
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
for ax in [ax_capped, ax_R2, ax_boot_cv]:
ax.xaxis.set_ticks_position('none')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels([])
ax_data.spines['top'].set_visible(False)
ax_boot.spines['bottom'].set_visible(False)
if len(data) > 1000:
ax_data.xaxis.set_major_locator(MultipleLocator(200))
ax_data.xaxis.set_minor_locator(MultipleLocator(50))
ax_boot.xaxis.set_major_locator(MultipleLocator(200))
ax_boot.xaxis.set_minor_locator(MultipleLocator(50))
elif len(data) <= 1000 and len(data) > 100:
ax_data.xaxis.set_major_locator(MultipleLocator(100))
ax_data.xaxis.set_minor_locator(MultipleLocator(10))
ax_boot.xaxis.set_major_locator(MultipleLocator(100))
ax_boot.xaxis.set_minor_locator(MultipleLocator(10))
elif len(data) <= 100:
ax_data.xaxis.set_major_locator(MultipleLocator(10))
ax_data.xaxis.set_minor_locator(MultipleLocator(1))
ax_boot.xaxis.set_major_locator(MultipleLocator(10))
ax_boot.xaxis.set_minor_locator(MultipleLocator(1))
if self.timeunit == "ns":
timeunit = " (ns) "
elif self.timeunit == "us":
timeunit = r" ($\mu s$)"
ax_data.bar(resi, data, width, linewidth=0, color="#F75C03")
ax_data.set_ylabel("Res. Time {}".format(timeunit), fontsize=8, weight="bold", va="center")
ax_data.set_xlabel("Residue Index", fontsize=8, weight="bold")
ax_capped.plot(resi, dataset["Capped"]*1, linewidth=0, marker="+", markerfacecolor="#38040E", \
markeredgecolor="#38040E", markersize=2)
ax_capped.set_ylim(0.9, 1.1)
ax_capped.set_yticks([1.0])
ax_capped.set_yticklabels(["Capped"], fontsize=8, weight="bold")
ax_capped.set_xlim(ax_data.get_xlim())
mask = dataset["R squared"] > 0
ax_R2.plot(resi[mask], dataset["R squared"][mask], linewidth=0, marker="+", markerfacecolor="#0FA3B1", markeredgecolor="#0FA3B1", \
markersize=2)
ax_R2.set_xlim(ax_data.get_xlim())
ax_R2.set_ylabel(r"$R^2$", fontsize=8, weight="bold", va="center")
ax_R2.set_title("{} {}".format(self.lipid, item), fontsize=8, weight="bold")
ax_boot.bar(resi, dataset["Residence Time_boot"], width, linewidth=0, color="#F75C03")
ax_boot.set_xlim(ax_data.get_xlim())
ax_boot.set_ylabel("Res. Time \n Boot. {}".format(timeunit), fontsize=8, weight="bold", va="center")
ax_boot.set_xticklabels([])
mask = dataset["R squared_boot"] > 0
mask = dataset["Residence Time_boot_cv"] > 0
ax_boot_cv.plot(resi[mask], dataset["Residence Time_boot_cv"][mask], linewidth=0, marker="+", markerfacecolor="#0FA3B1", markeredgecolor="#F7B538",
markersize=2)
ax_boot_cv.set_ylabel("Coef. Var.", fontsize=8, weight="bold", va="center")
ax_boot_cv.set_xlim(ax_data.get_xlim())
for ax in [ax_data, ax_capped, ax_R2, ax_boot, ax_boot_cv]:
ax.yaxis.set_label_coords(-0.15, 0.5, transform=ax.transAxes)
if no_break:
plt.savefig("{}/{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
###### logomater #####
df = pd.DataFrame({"Resid": resi, "Resn": SL_resn, "Data": data})
matrix = df.pivot(index="Resid", columns='Resn', values="Data").fillna(0)
n_rows = 1 + resi[-1]//100 - resi[0]//100
start = (resi[0]//100)*100
length = start + 100 - resi[0]
fig, axes = plt.subplots(n_rows, 1, figsize=(4.5, 1.3*n_rows), sharey=True)
plt.subplots_adjust(hspace=0.5)
for idx, ax in enumerate(np.atleast_1d(axes)):
if idx == (n_rows - 1):
logomaker.Logo(matrix[(idx-1)*100 + length:], color_scheme="chemistry", ax=ax)
ax.set_xlabel("Residue Index", fontsize=8, weight="bold")
elif idx == 0:
logomaker.Logo(matrix[:length], color_scheme="chemistry", ax=ax)
else:
logomaker.Logo(matrix[(idx-1)*100+length:idx*100+length], color_scheme="chemistry", ax=ax)
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.set_xlim(idx*100+start, (idx+1)*100+start)
ax.set_ylim(0, data.max()*1.05)
ax.set_ylabel("Res. Time {}".format(timeunit), fontsize=8, weight="bold", va="center")
for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels():
plt.setp(label, fontsize=8, weight="bold")
plt.tight_layout()
if no_break:
plt.savefig("{}/{}_logo_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_logo_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
else:
fig, ax = plt.subplots(1, 1, figsize=(4.5,2.8))
ax.bar(resi, data, width, linewidth=0, color=sns.xkcd_rgb["red"])
sns.despine(fig, top=True, right=True, trim=False)
if len(data) > 1000:
ax.xaxis.set_major_locator(MultipleLocator(200))
ax.xaxis.set_minor_locator(MultipleLocator(50))
elif len(data) <= 1000:
ax.xaxis.set_major_locator(MultipleLocator(100))
ax.xaxis.set_minor_locator(MultipleLocator(10))
ax.set_xlabel("Residue Index", fontsize=8, weight="bold")
if self.timeunit == "ns":
timeunit = " (ns) "
elif self.timeunit == "us":
timeunit = r" ($\mu s$)"
if item == "Duration":
ylabel = item + timeunit
elif item == "Occupancy":
ylabel = item + " 100% "
elif item == "LipidCount":
ylabel = "Num. of Lipids"
ax.set_ylabel(ylabel, fontsize=8, weight="bold")
for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels():
plt.setp(label, fontsize=8, weight="bold")
ax.set_title("{} {}".format(self.lipid, item), fontsize=8, weight="bold")
plt.tight_layout()
if no_break:
plt.savefig("{}/{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
###### logomater #####
df = pd.DataFrame({"Resid": resi, "Resn": SL_resn, "Data": data})
matrix = df.pivot(index="Resid", columns='Resn', values="Data").fillna(0)
n_rows = 1 + resi[-1]//100 - resi[0]//100
start = (resi[0]//100)*100
length = start + 100 - resi[0]
fig, axes = plt.subplots(n_rows, 1, figsize=(4.5, 1.3*n_rows), sharey=True)
plt.subplots_adjust(hspace=0.5)
for idx, ax in enumerate(np.atleast_1d(axes)):
if idx == (n_rows - 1):
logomaker.Logo(matrix[(idx-1)*100 + length:], color_scheme="chemistry", ax=ax)
ax.set_xlabel("Residue Index", fontsize=8, weight="bold")
elif idx == 0:
logomaker.Logo(matrix[:length], color_scheme="chemistry", ax=ax)
else:
logomaker.Logo(matrix[(idx-1)*100+length:idx*100+length], color_scheme="chemistry", ax=ax)
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.set_xlim(idx*100+start, (idx+1)*100+start)
ax.set_ylim(0, data.max()*1.05)
ax.set_ylabel(ylabel, fontsize=8, weight="bold", va="center")
for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels():
plt.setp(label, fontsize=8, weight="bold")
plt.tight_layout()
if no_break:
plt.savefig("{}/{}_logo_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_logo_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
return
def write_to_pdb(self, item, save_dir=None):
if save_dir == None:
save_dir = check_dir(self.save_dir, "Coordinates_{}".format(self.lipid))
else:
save_dir = check_dir(save_dir, "Coordinates_{}".format(self.lipid))
##### load coords ######
data = self.dataset[item]
coords = self._protein_ref.xyz[0]
table, _ = self._protein_ref.top.to_dataframe()
atom_idx_set = table.serial
resid_set = table.resSeq + self.resi_offset
atom_name_set = table.name
resn_set = table.resName
chainID = [chr(65+int(idx)) for idx in table.chainID]
data_expanded = np.zeros(len(table))
residue_indices = np.array([atom.residue.index for atom in self._protein_ref.top.atoms])
for value, selected_residue_index in zip(data, self._selected_residue_indices):
locations = np.where(residue_indices == selected_residue_index)[0]
data_expanded[locations] = value
######## write out coords ###########
fn = "{}/Coords_{}.pdb".format(save_dir, "_".join(item.split()))
with open(fn, "w") as f:
for idx in np.arange(self._protein_ref.n_atoms):
coords_dictionary = {"HEADER": "ATOM",
"ATOM_ID": atom_idx_set[idx],
"ATOM_NAME": atom_name_set[idx],
"SPARE": "",
"RESN": resn_set[idx],
"CHAIN_ID": chainID[idx],
"RESI": resid_set[idx],
"COORDX": coords[idx, 0] * 10,
"COORDY": coords[idx, 1] * 10,
"COORDZ": coords[idx, 2] * 10,
"OCCUP": 1.0,
"BFACTOR": data_expanded[idx]}
row = "{HEADER:6s}{ATOM_ID:5d} ".format(**coords_dictionary) +\
"{ATOM_NAME:^4s}{SPARE:1s}{RESN:3s} ".format(**coords_dictionary) +\
"{CHAIN_ID:1s}{RESI:4d}{SPARE:1s} ".format(**coords_dictionary) +\
"{COORDX:8.3f}{COORDY:8.3f}{COORDZ:8.3f}{OCCUP:6.2f}{BFACTOR:6.2f}\n".format(**coords_dictionary)
f.write(row)
f.write("TER")
return
######################################################
########### Load params and do calculation ###########
######################################################
if __name__ == '__main__':
trajfile_list = args.f
grofile_list = args.c
lipid_set = args.lipids
cutoff = [float(data) for data in args.cutoffs]
save_dir = check_dir(args.save_dir)
#######################################################################
######## write a backup file of params for reproducibility ############
fn = os.path.join(save_dir, "pylipid_backup_{}.txt".format(datetime.datetime.now().strftime("%Y_%m_%d_%H%M")))
with open(fn, "w") as f:
f.write("##### Record params for reproducibility #####\n")
f.write("python {}\n".format(" ".join(sys.argv)))
######################################################################
######################### process resi_list ##########################
resi_list = []
if len(args.resi_list) > 0:
for item in args.resi_list:
if "-" in item:
item_list = item.split("-")
resi_list.append(np.arange(int(item_list[0]), int(item_list[-1])+1))
else:
resi_list.append(int(item))
resi_list = np.hstack(resi_list)
#######################################################################
############################ change of radii ##########################
##### mdtraj default radii:
##### https://github.com/mdtraj/mdtraj/blob/b28df2cd6e5c35fa006fe3c24728857880793abb/mdtraj/geometry/sasa.py#L56
    if args.radii is None:
radii_book = None
else:
radii_book = {}
for item in args.radii:
radius = item.split(":")
radii_book[radius[0]] = float(radius[1])
#######################################################################
################# score weight for kde calculation ####################
    if args.score_weights is None:
score_weights = None
else:
score_weights = {}
for item in args.score_weights:
weight = item.split(":")
score_weights[weight[0]] = float(weight[1])
#######################################################################
################# map three letter to single letter ###################
letter_map = None
    if args.letter_map is not None:
letter_map = {}
for item in args.letter_map:
letter_map[item.split(":")[0]] = item.split(":")[1]
#######################################################################
################# process chain breaks ################################
chain_breaks = [] if len(args.chain_breaks) == 0 else [int(num)-1 for num in args.chain_breaks]
#######################################################################
for lipid in lipid_set:
li = LipidInteraction(trajfile_list, grofile_list, stride=int(args.stride), dt=args.dt, cutoff=cutoff, lipid=lipid, \
lipid_atoms=args.lipid_atoms, nprot=args.nprot, timeunit=args.tu, resi_offset=int(args.resi_offset), \
resi_list=resi_list, save_dir=args.save_dir)
li.cal_interactions(save_dataset=args.save_dataset, nbootstrap=int(args.nbootstrap))
li.plot_interactions(item="Duration", letter_map=letter_map, chain_breaks=chain_breaks)
li.plot_interactions(item="Residence Time", letter_map=letter_map, chain_breaks=chain_breaks)
li.plot_interactions(item="Occupancy", letter_map=letter_map, chain_breaks=chain_breaks)
li.plot_interactions(item="LipidCount", letter_map=letter_map, chain_breaks=chain_breaks)
li.write_to_pdb(item="Duration")
li.write_to_pdb(item="Residence Time")
li.write_to_pdb(item="Occupancy")
li.write_to_pdb(item="LipidCount")
li.cal_interaction_network(pdb=args.pdb, save_dataset=args.save_dataset, \
pymol_gui=args.pymol_gui, radii=radii_book, gen_binding_poses=int(args.gen_binding_poses), \
score_weights=score_weights, save_pose_format=args.save_pose_format)
|
import logging
import os
import sys
from locust import Locust
from locust import TaskSet
from locust import task
from locust import HttpLocust
logger = logging.getLogger("dummy_locust")
# class MyTaskSet(TaskSet):
# @task
# def my_task(self):
# print('Locust instance (%r) executing "my_task"'.format(self.locust))
# class MyLocust(Locust):
# task_set = MyTaskSet
VERBOSE_LOGS = True
class ActionsTaskSet(TaskSet):
"""
    This is a container class that holds all the Locust load tests we want to run sequentially.
"""
@task
def index(self):
"""Hit https://example.com/ endpoint
Arguments:
N/A
Decorators:
task
"""
response = self.client.get("/")
logger.info("Response status code: {}".format(response.status_code))
if VERBOSE_LOGS:
logger.debug("Response content: {}".format(response.content))
class MyLocust(HttpLocust):
"""Locust action wrapper class, this is what actually performs the load tests
Arguments:
N/A
Decorators:
task
"""
weight = 1
task_set = ActionsTaskSet
min_wait = 2000
max_wait = 9000
# host = DOMAIN
|
import numpy as np
import joblib
import pprint
from utils.BaseModel import BaseModel, R2
from utils.SpecialPlotters import train_cv_analyzer_plotter
from utils.AwesomeTimeIt import timeit
from utils.RegressionReport import evaluate_regression
from utils.FeatureImportanceReport import report_feature_importance
import statsmodels.api as sm
from scipy import stats
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_validate
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.metrics import make_scorer
class Linear(BaseModel):
def __init__(self, name, dl):
self.n_top_features = dl.n_top_features
self.k = dl.k
self.dl = dl
self.name = name
def initialize(self, model_name):
super().__init__(self.name, model_name, self.dl)
def load(self):
self.X_train, self.X_test, self.Y_train, self.Y_test, \
self.dates_train, self.dates_test = self.dl.load_with_test()
self.X, self.Y, _ = self.dl.load_all()
def set_params(self, **params):
self.alpha = params.pop('alpha', 0.002)
self.fit_intercept = params.pop('fit_intercept', True)
self.should_cross_val = params.pop('should_cross_val', False)
def log_params(self):
self.log.info(pprint.pformat({
"Model_type": self.model_name,
"alpha" : self.alpha,
"fit_intercept" : self.fit_intercept,
"should_cross_val" : self.should_cross_val,
'random_state': self.dl.random_state
}))
def fit_ols(self):
self.fit(model_name = 'OLS')
def fit_linear(self):
self.fit(model_name = 'Linear')
def fit_lasso(self):
self.fit(model_name = 'Lasso')
def fit_ridge(self):
self.fit(model_name = 'Ridge')
@timeit
def fit(self, model_name = 'linear'):
self.initialize(model_name)
self.load()
self.log_params()
if model_name.lower() == 'ols':
X_train, X_test = sm.add_constant(self.X_train), sm.add_constant(self.X_test)
model = sm.OLS(self.Y_train, X_train)
model = model.fit()
self.log.info(str(model.summary()))
else:
if model_name.lower() == 'linear':
model = LinearRegression(fit_intercept = self.fit_intercept,
normalize=False)
else:
                lin_model = Lasso if model_name.lower() == 'lasso' else Ridge
model = lin_model(alpha = self.alpha,
fit_intercept = self.fit_intercept,
max_iter = 10000)
if self.should_cross_val:
r2_scorer = make_scorer(R2, greater_is_better=False)
mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
scores = cross_validate(model, self.X, self.Y,
cv=self.k, verbose=0, scoring= {'MSE': mse_scorer, 'R2' : r2_scorer})
self.log.info( f"Cross validation is done for {model_name}. "\
f"RMSE: {(-np.mean(scores['test_MSE']))**0.5:.2f}, "\
f"MSE: {-np.mean(scores['test_MSE']):.2f},"\
f" R2: {-np.mean(scores['test_R2']):.2f}")
print (f"|- Cross validation is done for {model_name} "\
f"RMSE: {(-np.mean(scores['test_MSE']))**0.5:.2f},"\
f"MSE: {-np.mean(scores['test_MSE']):.2f}, "
f"R2: {-np.mean(scores['test_R2']):.2f} -|")
model.fit(self.X_train, self.Y_train)
evaluate_regression(['OnTrain', self.X_train, self.Y_train, self.dates_train],
['OnTest', self.X_test, self.Y_test, self.dates_test],
direc = self.directory,
model = model,
model_name = model_name,
logger = self.log,
slicer = 1,
should_check_hetero = True,
should_log_inverse = self.dl.should_log_inverse)
joblib.dump(model, self.directory + f"/{model_name}.pkl")
# Plotting the Importances
coeffs = dict(zip(self.X_train.columns, model.coef_))
report_feature_importance(self.directory, model.coef_, self.X, self.Y,
self.n_top_features, model_name, self.log)
self.log.info(f"{model_name} Coefficients:\n" + pprint.pformat(coeffs))
@timeit
def analyze(self, model_name = 'Lasso', start = 0.0000001, end=100, step=2):
        lin_model = Lasso if model_name.lower() == 'lasso' else Ridge
train_error_list, cv_error_list, xticks = [], [], []
i = start
while i < end:
xticks.append(i)
model = lin_model(alpha=i, fit_intercept = True, normalize = False, max_iter=10000)
model.fit(self.X_train, self.Y_train)
train_error_list.append(mean_squared_error(self.Y_train, model.predict(self.X_train)))
cv_error_list.append(mean_squared_error(self.Y_test, model.predict(self.X_test)))
print (f"Step {i:.4f} of regularization is done")
i = i *step
train_cv_analyzer_plotter(train_error_list, cv_error_list, self.directory,
f'{model_name}_Regularization_Analysis', xticks = xticks) |
"""
[2016-12-05] Challenge #294 [Easy] Rack management 1
https://www.reddit.com/r/dailyprogrammer/comments/5go843/20161205_challenge_294_easy_rack_management_1/
# Description
Today's challenge is inspired by the board game Scrabble. Given a set of 7 letter tiles and a word, determine whether
you can make the given word using the given tiles.
Feel free to format your input and output however you like. You don't need to read from your program's input if you
don't want to - you can just write a function that does the logic. I'm representing a set of tiles as a single string,
but you can represent it using whatever data structure you want.
# Examples
scrabble("ladilmy", "daily") -> true
scrabble("eerriin", "eerie") -> false
scrabble("orrpgma", "program") -> true
scrabble("orppgma", "program") -> false
# Optional Bonus 1
Handle blank tiles (represented by `"?"`). These are "wild card" tiles that can stand in for any single letter.
scrabble("pizza??", "pizzazz") -> true
scrabble("piizza?", "pizzazz") -> false
scrabble("a??????", "program") -> true
scrabble("b??????", "program") -> false
# Optional Bonus 2
Given a set of up to 20 letter tiles, determine the longest word from [the enable1 English word
list](https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/dotnetperls-controls/enable1.txt)
that can be formed using the tiles.
longest("dcthoyueorza") -> "coauthored"
longest("uruqrnytrois") -> "turquois"
longest("rryqeiaegicgeo??") -> "greengrocery"
longest("udosjanyuiuebr??") -> "subordinately"
longest("vaakojeaietg????????") -> "ovolactovegetarian"
(For all of these examples, there is a unique longest word from the list. In the case of a tie, any word that's tied
for the longest is a valid output.)
# Optional Bonus 3
Consider the case where every tile you use is worth a certain number of points, [given on the Wikipedia page for
Scrabble](https://en.wikipedia.org/wiki/Scrabble_letter_distributions#English). E.g. `a` is worth 1 point, `b` is worth
3 points, etc.
For the purpose of this problem, if you use a blank tile to form a word, it counts as 0 points. For instance, spelling
`"program"` from `"progaaf????"` gets you 8 points, because you have to use blanks for the `m` and one of the `r`s,
spelling `prog?a?`. This scores 3 + 1 + 1 + 2 + 1 = 8 points, for the `p`, `r`, `o`, `g`, and `a`, respectively.
Given a set of up to 20 tiles, determine the highest-scoring word from the word list that can be formed using the tiles.
highest("dcthoyueorza") -> "zydeco"
highest("uruqrnytrois") -> "squinty"
highest("rryqeiaegicgeo??") -> "reacquiring"
highest("udosjanyuiuebr??") -> "jaybirds"
highest("vaakojeaietg????????") -> "straightjacketed"
"""
def main():
pass
if __name__ == "__main__":
main()
|
import sys
import os
import re
def camel_to_sletter(filename):
""" transform function names of camel case to that of small letters """
o_filename = os.path.splitext(filename)[0] + "_converted.py"
print("input : ", os.path.splitext(filename))
print("output : ", o_filename)
# _regex_camel = re.compile(r'[.\s()+=\-#][a-z]+[A-Z][a-zA-Z_]*')
_regex_camel = re.compile(r'[.\s()+=\-#](?!gl)[a-z]+[A-Z][0-9a-zA-Z_]*')
_regex_upper = re.compile(r'[A-Z][a-z]*')
with open(filename, 'r') as i_file, open(o_filename, 'w') as o_file:
for line in i_file:
_camels = _regex_camel.findall(line)
for _c in _camels:
_camel = _c[1:]
_uppers = _regex_upper.findall(_camel)
print("origin : {0}".format(_camel))
for _upper in _uppers:
_camel = _camel.replace(_upper, '_' + _upper.lower())
print("replaced : {0}".format(_camel))
line = line.replace(_c, _c[0] + _camel)
print("replaced line : {0}".format(line))
o_file.write(line)
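# Hedged, self-contained demo of the same substitution applied to a literal
# string (the sample name "loadMotionData" is illustrative, not taken from the
# converted resource files):
_demo_regex_camel = re.compile(r'[.\s()+=\-#](?!gl)[a-z]+[A-Z][0-9a-zA-Z_]*')
_demo_regex_upper = re.compile(r'[A-Z][a-z]*')
_demo_line = "self.loadMotionData(path)"
for _match in _demo_regex_camel.findall(_demo_line):
    _name = _match[1:]
    for _upper in _demo_regex_upper.findall(_name):
        _name = _name.replace(_upper, '_' + _upper.lower())
    _demo_line = _demo_line.replace(_match, _match[0] + _name)
print(_demo_line)  # prints: self.load_motion_data(path)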
camel_to_sletter("../resource/ys_motion_loader.py")
camel_to_sletter("../resource/ys_ogre_data_loader.py")
|
#noinspection PyUnresolvedReferences
import sys
|
import pytest
from pre_commit_hooks.check_json import main
from testing.util import get_resource_path
@pytest.mark.parametrize(
('filename', 'expected_retval'), (
('bad_json.notjson', 1),
('bad_json_latin1.nonjson', 1),
('ok_json.json', 0),
('duplicate_key_json.json', 1),
),
)
def test_main(capsys, filename, expected_retval):
ret = main([get_resource_path(filename)])
assert ret == expected_retval
if expected_retval == 1:
stdout, _ = capsys.readouterr()
assert filename in stdout
def test_non_utf8_file(tmpdir):
f = tmpdir.join('t.json')
f.write_binary(b'\xa9\xfe\x12')
assert main((str(f),))
|
#!/usr/bin/env python3
import argparse
import datetime
import re
import subprocess
import sys
from typing import Any, Tuple
parser = argparse.ArgumentParser(description="Tags and releases the next version of this project.")
parser.add_argument("tag", help="Semantic version number to use as the tag.")
parser.add_argument("-e", "--edit", action="store_true", help="Force edits of git commits.")
parser.add_argument("-n", "--dry-run", action="store_true", help="Don't modify files.")
parser.add_argument("-v", "--verbose", action="store_true", help="Print debug information.")
def parse_changelog(args: Any) -> Tuple[str, str]:
"""Return an updated changelog and and the list of changes."""
with open("CHANGELOG.rst", "r", encoding="utf-8") as file:
match = re.match(
pattern=r"(.*?Unreleased\n---+\n)(.+?)(\n*[^\n]+\n---+\n.*)",
string=file.read(),
flags=re.DOTALL,
)
assert match
header, changes, tail = match.groups()
tag = "%s - %s" % (args.tag, datetime.date.today().isoformat())
tagged = "\n%s\n%s\n%s" % (tag, "-" * len(tag), changes)
if args.verbose:
print(tagged)
return "".join((header, tagged, tail)), changes
def main() -> None:
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
if args.verbose:
print(args)
new_changelog, changes = parse_changelog(args)
if not args.dry_run:
with open("CHANGELOG.rst", "w", encoding="utf-8") as f:
f.write(new_changelog)
edit = ["-e"] if args.edit else []
subprocess.check_call(["git", "commit", "-avm", "Prepare %s release." % args.tag] + edit)
subprocess.check_call(["git", "tag", args.tag, "-am", "%s\n\n%s" % (args.tag, changes)] + edit)
if __name__ == "__main__":
main()
|
"""Support routines for the qdyn_prop_gate utility"""
import re
import numpy as np
from .units import UnitFloat
def _isqrt(n):
"""Integer square root of n > 0
>>> _isqrt(1024**2)
1024
>>> _isqrt(10)
3
"""
assert n >= 0
x = n
y = (x + 1) // 2
while y < x:
x = y
y = (x + n // x) // 2
return x
def get_prop_gate_of_t(gates_file, with_t=False):
r"""Yield gates in `gates_file`, where `gates_file` is in the format
written by the ``qdyn_prop_gate`` utility's ``--write-gate`` option. That
    is, each row in `gates_file` has $2 n^2 + 1$ columns. The first column is
a time stamp, the remaining columns are the real and imaginary part for
each entry in the $n \times n$ gate (vectorized in column-major format). If
`with_t` is False (default), yield only the gates, otherwise yield both the
    gates and the time stamp for each gate.
Returns:
* If ``with_t=False``, iterator over gates, where each gate is a
complex $n \times n$ numpy matrix, or a Gate2Q instance for a $4
\times 4$ gate
* If ``with_t=True``, iterator of tuples ``(gate, t)``, where ``t`` is
a float or an instance of UnitFloat if the time unit can be derived
from the header of `gates_file`
"""
with open(gates_file) as in_fh:
time_unit = None
for line in in_fh:
if line.startswith('#'):
try:
time_unit = re.search(r't\s*\[(\w+)\]', line).group(1)
except AttributeError:
pass
else:
vals = np.array([float(v) for v in line.split()])
n = _isqrt((len(vals) - 1) // 2)
assert 2 * n * n + 1 == len(vals)
shape = (n, n)
gate = np.reshape(
vals[1::2], shape, order='F'
) + 1j * np.reshape(vals[2::2], shape, order='F')
if with_t:
if time_unit is not None:
yield gate, UnitFloat(vals[0], time_unit)
else:
yield gate, vals[0]
else:
yield gate
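# Hedged usage sketch (the filename "U_of_t.dat" is illustrative only; any file
# written by ``qdyn_prop_gate --write-gate`` should work):
#
#     for gate, t in get_prop_gate_of_t('U_of_t.dat', with_t=True):
#         print(t, gate.shape)  # each gate is an (n, n) complex ndarray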
|
# Recursively reverses a string: each call appends the first character after the
# reversed remainder (e.g. "programiranje" -> "ejnarimargorp").
def rekurzivna_funkcija(s):
    if len(s) == 0:
        return s
    else:
        return rekurzivna_funkcija(s[1:]) + s[0]

rez = rekurzivna_funkcija("programiranje")
print(rez)
|
import unittest
import torch
import numpy as np
from connectomics.data.augmentation import *
class TestModelBlock(unittest.TestCase):
def test_mixup(self):
"""Test mixup for numpy.ndarray and torch.Tensor.
"""
mixup_augmentor = MixupAugmentor(num_aug=2)
volume = np.ones((4,1,8,32,32))
volume = mixup_augmentor(volume)
volume = torch.ones(4,1,8,32,32)
volume = mixup_augmentor(volume)
def test_copypaste(self):
"""Test copypaste augment for numpy.ndarray and torch.Tensor.
"""
np.random.seed(42)
cp_augmentor = CopyPasteAugmentor()
volume, label = np.random.randn(8,32,32), np.zeros((8,32,32))
label[2:4, 10:20, 10:20] = 1
volume_np = cp_augmentor({'image': volume, 'label':label})
volume, label = torch.from_numpy(volume), torch.from_numpy(label)
volume_torch = cp_augmentor({'image': volume, 'label':label})
self.assertTrue(torch.allclose(volume_torch, torch.from_numpy(volume_np), atol=1e-6))
if __name__ == '__main__':
unittest.main()
|
import time
from setuptools import find_packages
from distutils.core import setup
patch_level = int(time.time())
ver = "0.1." + str(patch_level)[4:]
setup(
name = 'slackbot_ts',
packages = find_packages(),
version = ver,
description = 'Python Code for Tech Em Studios Classes',
author = 'Tech Em Studios',
author_email = '[email protected]',
url = 'https://github.com/wray/',
download_url = 'https://github.com/wray//tarball/'+ver,
keywords = ['slackbot', 'RPi', 'AWS'],
classifiers = [],
)
|
from .base_bev_backbone import BaseBEVBackbone
from .hg_bev_backbone import HgBEVBackbone
__all__ = {
'BaseBEVBackbone': BaseBEVBackbone,
'HgBEVBackbone': HgBEVBackbone
}
|
import torch
import random
import inspect
import re
import logging
import types
import syft as sy
from ... import workers
from ... import utils
from . import utils as torch_utils
from .tensor import _SyftTensor, _LocalTensor, _PointerTensor, _FixedPrecisionTensor, _TorchTensor
from .tensor import _TorchVariable
class TorchHook(object):
r""" A Hook which Overrides Methods on PyTorch Variables & Tensors -
**Currently compatible with PyTorch 0.3.1**
The purpose of this class is to:
* extend torch methods to allow for the moving of tensors
and variables from one worker to another
* override torch methods to execute commands on one worker
that are called on tensors controlled by the local worker.
This class is typically the first thing you will initialize when
using PySyft with PyTorch because it is responsible for augmenting
PyTorch with PySyft's added functionality (such as remote execution).
:Parameters:
* **local_worker (**:class:`.workers.BaseWorker` **, optional)**
you can optionally provide a local worker as a parameter which
TorchHook will assume to be the worker owned by the local machine.
If you leave it empty, TorchClient will automatically initialize
a :class:`.workers.VirtualWorker` under the assumption you're
looking to do local experimentation/development.
* **is_client (bool, optional)** whether or not the TorchHook is
being initialized as an end-user client. This can impact whether
or not variables are deleted when they fall out of scope. If you set
          this incorrectly on an end user client, Tensors and Variables will
never be deleted. If you set this incorrectly on a remote machine
(not a client), tensors will not get saved. It's really only
important if you're not initializing the local worker yourself. (Default: True)
* **verbose (bool, optional)** whether or not to print operations
          as they occur. (Default: True)
* **queue_size (int, optional)** max length of the list storing messages
to be sent. (Default: 0)
:Example:
>>> import syft as sy
>>> hook = sy.TorchHook()
Hooking into Torch...
Overloading Complete.
>>> x = sy.FloatTensor([-2,-1,0,1,2,3])
>>> x
-2
-1
0
1
2
3
[syft.core.frameworks.torch.tensor.FloatTensor of size 6]
"""
def __init__(self, local_worker=None, is_client=True, verbose=True, queue_size=0):
self.local_worker = local_worker
if not hasattr(torch, 'torch_hooked'):
torch.torch_hooked = 0
else:
torch.torch_hooked += 1
# Methods that caused infinite recursion during testing
# TODO: May want to handle the ones in "exclude" manually at
# some point
self.exclude = (['ndimension', 'nelement', 'size', 'numel',
'type', 'tolist', 'dim', '__iter__', 'select',
'__getattr__', '_get_type'])
self.to_auto_overload = {}
if torch.torch_hooked > 0:
            logging.warning("Torch was already hooked... skipping hooking process")
self.local_worker = torch.local_worker
else:
if self.local_worker is None:
# Every TorchHook instance should have a local worker which is responsible for
# interfacing with other workers. The worker interface is what allows the Torch
# specific code in TorchHook to be agnostic to the means by which workers communicate
# (such as peer-to-peer, sockets, through local ports, or all within the same process)
self.local_worker = workers.VirtualWorker(hook=self, is_client_worker=is_client,
queue_size=queue_size)
else:
# if the local_worker already exists, then it MUST not know about the hook which is
# just being created. Thus, we must inform it.
self.local_worker.hook = self
for typ in torch.tensorvar_types:
self._hook_native_tensors_and_variables(typ)
self._hook_syft_tensor_types(typ)
self._hook_torch_module()
self._hook_backward()
self._hook_module()
torch.local_worker = self.local_worker
def _hook_native_tensors_and_variables(self, tensor_type):
"""Overloads given tensor_type (native)"""
# Overload 'special' methods here
self._add_registration_to___init__(tensor_type, register_child_instead=True)
self._hook_properties(tensor_type)
self.to_auto_overload[tensor_type] = self._which_methods_should_we_auto_overload(
tensor_type)
self._rename_native_functions(tensor_type)
self._assign_methods_to_use_child(tensor_type)
self._add_methods_from__TorchObject(tensor_type)
def _hook_syft_tensor_types(self, tensor_type):
"""Overloads syft tensor_types"""
self._hook_LocalTensor(tensor_type)
self._hook_SyftTensor(tensor_type)
self._hook_PointerTensor(tensor_type)
def _add_registration_to___init__(hook_self, tensorvar_type, register_child_instead=False):
"""Overloads tensor_type.__new__ or Variable.__new__"""
        # TODO: This is added because of the following contradiction: instantiate x = FloatTensor()
# and ask x.__module__, you'll get `sy.core.frameworks.torch.tensor.FloatTensor`
# but now ask for dir(sy.core.frameworks.torch.tensor) you'll find no FloatTensor attribute
# and x.float() will raise an exception because of this.
# Why is x.__module__ == 'sy.core...'? How can we do this with elegance?
if tensorvar_type.__module__ != sy._SyftTensor.__module__:
setattr(sy.core.frameworks.torch.tensor, tensorvar_type.__name__, tensorvar_type)
if 'native___init__' not in dir(tensorvar_type):
tensorvar_type.native___init__ = tensorvar_type.__init__
def new___init__(cls, *args, **kwargs):
if 'owner' in kwargs and kwargs['owner'] is not None:
owner = kwargs['owner']
else:
owner = hook_self.local_worker
if 'id' in kwargs:
id = kwargs['id']
else:
id = None
if register_child_instead:
cls.native___init__()
_ = cls.child
_ = "ignore pep8"
else:
cls.native___init__(*args, **kwargs)
if id is None:
                id = random.randint(0, int(1e10))
cls.id = id
cls.owner = owner
if 'skip_register' in kwargs and kwargs['skip_register']:
pass
else:
owner.register_object(cls, id=id)
tensorvar_type.__init__ = new___init__
def _hook_properties(hook_self, tensor_type):
"""Overloads tensor_type properties"""
@property
def child(self):
try:
if hasattr(self, '_child') and self._child is not None:
return self._child
else:
self._child = _LocalTensor(child=self,
parent=self,
torch_type='syft.' + type(self).__name__)
return self._child
except TypeError:
# for some reason, hasattr(self, '_child') returns a TypeError saying
# "TypeError: 'NoneType' object is not callable". It's supposed to only
# return False and I can't get to the bottom of it. So, for now, I'm
# going to break a personal rule and use try/catch for logic, but
# this is merely supposed to evaluate whether self has ._child as an
# attribute. Note this only seems to happen when self is a
# torch.autograd.Variable
self._child = _LocalTensor(child=self,
parent=self,
torch_type='syft.' + type(self).__name__)
return self._child
@child.setter
def child(self, value):
self._child = value
tensor_type.child = child
@property
def id(self):
return self.child.id
# TODO: this should not be possible, but it should also be possible to define a FloatTensor
        # with a specific id. This is in theory possible, but it doesn't seem to work in practice
@id.setter
def id(self, new_id):
self.child.id = new_id
return self
tensor_type.id = id
@property
def location(self):
return self.child.location
tensor_type.location = location
@property
def id_at_location(self):
return self.child.id_at_location
tensor_type.id_at_location = id_at_location
@property
def owner(self):
return self.child.owner
tensor_type.owner = owner
def _which_methods_should_we_auto_overload(self, tensor_type=torch.FloatTensor):
"""Creates list of methods to auto overload"""
to_overload = list()
for attr in dir(tensor_type):
# Conditions for inclusion/exclusion
if attr in self.exclude:
continue
lit = getattr(tensor_type, attr)
is_base = attr in dir(object)
is_desc = inspect.ismethoddescriptor(lit)
is_func = isinstance(lit, types.FunctionType)
try:
is_service_func = 'HookService' in lit.__qualname__
except:
is_service_func = False
is_old = re.match('native*', attr) is not None
if (is_desc or (is_func and not is_service_func)) and not is_base and not is_old:
to_overload.append(attr)
return to_overload
def _rename_native_functions(self, tensor_type):
"""Renames functions that are auto overloaded"""
for attr in self.to_auto_overload[tensor_type]:
lit = getattr(tensor_type, attr)
# if we haven't already overloaded this function
if 'native_{}'.format(attr) not in dir(tensor_type):
setattr(tensor_type, 'native_{}'.format(attr), lit)
setattr(tensor_type, attr, None)
def _assign_methods_to_use_child(self, tensor_type):
"""Assigns methods to use as child for auto overloaded functions"""
for attr in self.to_auto_overload[tensor_type]:
def forward_method_to_child(self, *args, **kwargs):
child_args = torch_utils.get_child_in_args(*args, **kwargs)
response = getattr(self.child, attr)(*child_args, **kwargs)
return response
new_attr = self._get_overloaded_method(attr)
# if we haven't already overloaded this method
if attr not in dir(tensor_type) or getattr(tensor_type, attr) is None:
setattr(tensor_type, attr, new_attr)
def _add_methods_from__TorchObject(self, tensor_type):
"""Add methods to auto overloaded functions"""
exclude = ['__class__',
'__delattr__',
'__dir__',
'__doc__',
'__dict__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__gt__',
'__hash__',
'__init__',
'__init_subclass__',
'__le__',
'__lt__',
'__weakref__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__setattr__',
'__sizeof__',
'__subclasshook__',
'_get_type']
if issubclass(tensor_type, torch._TensorBase):
parent_syft_obj = _TorchTensor
else:
parent_syft_obj = _TorchVariable
for attr in dir(parent_syft_obj):
if attr not in exclude:
if attr in dir(tensor_type) and "native_" + str(attr) not in dir(tensor_type):
setattr(tensor_type, "native_" + str(attr), getattr(tensor_type, attr))
setattr(tensor_type, attr, getattr(parent_syft_obj, attr))
def _hook_LocalTensor(self, tensor_type):
"""Overloads LocalTensor"""
# iterate through all methods and tell them to call the native function
# on self.child
for attr in self.to_auto_overload[tensor_type]:
def forward_method_to_child(self, *args, **kwargs):
child_args = torch_utils.get_child_in_args(*args, **kwargs)
if attr == 'zero_':
response = getattr(self.child, 'native_' + attr)()
else:
response = getattr(self.child, 'native_' + attr)(*child_args, **kwargs)
syft_node = type(self)(child=response.child, parent=None,
torch_type=type(response).__name__)
# Insert the new node just before the wrapper
# syft_node.child = response.child
response.child.parent = syft_node
response.child = syft_node
syft_node.parent = response
return response
new_attr = self._get_overloaded_method(attr)
# if we haven't already overloaded this method
if attr not in dir(_LocalTensor) or getattr(_LocalTensor, attr) is None:
setattr(_LocalTensor, attr, new_attr)
def _hook_SyftTensor(hook_self, tensor_type):
"""Overloads SyftTensor"""
hook_self._add_registration_to___init__(_SyftTensor)
for attr in hook_self.to_auto_overload[tensor_type]:
def forward_method_to_child(self, *args, **kwargs):
child_args = torch_utils.get_child_in_args(*args, **kwargs)
response = getattr(self.child, attr)(*child_args, **kwargs)
syft_node = type(self)(child=response.child)
# Insert the new node just before the wrapper
# syft_node.child = response.child
response.child.parent = syft_node
response.child = syft_node
syft_node.parent = response
return response
new_attr = hook_self._get_overloaded_method(attr)
# if we haven't already overloaded this method
if attr not in dir(_SyftTensor) or getattr(_SyftTensor, attr) is None:
# call child method
setattr(_SyftTensor, attr, new_attr)
def _hook_PointerTensor(self, tensor_type):
"""Overloads PointerTensor"""
for attr in self.to_auto_overload[tensor_type]:
# # if we haven't already overloaded this method
# if attr not in dir(_PointerTensor) or getattr(_PointerTensor, attr) is None:
setattr(_PointerTensor, attr, self._get_overloaded_method(attr))
def _get_overloaded_method(hook_self, attr):
"""
Wrapper overloading partial objects of methods in the torch
module. Compiles command, checks for Tensors and Variables in
the args/kwargs, determines locations of all Tensors and
Variables involved in computation, and handles the computation
accordingly.
"""
def _execute_method_call(self, *args, **kwargs):
return hook_self._execute_call(attr, self, *args, **kwargs)
return _execute_method_call
def _hook_torch_module(self):
"""
Overloads functions in the main torch module.
The way this is accomplished is by first moving all existing module functions in the torch
module to native_<function_name_here>. Thus, the real :func:`torch.cat` will become
:func:`torch.native_cat` and :func:`torch.cat` will have our hooking code.
"""
for module_name, module_funcs in torch.torch_modules.items():
torch_module = eval(module_name)
for attr in module_funcs:
# Some functions we want to ignore (not override). Such functions have been hard
# coded into the attribute self.torch_exclude
if attr in torch.torch_exclude:
continue
# if we haven't already overloaded this function
if 'native_{}'.format(attr) in dir(torch_module):
continue
# if we haven't already overloaded this function (redundancy allowed)
if 'native_' in attr:
continue
# Where the overloading happens
lit = getattr(torch_module, attr)
if type(lit) in [types.FunctionType, types.BuiltinFunctionType]:
new_attr = self._get_overloaded_function(module_name + '.' + attr)
setattr(torch_module, 'native_{}'.format(attr), lit)
setattr(torch_module, attr, new_attr)
def _get_overloaded_function(hook_self, attr):
"""
Wrapper overloading partial objects of functions in the torch
module. Compiles command, checks for Tensors and Variables in
the args/kwargs, determines locations of all Tensors and
Variables involved in computation, and handles the computation
accordingly.
"""
def _execute_function_call(*args, **kwargs):
return hook_self._execute_call(attr, None, *args, **kwargs)
return _execute_function_call
def _execute_call(hook_self, attr, self, *args, **kwargs):
"""
Forward the call to the local_worker
"""
return hook_self.local_worker._execute_call(attr, self, *args, **kwargs)
def _hook_backward(hook_self):
"""
Overloads backward method used to compute gradients
of all the variables that are part of the computational
graph which produced self. Because native backward breaks
things (especially the .id attribute of the gradient),
we store the id of all variables we can access
(only the leaf variables of the graph) and we reconstruct our
variables correctly after backward was performed by basically
restoring the grad "envelope" (including its id)
"""
sy.Variable.native_native_backward = sy.Variable.native_backward
def new_backward(self, *args, **kwargs):
worker = self.owner
# Retrieve all the variable ids involved in the computation graph
variable_ids = torch_utils.get_connected_variables(self)
variable_ids = [var_id for var_id in variable_ids if var_id in worker._objects]
# Save all the gradients (to keep the id) and reset the grads
saved_grads = {}
for variable_id in variable_ids:
syft_tensor = worker.get_obj(variable_id)
var = syft_tensor.parent
assert var.id == variable_id
saved_grads[variable_id] = var.grad
var.grad = None
# Performs the backward
self.native_native_backward(*args, **kwargs)
            # Put back the original grad envelope and insert the new grad value in it
for variable_id in variable_ids:
syft_tensor = worker.get_obj(variable_id)
# retrieve the var to fix
var = syft_tensor.parent
# retrieve the old grad, and insert it (to keep the chain) [first the envelope, then the data]
saved_grad = saved_grads[variable_id]
if saved_grad is not None:
# store the computed gradient
computed_grad = var.grad
var.assign_grad_(saved_grad)
# Insert the value of the computed_grad
if computed_grad is not None:
var.grad.data.native_set_(computed_grad.data)
# Make sure everyone has the right owner
torch_utils.enforce_owner(var, worker)
# Fix the .data and .grad attributes on the chain
torch_utils.link_var_chain_to_data_and_grad_chains(var, var.data, var.grad)
sy.Variable.native_backward = new_backward
def _hook_module(self):
"""Overloading for torch.nn.Module"""
def module_is_missing_grad(model):
"""Overloads missing grad parameter in model"""
missing_grad = False
for p in model.parameters():
if p.grad is None:
missing_grad = True
return missing_grad
def create_grad_objects(model):
"""Overloads create grad parameter for model"""
for p in model.parameters():
o = p.sum()
o.backward()
p.grad -= p.grad
def module_send_(self, dest):
"""Overloads send to remote for torch.nn.Module"""
if (module_is_missing_grad(self)):
create_grad_objects(self)
for p in self.parameters():
p.send(dest)
torch.nn.Module.send = module_send_
def module_get_(self):
"""Overload get from remote for torch.nn.Module"""
for p in self.parameters():
p.get()
torch.nn.Module.get = module_get_
|
import cv2
import time
import torch
import pprint
import numpy as np
from pathlib import Path
from psdet.utils.config import get_config
from psdet.utils.common import get_logger
from psdet.models.builder import build_model
def draw_parking_slot(image, pred_dicts):
slots_pred = pred_dicts['slots_pred']
width = 512
height = 512
VSLOT_MIN_DIST = 0.044771278151623496
VSLOT_MAX_DIST = 0.1099427457599304
HSLOT_MIN_DIST = 0.15057789144568634
HSLOT_MAX_DIST = 0.44449496544202816
SHORT_SEPARATOR_LENGTH = 0.199519231
LONG_SEPARATOR_LENGTH = 0.46875
junctions = []
for j in range(len(slots_pred[0])):
position = slots_pred[0][j][1]
p0_x = width * position[0] - 0.5
p0_y = height * position[1] - 0.5
p1_x = width * position[2] - 0.5
p1_y = height * position[3] - 0.5
vec = np.array([p1_x - p0_x, p1_y - p0_y])
vec = vec / np.linalg.norm(vec)
        distance = (position[0] - position[2]) ** 2 + (position[1] - position[3]) ** 2
if VSLOT_MIN_DIST <= distance <= VSLOT_MAX_DIST:
separating_length = LONG_SEPARATOR_LENGTH
else:
separating_length = SHORT_SEPARATOR_LENGTH
p2_x = p0_x + height * separating_length * vec[1]
p2_y = p0_y - width * separating_length * vec[0]
p3_x = p1_x + height * separating_length * vec[1]
p3_y = p1_y - width * separating_length * vec[0]
p0_x = int(round(p0_x))
p0_y = int(round(p0_y))
p1_x = int(round(p1_x))
p1_y = int(round(p1_y))
p2_x = int(round(p2_x))
p2_y = int(round(p2_y))
p3_x = int(round(p3_x))
p3_y = int(round(p3_y))
cv2.line(image, (p0_x, p0_y), (p1_x, p1_y), (255, 0, 0), 2)
cv2.line(image, (p0_x, p0_y), (p2_x, p2_y), (255, 0, 0), 2)
cv2.line(image, (p1_x, p1_y), (p3_x, p3_y), (255, 0, 0), 2)
#cv2.circle(image, (p0_x, p0_y), 3, (0, 0, 255), 4)
junctions.append((p0_x, p0_y))
junctions.append((p1_x, p1_y))
for junction in junctions:
cv2.circle(image, junction, 3, (0, 0, 255), 4)
return image
def main():
cfg = get_config()
logger = get_logger(cfg.log_dir, cfg.tag)
logger.info(pprint.pformat(cfg))
model = build_model(cfg.model)
logger.info(model)
image_dir = Path(cfg.data_root) / 'testing' / 'outdoor-normal daylight'
display = False
# load checkpoint
model.load_params_from_file(filename=cfg.ckpt, logger=logger, to_cpu=False)
model.cuda()
model.eval()
if display:
car = cv2.imread('images/car.png')
car = cv2.resize(car, (512, 512))
with torch.no_grad():
for img_path in image_dir.glob('*.jpg'):
img_name = img_path.stem
data_dict = {}
image = cv2.imread(str(img_path))
image0 = cv2.resize(image, (512, 512))
image = image0/255.
data_dict['image'] = torch.from_numpy(image).float().permute(2, 0, 1).unsqueeze(0).cuda()
start_time = time.time()
pred_dicts, ret_dict = model(data_dict)
sec_per_example = (time.time() - start_time)
print('Info speed: %.4f second per example.' % sec_per_example)
if display:
image = draw_parking_slot(image0, pred_dicts)
image[145:365, 210:300] = 0
image += car
cv2.imshow('image',image.astype(np.uint8))
cv2.waitKey(50)
save_dir = Path(cfg.output_dir) / 'predictions'
save_dir.mkdir(parents=True, exist_ok=True)
save_path = save_dir / ('%s.jpg' % img_name)
cv2.imwrite(str(save_path), image)
if display:
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
from QUBEKit.utils import constants
from collections import OrderedDict
from copy import deepcopy
import xml.etree.ElementTree as ET
class Parametrisation:
"""
Class of methods which perform the initial parametrisation for the molecule.
The Parameters will be stored into the molecule as dictionaries as this is easy to manipulate and convert
to a parameter tree.
Note all parameters gathered here are indexed from 0,
whereas the ligand object indices start from 1 for all networkx related properties such as bonds!
Parameters
---------
molecule : QUBEKit molecule object
input_file : an OpenMM style xml file associated with the molecule object
    fftype : the FF type the molecule will be parametrised with;
        only needed in the case of gaff or gaff2, otherwise it is assigned based on the class used.
Returns
-------
AtomTypes : dictionary of the atom names, the associated OPLS type and class type stored under number.
{0: [C00, OPLS_800, C800]}
Residues : dictionary of residue names indexed by the order they appear.
HarmonicBondForce : dictionary of equilibrium distances and force constants stored under the bond tuple.
{(0, 1): [eqr=456, fc=984375]}
HarmonicAngleForce : dictionary of equilibrium angles and force constant stored under the angle tuple.
PeriodicTorsionForce : dictionary of periodicity, barrier and phase stored under the torsion tuple.
NonbondedForce : dictionary of charge, sigma and epsilon stored under the original atom ordering.
"""
def __init__(self, molecule, input_file=None, fftype=None):
self.molecule = molecule
self.input_file = input_file
self.fftype = fftype
self.molecule.combination = 'opls'
self.combination = 'amber'
# could be a problem for boron compounds
self.molecule.AtomTypes = {}
try:
self.molecule.HarmonicBondForce = {bond: [0, 0] for bond in self.molecule.bond_lengths.keys()}
self.molecule.HarmonicAngleForce = {angle: [0, 0] for angle in self.molecule.angle_values.keys()}
except AttributeError:
self.molecule.HarmonicBondForce = {}
self.molecule.HarmonicAngleForce = {}
self.molecule.NonbondedForce = OrderedDict((number, [0, 0, 0]) for number in range(len(self.molecule.atoms)))
self.molecule.PeriodicTorsionForce = OrderedDict()
self.sites = {}
def __repr__(self):
return f'{self.__class__.__name__}({self.__dict__!r})'
def gather_parameters(self):
"""
This method parses the serialised xml file and collects the parameters ready to pass them
to build tree.
"""
# Try to gather the AtomTypes first
for atom in self.molecule.atoms:
self.molecule.AtomTypes[atom.atom_index] = [atom.atom_name, 'QUBE_' + str(000 + atom.atom_index),
str(atom.atomic_symbol) + str(000 + atom.atom_index)]
phases = [0, constants.PI, 0, constants.PI]
try:
in_root = ET.parse('serialised.xml').getroot()
# Extract any virtual site data only supports local coords atm, charges are added later
for i, virtual_site in enumerate(in_root.iter('LocalCoordinatesSite')):
self.sites[i] = [
(int(virtual_site.get('p1')), int(virtual_site.get('p2')), int(virtual_site.get('p3'))),
(float(virtual_site.get('pos1')), float(virtual_site.get('pos2')), float(virtual_site.get('pos3')))
]
# Extract all bond data
for Bond in in_root.iter('Bond'):
bond = (int(Bond.get('p1')), int(Bond.get('p2')))
if bond in self.molecule.HarmonicBondForce:
self.molecule.HarmonicBondForce[bond] = [float(Bond.get('d')), float(Bond.get('k'))]
else:
self.molecule.HarmonicBondForce[bond[::-1]] = [float(Bond.get('d')), float(Bond.get('k'))]
# Extract all angle data
for Angle in in_root.iter('Angle'):
angle = int(Angle.get('p1')), int(Angle.get('p2')), int(Angle.get('p3'))
if angle in self.molecule.HarmonicAngleForce:
self.molecule.HarmonicAngleForce[angle] = [float(Angle.get('a')), float(Angle.get('k'))]
else:
self.molecule.HarmonicAngleForce[angle[::-1]] = [float(Angle.get('a')), float(Angle.get('k'))]
# Extract all non-bonded data, do not add virtual site info to the nonbonded list
atom_num, site_num = 0, 0
for Atom in in_root.iter('Particle'):
if "eps" in Atom.attrib:
if atom_num >= len(self.molecule.atoms):
self.sites[site_num].append(float(Atom.get('q')))
site_num += 1
else:
self.molecule.NonbondedForce[atom_num] = [float(Atom.get('q')), float(Atom.get('sig')), float(Atom.get('eps'))]
self.molecule.atoms[atom_num].partial_charge = float(Atom.get('q'))
atom_num += 1
# Check if we found any sites
self.molecule.extra_sites = self.sites or None
# Extract all of the torsion data
for Torsion in in_root.iter('Torsion'):
tor_str_forward = tuple(int(Torsion.get(f'p{i}')) for i in range(1, 5))
tor_str_back = tuple(reversed(tor_str_forward))
if tor_str_forward not in self.molecule.PeriodicTorsionForce and tor_str_back not in self.molecule.PeriodicTorsionForce:
self.molecule.PeriodicTorsionForce[tor_str_forward] = [
[int(Torsion.get('periodicity')), float(Torsion.get('k')), phases[int(Torsion.get('periodicity')) - 1]]]
elif tor_str_forward in self.molecule.PeriodicTorsionForce:
self.molecule.PeriodicTorsionForce[tor_str_forward].append(
[int(Torsion.get('periodicity')), float(Torsion.get('k')), phases[int(Torsion.get('periodicity')) - 1]])
elif tor_str_back in self.molecule.PeriodicTorsionForce:
self.molecule.PeriodicTorsionForce[tor_str_back].append(
[int(Torsion.get('periodicity')), float(Torsion.get('k')), phases[int(Torsion.get('periodicity')) - 1]])
except FileNotFoundError:
# Check what parameter engine we are using if not none then raise an error
if self.molecule.parameter_engine != 'none':
raise FileNotFoundError('Molecule could not be serialised from OpenMM')
# Now we have all of the torsions from the OpenMM system
# we should check if any torsions we found in the molecule do not have parameters
# if they don't, give them the default 0 parameter this will not change the energy
if self.molecule.dihedrals is not None:
for tor_list in self.molecule.dihedrals.values():
for torsion in tor_list:
if torsion not in self.molecule.PeriodicTorsionForce and tuple(reversed(torsion)) not in self.molecule.PeriodicTorsionForce:
self.molecule.PeriodicTorsionForce[torsion] = [[1, 0, 0], [2, 0, constants.PI],
[3, 0, 0], [4, 0, constants.PI]]
torsions = [sorted(key) for key in self.molecule.PeriodicTorsionForce.keys()]
if self.molecule.improper_torsions is not None:
for torsion in self.molecule.improper_torsions:
if sorted(torsion) not in torsions:
# The improper torsion is missing and should be added with no energy
self.molecule.PeriodicTorsionForce[torsion] = [[1, 0, 0], [2, 0, constants.PI],
[3, 0, 0], [4, 0, constants.PI]]
# Now we need to fill in all blank phases of the Torsions
for key, val in self.molecule.PeriodicTorsionForce.items():
vns = [1, 2, 3, 4]
if len(val) < 4:
# now need to add the missing terms from the torsion force
for force in val:
vns.remove(force[0])
for i in vns:
val.append([i, 0, phases[int(i) - 1]])
# sort by periodicity using lambda function
for val in self.molecule.PeriodicTorsionForce.values():
val.sort(key=lambda x: x[0])
        # now we need to tag the proper and improper torsions and reorder them so the first atom is the central atom
improper_torsions = None
if self.molecule.improper_torsions is not None:
improper_torsions = OrderedDict()
for improper in self.molecule.improper_torsions:
for key, val in self.molecule.PeriodicTorsionForce.items():
# for each improper find the corresponding torsion parameters and save
if sorted(key) == sorted(improper):
# if they match tag the dihedral
self.molecule.PeriodicTorsionForce[key].append('Improper')
# replace the key with the strict improper order first atom is center
improper_torsions.setdefault(improper, []).append(val)
# If the improper has been split across multiple combinations we need to collapse them to one
for improper, params in improper_torsions.items():
if len(params) != 1:
# Now we have to sum the k values across the same terms
new_params = params[0]
for values in params[1:]:
for i in range(4):
new_params[i][1] += values[i][1]
# Store the summed k values
improper_torsions[improper] = new_params
else:
improper_torsions[improper] = params[0] # This unpacks the list if we only find one term
torsions = deepcopy(self.molecule.PeriodicTorsionForce)
# Remake the torsion; store in the ligand
self.molecule.PeriodicTorsionForce = OrderedDict((v, k) for v, k in torsions.items() if k[-1] != 'Improper')
# Add the improper at the end of the torsion object
if improper_torsions is not None:
for key, val in improper_torsions.items():
self.molecule.PeriodicTorsionForce[key] = val
|
from .litmus import *
|
__version__ = '2.3.5'
|
"""
TODO:
Actors need to be able to reference each other.
* this means we need to be able to pass a reference
that can post a message to an actor's executor.
Actors need to be able to create more actors.
* This should be fairly simple if the first task is complete.
Idea:
maintain a list of weakreferences to all actor executors ever created
in a thread. Actors must have a way of interacting with this thread.
"""
from __future__ import absolute_import, division, print_function
from concurrent.futures import _base
import utool as ut
(print, rrr, profile) = ut.inject2(__name__)
class ActorExecutor(_base.Executor):
"""
Executor to manage exactly one actor.
This class lives in the main thread, manages a process containing exactly
one Actor, and is used to send messages to that actor. Responses are
returned in the form of a `Future` object.
"""
def post(self, message):
"""
        analogous to _base.Executor.submit, but sends a message to the actor
controlled by this Executor, and returns a Future.
"""
raise NotImplementedError(
'use ProcessActorExecutor or ThreadActorExecutor')
class Actor(object):
"""
Base actor class.
Actors receive messages, which are arbitrary objects from their managing
executor.
    Unlike a plain `concurrent.futures` executor, we expose an `Actor` class which
    can be inherited from and provides the `executor` classmethod. This creates an
    asynchronously maintained instance of this class in a separate
    thread/process.
Example:
>>> from concurrent.futures import ThreadActor
>>> class MyActor(ThreadActor):
>>> def __init__(self):
>>> self.state = 0
>>> #
>>> def handle(self, message):
>>> self.state += message
>>> return self.state
>>> #
>>> executor = MyActor.executor()
>>> f = executor.post('message')
>>> print(f.result())
"""
@classmethod
def executor(cls):
"""
        Creates an asynchronous instance of this Actor and returns the executor
to manage it.
"""
raise NotImplementedError('use ProcessActor or ThreadActor')
def handle(self, message):
"""
        This method receives, handles, and responds to the messages sent from
the executor. This function can return arbitrary values. These values
can be accessed from the main thread using the Future object returned
when the message was posted to this actor by the executor.
"""
raise NotImplementedError('must implement message handler')
|
#!/usr/bin/env python
from typing import List, Tuple
from bisect import bisect_left
"""
Code for https://leetcode.com/problems/search-in-rotated-sorted-array/
"""
def search(nums: List[int], target: int) -> int:
pivot_idx = find_pivot(nums, 0, len(nums) - 1)
if pivot_idx == -1:
return binary_search(nums, target, 0, len(nums) - 1)
if nums[pivot_idx] == target:
return pivot_idx
if nums[0] <= target:
return binary_search(nums, target, 0, pivot_idx - 1)
return binary_search(nums, target, pivot_idx + 1, len(nums) - 1)
def binary_search(a: List[int], x: int, lo: int, hi: int) -> int:
idx = bisect_left(a, x, lo, hi)
return idx if idx != len(a) and a[idx] == x else -1
def find_pivot(nums: List[int], lo: int, hi: int) -> int:
"""
Find index of pivot element if nums is indeed rotated, else return -1
"""
# Base cases to prevent endless recursion
if lo > hi:
return -1
if lo == hi:
return lo
mid = (lo + hi) // 2
if mid < hi and nums[mid] > nums[mid + 1]:
return mid
if mid > lo and nums[mid] < nums[mid - 1]:
return mid - 1
if nums[lo] >= nums[mid]:
return find_pivot(nums, lo, mid - 1)
return find_pivot(nums, mid + 1, hi)
def main():
xs = [3, 1]
xs2 = [4, 5, 6, 7, 0, 1, 2]
xs3 = [6, 7, 1, 2, 3, 4, 5]
result = search(xs, 3)
result2 = search(xs2, 0)
result3 = search(xs3, 6)
print(result)
print(result2)
print(result3)
if __name__ == "__main__":
main()
|
from typing import Dict, Optional, List
from torch.nn import Linear
import torch
from torch.autograd import Variable
from torch.nn.functional import normalize
from allennlp.common import Params
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, last_dim_softmax, weighted_sum, replace_masked_values
from allennlp.training.metrics import CategoricalAccuracy
from endtasks import util
from endtasks.modules import VariationalDropout
@Model.register("esim-pair2vec")
class ESIMPair2Vec(Model):
"""
This ``Model`` implements the ESIM sequence model described in `"Enhanced LSTM for Natural Language Inference"
<https://www.semanticscholar.org/paper/Enhanced-LSTM-for-Natural-Language-Inference-Chen-Zhu/83e7654d545fbbaaf2328df365a781fb67b841b4>`_
by Chen et al., 2017.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the
model.
attend_feedforward : ``FeedForward``
This feedforward network is applied to the encoded sentence representations before the
similarity matrix is computed between words in the premise and words in the hypothesis.
similarity_function : ``SimilarityFunction``
This is the similarity function used when computing the similarity matrix between words in
the premise and words in the hypothesis.
compare_feedforward : ``FeedForward``
This feedforward network is applied to the aligned premise and hypothesis representations,
individually.
aggregate_feedforward : ``FeedForward``
This final feedforward network is applied to the concatenated, summed result of the
``compare_feedforward`` network, and its output is used as the entailment class logits.
premise_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the premise, we can optionally apply an encoder. If this is ``None``, we
will do nothing.
hypothesis_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the hypothesis, we can optionally apply an encoder. If this is ``None``,
we will use the ``premise_encoder`` for the encoding (doing nothing if ``premise_encoder``
is also ``None``).
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
encoder_keys: List[str],
mask_key: str,
pair2vec_config_file: str,
pair2vec_model_file: str,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
similarity_function: SimilarityFunction,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
initializer: InitializerApplicator = InitializerApplicator(),
dropout: float = 0.5,
pair2vec_dropout: float = 0.0,
bidirectional_pair2vec: bool = True,
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._vocab = vocab
self.pair2vec = util.get_pair2vec(pair2vec_config_file, pair2vec_model_file)
self._encoder_keys = encoder_keys
self._mask_key = mask_key
self._text_field_embedder = text_field_embedder
self._projection_feedforward = projection_feedforward
self._encoder = encoder
from allennlp.modules.matrix_attention import DotProductMatrixAttention
self._matrix_attention = DotProductMatrixAttention()
self._inference_encoder = inference_encoder
self._pair2vec_dropout = torch.nn.Dropout(pair2vec_dropout)
self._bidirectional_pair2vec = bidirectional_pair2vec
if dropout:
self.dropout = torch.nn.Dropout(dropout)
self.rnn_input_dropout = VariationalDropout(dropout)
else:
self.dropout = None
self.rnn_input_dropout = None
self._output_feedforward = output_feedforward
self._output_logit = output_logit
self._num_labels = vocab.get_vocab_size(namespace="labels")
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self, # type: ignore
premise: Dict[str, torch.LongTensor],
hypothesis: Dict[str, torch.LongTensor],
label: torch.IntTensor = None,
metadata: Dict = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
premise : Dict[str, torch.LongTensor]
From a ``TextField``
hypothesis : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of the entailment label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the
entailment label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_premise = util.get_encoder_input(self._text_field_embedder, premise, self._encoder_keys)
embedded_hypothesis = util.get_encoder_input(self._text_field_embedder, hypothesis, self._encoder_keys)
premise_as_args = util.get_pair2vec_word_embeddings(self.pair2vec, premise['pair2vec_tokens'])
hypothesis_as_args = util.get_pair2vec_word_embeddings(self.pair2vec, hypothesis['pair2vec_tokens'])
premise_mask = util.get_mask(premise, self._mask_key).float()
hypothesis_mask = util.get_mask(hypothesis, self._mask_key).float()
# apply dropout for LSTM
if self.rnn_input_dropout:
embedded_premise = self.rnn_input_dropout(embedded_premise)
embedded_hypothesis = self.rnn_input_dropout(embedded_hypothesis)
# encode premise and hypothesis
encoded_premise = self._encoder(embedded_premise, premise_mask)
encoded_hypothesis = self._encoder(embedded_hypothesis, hypothesis_mask)
# Shape: (batch_size, premise_length, hypothesis_length)
similarity_matrix = self._matrix_attention(encoded_premise, encoded_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
p2h_attention = last_dim_softmax(similarity_matrix, hypothesis_mask)
# Shape: (batch_size, premise_length, embedding_dim)
attended_hypothesis = weighted_sum(encoded_hypothesis, p2h_attention)
# Shape: (batch_size, hypothesis_length, premise_length)
h2p_attention = last_dim_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
# Shape: (batch_size, hypothesis_length, embedding_dim)
attended_premise = weighted_sum(encoded_premise, h2p_attention)
# cross sequence embeddings
ph_pair_embeddings = normalize(util.get_pair_embeddings(self.pair2vec, premise_as_args, hypothesis_as_args), dim=-1)
hp_pair_embeddings = normalize(util.get_pair_embeddings(self.pair2vec, hypothesis_as_args, premise_as_args), dim=-1)
if self._bidirectional_pair2vec:
temp = torch.cat((ph_pair_embeddings, hp_pair_embeddings.transpose(1,2)), dim=-1)
hp_pair_embeddings = torch.cat((hp_pair_embeddings, ph_pair_embeddings.transpose(1,2)), dim=-1)
ph_pair_embeddings = temp
# pair_embeddings = torch.cat((ph_pair_embeddings, hp_pair_embeddings.transpose(1,2)), dim=-1)
# pair2vec masks
pair2vec_premise_mask = 1 - (torch.eq(premise['pair2vec_tokens'], 0).long() + torch.eq(premise['pair2vec_tokens'], 1).long())
pair2vec_hypothesis_mask = 1 - (torch.eq(hypothesis['pair2vec_tokens'], 0).long() + torch.eq(hypothesis['pair2vec_tokens'], 1).long())
# re-normalize attention using pair2vec masks
h2p_attention = last_dim_softmax(similarity_matrix.transpose(1, 2).contiguous(), pair2vec_premise_mask)
p2h_attention = last_dim_softmax(similarity_matrix, pair2vec_hypothesis_mask)
attended_hypothesis_pairs = self._pair2vec_dropout(weighted_sum(ph_pair_embeddings, p2h_attention)) * pair2vec_premise_mask.float().unsqueeze(-1)
attended_premise_pairs = self._pair2vec_dropout(weighted_sum(hp_pair_embeddings, h2p_attention)) * pair2vec_hypothesis_mask.float().unsqueeze(-1)
# the "enhancement" layer
premise_enhanced = torch.cat(
[encoded_premise, attended_hypothesis,
encoded_premise - attended_hypothesis,
encoded_premise * attended_hypothesis,
attended_hypothesis_pairs],
dim=-1
)
hypothesis_enhanced = torch.cat(
[encoded_hypothesis, attended_premise,
encoded_hypothesis - attended_premise,
encoded_hypothesis * attended_premise,
attended_premise_pairs],
dim=-1
)
projected_enhanced_premise = self._projection_feedforward(premise_enhanced)
projected_enhanced_hypothesis = self._projection_feedforward(hypothesis_enhanced)
# Run the inference layer
if self.rnn_input_dropout:
projected_enhanced_premise = self.rnn_input_dropout(projected_enhanced_premise)
projected_enhanced_hypothesis = self.rnn_input_dropout(projected_enhanced_hypothesis)
v_ai = self._inference_encoder(projected_enhanced_premise, premise_mask)
v_bi = self._inference_encoder(projected_enhanced_hypothesis, hypothesis_mask)
# The pooling layer -- max and avg pooling.
# (batch_size, model_dim)
v_a_max, _ = replace_masked_values(
v_ai, premise_mask.unsqueeze(-1), -1e7
).max(dim=1)
v_b_max, _ = replace_masked_values(
v_bi, hypothesis_mask.unsqueeze(-1), -1e7
).max(dim=1)
v_a_avg = torch.sum(v_ai * premise_mask.unsqueeze(-1), dim=1) / torch.sum(premise_mask, 1, keepdim=True)
v_b_avg = torch.sum(v_bi * hypothesis_mask.unsqueeze(-1), dim=1) / torch.sum(hypothesis_mask, 1, keepdim=True)
# Now concat
# (batch_size, model_dim * 2 * 4)
v = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1)
# the final MLP -- apply dropout to input, and MLP applies to output & hidden
if self.dropout:
v = self.dropout(v)
output_hidden = self._output_feedforward(v)
label_logits = self._output_logit(output_hidden)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits, "label_probs": label_probs}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
'accuracy': self._accuracy.get_metric(reset),
}
|
# find the longest match in the running window
def findLongestMatch(searchSpace: bytes, matchBytes: bytes) -> tuple:
matchLength = len(matchBytes)
searchSize = len(searchSpace)
while matchLength > 0:
lookup = matchBytes[:matchLength]
if lookup in searchSpace:
return searchSize - searchSpace.rindex(lookup), matchLength
matchLength -= 1
return 0, 0
# convert lz77 compressed file back into bytes
def lz77rev(inp: bytes) -> bytes:
    output, j = b'', 0
    # each compressed token is a 3-byte triple: 12-bit relative offset, 4-bit match length, literal byte
    for i in range(0, len(inp), 3):
        relativePos = int(inp[i] | ((inp[i + 1] & 0x0F) << 8))
        matchLength = int((inp[i + 1] & 0xF0) >> 4)
        nextCharacter = inp[i + 2] & 0xFF
        if matchLength:
            # copy matchLength bytes starting relativePos bytes back in the decoded output
            output += output[j - relativePos:j - relativePos + matchLength]
            j += matchLength
        else:
            # no match: emit the literal byte (including 0x00, which the previous
            # `elif nextCharacter:` check silently dropped)
            output += nextCharacter.to_bytes(1, byteorder='little')
            j += 1
    return output
def lz77(inp: bytes, file) -> bytes:
    index, output, SEARCH_SIZE, LOOKAHEAD_SIZE = 0, bytes(), 4095, 15
    while index < len(inp):
        searchStart = max(0, index - SEARCH_SIZE)
        lookaheadEnd = min(len(inp), index + LOOKAHEAD_SIZE)
        # run the matching algorithm over the sliding window
        relativePos, matchLength = findLongestMatch(inp[searchStart: index], inp[index: lookaheadEnd])
        # literal byte following the match (0 if the match runs to the end of the input)
        nextChar = 0 if index + matchLength >= len(inp) else inp[index + matchLength]
        output += (0xFF & relativePos).to_bytes(1, byteorder='little')  # low 8 bits of relative position
        output += ((0xF00 & relativePos) >> 8 | (0xF & matchLength) << 4).to_bytes(1, byteorder='little')  # 4 bits of position and 4 bits of length
        output += nextChar.to_bytes(1, byteorder='little')  # 8 bits of next character
        index += max(1, matchLength)
    file.write(output)
    return output
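# A minimal round-trip sanity check of the two functions above; the payload is
# a made-up sample and io.BytesIO stands in for a real output file handle.
import io
_check_buf = io.BytesIO()
_check_sample = b'abracadabra abracadabra abracadabra'
lz77(_check_sample, _check_buf)
assert lz77rev(_check_buf.getvalue()) == _check_sample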
with open('huff.b', 'rb') as f, open('lz77.b', 'wb') as ff:
    enc = lz77(bytes(f.read()), ff)
# ff.write(enc) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2002-2004 Detlev Offenbach <[email protected]>
# Adapted for usage with Debian by Torsten Marek <[email protected]>
# Changed by Gudjon to generate only the Qt API files
import os
import sys
#import PyQt4.pyqtconfig as pyqtconfig
import PyQt5.QtCore
apidir = sys.argv[1]
if not os.path.isdir(apidir):
print("Generating the api directory.")
os.makedirs(apidir)
sip = "/usr/bin/sip"
def createAPIFiles(baseDir, modules, defaultImports, getFlags):
for mod in modules:
try:
sipfile = os.path.join(baseDir, mod, "%smod.sip" % (mod, ))
apifile = os.path.join(apidir, os.path.split(sipfile.replace("mod.sip", ".api"))[1])
args = [sip, "-a", apifile,
"-I", os.path.join(baseDir, mod),
"-I", baseDir] \
+ defaultImports \
+ getFlags(mod) \
+ [sipfile]
print("Generating %s ..." % apifile)
ret = os.spawnv(os.P_WAIT, sip, args)
if ret != 0:
print("Error: the process returned the exit code %d" % ret)
except OSError:
print("Warning: The module '%s' does not exist." % mod)
#qtsipdir = os.path.abspath(pyqtconfig._pkg_config['pyqt_sip_dir'])
# I don't like to have these hardcoded but I found no other way.
qtsipdir = "/usr/share/sip/PyQt5/"
modules = ['QtSql', 'QtWidgets', 'QtSvg', 'QtNetwork', 'QtWebKitWidgets', 'QtXml', 'QtPrintSupport',\
'QtOpenGL', 'QtHelp', 'QtWebEngineWidgets', 'QtQuick', 'QtMultimediaWidgets', 'QtDBus',\
'QtWebChannel', 'QtWebKit', 'QtMultimedia', 'QtQuickWidgets', 'QtSensors',\
'QtDesigner', 'QtX11Extras', 'QtCore', 'QtWebSockets', 'QtBluetooth', 'QtMacExtras', 'QtWinExtras',\
'QtSerialPort', 'QtGui', 'QtQml', 'QtTest', 'QtXmlPatterns', 'QtPositioning', 'Enginio']
#pyqtconfig._pkg_config['pyqt_modules'].split()
#createAPIFiles(qtsipdir, modules, [],
# lambda x: pyqtconfig._pkg_config["pyqt_%s_sip_flags" % (x,)].split())
#createAPIFiles(qtsipdir, modules, [],
# lambda x: pyqtconfig._pkg_config["pyqt_sip_flags"].split())
pyqt_sip_flags = PyQt5.QtCore.PYQT_CONFIGURATION
createAPIFiles(qtsipdir, modules, [],
lambda x: pyqt_sip_flags["sip_flags"].split())
#try:
#import PyKDE4.pykdeconfig as pykdeconfig
#kdesipdir = "/usr/share/sip/PyKDE4"
#modules = pykdeconfig._pkg_config['pykde_modules'].split()
#extraimport = []
## just import anything for anything else, so we get rid of keeping track of the
## inter-module deps
#for mod in modules:
#extraimport.extend(["-I", os.path.join(kdesipdir, mod)])
#extraimport.extend(["-I", qtsipdir])
#createAPIFiles(kdesipdir, modules, extraimport,
#lambda x: pykdeconfig._pkg_config["pykde_kde_sip_flags"].split())
#except:
#print "Error: No PyKDE4 api files generated"
|
from math import factorial
from input_validator import validate_input
def Q_US(n, p, C, validate=True):
"""
:param n: zero or positive integer, depth
:param p: positive integer, number of atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
    :return: integer, the number of formulae of depth at most n containing some or all of the p atomics
"""
if validate:
validate_input(n, p, C)
if n < 0:
return 0
if n == 0:
return p
else:
Cmax = max(C) if C else 0
return p + sum(len(C[i]) * (Q_US(n-1, p, C, False) ** i) for i in range(1, Cmax+1) if i in C)
def Q_ES(n, p, C, validate=True):
"""
:param n: zero or positive integer, depth
:param p: positive integer, number of atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
    :return: integer, the number of formulae of depth exactly n containing some or all of the p atomics
"""
if validate:
validate_input(n, p, C)
if n == 0:
return p
else:
return Q_US(n, p, C, False) - Q_US(n-1, p, C, False)
def choose(n, r):
    """
    :param n: zero or positive integer
    :param r: zero or positive integer, with r <= n
    :return: standard combinatorial number of combinations (nCr)
    """
    return factorial(n) // (factorial(n - r) * factorial(r))
def Q_EA(n, p, C, validate=True):
"""
:param n: zero or positive integer, depth
:param p: positive integer, number of atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
    :return: integer, the number of formulae of depth exactly n containing all p atomics
"""
if validate:
validate_input(n, p, C)
    Cmax = max(C) if C else 0
    if p > Cmax ** n:
        # a depth-n formula has at most Cmax**n atomic positions, so it cannot contain all p atomics
        return 0
    else:
        return Q_ES(n, p, C, False) - sum(choose(p, j) * Q_EA(n, j, C, False) for j in range(1, p))
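# A minimal usage sketch with a hypothetical connective set (one unary and four
# binary constants); validate=False skips the project-specific input validator.
if __name__ == "__main__":
    C = {1: {"~"}, 2: {"&", "|", "->", "<->"}}
    assert Q_US(1, 3, C, validate=False) == 3 + 1 * 3 + 4 * 3 ** 2  # 42 formulae of depth <= 1
    print(Q_US(2, 3, C, validate=False), Q_ES(2, 3, C, validate=False))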
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List, Sized, Tuple
import torch as torch
import torch.nn as nn
from pytext.utils.cuda_utils import xaviervar
class Element:
"""
Generic element representing a token / non-terminal / sub-tree on a stack.
Used to compute valid actions in the RNNG parser.
"""
def __init__(self, node: Any) -> None:
self.node = node
def __eq__(self, other) -> bool:
return self.node == other.node
def __repr__(self) -> str:
return str(self.node)
class StackLSTM(Sized):
"""
The Stack LSTM from Dyer et al: https://arxiv.org/abs/1505.08075
"""
def __init__(
self,
lstm: nn.LSTM,
initial_state: Tuple[torch.Tensor, torch.Tensor],
empty_embedding: torch.Tensor,
):
"""
Shapes:
initial_state: (lstm_layers, 1, lstm_hidden_dim) each
empty_embedding: (1, lstm_hidden_dim)
"""
self.empty = empty_embedding
self.lstm = lstm
# Stack of (state, (embedding, element))
self.stack = (
[(initial_state, (self._lstm_output(initial_state), Element("Root")))]
if initial_state
else None
)
def _lstm_output(self, state: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
"""
Shapes:
state: (lstm_layers, 1, lstm_hidden_dim) each
return value: (1, lstm_hidden_dim)
"""
return state[0][-1]
def push(self, expression: torch.Tensor, element: Element) -> None:
"""
Shapes:
expression: (1, lstm_input_dim)
"""
old_top_state = self.stack[-1][0]
# Unsqueezing expression for sequence_length = 1
_, new_top_state = self.lstm(expression.unsqueeze(0), old_top_state)
# Push in (state, (embedding, element))
self.stack.append((new_top_state, (self._lstm_output(new_top_state), element)))
def pop(self) -> Tuple[torch.Tensor, Element]:
"""
Pops and returns tuple of output embedding (1, lstm_hidden_dim) and element
"""
return self.stack.pop()[1]
def embedding(self) -> torch.Tensor:
"""
Shapes:
return value: (1, lstm_hidden_dim)
"""
if len(self.stack) < 1:
return self.empty
top_state = self.stack[-1][0]
return self._lstm_output(top_state)
def element_from_top(self, index: int) -> Element:
return self.stack[-(index + 1)][1][1]
def __len__(self) -> int:
return len(self.stack) - 1
def __str__(self) -> str:
return "->".join([str(x[1][1]) for x in self.stack])
def copy(self):
other = StackLSTM(self.lstm, None, self.empty)
other.stack = list(self.stack)
return other
class CompositionFunction(nn.Module):
"""
Combines a list / sequence of embeddings into one
"""
def __init__(self):
super().__init__()
class CompositionalNN(CompositionFunction):
"""
Combines a list / sequence of embeddings into one using a biLSTM
"""
def __init__(self, lstm_dim: int):
super().__init__()
self.lstm_dim = lstm_dim
self.lstm_fwd = nn.LSTM(lstm_dim, lstm_dim, num_layers=1)
self.lstm_rev = nn.LSTM(lstm_dim, lstm_dim, num_layers=1)
self.linear_seq = nn.Sequential(nn.Linear(2 * lstm_dim, lstm_dim), nn.Tanh())
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
"""
Embed the sequence. If the input corresponds to [IN:GL where am I at]:
- x will contain the embeddings of [at I am where IN:GL] in that order.
- Forward LSTM will embed the sequence [IN:GL where am I at].
- Backward LSTM will embed the sequence [IN:GL at I am where].
The final hidden states are concatenated and then projected.
Args:
x: Embeddings of the input tokens in *reversed* order
Shapes:
x: (1, lstm_dim) each
return value: (1, lstm_dim)
"""
# reset hidden state every time
lstm_hidden_fwd = (
xaviervar(1, 1, self.lstm_dim),
xaviervar(1, 1, self.lstm_dim),
)
lstm_hidden_rev = (
xaviervar(1, 1, self.lstm_dim),
xaviervar(1, 1, self.lstm_dim),
)
nonterminal_element = x[-1]
reversed_rest = x[:-1]
# Always put nonterminal_element at the front
fwd_input = [nonterminal_element] + reversed_rest[::-1]
rev_input = [nonterminal_element] + reversed_rest
stacked_fwd = self.lstm_fwd(torch.stack(fwd_input), lstm_hidden_fwd)[0][0]
stacked_rev = self.lstm_rev(torch.stack(rev_input), lstm_hidden_rev)[0][0]
combined = torch.cat([stacked_fwd, stacked_rev], dim=1)
subtree_embedding = self.linear_seq(combined)
return subtree_embedding
class CompositionalSummationNN(CompositionFunction):
"""
Simpler version of CompositionalNN
"""
def __init__(self, lstm_dim: int):
super().__init__()
self.lstm_dim = lstm_dim
self.linear_seq = nn.Sequential(nn.Linear(lstm_dim, lstm_dim), nn.Tanh())
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
combined = torch.sum(torch.cat(x, dim=0), dim=0, keepdim=True)
subtree_embedding = self.linear_seq(combined)
return subtree_embedding
class ParserState:
"""
Maintains state of the Parser. Useful for beam search
"""
def __init__(self, parser=None):
if not parser:
return
self.buffer_stackrnn = StackLSTM(
parser.buff_rnn, parser.init_lstm(), parser.pempty_buffer_emb
)
self.stack_stackrnn = StackLSTM(
parser.stack_rnn, parser.init_lstm(), parser.empty_stack_emb
)
self.action_stackrnn = StackLSTM(
parser.action_rnn, parser.init_lstm(), parser.empty_action_emb
)
self.predicted_actions_idx = []
self.action_scores = []
self.num_open_NT = 0
self.is_open_NT: List[bool] = []
self.found_unsupported = False
# negative cumulative log prob so sort(states) is in descending order
self.neg_prob = 0
def finished(self):
return len(self.stack_stackrnn) == 1 and len(self.buffer_stackrnn) == 0
def copy(self):
other = ParserState()
other.buffer_stackrnn = self.buffer_stackrnn.copy()
other.stack_stackrnn = self.stack_stackrnn.copy()
other.action_stackrnn = self.action_stackrnn.copy()
other.predicted_actions_idx = self.predicted_actions_idx.copy()
other.action_scores = self.action_scores.copy()
other.num_open_NT = self.num_open_NT
other.is_open_NT = self.is_open_NT.copy()
other.neg_prob = self.neg_prob
other.found_unsupported = self.found_unsupported
return other
def __gt__(self, other):
return self.neg_prob > other.neg_prob
def __eq__(self, other):
return self.neg_prob == other.neg_prob
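# A minimal usage sketch of StackLSTM and CompositionalSummationNN with made-up
# dimensions; the initial state is built with torch.zeros rather than xaviervar.
if __name__ == "__main__":
    hidden = 8
    lstm = nn.LSTM(hidden, hidden, num_layers=1)
    init_state = (torch.zeros(1, 1, hidden), torch.zeros(1, 1, hidden))
    stack = StackLSTM(lstm, init_state, empty_embedding=torch.zeros(1, hidden))
    stack.push(torch.randn(1, hidden), Element("token"))
    print(len(stack), stack.embedding().shape)  # 1 torch.Size([1, 8])
    composer = CompositionalSummationNN(lstm_dim=hidden)
    print(composer([torch.randn(1, hidden) for _ in range(3)]).shape)  # torch.Size([1, 8])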
|
'''
a basic character-level sequence to sequence model.
'''
# coding=utf-8
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
# config
batch_size = 64
epochs = 1
hidden_dim = 256
num_samples = 10000
data_path = './fra-eng/fra.txt'
save_model_path = './s2s.h5'
# vector of data
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
lines = open(data_path, 'r', encoding='utf-8').read().split("\n")
for line in lines[:min(num_samples, len(lines) - 1)]:
input_text, target_text = line.split("\t")
target_text = "\t" + target_text + "\n"
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_len = max([len(text) for text in input_texts])
max_decoder_seq_len = max([len(text) for text in target_texts])
print('num of samples:', len(input_texts))
print('num of unique input tokens:', num_encoder_tokens)
print('num of unique output tokens:', num_decoder_tokens)
print('max sequence length for inputs:', max_encoder_seq_len)
print('max sequence length for outputs:', max_decoder_seq_len)
input_token_index = dict([(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict([(char, i) for i, char in enumerate(target_characters)])
encoder_input_data = np.zeros((len(input_texts), max_encoder_seq_len, num_encoder_tokens), dtype='float32')
decoder_input_data = np.zeros((len(input_texts), max_decoder_seq_len, num_decoder_tokens), dtype='float32')
decoder_target_data = np.zeros((len(input_texts), max_decoder_seq_len, num_decoder_tokens), dtype='float32')
# one hot representation
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.0
for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
decoder_input_data[i, t, target_token_index[char]] = 1.0
if t > 0:
decoder_target_data[i, t - 1, target_token_index[char]] = 1.0
# encoder decoder process
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(hidden_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# discard 'encoder_outputs' and only keep the states
encoder_states = [state_h, state_c]
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# the decoder to return full output sequences and internal states
decoder_lstm = LSTM(hidden_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2)
# save
model.save(save_model_path)
print('save model:', save_model_path)
# inference
# sample models
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(hidden_dim,))
decoder_state_input_c = Input(shape=(hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs
)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
# reverse lookup token index to decode sequences back
reverse_input_char_index = dict((i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict((i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    # encode the input as state vectors
states_value = encoder_model.predict(input_seq)
# generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, num_decoder_tokens))
# populate the first character of target sequence with the start character.
target_seq[0, 0, target_token_index['\t']] = 1.0
# sampling loop for a batch of sequences
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
# sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence += sampled_char
# exit condition
if sampled_char == '\n' or len(decoded_sentence) > max_decoder_seq_len:
stop_condition = True
# update the target sequence
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.0
# update states
states_value = [h, c]
return decoded_sentence
for seq_index in range(10):
# take one sequence (part of the training set) for decoding.
input_seq = encoder_input_data[seq_index:seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print('-')
print('input sentence:', input_texts[seq_index])
print('decoded sentence:', decoded_sentence)
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from backuppy.location import SshTarget, SshSource
RESOURCE_PATH = '/'.join(
(os.path.dirname(os.path.abspath(__file__)), 'resources'))
CONFIGURATION_PATH = '/'.join((RESOURCE_PATH, 'configuration'))
def build_files_stage_1(path):
"""Build a directory structure with files to back up.
:param path: str
"""
with open(os.path.join(path, 'some.file'), mode='w+t') as f:
f.write('This is just some file...')
f.flush()
os.makedirs(os.path.join(path, 'sub'))
with open(os.path.join(path, 'sub', 'some.file.in.subdirectory'), mode='w+t') as f:
f.write('This is just some other file in a subdirectory...')
f.flush()
def build_files_stage_2(path):
"""Extend a directory structure with files to back up.
This should be called after build_files_stage_1().
:param path: str
"""
with open(os.path.join(path, 'sub', 'some.file.in.subdirectory'), mode='w+t') as f:
f.write(
'This is just some other file in a subdirectory that we made some changes to...')
f.flush()
with open(os.path.join(path, 'some.later.file'), mode='w+t') as f:
f.write('These contents were added much later.')
f.flush()
def assert_paths_identical(test, source_path, target_path):
"""Assert the source and target directories are identical.
:param test: unittest.TestCase
:param source_path: str
:param target_path: str
:raise: AssertionError
"""
assert_path_appears(test, source_path, target_path)
assert_path_appears(test, target_path, source_path)
def assert_path_appears(test, source_path, target_path):
"""Assert the contents of one directory appear in another.
:param test: unittest.TestCase
:param source_path: str
:param target_path: str
:raise: AssertionError
"""
source_path = source_path.rstrip('/') + '/'
target_path = target_path.rstrip('/') + '/'
try:
for target_dir_path, child_dir_names, child_file_names in os.walk(target_path):
source_dir_path = os.path.join(
source_path, target_dir_path[len(target_path):])
for child_file_name in child_file_names:
with open(os.path.join(target_dir_path, child_file_name)) as target_f:
with open(os.path.join(source_dir_path, child_file_name)) as source_f:
assert_file(test, source_f, target_f)
except Exception:
raise AssertionError(
'The source contents under the path `%s` are not equal to the target contents under `%s`.' % (
source_path, target_path))
def assert_file(test, source_f, target_f):
"""Assert two source and target files are identical.
:param test: unittest.TestCase
:param source_f: File
:param target_f: File
:raise: AssertionError
"""
source_f.seek(0)
target_f.seek(0)
    test.assertEqual(source_f.read(), target_f.read())
class SshLocationContainer(object):
"""Run a Docker container to serve as an SSH location."""
NAME = 'backuppy_test'
PORT = 22
USERNAME = 'root'
PASSWORD = 'root'
IDENTITY = os.path.join(RESOURCE_PATH, 'id_rsa')
PATH = '/backuppy/'
def __init__(self, mount_point=None):
"""Initialize a new instance."""
self._started = False
self._ip = None
self._fingerprint = None
self._known_hosts = None
self._mount_point = mount_point
def _ensure_started(self):
"""Ensure the container has been started."""
if not self._started:
raise RuntimeError('This container has not been started yet.')
def start(self):
"""Start the container."""
docker_args = []
if self._mount_point is not None:
docker_args += ['-v', '%s:%s' %
(self._mount_point, self.PATH)]
self.stop()
subprocess.check_call(['docker', 'run', '-d', '--name',
self.NAME] + docker_args + ['backuppy_ssh_location'])
self._started = True
        self.wait()
subprocess.check_call(['sshpass', '-p', self.PASSWORD, 'scp', '-o', 'UserKnownHostsFile=%s' % self.known_hosts(
).name, '%s.pub' % self.IDENTITY, '%s@%s:~/.ssh/authorized_keys' % (self.USERNAME, self.ip)])
def stop(self):
"""Stop the container."""
if not self._started:
return
self._started = False
subprocess.check_call(['docker', 'stop', self.NAME])
subprocess.check_call(['docker', 'container', 'rm', self.NAME])
self._known_hosts.close()
@property
def ip(self):
"""Get the container's IP address.
:return: str
"""
self._ensure_started()
if not self._ip:
self._ip = str(subprocess.check_output(
['docker', 'inspect', '-f', '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}',
self.NAME]).strip().decode('utf-8'))
return self._ip
@property
def fingerprint(self):
"""Get the container's SSH host key fingerprint.
:return: str
"""
self._ensure_started()
if not self._fingerprint:
self._fingerprint = str(subprocess.check_output(
['ssh-keyscan', '-t', 'rsa', self.ip]).decode('utf-8'))
return self._fingerprint
def known_hosts(self):
"""Get an SSH known_hosts file containing just this container.
:return: File
"""
if self._known_hosts:
return self._known_hosts
self._known_hosts = NamedTemporaryFile(mode='r+')
self._known_hosts.write(self.fingerprint)
self._known_hosts.flush()
return self._known_hosts
    def wait(self):
        """Wait until the container is ready.

        Renamed from ``await``, which became a reserved keyword in Python 3.7.
        """
subprocess.check_call(['./vendor/bin/wait-for-it', '%s:%d' % (self.ip, self.PORT)])
def source(self, configuration):
"""Get the back-up source to this container.
:return: backuppy.location.Source
"""
return SshSource(configuration.notifier, self.USERNAME, self.ip, self.PATH, identity=self.IDENTITY, host_keys=self.known_hosts().name)
def target(self, configuration):
"""Get the back-up target to this container.
:return: backuppy.location.Target
"""
return SshTarget(configuration.notifier, self.USERNAME, self.ip, self.PATH, identity=self.IDENTITY, host_keys=self.known_hosts().name)
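# A minimal usage sketch (illustrative helper, not part of the original test
# helpers): start the container, run a caller-supplied backup routine against
# its target, and always stop the container afterwards. Requires Docker and the
# backuppy_ssh_location image referenced above.
def example_backup_to_container(configuration, backup_callable):
    container = SshLocationContainer()
    container.start()
    try:
        return backup_callable(container.target(configuration))
    finally:
        container.stop()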
|
from lecture_07.homework7.tasks.task1 import find_occurrences
example_tree = {
0: [("RED",), "BLUE", [], (), {}],
(1, 2): {
"simple_key": [False, "list", 800, {"RED", "set"}],
},
1: {
"abc": ("BLUE",),
("j", "h", "l"): "RED",
5: {
"key2": "RED",
("tuple", "as", "key"): [{"strings", "in", "set"}, {True: "RED"}],
},
},
2: ([{"RED"}],),
}
def test_find_occurrences():
assert find_occurrences(example_tree, "RED") == 6
|
#!/usr/bin/env python3
# Copyright (C) Daniel Carter 2019
# Licensed under the 2-clause BSD licence
# Web scraper for Android Vulnerability Bulletins
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from datetime import datetime, date
import os
import utils
import json
from collections import defaultdict
import re
import urllib.request
import atexit
import copy
import pprint
MANUAL_KEYS = ['Surface', 'Vector', 'Target', 'Channel', 'Condition', 'Privilege']
MANUAL_KEYS_REQUIRED = {'Surface', 'Target', 'Channel', 'Condition', 'Privilege'}
NIST_URL = 'https://nvd.nist.gov/vuln/data-feeds'
KNOWN_MANUFACTURERS = {'Qualcomm', 'NVIDIA', 'Broadcom', 'LG', 'MediaTek', 'HTC'}
REFERENCE_REGEX = r'(References)|((Android )?bug\(?s?\)?( with AOSP link(s)?)?)'
VERSION_REGEX = r'(Updated|Affected) (AOSP )?versions'
def get_subnode(node, key):
"""Returns the requested value from a dictionary, while ignoring null values"""
if node != None and key in node:
return node[key]
return None
def load_from_year(year, cves):
"""Loads descriptions from the NIST data for all vulnerabilities in a given year"""
path = 'cve-data/nvdcve-1.0-{year}.json'.format(year=year)
descriptions = dict()
with open(path, 'r') as f:
items = json.load(f)['CVE_Items']
if items != None:
for item in items:
cve_object = get_subnode(item, 'cve')
cve_data = get_subnode(cve_object, 'CVE_data_meta')
cve = get_subnode(cve_data, 'ID')
if cve in cves:
#print("Processing " + cve)
cve_output_data = dict()
description_data = get_subnode(get_subnode(cve_object, 'description'),
'description_data')
if description_data != None and len(description_data) > 0:
value = get_subnode(description_data[0], 'value')
cve_output_data['Description'] = value
cwe_data = get_subnode(get_subnode(cve_object, 'problemtype'), 'problemtype_data')
if cwe_data != None and len(cwe_data) > 0:
cwe_description_data = get_subnode(cwe_data[0], 'description')
if cwe_description_data != None and len(cwe_description_data) > 0:
value = get_subnode(cwe_description_data[0], 'value')
cve_output_data['CWE'] = value
impact = get_subnode(item, 'impact')
baseMetricV3 = get_subnode(impact, 'baseMetricV3')
if baseMetricV3 != None:
cvssV3 = get_subnode(baseMetricV3, 'cvssV3')
cve_output_data['Attack_method'] = get_subnode(cvssV3, 'attackVector')
else:
baseMetricV2 = get_subnode(impact, 'baseMetricV2')
cvssV2 = get_subnode(baseMetricV2, 'cvssV2')
cve_output_data['Attack_method'] = get_subnode(cvssV2, 'accessVector')
descriptions[cve] = cve_output_data
return descriptions
def get_descriptions(cves):
"""Loads vulnerability descriptions from the NIST data"""
descriptions = dict()
cve_years = defaultdict(list)
for cve in cves:
year = cve.split('-')[1]
cve_years[year].append(cve)
for year, cves in cve_years.items():
descriptions.update(load_from_year(year, set(cves)))
return descriptions
def load_date_from_commit(url, driver):
"""Given the URL of a commit identifier, returns the date of the commit"""
if 'googlesource.com' in url:
try:
with urllib.request.urlopen(url + '?format=JSON') as source:
src = source.read()[5:]
data = json.loads(src.decode())
time_string = data['author']['time']
time = datetime.strptime(time_string, '%a %b %d %H:%M:%S %Y %z')
return time.date()
except urllib.error.HTTPError:
# Dealing with the fact that Google's JSON links sometimes don't work
utils.fetchPage(driver, url)
rows = driver.find_elements_by_xpath('//div[contains(@class, "Metadata")]/table/tbody/tr')
for row in rows:
if row.find_element_by_tag_name('th').get_attribute('innerHTML') != 'author':
continue
time_string = row.find_elements_by_xpath('./td')[1].get_attribute('innerHTML')
time = datetime.strptime(time_string, '%a %b %d %H:%M:%S %Y %z')
return time.date()
elif 'codeaurora.org' in url or 'git.kernel.org' in url:
utils.fetchPage(driver, url)
rows = driver.find_elements_by_xpath('//table[@class="commit-info"]/tbody/tr')
for row in rows:
if row.find_element_by_tag_name('th').get_attribute('innerHTML') != 'author':
continue
time_string = row.find_element_by_xpath('./td[@class="right"]').get_attribute('innerHTML')
time = datetime.strptime(time_string, '%Y-%m-%d %H:%M:%S %z')
return time.date()
elif 'github.com' in url:
utils.fetchPage(driver, url)
time_string = driver.find_element_by_xpath(
'//div[contains(@class, "commit-meta")]//relative-time').get_attribute('datetime')
# Assuming the date is always in UTC (Z) - this is clumsy, but Python pre-3.7 doesn't have anything better
time = datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%SZ')
return time.date()
# If it's not one of these sources, we don't know
raise Exception("Don't know how to deal with " + url)
def load_manual_data(cve):
"""Returns manually entered data on the vulnerability, to be combined with automatically scraped data"""
path = 'manual-data/{cve}.json'.format(cve=cve)
data = dict()
if os.path.isfile(path):
with open(path, 'r') as f:
rjson = json.load(f)
for key, value in rjson.items():
data[key] = value
return data
def write_manual_data(cve, data):
"""Writes manually entered data out to a file"""
with open('manual-data/{cve}.json'.format(cve=cve), 'w') as f:
json.dump(data, f, indent=2)
def write_data(cve, data):
"""Writes all data out to a file"""
with open('data/{cve}.json'.format(cve=cve), 'w') as f:
json.dump(data, f, indent=2)
def make_reference(url):
"""Creates a reference object (stored as a dictionary) for a given URL"""
ref_dict = dict()
if url != None:
ref_dict['url'] = url
return ref_dict
def regexp_versions(versions_string):
"""Converts the list of versions from the bulletin data into a regexp"""
if len(versions_string) == 0:
return ''
    # drop empty elements produced by strings such as "8.0, 8.1, and 9"
    versions = [v for v in versions_string.replace('and', ',').replace(' ', '').replace('.', '\\.').split(',') if v]
regexp = ''
for version in versions:
dots = version.count('.')
if dots == 2:
regexp += ('(' + version + ')|')
elif dots == 1:
regexp += ('(' + version + '\\.[0-9])|')
elif dots == 0:
regexp += ('(' + version + '\\.[0-9]\\.[0-9])|')
else:
raise ValueError('Invalid version string provided')
return regexp[:-1]
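# Illustrative check of regexp_versions on a made-up bulletin-style version
# list (not taken from a real bulletin):
assert regexp_versions('7.0, 7.1.1, 8.0') == r'(7\.0\.[0-9])|(7\.1\.1)|(8\.0\.[0-9])'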
def check_blank(text, ref):
"""Formats a data-reference pair and avoids references being given to blank data items"""
if text == None or text == '':
return []
return [[text, ref]]
def decode_lookup(key, dataset, description):
"""Convert a reference to a description to be used in data files"""
if key in dataset:
return dataset[key]
else:
decoded = input("Please enter {desc} for {key}: ".format(desc=description, key=key))
dataset[key] = decoded
return decoded
def write_data_for_website(cve, data):
"""Process data and write out to a JSON file suitable for loading into androidvulnerabilities.org"""
export = dict()
ref_out = dict()
for key, value in (data['References']).items():
if key != '*':
ref_out[key] = value
nist_ref = 'NIST-' + cve
ref_out[nist_ref] = make_reference(NIST_URL)
bulletin_ref = 'Bulletin-' + cve
ref_out[bulletin_ref] = make_reference(data['URL'])
discovery_ref = 'Discovery-' + cve
ref_out[discovery_ref] = make_reference(data['Discovered_by_ref'])
discovery_date = None
if 'Date reported' in data:
# Use the date it was reported to Google as the (approximate) date of discovery
try:
discovery_date = datetime.strptime(data['Date reported'], '%b %d, %Y').date().isoformat()
except ValueError:
pass
# N.B. Report date is when it was first reported publicly
report_date = re.search(r'[0-9]{4}-[0-9]{2}-[0-9]{2}(?=\.html)', data['URL'])
export['name'] = cve
export['CVE'] = [[cve, bulletin_ref]]
# Coordinated disclosure
export['Coordinated_disclosure'] = "unknown"
# Slightly different categories than in original set, but still usable
export['Categories'] = [data['Category']]
export['Details'] = check_blank(data['Description'], nist_ref)
export['Discovered_on'] = check_blank(discovery_date, bulletin_ref)
export['Discovered_by'] = check_blank(data['Discovered_by'], discovery_ref)
export['Submission'] = data['Submission']
if report_date != None:
export['Reported_on'] = [[report_date.group(), bulletin_ref]]
else:
export['Reported_on'] = []
export['Fixed_on'] = check_blank(data['Fixed_on'], data['Fixed_on_ref'])
export['Fix_released_on'] = check_blank(data['Fix_released_on'], bulletin_ref)
export['Affected_versions'] = check_blank(data['Affected versions'], bulletin_ref)
# Affected devices
export['Affected_devices'] = []
if 'Affected_versions_regexp' in data:
export['Affected_versions_regexp'] = [data['Affected_versions_regexp']]
else:
export['Affected_versions_regexp'] = [regexp_versions(data['Affected versions'])]
# Initially assume all devices are affected
manufacturer_affected = 'all'
for manufacturer in KNOWN_MANUFACTURERS:
if manufacturer in data['Category']:
# A specific manufacturer is named, so use that
manufacturer_affected = manufacturer
export['Affected_manufacturers'] = [[manufacturer_affected, bulletin_ref]]
export['Fixed_versions'] = check_blank(data['Updated AOSP versions'], bulletin_ref)
if 'Fixed_versions_regexp' in data:
export['Fixed_versions_regexp'] = [data['Fixed_versions_regexp']]
else:
export['Fixed_versions_regexp'] = [regexp_versions(data['Updated AOSP versions'])]
export['references'] = ref_out
export['Surface'] = data['Surface']
export['Vector'] = data['Vector']
export['Target'] = data['Target']
export['Channel'] = data['Channel']
export['Condition'] = data['Condition']
export['Privilege'] = data['Privilege']
export['CWE'] = check_blank(data['CWE'], nist_ref)
with open('website-data/{cve}.json'.format(cve=cve), 'w') as f:
json.dump(export, f, indent=2)
def parse_references(table_cell):
"""Parse the contents of a table cell and produce a reference dictionary"""
ref_data = dict()
# Take references which link to URLs
refs = table_cell.find_elements_by_tag_name('a')
for ref in refs:
text = ref.get_attribute('innerHTML').replace('\n', ' ').strip()
if text != '*':
url = make_reference(ref.get_attribute('href'))
ref_data[text] = url
# Strip out links, line breaks and square brackets, and take the remaining sections of the string as references
regex = r'(\<a(.*?)\>(.*?)\<\/a\>)|(\<br( *)\/?\>)|(\<\/?p\>)|(\n)|\[|\]'
contents = table_cell.get_attribute('innerHTML')
text_items = re.sub(regex, ' ', contents, flags=re.S).split()
for item in text_items:
ref_data[item] = make_reference(None)
return ref_data
def merge_rows(row1, row2):
"""Merge two rows of the table of CVE data"""
output = copy.deepcopy(row1)
for key in row2:
if key not in output:
output[key] = row2[key]
elif output[key] == row2[key]:
continue
elif key == 'References':
output['References'].update(row2['References'])
elif key == 'Severity':
if output['Severity'] == 'Critical' or row2['Severity'] == 'Critical':
output['Severity'] = 'Critical'
else:
output[key] = '{old}, {new}'.format(old=output[key], new=row2[key])
else:
output[key] = '{old}, {new}'.format(old=output[key], new=row2[key])
return output
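# Illustrative check of merge_rows on two made-up rows for the same CVE:
# severity is escalated to Critical and the reference dictionaries are merged.
_row_a = {'CVE': 'CVE-0000-0000', 'Severity': 'High', 'References': {'A-ref': {}}}
_row_b = {'CVE': 'CVE-0000-0000', 'Severity': 'Critical', 'References': {'B-ref': {}}}
_merged_example = merge_rows(_row_a, _row_b)
assert _merged_example['Severity'] == 'Critical'
assert set(_merged_example['References']) == {'A-ref', 'B-ref'}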
def process_table(table, category, source_url, date_fix_released_on):
"""Produce a list of dictionaries of vulnerabilities from an HTML table"""
rows = table.find_elements_by_tag_name('tr')
headers = []
for header in table.find_elements_by_tag_name('th'):
headers.append(header.get_attribute('innerHTML'))
table_data = dict()
multispans = dict()
prev_row = None
# Exclude the top (title) row
for row in rows[1:]:
row_data = defaultdict(str)
# Find each cell of the table
items = row.find_elements_by_tag_name('td')
if(len(items) + len(multispans)) != len(headers):
raise Exception("Invalid table")
index = 0
for row_header in headers:
header = row_header.replace('*', '')
if header in multispans:
# Data from previous row needs to "spill over"
row_data[header] = prev_row[header]
multispans[header] -= 1
if multispans[header] == 0:
del multispans[header]
else:
# Take the appropriate column of the table
item = items[index]
index += 1
rowspan = item.get_attribute('rowspan')
if rowspan != None and int(rowspan) > 1:
# This row needs to "spill over" into the next
multispans[header] = int(rowspan) -1
if re.search(VERSION_REGEX, header, flags=re.I) != None:
# Do this in addition to loading the text directly below
row_data['Affected versions'] = item.get_attribute('innerHTML').strip()
if re.search(REFERENCE_REGEX, header, flags=re.I) != None:
row_data['References'] = parse_references(item)
elif header == 'Updated versions':
row_data['Updated AOSP versions'] = item.get_attribute('innerHTML').strip()
else:
row_data[header] = item.get_attribute('innerHTML').strip()
if 'CVE' in row_data:
cve = row_data['CVE']
row_data['Category'] = category
row_data['URL'] = source_url
row_data['Fix_released_on'] = date_fix_released_on
if prev_row != None and prev_row['CVE'] == cve:
row_data = merge_rows(prev_row, row_data)
prev_row = row_data
table_data[cve] = row_data
return table_data
def get_submitter_name():
"""Loads the submitter's name in from a file if the file exists, or prompts for it otherwise"""
if os.path.isfile('submitter'):
with open('submitter', 'r') as f:
return f.readline().strip()
else:
return input("Enter the name to record submissions under...")
def get_discoverer_data(driver, url):
"""Loads the list of people who have discovered bugs"""
output = defaultdict(str)
utils.fetchPage(driver, url)
tables = driver.find_elements_by_xpath('//div[@class="devsite-table-wrapper"]/table')
for table in tables:
rows = table.find_elements_by_tag_name('tr')
for row in rows:
cells = row.find_elements_by_tag_name('td')
if len(cells) < 2:
# We're on the header row, which uses <th> elements, or an invalid row
continue
cves = cells[1].text.split(',')
text = cells[0].text.strip()
for cve in cves:
output[cve.strip()] = text
return output
# Setup
driver = utils.getDriver()
vulnerabilities = dict()
# Submission details
submission = dict()
submission['by'] = get_submitter_name()
submission['on'] = date.today().strftime('%Y-%m-%d')
# Fix release dates (done per bulletin)
fix_dates = dict()
today = date.today()
discoverer_url = 'https://source.android.com/security/overview/acknowledgements'
discoverers = get_discoverer_data(driver, discoverer_url)
for year in range(2015, (today.year)+1):
#for year in range(2015, 2018):
fix_dates[year] = dict()
urls = []
url = 'https://source.android.com/security/bulletin/{year}'.format(year=year)
utils.fetchPage(driver, url)
table = driver.find_element_by_xpath('//div[@class="devsite-table-wrapper"]/table')
rows = table.find_elements_by_tag_name('tr')
for row in rows:
cells = row.find_elements_by_tag_name('td')
if len(cells) == 0:
# We're on the header row, which uses <th> elements
continue
links = cells[0].find_elements_by_tag_name('a')
if len(links) == 0:
# No links in this cell, so skip it
continue
url = links[0].get_attribute('href')
urls.append(url)
for url in urls:
date_string = re.search(r'\d{4}-\d{2}-\d{2}(?=\.html)', url).group()
report_date = datetime.strptime(date_string, '%Y-%m-%d').date()
utils.fetchPage(driver, url)
month_fix_date = None
search_exp = '{:d}-{:02d}-[0-9][0-9]'.format(report_date.year, report_date.month)
date_para = driver.find_elements_by_xpath('//div[contains(@class, "devsite-article-body")]/p')[1]
date_text = re.search(search_exp, date_para.get_attribute('innerHTML'))
if date_text != None:
month_fix_date = date_text.group()
fix_dates[report_date.year][report_date.month] = month_fix_date
contents = driver.find_elements_by_xpath('//devsite-heading | //div[@class="devsite-table-wrapper"]/table')
title = None
for item in contents:
if item.get_attribute('level') == 'h3':
# If item is a title
title = item.get_attribute('text').replace('\n', ' ')
elif title != None:
# If item is a table, and there hasn't been a table since the last title
vulnerabilities.update(process_table(item, title, url, month_fix_date))
title = None
descriptions = get_descriptions(vulnerabilities.keys())
for cve in descriptions.keys():
vulnerabilities[cve].update(descriptions[cve])
#pprint.pprint(vulnerabilities)
# Load datasets to give descriptions
cwe_dataset = load_manual_data('attributes/cwe')
version_dataset = load_manual_data('attributes/versions')
# Store previous manual data set for quick repeat operations
prev_manual_data = None
for cve, vulnerability in vulnerabilities.items():
if vulnerability['Severity'] == 'Critical':
# Get the fix date
# Using the latest date of any of the commits as "fixed" date
fixed = None
fixed_ref = None
for ref_name, reference in vulnerability['References'].items():
if 'url' in reference.keys():
commit_date = load_date_from_commit(reference['url'], driver)
if fixed == None or commit_date > fixed:
fixed = commit_date
fixed_ref = ref_name
if fixed != None:
vulnerability['Fixed_on'] = fixed.isoformat()
vulnerability['Fixed_on_ref'] = fixed_ref
vulnerability['Discovered_by'] = discoverers[cve]
vulnerability['Discovered_by_ref'] = discoverer_url
# If fixed versions regexp is complicated, do it manually
affected = vulnerability['Affected versions']
if 'below' in affected or 'above' in affected:
vulnerability['Affected_versions_regexp'] = decode_lookup(affected.strip(), version_dataset, 'regexp')
fixed = vulnerability['Updated AOSP versions']
if 'below' in fixed or 'above' in fixed:
vulnerability['Fixed_versions_regexp'] = decode_lookup(fixed.strip(), version_dataset, 'regexp')
pprint.pprint(vulnerability)
# If no stored submission date, assume today
manual_data = load_manual_data(cve)
if 'Submission' not in manual_data.keys():
manual_data['Submission'] = [submission]
for key in MANUAL_KEYS:
if key not in manual_data:
if key in MANUAL_KEYS_REQUIRED:
entered = input("Enter {key}: ".format(key=key))
if entered == '^':
manual_data.update(prev_manual_data)
elif entered != '':
manual_data[key] = entered.split(',')
else:
manual_data[key] = []
elif key == 'Vector':
cwe = vulnerability['CWE']
if cwe == '' or cwe == 'NVD-CWE-Other':
# No specific CWE, so ask each time
vector = input("Please enter vector for this vulnerability: ")
if vector == '':
manual_data['Vector'] = []
else:
manual_data['Vector'] = [vector]
else:
# Otherwise, as this is automatically generated, we don't add it to manual_data
vector = decode_lookup(cwe, cwe_dataset, 'vector')
if vector == '':
vulnerability['Vector'] = []
else:
vulnerability['Vector'] = [vector]
else:
manual_data[key] = []
write_manual_data(cve, manual_data)
if 'References' in manual_data:
vulnerability['References'].update(manual_data['References'])
del manual_data['References']
vulnerability.update(manual_data)
prev_manual_data = manual_data
write_data_for_website(cve, vulnerability)
write_data(cve, vulnerability)
@atexit.register
def cleanup():
# Write datasets back to disk
write_manual_data('attributes/cwe', cwe_dataset)
write_manual_data('attributes/versions', version_dataset)
utils.quitDriver(driver)
|
# file test_fedora/test_cryptutil.py
#
# Copyright 2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import unittest
from eulfedora import cryptutil
from eulfedora.util import force_text
class CryptTest(unittest.TestCase):
def test_to_blocksize(self):
def test_valid_blocksize(text):
block = cryptutil.to_blocksize(text)
self.assertEqual(0, len(block) % cryptutil.EncryptionAlgorithm.block_size,
'''text '%s' has correct block size for encryption algorithm''' % block)
            self.assertTrue(text in block, 'block-sized text contains original text')
# test text of several sizes
test_valid_blocksize('text')
test_valid_blocksize('texty')
test_valid_blocksize('textish')
test_valid_blocksize('this is some text')
test_valid_blocksize('this would be a really long password')
test_valid_blocksize('can you imagine typing this every time you logged in?')
def test_encrypt_decrypt(self):
def test_encrypt_decrypt(text):
encrypted = cryptutil.encrypt(text)
self.assertNotEqual(text, encrypted,
"encrypted text should not match original")
decrypted = cryptutil.decrypt(encrypted)
self.assertEqual(text, force_text(decrypted),
"decrypted text (%s) should match original encrypted text (%s)" % (force_text(decrypted), text))
test_encrypt_decrypt('text')
test_encrypt_decrypt('texty')
test_encrypt_decrypt('textier')
test_encrypt_decrypt('textiest')
test_encrypt_decrypt('longish password-type text')
|
import matplotlib.pyplot as plt
import numpy as np
import json
with open('../tokamaks.json') as f:
machines = json.load(f)
# by type of machine
plt.figure()
types_labels = ["Tokamaks", "Stellarators", "Inertial", "Others"]
types_machine = ["tokamak", "stellarator", "inertial", "alternate_concept"]
bottom = 0
countries = np.unique([machine["country"] for machine in machines])
for country in countries:
country_data = []
for type_machine in types_machine:
country_data.append(
len(
[machine for machine in machines
if machine["configuration"] == type_machine and
machine["country"] == country]))
plt.bar(
types_labels, country_data,
bottom=bottom, color="tab:blue", edgecolor="white")
bottom += np.array(country_data)
# by country
plt.figure()
countries = np.unique([machine["country"] for machine in machines])
countries_total = [
len([machine for machine in machines if machine["country"] == country])
for country in countries]
countries = [x for _, x in sorted(zip(countries_total, countries))]
countries_total = sorted(countries_total)
left = 0
for type_machine, label in zip(types_machine, types_labels):
type_data = []
for country in countries:
type_data.append(
len([machine for machine in machines
if machine["configuration"] == type_machine and
machine["country"] == country]))
plt.barh(countries, type_data, label=label, left=left)
left += np.array(type_data)
plt.legend()
plt.tight_layout()
plt.show()
|
"""Global fixtures"""
import os
import shutil
from os.path import join
import pytest
from hdx.hdx_configuration import Configuration
@pytest.fixture(scope="function")
def configuration():
project_config_yaml = join("tests", "fixtures", "project_configuration.yml")
Configuration._create(
hdx_site="prod",
user_agent="test",
hdx_read_only=True,
project_config_yaml=project_config_yaml,
)
return Configuration.read()
@pytest.fixture(scope="function")
def database_failure():
dbfile = "test_freshness_failure.db"
dbpath = join("tests", dbfile)
try:
os.remove(dbpath)
except FileNotFoundError:
pass
shutil.copyfile(join("tests", "fixtures", dbfile), dbpath)
return {"driver": "sqlite", "database": dbpath}
|
import uuid
from django.db import models
from django.utils.translation import gettext_lazy as _
from heroku_connect.db import models as hc_models
__all__ = ("NumberModel", "OtherModel")
def frozen_uuid_generator():
return uuid.UUID(hex="653d1c6863404b9689b75fa930c9d0a0")
class NumberModel(hc_models.HerokuConnectModel):
sf_access = hc_models.READ_WRITE
sf_object_name = "Number_Object__c"
a_number = hc_models.Number(
_("yet another number"),
sf_field_name="A_Number__c",
max_digits=3,
decimal_places=2,
)
external_id = hc_models.ExternalID(
sf_field_name="External_ID", default=frozen_uuid_generator, upsert=True
)
class OtherModel(models.Model):
number = models.ForeignKey(NumberModel, on_delete=models.CASCADE)
other_number = models.ForeignKey(
"testapp.NumberModel", on_delete=models.CASCADE, db_constraint=False
)
more_numbers = models.ManyToManyField(NumberModel)
class DateTimeModel(hc_models.HerokuConnectModel):
sf_access = hc_models.READ_WRITE
sf_object_name = "DateTime_Object__c"
a_datetime = hc_models.DateTime(
_("a date time field"), sf_field_name="A_DateTime__c"
)
class ReadOnlyModel(hc_models.HerokuConnectModel):
sf_object_name = "ReadOnly__c"
sf_access = hc_models.READ_ONLY
|
import pickle
import os
files = ['logdeparture', 'cmass', 'tau', 'T', 'vturb']
folders = ['prd', 'prd_2']
destination_folder = 'prd_full'
if not os.path.exists(f'../data/{destination_folder}'):
os.makedirs(f'../data/{destination_folder}')
for file in files:
dataset = []
for folder in folders:
with open(f'../data/{folder}/train_{file}.pkl', 'rb') as filehandle:
print(f'reading file: \t ../data/{folder}/train_{file}.pkl')
loaded = pickle.load(filehandle)
dataset += loaded.copy()
del loaded
with open(f'../data/{destination_folder}/train_{file}.pkl', 'wb') as filehandle:
pickle.dump(dataset, filehandle)
|
import json
import requests
from requests.exceptions import RequestException
import re
import time
import random
def get_one_page(url):
try:
response = requests.get(url)
if response.status_code == 200:
response.encoding = 'utf-8'
return response.text
return None
except RequestException:
return None
def parse_one_page(html):
    pattern = re.compile(r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
                         + r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                         + r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
items = re.findall(pattern, html)
for item in items:
yield {
'index': item[0],
'image': item[1],
'title': item[2],
'actor': item[3].strip()[3:],
'time': item[4].strip()[5:],
'score': item[5] + item[6]
}
def write_html(offset, html):
with open("code/{}.html".format(offset), mode='w+', encoding='utf-8') as f:
f.write(html)
def write_result(content):
with open('result.txt', 'a') as f:
f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
url = 'http://maoyan.com/board/4?offset=' + str(offset)
html = get_one_page(url)
write_html(offset, html)
for item in parse_one_page(html):
print(item)
write_result(item)
if __name__ == '__main__':
for i in range(10):
main(offset=i * 10)
time.sleep(random.randint(1, 4))
# import re
# content = 'Hello, my phone number is 13512345678 and email is [email protected], \
# and my website is https://github.com/carmel.'
# result = re.search('(1[3456789]\d{9})', content, re.S)
# if result:
# print(result.group())
# print(result.group(1))
# print(result.span(), '\n')
# result = re.search('([a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+))+', content, re.S)
# if result:
# print(result.group(1))
# html = '''<div id="songs-list">
# <h2 class="title">经典老歌</h2>
# <p class="introduction">
# 经典老歌列表
# </p>
# <ul id="list" class="list-group">
# <li data-view="2">一路上有你</li>
# <li data-view="7">
# <a href="/2.mp3" singer="任贤齐">沧海一声笑</a>
# </li>
# <li data-view="4" class="active">
# <a href="/3.mp3" singer="齐秦">往事随风</a>
# </li>
# <li data-view="6"><a href="/4.mp3" singer="beyond">光辉岁月</a></li>
# <li data-view="5"><a href="/5.mp3" singer="陈慧琳">记事本</a></li>
# <li data-view="5">
# <a href="/6.mp3" singer="邓丽君"><i class="fa fa-user"></i>但愿人长久</a>
# </li>
# </ul>
# </div>'''
# results = re.findall('<li.*?href="(.*?)".*?singer="(.*?)">(.*?)</a>', html, re.S)
# print(results)
# for result in results:
# print(result)
# print(result[0], result[1], result[2], '\n')
# html = re.sub('<a.*?>|</a>', '', html)
# print(html)
# results = re.findall('<li.*?>(.*?)</li>', html, re.S)
# for result in results:
# print(result.strip(), '\n')
# content1 = '2016-12-15 12:00'
# content2 = '2016-12-17 12:55'
# content3 = '2016-12-22 13:21'
# pattern = re.compile('\d{2}:\d{2}')
# result1 = re.sub(pattern, '', content1)
# result2 = re.sub(pattern, '', content2)
# result3 = re.sub(pattern, '', content3)
# print(result1, result2, result3, '\n')
# content = 'Hello 1234567 World_This is a Regex Demo'
# result = re.match('^Hello\s(\d+)\sWorld', content, re.I)
# print(result)
# print(result.group())
# print(result.group(1))
# print(result.span())
# print(len('Hello 1234567 World'))
# Fetch a web page
# headers = {
# 'User-Agent':
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
# }
# r = requests.get("https://www.zhihu.com/explore", headers=headers)
# r.encoding = r.apparent_encoding
# pattern = re.compile(
# 'ExploreRoundtableCard-title|ExploreSpecialCard-title.*?>(.*?)</a>', re.S)
# titles = re.findall(pattern, r.text)
# print(titles, '\n')
# Fetch a file
# r = requests.get("https://github.com/favicon.ico")
# with open('favicon.ico', 'wb') as f:
# f.write(r.content)
# r = requests.get('https://www.jianshu.com')
# print(r.status_code)
# print(r.headers)
# print(r.cookies)
# print(r.url)
# print(r.history)
# 文件上传
# files = {'file': open('favicon.ico', 'rb')}
# r = requests.post("http://httpbin.org/post", files=files)
# print(r.text, '\n')
# # Get cookies
# r = requests.get("https://www.baidu.com")
# for key, value in r.cookies.items():
# print("{0}={1}\n".format(key, value))
# Add cookies via request headers
# cookies = 'q_c1=31653b264a074fc9a57816d1ea93ed8b|1474273938000|1474273938000; d_c0="AGDAs254kAqPTr6NW1U3XTLFzKhMPQ6H_nc=|1474273938"; __utmv=51854390.100-1|2=registration_date=20130902=1^3=entry_date=20130902=1;a_t="2.0AACAfbwdAAAXAAAAso0QWAAAgH28HQAAAGDAs254kAoXAAAAYQJVTQ4FCVgA360us8BAklzLYNEHUd6kmHtRQX5a6hiZxKCynnycerLQ3gIkoJLOCQ==";z_c0=Mi4wQUFDQWZid2RBQUFBWU1DemJuaVFDaGNBQUFCaEFsVk5EZ1VKV0FEZnJTNnp3RUNTWE10ZzBRZFIzcVNZZTFGQmZn|1474887858|64b4d4234a21de774c42c837fe0b672fdb5763b0'
# jar = requests.cookies.RequestsCookieJar()
# headers = {
# 'Host': 'www.zhihu.com',
# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'
# }
# for cookie in cookies.split(';'):
# key, value = cookie.split('=', 1)
# jar.set(key, value)
# r = requests.get("http://www.zhihu.com", cookies=jar, headers=headers)
# print(r.text)
# Specify a CA certificate
# response = requests.get('https://www.12306.cn', cert=('/path/server.crt', '/path/key'))
# print(response.status_code)
# Specify a proxy
# proxies = {"http": "127.0.0.1:8888"}
# r = requests.get("https://www.taobao.com", proxies=proxies)
# print(r.status_code)
# from urllib.robotparser import RobotFileParser
# from urllib.request import urlopen
# rp = RobotFileParser()
# rp.set_url('http://www.jianshu.com/robots.txt')
# rp.read()
# print(rp.can_fetch('*', 'https://www.jianshu.com/p/b67554025d7d'))
# print(rp.can_fetch('*', "http://www.jianshu.com/search?q=python&page=1&type=collections"))
# rp.parse(urlopen('https://www.jianshu.com/robots.txt').read().decode('utf-8').split('\n'))
# print(rp.can_fetch('*', 'https://www.jianshu.com/p/b67554025d7d'))
# print(rp.can_fetch('*', "https://www.jianshu.com/search?q=python&page=1&type=collections"))
# from urllib.parse import urlparse, urlunparse, urlsplit, urlunsplit, \
# urljoin, urlencode, parse_qs, parse_qsl, quote, unquote
# url = 'https://www.baidu.com/index.html;user?id=5#comment'
# print(urlparse(url, allow_fragments=False), '\n')
# # The target URL is parsed into six parts: scheme, netloc, path, params, query, fragment
# print(urlsplit(url, allow_fragments=False), '\n')
# # The target URL is parsed into five parts: scheme, netloc, path, query, fragment
# data = ['http', 'wwww.baidu.com', 'index.html', 'user', 'a=6', 'comment']
# print(urlunparse(data), '\n')
# data = ['http', 'wwww.baidu.com', 'index.html', 'a=6', 'comment']
# print(urlunsplit(data), '\n')
# print(urljoin(base='http://www.baidu.com', url='FAQ.html'))
# print(urljoin('http://www.baidu.com', 'https://www.zhihu.com/FAQ.html'))
# print(urljoin('http://www.baidu.com', '?category=2#comment'))
# print(urljoin('www.baidu.com', '?category=2#comment'))
# print(urljoin('www.baidu.com#comment', '?category=2'), '\n')
# params = {
# 'name': 'Vector',
# 'age': 30
# }
# url = urlencode(params)
# print(url, '\n')
# print(parse_qs(url), '\n')
# print(parse_qsl(url), '\n')
# quote_word = quote('中国')
# print('https://www.baidu.com/s?wd={0}'.format(quote_word), '> quote_word\n')
# print(unquote(quote_word), '> unquote\n')
# from urllib import request as rq, parse as pr
# from urllib.error import URLError
# from urllib.request import ProxyHandler, build_opener
# request = rq.Request('https://python.org')
# response = rq.urlopen(request)
# print(response.read().decode('utf-8'))
# url = 'http://httpbin.org/post'
# headers = {
# 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
# 'Host': 'httpbin.org'
# }
# data = bytes(pr.urlencode({'name': 'Germey'}), encoding='utf-8')
# request = rq.Request(url, data, headers=headers, method='POST')
# request.add_header('Cookie', 'Hm_lvt_eaa57ca47dacb4ad4f5a257001a3457c=1602235479')
# response = rq.urlopen(request)
# print(response.read().decode('utf-8'))
# proxy_handler = ProxyHandler({'http': '127.0.0.1:8888'})
# opener = build_opener(proxy_handler)
# try:
# response = opener.open(rq.Request("http://www.baidu.com/"))
# print(response.read().decode('utf-8'))
# except URLError as e:
# print(e.reason)
# import socket
# import urllib.parse as up
# import urllib.request as ur
# import urllib.error as ue
# data = bytes(up.urlencode({'word': 'hello'}), encoding='utf-8')
# try:
# response = ur.urlopen('http://httpbin.org/post', data=data, timeout=3)
# print(response.read().decode('utf-8'))
# except ue.URLError as e:
# if (isinstance(e.reason, socket.timeout)):
# print('Time is out')
# import urllib.request as ur
# response = ur.urlopen('https://www.python.org')
# html = response.read().decode('utf-8')
# # print(html)
# with open('code/python.org.html', mode='w+') as f:
# f.write(html)
# print(f.closed)
# print(help(type(response)))
# print(response.status, '> response.status\n')
# print(response.getheaders(), '> response.getheaders()\n')
# print(response.getheader('Server'), "> response.getheader('Server')\n")
|
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class GUI:
__gui_image = {}
__gui_image["moveBothHands"] = Image.open("Brain_Waves_Analysis/Training_Data_Acquisition/src/resources/moveBoth.png")
__gui_image["moveLeftHand"] = Image.open("Brain_Waves_Analysis/Training_Data_Acquisition/src/resources/moveLeft.png")
__gui_image["moveRightHand"] = Image.open("Brain_Waves_Analysis/Training_Data_Acquisition/src/resources/moveRight.png")
__gui_image["rest"] = Image.open("Brain_Waves_Analysis/Training_Data_Acquisition/src/resources/rest.png")
def __init__(self):
self.configureMatplot()
def configureMatplot(self):
plt.ion()
plt.axis('off')
plt.show()
def loadImage(self, state):
plt.imshow(self.__gui_image[state])
self.pausePlotToLoadImage()
def pausePlotToLoadImage(self): # matplotlib needs to be paused for a fraction of a second
plt.pause(0.001) # in order to fully load image.
def closeImage(self):
plt.close()
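# Hedged usage sketch (not part of the original module): cycles the GUI through each cue
# state. It assumes the resource PNGs referenced by the class above exist at those paths
# and that an interactive matplotlib backend is available.
if __name__ == "__main__":
    import time
    gui = GUI()
    for state in ["moveLeftHand", "moveRightHand", "moveBothHands", "rest"]:
        gui.loadImage(state) # show the cue image for this state
        time.sleep(1)        # hold the cue for roughly one second
    gui.closeImage()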
|
# from .optimization import *
# from .benchmarks import *
|
#!/usr/bin/env python
# coding: utf-8
import os
import pandas as pd
import logging
import json
from tqdm import tqdm
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
filename='met_data_metadata.log',
filemode='w',
format='%(asctime)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S',
)
logger = logging.getLogger('met_data_metadata')
PROJECT_DIR = os.getcwd()
def load_local_json_file(json_local_record):
with open(json_local_record, encoding='utf-8') as json_file:
return json.load(json_file)
def main():
start_time = datetime.now()
json_extracted_files \
= os.listdir(PROJECT_DIR + '/data')
logger.info(f'[ARTWORK-METADATA-EXTRACTION] - Total Files in data folder: {len(json_extracted_files)}')
json_valid_records \
= [file for file in json_extracted_files if "json" in file]
logger.info(f'[ARTWORK-METADATA-EXTRACTION] - Total valid records: {len(json_valid_records)}')
list_met_metadata_values = []
for record in tqdm(json_valid_records):
json_local_record \
= PROJECT_DIR + '/data/' + record
json_data \
= load_local_json_file(json_local_record)
try:
objectID = json_data.get("objectID")
isHighlight = json_data.get("isHighlight")
accessionNumber = json_data.get("accessionNumber")
accessionYear = json_data.get("accessionYear")
isPublicDomain = json_data.get("isPublicDomain")
primaryImage = json_data.get("primaryImage")
primaryImageSmall = json_data.get("primaryImageSmall")
department = json_data.get("department")
objectName = json_data.get("objectName")
title = json_data.get("title")
culture = json_data.get("culture")
period = json_data.get("period")
dynasty = json_data.get("dynasty")
reign = json_data.get("reign")
portfolio = json_data.get("portfolio")
artistRole = json_data.get("artistRole")
artistPrefix = json_data.get("artistPrefix")
artistDisplayName = json_data.get("artistDisplayName")
artistDisplayBio = json_data.get("artistDisplayBio")
artistSuffix = json_data.get("artistSuffix")
artistAlphaSort = json_data.get("artistAlphaSort")
artistNationality = json_data.get("artistNationality")
artistBeginDate = json_data.get("artistBeginDate")
artistEndDate = json_data.get("artistEndDate")
artistGender = json_data.get("artistGender")
artistWikidata_URL = json_data.get("artistWikidata_URL")
artistULAN_URL = json_data.get("artistULAN_URL")
objectDate = json_data.get("objectDate")
objectBeginDate = json_data.get("objectBeginDate")
objectEndDate = json_data.get("objectEndDate")
medium = json_data.get("medium")
dimensions = json_data.get("dimensions")
creditLine = json_data.get("creditLine")
geographyType = json_data.get("geographyType")
city = json_data.get("city")
state = json_data.get("state")
county = json_data.get("county")
country = json_data.get("country")
region = json_data.get("region")
subregion = json_data.get("subregion")
locale = json_data.get("locale")
locus = json_data.get("locus")
excavation = json_data.get("excavation")
river = json_data.get("river")
classification = json_data.get("classification")
rightsAndReproduction = json_data.get("rightsAndReproduction")
linkResource = json_data.get("linkResource")
metadataDate = json_data.get("metadataDate")
repository = json_data.get("repository")
objectURL = json_data.get("objectURL")
tags = json_data.get("tags")
objectWikidata_URL = json_data.get("objectWikidata_URL")
isTimelineWork = json_data.get("isTimelineWork")
GalleryNumber = json_data.get("GalleryNumber")
constituents_constituentID = None
constituents_role = None
constituents_name = None
constituents_constituentULAN_URL = None
constituents_constituentWikidata_URL = None
constituents_gender = None
measurements_elementName = None
measurements_elementDescription = None
measurements_elementDescription_elementMeasurements_Height = None
measurements_elementDescription_elementMeasurements_Width = None
try:
constituents_constituentID \
= json_data["constituents"][0]["constituentID"]
except:
pass
try:
constituents_role \
= json_data["constituents"][0]["role"]
except:
pass
try:
constituents_name \
= json_data["constituents"][0]["name"]
except:
pass
try:
constituents_constituentULAN_URL \
= json_data["constituents"][0]["constituentULAN_URL"]
except:
pass
try:
constituents_constituentWikidata_URL \
= json_data["constituents"][0]["constituentWikidata_URL"]
except:
pass
try:
constituents_gender \
= json_data["constituents"][0]["gender"]
except:
pass
try:
measurements_elementName \
= json_data["measurements"][0]["elementName"]
except:
pass
try:
measurements_elementDescription \
= json_data["measurements"][0]["elementDescription"]
except:
pass
try:
measurements_elementDescription_elementMeasurements_Height \
= json_data["measurements"][0]["elementMeasurements"]["Height"]
except:
pass
try:
measurements_elementDescription_elementMeasurements_Width \
= json_data["measurements"][0]["elementMeasurements"]["Width"]
except:
pass
list_met_metadata_values.append((
objectID,
isHighlight,
accessionNumber,
accessionYear,
isPublicDomain,
primaryImage,
primaryImageSmall,
department,
objectName,
title,
culture,
period,
dynasty,
reign,
portfolio,
artistRole,
artistPrefix,
artistDisplayName,
artistDisplayBio,
artistSuffix,
artistAlphaSort,
artistNationality,
artistBeginDate,
artistEndDate,
artistGender,
artistWikidata_URL,
artistULAN_URL,
objectDate,
objectBeginDate,
objectEndDate,
medium,
dimensions,
creditLine,
geographyType,
city,
state,
county,
country,
region,
subregion,
locale,
locus,
excavation,
river,
classification,
rightsAndReproduction,
linkResource,
metadataDate,
repository,
objectURL,
tags,
objectWikidata_URL,
isTimelineWork,
GalleryNumber,
constituents_constituentID,
constituents_role,
constituents_name,
constituents_constituentULAN_URL,
constituents_constituentWikidata_URL,
constituents_gender,
measurements_elementName,
measurements_elementDescription,
measurements_elementDescription_elementMeasurements_Height,
measurements_elementDescription_elementMeasurements_Width
))
except Exception as e:
logger.info(f'[ARTWORK-METADATA-EXTRACTION] - {e} - Record: {record} skipped.')
time_elapsed = datetime.now() - start_time
logger.info(f'[ARTWORK-METADATA-EXTRACTION] - Time elapsed (hh:mm:ss.ms) {time_elapsed}')
columns = [
'objectID',
'isHighlight',
'accessionNumber',
'accessionYear',
'isPublicDomain',
'primaryImage',
'primaryImageSmall',
'department',
'objectName',
'title',
'culture',
'period',
'dynasty',
'reign',
'portfolio',
'artistRole',
'artistPrefix',
'artistDisplayName',
'artistDisplayBio',
'artistSuffix',
'artistAlphaSort',
'artistNationality',
'artistBeginDate',
'artistEndDate',
'artistGender',
'artistWikidata_URL',
'artistULAN_URL',
'objectDate',
'objectBeginDate',
'objectEndDate',
'medium',
'dimensions',
'creditLine',
'geographyType',
'city',
'state',
'county',
'country',
'region',
'subregion',
'locale',
'locus',
'excavation',
'river',
'classification',
'rightsAndReproduction',
'linkResource',
'metadataDate',
'repository',
'objectURL',
'tags',
'objectWikidata_URL',
'isTimelineWork',
'GalleryNumber',
'constituents_constituentID',
'constituents_role',
'constituents_name',
'constituents_constituentULAN_URL',
'constituents_constituentWikidata_URL',
'constituents_gender',
'measurements_elementName',
'measurements_elementDescription',
'measurements_elementDescription_elementMeasurements_Height',
'measurements_elementDescription_elementMeasurements_Width'
]
df_met_metadata_values \
= pd.DataFrame(list_met_metadata_values, columns=columns)
df_met_metadata_values.to_csv(PROJECT_DIR + '/metadata/' + 'df_met_metadata_values.txt', sep="|")
    logger.info(f'[ARTWORK-METADATA-EXTRACTION] - Metadata extracted and persisted to disk')
if __name__ == '__main__':
main()
|
import unittest
# import HTMLTestRunner
import sys
sys.path.append("..")
import ascend
import numpy as np
import cv2
import os
def decode(heatmap, scale, offset, landmark, size, thresh=0.1):
heatmap = np.squeeze(heatmap[0])
scale0, scale1 = scale[0, 0, :, :], scale[0, 1, :, :]
offset0, offset1 = offset[0, 0, :, :], offset[0, 1, :, :]
c0, c1 = np.where(heatmap > thresh)
boxes, lms = [], []
if len(c0) > 0:
for i in range(len(c0)):
s0, s1 = np.exp(scale0[c0[i], c1[i]]) * 4, np.exp(scale1[c0[i], c1[i]]) * 4
o0, o1 = offset0[c0[i], c1[i]], offset1[c0[i], c1[i]]
s = heatmap[c0[i], c1[i]]
x1, y1 = max(0, (c1[i] + o1 + 0.5) * 4 - s1 / 2), max(0, (c0[i] + o0 + 0.5) * 4 - s0 / 2)
x1, y1 = min(x1, size[1]), min(y1, size[0])
boxes.append([x1, y1, min(x1 + s1, size[1]), min(y1 + s0, size[0]), s])
lm = []
for j in range(5):
lm.append(landmark[0, j * 2 + 1, c0[i], c1[i]] * s1 + x1)
lm.append(landmark[0, j * 2, c0[i], c1[i]] * s0 + y1)
lms.append(lm)
    if len(boxes) > 0:
        boxes = np.asarray(boxes, dtype=np.float32)
        keep = ascend.nms(boxes[:, :4], boxes[:, 4], 0.3)
        boxes = boxes[keep, :]
        lms = np.asarray(lms, dtype=np.float32)
        lms = lms[keep, :]
    else:
        # no detections above thresh: return empty arrays instead of indexing an empty list
        boxes = np.empty(shape=[0, 5], dtype=np.float32)
        lms = np.empty(shape=[0, 10], dtype=np.float32)
    return boxes, lms
def post_process(heatmap, scale, offset, lms, thresh=0.3, scale_shape=None, with_landmark=False):
w, h, scale_w, scale_h = scale_shape
dets, lms = decode(heatmap, scale, offset, lms, (h, w), thresh=thresh)
if len(dets) > 0:
dets[:, 0:4:2], dets[:, 1:4:2] = dets[:, 0:4:2] / scale_w, dets[:, 1:4:2] / scale_h
lms[:, 0:10:2], lms[:, 1:10:2] = lms[:, 0:10:2] / scale_w, lms[:, 1:10:2] / scale_h
else:
dets = np.empty(shape=[0, 5], dtype=np.float32)
lms = np.empty(shape=[0, 10], dtype=np.float32)
if with_landmark:
return dets, lms
else:
return dets
class InferTest(unittest.TestCase):
@classmethod
def setUpClass(self):
print("setUpClass: executed before all testcase.")
@classmethod
def tearDownClass(self):
print("tearDownClass: executed after all testcase.")
def setUp(self):
print("execute setUp")
def tearDown(self):
print("execute tearDown")
def assertTrue(self, expr, msg=None):
print('[FAIL] %s' % msg if not expr else '[SUCCESS]')
super(InferTest, self).assertTrue(expr, msg)
# all testcase write below:
def test_rawdata_infer_001(self):
resource = ascend.Context({0})
model_path = "../tools/model/centerface_noaipp.om"
array = np.ones(shape=(1, 3, 384, 384), dtype='float32')
dev_data = ascend.AscendArray(shape=(1, 3, 384, 384), dtype=np.dtype('float32'))
dev_data.to_ascend(array)
print(dev_data.to_np)
dev_data2 = ascend.AscendArray.clone(array)
for _, ctx in resource.context_dict.items():
model = ascend.AscendModel(ctx, model_path)
model.feed_data({'data':dev_data2})
model.run()
ascend_out = model.get_tensor_by_name('537:0:537')
out = ascend_out.to_np
print(ascend_out.to_np)
print(out.data)
print(out.dtype)
print(out.shape)
print(out.nbytes)
print(out.itemsize)
ascend_out = model.get_tensor_by_name('538:0:538')
out = ascend_out.to_np
print(ascend_out.to_np)
print(out.data)
print(out.dtype)
print(out.shape)
print(out.nbytes)
print(out.itemsize)
self.assertTrue(out is not None, msg="test ok")
del model
del resource
def test_rawdata_infer_002(self):
resource = ascend.Context({0})
model_path = "../tools/model/centerface_noaipp.om"
array = np.ones(shape=(3, 384, 384), dtype='float32')
dev_data2 = ascend.AscendArray.clone(array)
for _, ctx in resource.context_dict.items():
model = ascend.AscendModel(ctx, model_path)
model.feed_data({'data':dev_data2})
model.run()
ascend_out = model.get_tensor_by_name('537:0:537')
print(ascend_out.to_np)
ascend_out = model.get_tensor_by_name('538:0:538')
print(ascend_out.to_np)
self.assertTrue(True, msg="test ok")
del model
del resource
def test_rawdata_infer_003(self):
resource = ascend.Context({0})
model_path = "../tools/model/centerface_static_aipp_nv12.om"
array = np.ones(shape=(int(384*1.5), 384), dtype='float32')
dev_data = ascend.AscendArray(shape=(int(384*1.5), 384), dtype=np.dtype('float32'))
dev_data.to_ascend(array)
print(dev_data.to_np)
for _, ctx in resource.context_dict.items():
model = ascend.AscendModel(ctx, model_path)
model.feed_data({'data':dev_data})
model.run()
ascend_out = model.get_tensor_by_name('537:0:537')
print(ascend_out.to_np)
ascend_out = model.get_tensor_by_name('538:0:538')
print(ascend_out.to_np)
self.assertTrue(True, msg="test ok")
del model
del resource
def test_video_infer_004(self):
resource = ascend.Context({0})
model_path = "../tools/model/centerface_static_aipp_nv12.om"
video_stream_path = os.getcwd() + '/video/test-1k.264'
landmarks = False
ctx = resource.context_dict[0]
Img = ascend.Image(ctx)
cap = ascend.VideoCapture(ctx, video_stream_path)
mdl = ascend.AscendModel(ctx, model_path)
while cap.is_open():
yuv_img, frame_id = cap.read()
if yuv_img:
yuv_resized = Img.imrescale(yuv_img, (384, 384))
yuv_pad = Img.impad(yuv_resized, shape=(384, 384))
img_color = cv2.cvtColor(yuv_resized.to_np, cv2.COLOR_YUV2RGB_NV21)
mdl.feed_data({'data':yuv_pad})
mdl.run()
heatmap = mdl.get_tensor_by_name('537:0:537').to_np
scale = mdl.get_tensor_by_name('538:0:538').to_np
offset = mdl.get_tensor_by_name('539:0:539').to_np
lms = mdl.get_tensor_by_name('540:0:540').to_np
dets = post_process(heatmap, scale, offset, lms, thresh=0.3, scale_shape=(384, 384, 1.0, 1.0))
for det in dets:
boxes, score = det[:4], det[4]
cv2.rectangle(img_color, (int(boxes[0]), int(boxes[1])), (int(boxes[2]), int(boxes[3])), (2, 255, 0), 1)
if landmarks:
for lm in lms:
for i in range(0, 5):
cv2.circle(img_color, (int(lm[i * 2]), int(lm[i * 2 + 1])), 2, (0, 0, 255), -1)
cv2.imshow('out', img_color)
cv2.waitKey(25)
self.assertTrue(True, msg="test ok")
del mdl
del resource
def test_batch_image_infer_005(self):
resource = ascend.Context({0})
model_path = "../tools/model/centerface_8batch_static_aipp_nv12.om"
video_stream_path = os.getcwd() + '/video/test-1k.264'
landmarks = False
ctx = resource.context_dict[0]
Img = ascend.Image(ctx)
cap = ascend.VideoCapture(ctx, video_stream_path)
mdl = ascend.AscendModel(ctx, model_path)
count = 0
imgs = []
while cap.is_open():
yuv_img, frame_id = cap.read()
if yuv_img:
yuv_show = Img.imrescale(yuv_img, (1280, 720))
yuv_resized = Img.imrescale(yuv_img, (384, 384))
yuv_pad = Img.impad(yuv_resized, shape=(384, 384))
if count != mdl.tensor['data'].shape[0]:
imgs.append(yuv_pad)
count = count + 1
continue
in_data = ascend.imgs2tensor(imgs, tensor_fmt='NHWC')
                mdl.feed_data({'data':in_data}) # feed the batched tensor assembled above
mdl.run()
count = 0
imgs = []
heatmap = mdl.get_tensor_by_name('537:0:537').to_np
scale = mdl.get_tensor_by_name('538:0:538').to_np
offset = mdl.get_tensor_by_name('539:0:539').to_np
lms = mdl.get_tensor_by_name('540:0:540').to_np
scale_shape = yuv_show.shape + (384/max(yuv_show.shape), 384/max(yuv_show.shape))
dets = post_process(heatmap, scale, offset, lms, thresh=0.3, scale_shape=scale_shape)
ascend.show_bbox(yuv_show, dets, color=(0,255,0) , thickness=1, wait_time_ms=5)
self.assertTrue(True, msg="test ok")
del mdl
del resource
def test_image_infer_006(self):
resource = ascend.Context({0})
model_path = "../tools/model/centerface_static_aipp_nv12.om"
for _, ctx in resource.context_dict.items():
# image decode and resize
img = ascend.Image(ctx)
data = np.fromfile('./image/girl1.jpg', dtype=np.uint8)
yuv = img.imdecode(data)
yuv_resized = img.imresize(yuv, (384, 384))
# do model inference
model = ascend.AscendModel(ctx, model_path)
model.feed_data({'data':yuv_resized})
model.run()
heatmap = model.get_tensor_by_name('537:0:537').to_np
scale = model.get_tensor_by_name('538:0:538').to_np
offset = model.get_tensor_by_name('539:0:539').to_np
lms = model.get_tensor_by_name('540:0:540').to_np
scale_shape = yuv.shape + (384/max(yuv.shape), 384/max(yuv.shape))
dets = post_process(heatmap, scale, offset, lms, thresh=0.3, scale_shape=scale_shape)
ascend.show_bbox(yuv, dets, color=(0,255,0) , thickness=2, wait_time_ms=0)
self.assertTrue(dets is not None, msg="test ok")
del model
del resource
def test_model_profiling_007(self):
context = ascend.Context({1})
model_path = "../tools/model/yolov5s_bs1_fp16.om"
array = np.ones(shape=(1, 3, 640, 640), dtype='float16')
data = ascend.AscendArray.clone(array)
for ctx in context:
model = ascend.AscendModel(ctx, model_path)
model.feed_data({'images':data})
print(model.tensor_names)
prof = ascend.Profiling(ctx, model.model_id)
@prof.elapse_time
@prof.profiling
def run():
model.run()
run()
prof.info_print(sort=True)
ascend_out = model.get_tensor_by_name('Transpose_271:0')
out = ascend_out.to_np
# print(ascend_out.to_np)
# print(out.data)
self.assertTrue(out is not None, msg="test ok")
del model
del context
def test_mul_device_008(self):
context = ascend.Context({0, 1})
model_path1 = "../tools/model/BERT_text.om"
model_path2 = "../tools/model/centerface_static_aipp_nv12.om"
model1 = ascend.AscendModel(context.context_dict[0], model_path1)
array = np.ones(shape=(1, 512), dtype='int32')
data = ascend.AscendArray.clone(array)
        model1.feed_data({'input_ids':data, 'input_mask':data})
        model1.run()
        ascend_out = model1.get_tensor_by_name('item_embedding:0')
print(ascend_out.to_np)
model2 = ascend.AscendModel(context.context_dict[1], model_path2)
img = ascend.Image(context.context_dict[1])
data = np.fromfile('./image/girl1.jpg', dtype=np.uint8)
yuv = img.imdecode(data)
yuv_resized = img.imresize(yuv, (384, 384))
        model2.feed_data({'data':yuv_resized})
        model2.run()
        heatmap = model2.get_tensor_by_name('537:0:537').to_np
        scale = model2.get_tensor_by_name('538:0:538').to_np
        offset = model2.get_tensor_by_name('539:0:539').to_np
        lms = model2.get_tensor_by_name('540:0:540').to_np
scale_shape = yuv.shape + (384/max(yuv.shape), 384/max(yuv.shape))
dets = post_process(heatmap, scale, offset, lms, thresh=0.3, scale_shape=scale_shape)
ascend.show_bbox(yuv, dets, color=(0,255,0) , thickness=2, wait_time_ms=0)
if __name__ == '__main__':
#####################################
# 1.test single case
# InferTest is the object name, test_TS_001 is the case name
suite = unittest.TestSuite()
# suite.addTest(InferTest("test_rawdata_infer_001"))
# suite.addTest(InferTest("test_rawdata_infer_002"))
# suite.addTest(InferTest("test_rawdata_infer_003"))
# suite.addTest(InferTest("test_video_infer_004"))
# suite.addTest(InferTest("test_batch_image_infer_005"))
# suite.addTest(InferTest("test_image_infer_006"))
suite.addTest(InferTest("test_model_profiling_007"))
# suite.addTest(InferTest("test_model_profiling_002"))
runner = unittest.TextTestRunner().run(suite)
######################################
# 2. test all case
# unittest.main(testRunner=unittest.TextTestRunner(stream=None, verbosity=2))
|
"""aws lambda function to download videos using youtube-dl to a S3 bucket.
use it for read/watch-it-later things or for archiving."""
import boto3
import youtube_dl
import os
# define s3 bucket
s3_bucket = os.environ["S3_BUCKET"]
region = os.environ["AWS_REGION"]
# define boto client
s3 = boto3.client("s3", region_name=region)
# define youtube-dl options
ytdl_options = {
"format": "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best",
"outtmpl": "/tmp/%(title)s.%(ext)s",
"restrictfilenames": True,
"cachedir": False,
}
def download_file(video):
try:
ytdl = youtube_dl.YoutubeDL(ytdl_options)
except:
raise
info = ytdl.extract_info(video, download=True)
return ytdl.prepare_filename(info)
def s3_upload(filename):
"""upload file to s3"""
try:
print("uploading {} to s3".format(filename))
s3.upload_file(filename, s3_bucket, os.path.basename(filename))
except:
raise
return True
def lambda_handler(event, context):
"""aws lambda handler to down/upload files."""
input_url = event
try:
filename = download_file(input_url)
except:
raise
try:
upload = s3_upload(filename) # trigger s3 upload
except:
raise
return True
if __name__ == "__main__":
lambda_handler("https://www.youtube.com/watch?v=7bXjWRXDFV8", None)
|
n = int(input())
for a in range(1, n+1):
for b in range(1, n+1):
for c in range(1, n+1):
print(chr(96+a)+chr(96+b)+chr(96+c)) |
import numpy as np
from bsym.configuration import Configuration
def is_square( m ):
"""
Test whether a numpy matrix is square.
Args:
m (np.matrix): The matrix.
Returns:
(bool): True | False.
"""
return m.shape[0] == m.shape[1]
def is_permutation_matrix( m ):
"""
Test whether a numpy array is a `permutation matrix`_.
.. _permutation_matrix: https://en.wikipedia.org/wiki/Permutation_matrix
Args:
        m (np.matrix): The matrix.
Returns:
(bool): True | False.
"""
m = np.asanyarray(m)
return (m.ndim == 2 and m.shape[0] == m.shape[1] and
(m.sum(axis=0) == 1).all() and
(m.sum(axis=1) == 1).all() and
((m == 1) | (m == 0)).all())
class SymmetryOperation:
"""
`SymmetryOperation` class.
"""
def __init__( self, matrix, label=None ):
"""
Initialise a `SymmetryOperation` object
Args:
            matrix (numpy.matrix|numpy.ndarray|list): square 2D permutation matrix for this
                symmetry operation, given as a `numpy.matrix`, `numpy.ndarray`, or `list`.
label (default=None) (str): optional string label for this `SymmetryOperation` object.
Raises:
TypeError: if matrix is not `numpy.matrix`, `numpy.ndarray`, or `list`.
ValueError: if matrix is not square.
ValueError: if matrix is not a `permutation matrix`_.
.. _permutation_matrix: https://en.wikipedia.org/wiki/Permutation_matrix
Notes:
To construct a `SymmetryOperation` object from a vector of site mappings
use the `SymmetryOperation.from_vector()` method.
Returns:
None
"""
        if isinstance( matrix, ( np.matrix, np.ndarray, list ) ):
            self.matrix = np.array( matrix )
        else:
            raise TypeError
if not is_square( self.matrix ):
raise ValueError('Not a square matrix')
if not is_permutation_matrix( self.matrix ):
raise ValueError('Not a permutation matrix')
self.label = label
self.index_mapping = np.array( [ np.array(row).tolist().index(1) for row in matrix ] )
def __mul__( self, other ):
"""
Multiply this `SymmetryOperation` matrix with another `SymmetryOperation`.
Args:
other (SymmetryOperation, Configuration): the other symmetry operation or configuration or matrix
for the matrix multiplication self * other.
Returns:
(SymmetryOperation): a new `SymmetryOperation` instance with the resultant matrix.
(Configuration): if `other` is a `Configuration`.
"""
if isinstance( other, SymmetryOperation ):
return SymmetryOperation( self.matrix.dot( other.matrix ) )
elif isinstance( other, Configuration ):
return self.operate_on( other )
else:
raise TypeError
def invert( self, label=None ):
"""
Invert this `SymmetryOperation` object.
Args:
None
Returns:
A new `SymmetryOperation` object corresponding to the inverse matrix operation.
"""
return SymmetryOperation( np.linalg.inv( self.matrix ).astype( int ), label=label )
@classmethod
def from_vector( cls, vector, count_from_zero=False, label=None ):
"""
Initialise a SymmetryOperation object from a vector of site mappings.
Args:
vector (list): vector of integers defining a symmetry operation mapping.
count_from_zero (default = False) (bool): set to True if the site index counts from zero.
label (default=None) (str): optional string label for this `SymmetryOperation` object.
Returns:
a new SymmetryOperation object
"""
if not count_from_zero:
vector = [ x - 1 for x in vector ]
dim = len( vector )
matrix = np.zeros( ( dim, dim ) )
for index, element in enumerate( vector ):
matrix[ element, index ] = 1
new_symmetry_operation = cls( matrix, label=label )
return new_symmetry_operation
def similarity_transform( self, s, label=None ):
"""
Generate the SymmetryOperation produced by a similarity transform S^{-1}.M.S
Args:
s: the symmetry operation or matrix S.
label (:obj:`str`, optional): the label to assign to the new SymmetryOperation. Defaults to None.
Returns:
the SymmetryOperation produced by the similarity transform
"""
s_new = s.invert() * ( self * s )
if label:
s_new.set_label( label )
return s_new
def operate_on( self, configuration ):
"""
        Return the Configuration generated by applying this symmetry operation
Args:
configuration (Configuration): the configuration / occupation vector to operate on
Returns:
(Configuration): the new configuration obtained by operating on configuration with this symmetry operation.
"""
if not isinstance( configuration, Configuration ):
raise TypeError
return Configuration( configuration.vector[ self.index_mapping ] )
#return Configuration( self.matrix.dot( configuration.vector ) )
def character( self ):
"""
Return the character of this symmetry operation (the trace of `self.matrix`).
Args:
none
Returns:
np.trace( self.matrix )
"""
return np.trace( self.matrix )
def as_vector( self, count_from_zero=False ):
"""
Return a vector representation of this symmetry operation
Args:
count_from_zero (default = False) (bool): set to True if the vector representation counts from zero
Returns:
a vector representation of this symmetry operation (as a list)
"""
offset = 0 if count_from_zero else 1
return [ row.tolist().index( 1 ) + offset for row in self.matrix.T ]
def set_label( self, label ):
"""
Set the label for this symmetry operation.
Args:
label: label to set for this symmetry operation
Returns:
self
"""
self.label = label
return self
def pprint( self ):
"""
Pretty print for this symmetry operation
Args:
None
Returns:
None
"""
label = self.label if self.label else '---'
print( label + ' : ' + ' '.join( [ str(e) for e in self.as_vector() ] ) )
def __repr__( self ):
label = self.label if self.label else '---'
return 'SymmetryOperation\nlabel(' + label + ")\n" + self.matrix.__repr__()
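# Hedged usage sketch (not part of the original module): builds the cyclic permutation
# 1 -> 2 -> 3 -> 1 from a site-mapping vector, as suggested in the class Notes, and
# exercises the public API defined above.
if __name__ == '__main__':
    s = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
    s.pprint() # prints "C3 : 2 3 1"
    print( s.character() ) # trace of the permutation matrix (0.0 here: no site is left fixed)
    print( ( s * s * s ).as_vector() ) # applying a 3-cycle three times gives the identity [1, 2, 3]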
|
from tkinter import *
import sqlite3
user_pas=Tk()
user_pas.geometry("300x200")
user_pas.title("User_pas")
db=sqlite3.connect("user_pas.db")
#cursor
c=db.cursor()
#create table if it does not already exist, so the script can be re-run
c.execute("""CREATE TABLE IF NOT EXISTS user_pas(
username text,
password text
)""")
#commit_changes
db.commit()
#close connection
db.close()
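# Hedged sketch (not part of the original script): a hypothetical helper that a GUI
# callback could call to store a credential pair using a parameterised query (avoids
# SQL injection). It reuses the database and table created above.
def save_credentials(username, password):
    conn = sqlite3.connect("user_pas.db")
    cur = conn.cursor()
    cur.execute("INSERT INTO user_pas VALUES (?, ?)", (username, password))
    conn.commit()
    conn.close()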
user_pas.mainloop()
|
import collections
from typing import List
class Solution:
def largestValsFromLabels(self, values: List[int], labels: List[int], num_wanted: int, use_limit: int) -> int:
idxes = list(range(len(values)))
ans = 0
m = collections.Counter()
for idx in sorted(idxes, key=lambda x : values[x], reverse=True):
label = labels[idx]
if m[label] < use_limit:
m[label] += 1
ans += values[idx]
num_wanted -= 1
if not num_wanted:
break
return ans
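# Hedged usage sketch (not part of the original snippet): LeetCode 1090 sample input.
# Greedy by value: pick 5 (label 1), skip 4 (label 1 exhausted), pick 3, skip 2, pick 1 -> 9.
if __name__ == '__main__':
    print(Solution().largestValsFromLabels([5, 4, 3, 2, 1], [1, 1, 2, 2, 3], 3, 1)) # 9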
|
# coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Booking(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, reference=None, lead_customer_id=None, agent_id=None, agent_reference=None, source=None, price=None, discount=None, status=None, reserved_until=None, cancellation_fee=None, comment=None, parent_id=None, decimal_price=None, real_decimal_price=None, arrival_date=None, created_at_local=None, lead_customer=None, payments=None, refunds=None):
"""
Booking - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'reference': 'str',
'lead_customer_id': 'int',
'agent_id': 'int',
'agent_reference': 'str',
'source': 'str',
'price': 'int',
'discount': 'int',
'status': 'str',
'reserved_until': 'date',
'cancellation_fee': 'int',
'comment': 'str',
'parent_id': 'int',
'decimal_price': 'str',
'real_decimal_price': 'str',
'arrival_date': 'date',
'created_at_local': 'date',
'lead_customer': 'Customer',
'payments': 'list[Payment]',
'refunds': 'list[Refund]'
}
self.attribute_map = {
'id': 'id',
'reference': 'reference',
'lead_customer_id': 'lead_customer_id',
'agent_id': 'agent_id',
'agent_reference': 'agent_reference',
'source': 'source',
'price': 'price',
'discount': 'discount',
'status': 'status',
'reserved_until': 'reserved_until',
'cancellation_fee': 'cancellation_fee',
'comment': 'comment',
'parent_id': 'parent_id',
'decimal_price': 'decimal_price',
'real_decimal_price': 'real_decimal_price',
'arrival_date': 'arrival_date',
'created_at_local': 'created_at_local',
'lead_customer': 'lead_customer',
'payments': 'payments',
'refunds': 'refunds'
}
self._id = id
self._reference = reference
self._lead_customer_id = lead_customer_id
self._agent_id = agent_id
self._agent_reference = agent_reference
self._source = source
self._price = price
self._discount = discount
self._status = status
self._reserved_until = reserved_until
self._cancellation_fee = cancellation_fee
self._comment = comment
self._parent_id = parent_id
self._decimal_price = decimal_price
self._real_decimal_price = real_decimal_price
self._arrival_date = arrival_date
self._created_at_local = created_at_local
self._lead_customer = lead_customer
self._payments = payments
self._refunds = refunds
@property
def id(self):
"""
Gets the id of this Booking.
:return: The id of this Booking.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Booking.
:param id: The id of this Booking.
:type: int
"""
self._id = id
@property
def reference(self):
"""
Gets the reference of this Booking.
:return: The reference of this Booking.
:rtype: str
"""
return self._reference
@reference.setter
def reference(self, reference):
"""
Sets the reference of this Booking.
:param reference: The reference of this Booking.
:type: str
"""
self._reference = reference
@property
def lead_customer_id(self):
"""
Gets the lead_customer_id of this Booking.
:return: The lead_customer_id of this Booking.
:rtype: int
"""
return self._lead_customer_id
@lead_customer_id.setter
def lead_customer_id(self, lead_customer_id):
"""
Sets the lead_customer_id of this Booking.
:param lead_customer_id: The lead_customer_id of this Booking.
:type: int
"""
self._lead_customer_id = lead_customer_id
@property
def agent_id(self):
"""
Gets the agent_id of this Booking.
:return: The agent_id of this Booking.
:rtype: int
"""
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
"""
Sets the agent_id of this Booking.
:param agent_id: The agent_id of this Booking.
:type: int
"""
self._agent_id = agent_id
@property
def agent_reference(self):
"""
Gets the agent_reference of this Booking.
:return: The agent_reference of this Booking.
:rtype: str
"""
return self._agent_reference
@agent_reference.setter
def agent_reference(self, agent_reference):
"""
Sets the agent_reference of this Booking.
:param agent_reference: The agent_reference of this Booking.
:type: str
"""
self._agent_reference = agent_reference
@property
def source(self):
"""
Gets the source of this Booking.
:return: The source of this Booking.
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this Booking.
:param source: The source of this Booking.
:type: str
"""
self._source = source
@property
def price(self):
"""
Gets the price of this Booking.
:return: The price of this Booking.
:rtype: int
"""
return self._price
@price.setter
def price(self, price):
"""
Sets the price of this Booking.
:param price: The price of this Booking.
:type: int
"""
self._price = price
@property
def discount(self):
"""
Gets the discount of this Booking.
:return: The discount of this Booking.
:rtype: int
"""
return self._discount
@discount.setter
def discount(self, discount):
"""
Sets the discount of this Booking.
:param discount: The discount of this Booking.
:type: int
"""
self._discount = discount
@property
def status(self):
"""
Gets the status of this Booking.
:return: The status of this Booking.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Booking.
:param status: The status of this Booking.
:type: str
"""
self._status = status
@property
def reserved_until(self):
"""
Gets the reserved_until of this Booking.
:return: The reserved_until of this Booking.
:rtype: date
"""
return self._reserved_until
@reserved_until.setter
def reserved_until(self, reserved_until):
"""
Sets the reserved_until of this Booking.
:param reserved_until: The reserved_until of this Booking.
:type: date
"""
self._reserved_until = reserved_until
@property
def cancellation_fee(self):
"""
Gets the cancellation_fee of this Booking.
:return: The cancellation_fee of this Booking.
:rtype: int
"""
return self._cancellation_fee
@cancellation_fee.setter
def cancellation_fee(self, cancellation_fee):
"""
Sets the cancellation_fee of this Booking.
:param cancellation_fee: The cancellation_fee of this Booking.
:type: int
"""
self._cancellation_fee = cancellation_fee
@property
def comment(self):
"""
Gets the comment of this Booking.
:return: The comment of this Booking.
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""
Sets the comment of this Booking.
:param comment: The comment of this Booking.
:type: str
"""
self._comment = comment
@property
def parent_id(self):
"""
Gets the parent_id of this Booking.
:return: The parent_id of this Booking.
:rtype: int
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""
Sets the parent_id of this Booking.
:param parent_id: The parent_id of this Booking.
:type: int
"""
self._parent_id = parent_id
@property
def decimal_price(self):
"""
Gets the decimal_price of this Booking.
:return: The decimal_price of this Booking.
:rtype: str
"""
return self._decimal_price
@decimal_price.setter
def decimal_price(self, decimal_price):
"""
Sets the decimal_price of this Booking.
:param decimal_price: The decimal_price of this Booking.
:type: str
"""
self._decimal_price = decimal_price
@property
def real_decimal_price(self):
"""
Gets the real_decimal_price of this Booking.
:return: The real_decimal_price of this Booking.
:rtype: str
"""
return self._real_decimal_price
@real_decimal_price.setter
def real_decimal_price(self, real_decimal_price):
"""
Sets the real_decimal_price of this Booking.
:param real_decimal_price: The real_decimal_price of this Booking.
:type: str
"""
self._real_decimal_price = real_decimal_price
@property
def arrival_date(self):
"""
Gets the arrival_date of this Booking.
:return: The arrival_date of this Booking.
:rtype: date
"""
return self._arrival_date
@arrival_date.setter
def arrival_date(self, arrival_date):
"""
Sets the arrival_date of this Booking.
:param arrival_date: The arrival_date of this Booking.
:type: date
"""
self._arrival_date = arrival_date
@property
def created_at_local(self):
"""
Gets the created_at_local of this Booking.
:return: The created_at_local of this Booking.
:rtype: date
"""
return self._created_at_local
@created_at_local.setter
def created_at_local(self, created_at_local):
"""
Sets the created_at_local of this Booking.
:param created_at_local: The created_at_local of this Booking.
:type: date
"""
self._created_at_local = created_at_local
@property
def lead_customer(self):
"""
Gets the lead_customer of this Booking.
:return: The lead_customer of this Booking.
:rtype: Customer
"""
return self._lead_customer
@lead_customer.setter
def lead_customer(self, lead_customer):
"""
Sets the lead_customer of this Booking.
:param lead_customer: The lead_customer of this Booking.
:type: Customer
"""
self._lead_customer = lead_customer
@property
def payments(self):
"""
Gets the payments of this Booking.
:return: The payments of this Booking.
:rtype: list[Payment]
"""
return self._payments
@payments.setter
def payments(self, payments):
"""
Sets the payments of this Booking.
:param payments: The payments of this Booking.
:type: list[Payment]
"""
self._payments = payments
@property
def refunds(self):
"""
Gets the refunds of this Booking.
:return: The refunds of this Booking.
:rtype: list[Refund]
"""
return self._refunds
@refunds.setter
def refunds(self, refunds):
"""
Sets the refunds of this Booking.
:param refunds: The refunds of this Booking.
:type: list[Refund]
"""
self._refunds = refunds
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
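# Hedged usage sketch (not part of the generated model): instantiate a Booking and
# serialise it; the field values below are illustrative only.
if __name__ == '__main__':
    booking = Booking(id=1, reference='BK-0001', status='confirmed', price=1000)
    print(booking.to_dict()['reference']) # -> BK-0001
    print(booking == Booking(id=1, reference='BK-0001', status='confirmed', price=1000)) # -> True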
|
### Code: Lambda function to filter ec2, s3 and vpc events.
### Author: Nitin Sharma
### SDK: Python 3.6 boto3
### More info: Refer to 4hathacker.in
import json
import base64
import gzip
import boto3
import zlib
from datetime import datetime
from io import StringIO
import ast
import random
print('Loading function')
def lambda_handler(event, context):
# ---------------------
# Seed generation and timestamp generation for unique file name
random.seed()
curr_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# ---------------------
# ---------------------
# All log events in encrypted format and deciphering it
outEvent = str(event['awslogs']['data'])
outEvent = zlib.decompress(base64.b64decode(outEvent), 16 + zlib.MAX_WBITS).decode('utf-8')
# print (outEvent)
# ---------------------
# ---------------------
# Extracting list of log-events
cleanEvent = json.loads(outEvent)
cleanLog=cleanEvent['logEvents']
# print (cleanLog)
# ---------------------
# ---------------------
# Declaring filter event names in services dictionary
services = {
"vpc":("CreateVpc","CreateVpcEndpoint","CreateVpcPeeringConnection","CreateVpcPeeringAuthorization","CreateVpcPeeringConnection","CreateVpcLink","CreateDefaultVpc","DeleteVpc","DeleteVpcEndpoints","DeleteVpcPeeringConnection","DeleteVpcPeeringAuthorization","DeleteVpcPeeringConnection","DeleteVpcLink","UpdateVpcLink","ModifyVpcAttribute","ModifyVpcEndpoint","AcceptVpcPeeringConnection","AssociateVpcCidrBlock","AttachClassicLinkVpc","DetachClassicLinkVpc","DisableVpcClassicLink","DisassociateVpcCidrBlock","EnableDefaultTenancyForVpc","EnableVpcClassicLink","MoveAddressToVpc","RejectVpcPeeringConnection","AssociateVPCWithHostedZone","DisassociateVPCFromHostedZone"),
"ec2":("RunInstances", "RebootInstances", "StartInstances", "StopInstances", "TerminateInstances","CreateCustomerGateway","CreateDefaultSubnet","CreateVpnGateway","CreateDhcpOptions","CreateEgressOnlyInternetGateway","CreateImage","CreateInstanceExportTask","CreateInternetGateway","CreateKeyPair","CreateNatGateway","CreateNetworkAcl","CreateNetworkAclEntry","CreateNetworkInterface","CreatePlacementGroup","CreateReservedInstancesListing","CreateRoute","CreateRouteTable","CreateSnapshot","CreateSpotDatafeedSubscription","CreateVpnConnectionRoute","CreateSubnet","CreateTags","CreateVolume","CreateVpnConnection","DeleteCustomerGateway","DeleteVpnGateway","DeleteDhcpOptions","DeleteEgressOnlyInternetGateway","DeleteInternetGateway","DeleteKeyPair","DeleteNatGateway","DeleteNetworkAcl","DeleteNetworkAclEntry","DeleteNetworkInterface","DeletePlacementGroup","DeleteRoute","DeleteRouteTable","DeleteVpnConnectionRoute","DeleteSnapshot","DeleteSpotDatafeedSubscription","DeleteSubnet","DeleteTags","DeleteVolume","DeleteVpnConnection","DeleteVpnConnectionRoute","ModifyHosts","ModifyIdentityIdFormat","ModifyImageAttribute","ModifyInstanceAttribute","ModifyInstancePlacement","ModifyNetworkInterfaceAttribute","ModifyReservedInstances","ModifySnapshotAttribute","ModifySubnetAttribute","ModifyVolume","ModifyVolumeAttribute","AcceptReservedInstancesExchangeQuote","AcceptVpcPeeringConnection","AllocateAddress","AllocateHosts","AssignIpv6Addresses","AssignPrivateIpAddresses","AssociateAddress","AssociateDhcpOptions","AssociateIamInstanceProfile","AssociateRouteTable","AssociateSubnetCidrBlock","AssociateVpcCidrBlock","AttachClassicLinkVpc","AttachInternetGateway","AttachNetworkInterface","AttachVolume","AttachVpnGateway","AuthorizeSecurityGroupEgress","AuthorizeSecurityGroupIngress","BundleInstance","CancelBundleTask","CancelConversionTask","CancelExportTask","CancelImportTask","CancelReservedInstancesListing","CancelSpotInstanceRequests","CopyImage","CopySnapshot","DeregisterImage","DetachClassicLinkVpc","DetachInternetGateway","DetachNetworkInterface","DetachVolume","DetachVpnGateway","DisableVgwRoutePropagation","DisableVpcClassicLink","DisassociateAddress","DisassociateIamInstanceProfile","DisassociateRouteTable","DisassociateSubnetCidrBlock","DisassociateVpcCidrBlock","EnableDefaultTenancyForVpc","EnableVgwRoutePropagation","EnableVolumeIO","EnableVpcClassicLink","ImportImage","ImportInstance","ImportKeyPair","ImportSnapshot","ImportVolume","DeregisterImage","DetachClassicLinkVpc","DetachInternetGateway","DetachNetworkInterface","DetachVolume","DetachVpnGateway","DisableVgwRoutePropagation","DisableVpcClassicLink","DisassociateAddress","DisassociateIamInstanceProfile","DisassociateRouteTable","DisassociateSubnetCidrBlock","DisassociateVpcCidrBlock","EnableDefaultTenancyForVpc","EnableVgwRoutePropagation","EnableVolumeIO","EnableVpcClassicLink","ImportImage","ImportInstance","ImportKeyPair","ImportSnapshot","ImportVolume"),
"s3":("CreateBucket","DeleteBucket","DeleteBucketCors","DeleteBucketEncryption","DeleteBucketLifecycle","DeleteBucketPolicy","DeleteBucketReplication","DeleteBucketTagging","DeleteBucketWebsite","PutBucketAcl","PutBucketCors","PutBucketEncryption","PutBucketLifecycle","PutBucketLogging","PutBucketNotification","PutBucketPolicy","PutBucketReplication","PutBucketRequestPayment","PutBucketTagging","PutBucketVersioning","PutBucketWebsite")
}
# ---------------------
# ---------------------
# Creating S3 boto3 client
client = boto3.client('s3')
# ---------------------
# ----------------------
# Filtering the events and saving in respective folders in s3 bucket
for i in cleanLog:
evName = ((json.loads(i["message"]))["eventName"])
for k,v in services.items():
if v.count(evName) > 0:
key = k + '/' + evName + '_' + curr_time + "_" + str(random.random()).replace('.','') + ".json"
response = client.put_object(Body=json.dumps(json.loads(i["message"])), Bucket='all-logs-filtered-123', Key=key)
break
# -----------------------
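# Hedged local-test sketch (not part of the original function): a hypothetical helper that
# builds the gzip+base64 payload shape CloudWatch Logs delivers, so the decoding and
# filtering path above can be exercised. Passing its result to lambda_handler still needs
# AWS credentials and the 'all-logs-filtered-123' bucket to exist.
def _build_test_event():
    sample_message = json.dumps({"eventName": "CreateBucket", "eventSource": "s3.amazonaws.com"})
    payload = json.dumps({"logEvents": [{"id": "1", "timestamp": 0, "message": sample_message}]})
    compressed = gzip.compress(payload.encode('utf-8'))
    return {"awslogs": {"data": base64.b64encode(compressed).decode('utf-8')}}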
|
from tests import BiTestCase as TestCase
class TestApp(TestCase):
def test_the_testing(self):
self.assertTrue(True)
|
import unittest
try:
from unittest.mock import Mock, patch
except:
from mock import Mock, patch
from ixnetwork_restpy.tests.fixtures.mocks import Mocks
from ixnetwork_restpy.testplatform.testplatform import TestPlatform
class TestConnect(unittest.TestCase):
@patch('ixnetwork_restpy.connection.Connection._request', side_effect=Mocks.mocked_request)
def test_ipv6_hostname(self, mock_request):
for hostname in ['2620:10d:c0a8:21::2a', '[::1]']:
testplatform = TestPlatform(hostname, rest_port='11009')
assert('[' in testplatform.Hostname and ']' in testplatform.Hostname)
@patch('ixnetwork_restpy.connection.Connection._request', side_effect=Mocks.mocked_request)
def test_ipv4_hostname(self, mock_request):
for hostname in ['127.0.0.1']:
testplatform = TestPlatform(hostname, rest_port='11009')
assert('[' not in testplatform.Hostname and ']' not in testplatform.Hostname)
# @patch('ixnetwork_restpy.connection.Connection._request', side_effect=Mocks.mocked_request)
# def test_ipv6_redirect(self, mock_request):
# for hostname in ['2620:10d:c0a8:21::2a', '[::1]']:
# testplatform = TestPlatform(hostname, rest_port='11009')
# testplatform._connection._read('api/v1/redirect')
# assert('[' in testplatform.Hostname and ']' in testplatform.Hostname)
if __name__ == '__main__':
unittest.main() |
import sys
sys.path.append("..") # 先跳出当前目录
from bean.word_unit import WordUnit
class EntityCombine:
    """Merges the words obtained from segmentation/POS tagging with the NE tags (netags)."""
    def combine(self, words, netags):
        """Merge words according to the named entities' B-I-E tags
        Args:
            words: WordUnit list, the words obtained after segmentation and POS tagging
            netags: list, the named entity recognition results
        Returns:
            words_combine: WordUnit list, the merged result
        """
        words_combine = [] # stores the merged result
        length = len(netags)
        n = 1 # entity counter, starting from 1
        i = 0
        while i < length:
            if 'B-' in netags[i]:
                newword = words[i].lemma
                j = i + 1
                while j < length:
                    if 'I-' in netags[j]:
                        newword += words[j].lemma
                    elif 'E-' in netags[j]:
                        newword += words[j].lemma
                        j += 1 # the E- token has been consumed, so advance past it
                        break
                    elif 'O' == netags[j] or (j+1) == length:
                        break
                    j += 1
                words_combine.append(WordUnit(n, newword, self.judge_postag(netags[j-1])))
                n += 1
                i = j
            else:
                words[i].ID = n
                n += 1
                words_combine.append(words[i])
                i += 1
        return self.combine_comm(words_combine)
    def combine_comm(self, words):
        """Merge ordinary entities according to their POS tags
        Args:
            words: WordUnit list, the words after named entity merging
        Returns:
            words_combine: WordUnit list, the words after ordinary entity merging
        """
        newword = words[0].lemma # the first word, used as the new word
        words_combine = [] # stores the merged result
        n = 1
        i = 1 # ID of the current word
        while i < len(words):
            word = words[i]
            # merge words: (both adjacent words are entities) and (their POS tags are equal or the previous word's tag in ["nz", "j"] or the current word's tag in ["nz", "j"])
            if (self.is_entity(word.postag) and self.is_entity(words[i-1].postag)
                and (word.postag in {'nz', 'j'} or words[i-1].postag in {'nz', 'j'})):
                newword += word.lemma
            else:
                words_combine.append(WordUnit(n, newword, words[i-1].postag)) # append the previous (accumulated) word
                n += 1
                newword = word.lemma # the current word becomes the new word
            i += 1
        # append the last word
        words_combine.append(WordUnit(n, newword, words[len(words)-1].postag))
        return words_combine
    def judge_postag(self, netag):
        """Determine the POS tag of the merged entity from the NER result
        Args:
            netag: string, the NE tag of the word
        Returns:
            entity_postag: string, the inferred POS tag of the merged entity
        """
        entity_postag = ''
        if '-Ns' in netag:
            entity_postag = 'ns' # place name
        elif '-Ni' in netag:
            entity_postag = 'ni' # organization name
        elif '-Nh' in netag:
            entity_postag = 'nh' # person name
        return entity_postag
    def is_entity(self, netag):
        """Judge from the POS tag whether the word is a candidate entity
        Args:
            netag: string, the POS tag of the word
        Returns:
            flag: bool, entity flag: entity (True), non-entity (False)
        """
        flag = False # by default the word is not flagged as an entity
        # place name, organization name, person name, other proper noun, abbreviation
        if netag in {'ns', 'ni', 'nh', 'nz', 'j'}:
            flag = True
        return flag
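# Hedged usage sketch (not part of the original module): merges a toy NER result. It
# assumes WordUnit(ID, lemma, postag), matching how WordUnit is constructed above; the
# tokens and B-/E- tags below are illustrative only.
if __name__ == '__main__':
    words = [WordUnit(1, '北京', 'ns'), WordUnit(2, '大学', 'n'), WordUnit(3, '成立', 'v')]
    netags = ['B-Ni', 'E-Ni', 'O']
    for w in EntityCombine().combine(words, netags):
        print(w.ID, w.lemma, w.postag) # the B-/E- pair is merged into a single 'ni' entity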
|
import os
import pytest
import yaml
from saucebindings.options import SauceOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver import __version__ as seleniumVersion
class TestInit(object):
def test_defaults(self):
sauce = SauceOptions.firefox()
assert sauce.browser_name == 'firefox'
assert sauce.browser_version == 'latest'
assert sauce.platform_name == 'Windows 10'
def test_accepts_browser_version_platform_name(self):
sauce = SauceOptions.firefox(browserVersion='75.0', platformName='macOS 10.13')
assert sauce.browser_name == 'firefox'
assert sauce.browser_version == '75.0'
assert sauce.platform_name == 'macOS 10.13'
def test_accepts_w3c_values_with_dict(self):
timeouts = {'implicit': 1,
'pageLoad': 59,
'script': 29}
options = {'acceptInsecureCerts': True,
'pageLoadStrategy': 'eager',
'setWindowRect': True,
'unhandledPromptBehavior': 'accept',
'strictFileInteractability': True,
'timeouts': timeouts}
sauce = SauceOptions.firefox(**options)
assert sauce.accept_insecure_certs is True
assert sauce.page_load_strategy == 'eager'
assert sauce.set_window_rect is True
assert sauce.unhandled_prompt_behavior == 'accept'
assert sauce.strict_file_interactability is True
assert sauce.timeouts == timeouts
def test_accepts_w3c_values_as_params(self):
timeouts = {'implicit': 1,
'pageLoad': 59,
'script': 29}
sauce = SauceOptions.firefox(acceptInsecureCerts=True,
pageLoadStrategy='eager',
setWindowRect=True,
unhandledPromptBehavior='accept',
strictFileInteractability=True,
timeouts=timeouts)
assert sauce.accept_insecure_certs is True
assert sauce.page_load_strategy == 'eager'
assert sauce.set_window_rect is True
assert sauce.unhandled_prompt_behavior == 'accept'
assert sauce.strict_file_interactability is True
assert sauce.timeouts == timeouts
def test_accepts_sauce_values_with_dict(self):
options = {'build': 'bar',
'geckodriverVersion': "71",
'commandTimeout': 2,
'customData': {'foo': 'foo',
'bar': 'bar'},
'idleTimeout': 3,
'maxDuration': 1,
'name': 'foo',
'parentTunnel': 'bar',
'prerun': {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120},
'priority': 0,
'public': 'team',
'recordLogs': False,
'recordScreenshots': False,
'recordVideo': False,
'screenResolution': '10x10',
'seleniumVersion': '3.14',
'tags': ['foo', 'bar'],
'timeZone': 'Foo',
'tunnelIdentifier': 'foobar',
'videoUploadOnPass': False}
sauce = SauceOptions.firefox(**options)
assert sauce.build == 'bar'
assert sauce.geckodriver_version == '71'
assert sauce.command_timeout == 2
assert sauce.custom_data == {'foo': 'foo',
'bar': 'bar'}
assert sauce.idle_timeout == 3
assert sauce.max_duration == 1
assert sauce.name == 'foo'
assert sauce.parent_tunnel == 'bar'
assert sauce.prerun == {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120}
assert sauce.priority == 0
assert sauce.public == 'team'
assert sauce.record_logs is False
assert sauce.record_screenshots is False
assert sauce.record_video is False
assert sauce.screen_resolution == '10x10'
assert sauce.selenium_version == '3.14'
assert sauce.tags == ['foo', 'bar']
assert sauce.time_zone == 'Foo'
assert sauce.tunnel_identifier == 'foobar'
assert sauce.video_upload_on_pass is False
def test_accepts_sauce_values_as_params(self):
custom_data = {'foo': 'foo',
'bar': 'bar'}
prerun = {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120}
sauce = SauceOptions.firefox(build='bar',
geckodriverVersion='71',
commandTimeout=2,
customData=custom_data,
idleTimeout=3,
maxDuration=1,
name='foo',
parentTunnel='bar',
prerun=prerun,
priority=0,
public='team',
recordLogs=False,
recordScreenshots=False,
recordVideo=False,
screenResolution='10x10',
seleniumVersion='3.14',
tags=['foo', 'bar'],
timeZone='Foo',
tunnelIdentifier='foobar',
videoUploadOnPass=False)
assert sauce.build == 'bar'
assert sauce.geckodriver_version == '71'
assert sauce.command_timeout == 2
assert sauce.custom_data == {'foo': 'foo',
'bar': 'bar'}
assert sauce.idle_timeout == 3
assert sauce.max_duration == 1
assert sauce.name == 'foo'
assert sauce.parent_tunnel == 'bar'
assert sauce.prerun == {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120}
assert sauce.priority == 0
assert sauce.public == 'team'
assert sauce.record_logs is False
assert sauce.record_screenshots is False
assert sauce.record_video is False
assert sauce.screen_resolution == '10x10'
assert sauce.selenium_version == '3.14'
assert sauce.tags == ['foo', 'bar']
assert sauce.time_zone == 'Foo'
assert sauce.tunnel_identifier == 'foobar'
assert sauce.video_upload_on_pass is False
def test_accepts_selenium_browser_options_instance(self):
options = FirefoxOptions()
options.add_argument('--foo')
sauce = SauceOptions.firefox(seleniumOptions=options)
assert sauce.browser_name == 'firefox'
assert sauce.selenium_options['moz:firefoxOptions'] == {'args': ['--foo']}
def test_accepts_w3c_sauce_options_capabilities(self):
browser_options = FirefoxOptions()
browser_options.add_argument('--foo')
options = {'maxDuration': 1,
'commandTimeout': 2}
w3c_options = {'acceptInsecureCerts': True,
'pageLoadStrategy': 'eager'}
options.update(w3c_options)
sauce = SauceOptions.firefox(seleniumOptions=browser_options, **options)
assert sauce.browser_name == 'firefox'
assert sauce.accept_insecure_certs is True
assert sauce.page_load_strategy == 'eager'
assert sauce.max_duration == 1
assert sauce.command_timeout == 2
assert sauce.selenium_options['moz:firefoxOptions'] == {'args': ['--foo']}
def test_default_build_name(self):
os.environ['BUILD_TAG'] = ' '
os.environ['BUILD_NAME'] = 'BUILD NAME'
os.environ['BUILD_NUMBER'] = '123'
sauce = SauceOptions.firefox()
assert sauce.build == 'BUILD NAME: 123'
os.environ.pop("BUILD_TAG")
os.environ.pop("BUILD_NAME")
os.environ.pop("BUILD_NUMBER")
def test_argument_error_as_param(self):
with pytest.raises(AttributeError):
SauceOptions.firefox(foo='Bar')
def test_argument_error_from_dict(self):
options = {'foo': 'Bar'}
with pytest.raises(AttributeError):
SauceOptions.firefox(**options)
class TestSettingSpecificOptions(object):
def test_w3c_options(self):
timeouts = {'implicit': 1,
'pageLoad': 59,
'script': 29}
options = SauceOptions.firefox()
options.browser_version = '7'
options.platform_name = 'macOS 10.14'
options.accept_insecure_certs = True
options.page_load_strategy = 'eager'
options.set_window_rect = True
options.unhandled_prompt_behavior = 'accept'
options.strict_file_interactability = True
options.timeouts = timeouts
assert options.browser_name == 'firefox'
assert options.browser_version == '7'
assert options.platform_name == 'macOS 10.14'
assert options.accept_insecure_certs is True
assert options.page_load_strategy == 'eager'
assert options.set_window_rect is True
assert options.unhandled_prompt_behavior == 'accept'
assert options.strict_file_interactability is True
assert options.timeouts == timeouts
def test_sauce_options(self):
prerun = {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120}
custom_data = {'foo': 'foo',
'bar': 'bar'}
tags = ['foo', 'bar']
options = SauceOptions.firefox()
options.build = 'Sample Build Name'
options.geckodriver_version = '71'
options.command_timeout = 2
options.custom_data = custom_data
options.idle_timeout = 3
options.max_duration = 300
options.name = 'Sample Test Name'
options.parent_tunnel = 'Mommy'
options.prerun = prerun
options.priority = 0
options.public = 'team'
options.record_logs = False
options.record_screenshots = False
options.record_video = False
options.screen_resolution = '10x10'
options.selenium_version = '3.14'
options.tags = tags
options.time_zone = 'San Francisco'
options.tunnel_identifier = 'tunnelname'
options.video_upload_on_pass = False
assert options.build == 'Sample Build Name'
assert options.geckodriver_version == '71'
assert options.command_timeout == 2
assert options.custom_data == custom_data
assert options.idle_timeout == 3
assert options.max_duration == 300
assert options.name == 'Sample Test Name'
assert options.parent_tunnel == 'Mommy'
assert options.prerun == prerun
assert options.priority == 0
assert options.public == 'team'
assert options.record_logs is False
assert options.record_screenshots is False
assert options.record_video is False
assert options.screen_resolution == '10x10'
assert options.selenium_version == '3.14'
assert options.tags == tags
assert options.time_zone == 'San Francisco'
assert options.tunnel_identifier == 'tunnelname'
assert options.video_upload_on_pass is False
def test_setting_browser_name(self):
options = SauceOptions.firefox()
with pytest.raises(AttributeError):
options.browser_name = 'firefox'
def test_setting_invalid_option(self):
options = SauceOptions.firefox()
with pytest.raises(AttributeError):
options.iedriver_version = '3.14'
class TestAddingCapabilities(object):
def test_setting_capabilities(self):
file_location = r'./tests/options.yml'
# file_location = r'../options.yml' # If running locally
with open(file_location) as file:
yml = yaml.safe_load(file)
prerun = {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120}
custom_data = {'foo': 'foo',
'bar': 'bar'}
tags = ['foo', 'bar', 'foobar']
timeouts = {'implicit': 1,
'pageLoad': 59,
'script': 29}
example_values = yml.get("exampleValues")
options = getattr(SauceOptions, example_values['browserName'])()
del example_values['browserName']
options.merge_capabilities(example_values)
assert options.browser_name == 'firefox'
assert options.browser_version == '68'
assert options.platform_name == 'macOS 10.13'
assert options.accept_insecure_certs is True
assert options.page_load_strategy == 'eager'
assert options.set_window_rect is True
assert options.unhandled_prompt_behavior == 'accept'
assert options.strict_file_interactability is True
assert options.timeouts == timeouts
assert options.build == 'Sample Build Name'
assert options.command_timeout == 2
assert options.custom_data == custom_data
assert options.extended_debugging == True
assert options.idle_timeout == 3
assert options.geckodriver_version == '0.23'
assert options.max_duration == 300
assert options.name == 'Sample Test Name'
assert options.parent_tunnel == 'Mommy'
assert options.prerun == prerun
assert options.priority == 0
assert options.public == 'team'
assert options.record_logs is False
assert options.record_screenshots is False
assert options.record_video is False
assert options.screen_resolution == '10x10'
assert options.selenium_version == '3.141.59'
assert options.tags == tags
assert options.time_zone == 'San Francisco'
assert options.tunnel_identifier == 'tunnelname'
assert options.video_upload_on_pass is False
class TestCapabilitiesCreation(object):
def test_capabilities_for_w3c(self):
options = SauceOptions.firefox()
options.browser_version = '7'
options.platform_name = 'macOS 10.14'
options.accept_insecure_certs = True
options.page_load_strategy = 'eager'
options.set_window_rect = True
options.unhandled_prompt_behavior = 'accept'
options.strict_file_interactability = True
options.timeouts = {'implicit': 1,
'pageLoad': 59,
'script': 29}
options.build = "Build Name"
expected_capabilities = {'browserName': 'firefox',
'browserVersion': '7',
'platformName': 'macOS 10.14',
'acceptInsecureCerts': True,
'pageLoadStrategy': 'eager',
'setWindowRect': True,
'unhandledPromptBehavior': 'accept',
'strictFileInteractability': True,
'timeouts': {'implicit': 1,
'pageLoad': 59,
'script': 29},
'sauce:options': {'build': 'Build Name',
'username': os.getenv('SAUCE_USERNAME'),
'accessKey': os.getenv('SAUCE_ACCESS_KEY')}}
assert options.to_capabilities() == expected_capabilities
def test_capabilities_for_sauce(self):
prerun = {'executable': 'http://url.to/your/executable.exe',
'args': ['--silent', '-a', '-q'],
'background': False,
'timeout': 120}
custom_data = {'foo': 'foo',
'bar': 'bar'}
tags = ['foo', 'bar']
options = SauceOptions.firefox()
options.build = 'Sample Build Name'
options.geckodriver_version = '71'
options.command_timeout = 2
options.custom_data = custom_data
options.idle_timeout = 3
options.max_duration = 300
options.name = 'Sample Test Name'
options.parent_tunnel = 'Mommy'
options.prerun = prerun
options.priority = 0
options.public = 'team'
options.record_logs = False
options.record_screenshots = False
options.record_video = False
options.screen_resolution = '10x10'
options.selenium_version = '3.14'
options.tags = tags
options.time_zone = 'San Francisco'
options.tunnel_identifier = 'tunnelname'
options.video_upload_on_pass = False
expected_capabilities = {'browserName': 'firefox',
'browserVersion': 'latest',
'platformName': 'Windows 10',
'sauce:options': {'build': 'Sample Build Name',
'geckodriverVersion': '71',
'commandTimeout': 2,
'customData': {'foo': 'foo',
'bar': 'bar'},
'idleTimeout': 3,
'maxDuration': 300,
'name': 'Sample Test Name',
'parentTunnel': 'Mommy',
'prerun': prerun,
'priority': 0,
'public': 'team',
'recordLogs': False,
'recordScreenshots': False,
'recordVideo': False,
'screenResolution': '10x10',
'seleniumVersion': '3.14',
'tags': ['foo', 'bar'],
'timeZone': 'San Francisco',
'tunnelIdentifier': 'tunnelname',
'videoUploadOnPass': False,
'username': os.getenv('SAUCE_USERNAME'),
'accessKey': os.getenv('SAUCE_ACCESS_KEY')}}
assert options.to_capabilities() == expected_capabilities
def test_capabilities_for_selenium(self):
browser_options = FirefoxOptions()
browser_options.add_argument('--foo')
options = SauceOptions.firefox(seleniumOptions=browser_options)
options.build = 'Sample Build Name'
expected_capabilities = {'browserName': 'firefox',
'browserVersion': 'latest',
'platformName': 'Windows 10',
'pageLoadStrategy': 'normal',
'moz:debuggerAddress': True,
'moz:firefoxOptions': {'args': ['--foo']},
'sauce:options': {'build': 'Sample Build Name',
'username': os.getenv('SAUCE_USERNAME'),
'accessKey': os.getenv('SAUCE_ACCESS_KEY')},
'acceptInsecureCerts': True
}
assert options.to_capabilities() == expected_capabilities
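# ------------------------------------------------------------------------------
# Editor's sketch (not part of this test module): how options built above are
# typically consumed. The Sauce Labs endpoint URL and the webdriver.Remote call
# are assumptions based on standard Selenium 3 usage, not asserted by these tests.
#
#     from selenium import webdriver
#
#     options = SauceOptions.firefox(name='my test', build='local build')
#     driver = webdriver.Remote(
#         command_executor='https://ondemand.us-west-1.saucelabs.com/wd/hub',
#         desired_capabilities=options.to_capabilities())
#     driver.quit()
# ------------------------------------------------------------------------------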
|
from typing import Optional, Tuple
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from ..caller_base import CallerBase
from ..error.client_only_endpoint import client_only_endpoint
from ..error.illegal_attr_checker import IllegalAttrChecker
from ..error.uncallable_namespace import UncallableNamespace
from ..model.link_prediction_model import LPModel
from ..model.node_classification_model import NCModel
from .graphsage_model import GraphSageModel
from .model import Model
class ModelProcRunner(CallerBase, UncallableNamespace, IllegalAttrChecker):
def store(self, model: Model, failIfUnsupportedType: bool = True) -> Series:
self._namespace += ".store"
query = f"CALL {self._namespace}($model_name, $fail_flag)"
params = {
"model_name": model.name(),
"fail_flag": failIfUnsupportedType,
}
return self._query_runner.run_query(query, params).squeeze() # type: ignore
def list(self, model: Optional[Model] = None) -> DataFrame:
self._namespace += ".list"
if model:
query = f"CALL {self._namespace}($model_name)"
params = {"model_name": model.name()}
else:
query = f"CALL {self._namespace}()"
params = {}
return self._query_runner.run_query(query, params)
def exists(self, model_name: str) -> Series:
self._namespace += ".exists"
query = f"CALL {self._namespace}($model_name)"
params = {"model_name": model_name}
return self._query_runner.run_query(query, params).squeeze() # type: ignore
def publish(self, model: Model) -> Model:
self._namespace += ".publish"
query = f"CALL {self._namespace}($model_name)"
params = {"model_name": model.name()}
result = self._query_runner.run_query(query, params)
model_name = result["modelInfo"][0]["modelName"]
model_type = result["modelInfo"][0]["modelType"]
return self._resolve_model(model_type, model_name)
def drop(self, model: Model) -> Series:
self._namespace += ".drop"
query = f"CALL {self._namespace}($model_name)"
params = {"model_name": model.name()}
return self._query_runner.run_query(query, params).squeeze() # type: ignore
def load(self, model_name: str) -> Tuple[Model, Series]:
self._namespace += ".load"
query = f"CALL {self._namespace}($model_name)"
params = {"model_name": model_name}
result = self._query_runner.run_query(query, params).squeeze()
self._namespace = "gds.model"
return self.get(result["modelName"]), result
def delete(self, model: Model) -> Series:
self._namespace += ".delete"
query = f"CALL {self._namespace}($model_name)"
params = {"model_name": model.name()}
return self._query_runner.run_query(query, params).squeeze() # type: ignore
@client_only_endpoint("gds.model")
def get(self, model_name: str) -> Model:
query = "CALL gds.beta.model.list($model_name)"
params = {"model_name": model_name}
result = self._query_runner.run_query(query, params)
if len(result) == 0:
raise ValueError(f"No loaded model named '{model_name}' exists")
model_type = result["modelInfo"][0]["modelType"]
return self._resolve_model(model_type, model_name)
def _resolve_model(self, model_type: str, model_name: str) -> Model:
if model_type == "NodeClassification":
return NCModel(model_name, self._query_runner, self._server_version)
elif model_type == "LinkPrediction":
return LPModel(model_name, self._query_runner, self._server_version)
elif model_type == "graphSage":
return GraphSageModel(model_name, self._query_runner, self._server_version)
raise ValueError(f"Unknown model type encountered: '{model_type}'")
|
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic nonlinear transform coder for RGB images.
This is a close approximation of the image compression model published in:
J. Ballé, V. Laparra, E.P. Simoncelli (2017):
"End-to-end Optimized Image Compression"
Int. Conf. on Learning Representations (ICLR), 2017
https://arxiv.org/abs/1611.01704
With patches from Victor Xing <[email protected]>
This is meant as 'educational' code - you can use this to get started with your
own experiments. To reproduce the exact results from the paper, tuning of
hyperparameters may be necessary. To compress images with published models, see
`tfci.py`.
"""
import argparse
import glob
import sys
import pdb
import os
from absl import app
from absl.flags import argparse_flags
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import tensorflow as tf2
import tensorflow_compression as tfc
tf.disable_eager_execution()
def string_to_tensor(x):
split = tf.strings.split(x, sep=' ').values
numbers = tf.strings.to_number(split)
return tf.reshape(numbers, (-1, 2))
def read_png(filename):
"""Loads a PNG image file."""
string = tf.read_file(filename)
image = tf.image.decode_image(string, channels=1)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, (28, 28, 1))
image /= 255
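  # NOTE (editor's comment, not in the original): the decoded image built above is
  # discarded; as written, this function returns random noise instead.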
return tf.random.normal((32, 32, 3))
def read_events(filename):
df = pd.read_csv(filename, sep=' ', dtype=np.float32)
values = np.transpose(np.reshape(df.values, (-1, 2, 128)), (0, 2, 1))
return values
def quantize_image(image):
image = tf.round(image * 255)
image = tf.saturate_cast(image, tf.uint8)
return image
def write_png(filename, image):
"""Saves an image to a PNG file."""
image = quantize_image(image)
string = tf.image.encode_png(image)
return tf.write_file(filename, string)
class AnalysisTransform(tf.keras.layers.Layer):
"""The analysis transform."""
def __init__(self, num_filters, *args, **kwargs):
self.num_filters = num_filters
super(AnalysisTransform, self).__init__(*args, **kwargs)
def build(self, input_shape):
self._layers = [
tfc.SignalConv1D(self.num_filters,
3,
strides_down=2,
padding="same_zeros"),
tfc.GDN(),
tfc.SignalConv1D(self.num_filters, 3, padding="same_zeros"),
tf.keras.layers.ReLU(),
tfc.SignalConv1D(self.num_filters,
3,
strides_down=2,
padding="same_zeros"),
tfc.GDN(),
tfc.SignalConv1D(self.num_filters, 3, padding="same_zeros"),
tf.keras.layers.ReLU(),
tfc.SignalConv1D(self.num_filters // 8,
3,
strides_down=2,
padding="same_zeros"),
tfc.GDN()
]
super(AnalysisTransform, self).build(input_shape)
def call(self, tensor):
for layer in self._layers:
tensor = layer(tensor)
return tensor
class SynthesisTransform(tf.keras.layers.Layer):
"""The synthesis transform."""
def __init__(self, num_filters, *args, **kwargs):
self.num_filters = num_filters
super(SynthesisTransform, self).__init__(*args, **kwargs)
def build(self, input_shape):
self._layers = [
tfc.GDN(inverse=True),
tfc.SignalConv1D(self.num_filters // 8,
3,
strides_up=2,
padding="same_zeros"),
tf.keras.layers.ReLU(),
tfc.SignalConv1D(self.num_filters, 3, padding="same_zeros"),
tfc.GDN(inverse=True),
tfc.SignalConv1D(self.num_filters,
3,
strides_up=2,
padding="same_zeros"),
tf.keras.layers.ReLU(),
tfc.SignalConv1D(self.num_filters, 3, padding="same_zeros"),
tfc.GDN(inverse=True),
tfc.SignalConv1D(self.num_filters,
3,
strides_up=2,
padding="same_zeros"),
tfc.SignalConv1D(2, 3, padding="same_zeros"),
]
super(SynthesisTransform, self).build(input_shape)
def call(self, tensor):
for layer in self._layers:
tensor = layer(tensor)
return tensor
def train(args):
"""Trains the model."""
if args.verbose:
tf.logging.set_verbosity(tf.logging.INFO)
# Create input data pipeline.
with tf.device("/cpu:0"):
train_files = glob.glob(args.train_glob)[:3]
if not train_files:
raise RuntimeError("No training images found with glob '{}'.".format(
args.train_glob))
train_dataset = tf.data.TextLineDataset(
train_files,
compression_type=None,
buffer_size=len(train_files),
num_parallel_reads=args.preprocess_threads)
train_dataset = train_dataset.map(
string_to_tensor, num_parallel_calls=args.preprocess_threads)
train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()
train_dataset = train_dataset.batch(args.batchsize)
train_dataset = train_dataset.prefetch(32)
num_pixels = args.batchsize * 128
# Get training patch from dataset.
x = train_dataset.make_one_shot_iterator().get_next()
# Instantiate model.
analysis_transform = AnalysisTransform(32)
entropy_bottleneck = tfc.EntropyBottleneck()
synthesis_transform = SynthesisTransform(32)
# Build autoencoder.
y = analysis_transform(x)
y_tilde, likelihoods = entropy_bottleneck(y, training=True)
x_tilde = synthesis_transform(y_tilde)
timestamps, polarities = tf.split(x_tilde, num_or_size_splits=2, axis=-1)
timestamps = tf.math.abs(timestamps)
polarities = tf.math.tanh(polarities)
x_tilde = tf.concat([timestamps, polarities], axis=-1)
train_bpp = tf.reduce_mean(
-tf.reduce_sum(likelihoods * tf.log(likelihoods), axis=[1, 2]) /
np.log(2))
# Mean squared error across pixels.
train_mse = tf.reduce_mean((x - x_tilde)**2.)
# The rate-distortion cost.
train_loss = args.lmbda * train_mse + train_bpp
# Minimize loss and auxiliary loss, and execute update op.
step = tf.train.create_global_step()
main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
main_step = main_optimizer.minimize(train_loss, global_step=step)
aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])
train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
tf.summary.scalar("loss", train_loss)
tf.summary.scalar("bpp", train_bpp)
tf.summary.scalar("mse", train_mse)
hooks = [
tf.train.StopAtStepHook(last_step=args.last_step),
tf.train.NanTensorHook(train_loss),
]
with tf.train.MonitoredTrainingSession(hooks=hooks,
checkpoint_dir=args.checkpoint_dir,
save_checkpoint_secs=300,
save_summaries_secs=60) as sess:
while not sess.should_stop():
sess.run(train_op)
def compress(args):
"""Compresses an event file."""
x = tf.constant(read_events(args.input_file))
x_shape = tf.shape(x)
analysis_transform = AnalysisTransform(32)
entropy_bottleneck = tfc.EntropyBottleneck()
synthesis_transform = SynthesisTransform(32)
y = analysis_transform(x)
string = entropy_bottleneck.compress(y)
y_hat, likelihoods = entropy_bottleneck(y, training=False)
x_hat = synthesis_transform(y_hat)
timestamps, polarities = tf.split(x_hat, num_or_size_splits=2, axis=-1)
timestamps = tf.math.abs(timestamps)
polarities = tf.round(tf.math.tanh(polarities))
x_hat = tf.concat([timestamps, polarities], axis=-1)
eval_bpp = tf.reduce_mean(
-tf.reduce_sum(likelihoods * tf.log(likelihoods), axis=[1, 2]) /
np.log(2))
mse = tf.reduce_mean((x - x_hat)**2.)
with tf.Session() as sess:
# Load the latest model checkpoint, get the compressed string and the tensor
# shapes.
latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
tf.train.Saver().restore(sess, save_path=latest)
tensors = [string, tf.shape(x)[1:-1], tf.shape(y)[1:-1]]
arrays = sess.run(tensors)
# Write a binary file with the shape information and the compressed string.
packed = tfc.PackedTensors()
packed.pack(tensors, arrays)
with open(args.output_file, "wb") as f:
f.write(packed.string)
# If requested, transform the quantized image back and measure performance.
if args.verbose:
# eval_bpp, mse, psnr, msssim, num_pixels = sess.run(
# [eval_bpp, mse, psnr, msssim, num_pixels])
eval_bpp, mse = sess.run([eval_bpp, mse])
compression_ratio = os.path.getsize(args.input_file) / len(packed.string)
print("Mean squared error: {:0.4f}".format(mse))
print("Estimated entropy: {}".format(eval_bpp))
print("Compression ratio: {}".format(compression_ratio))
def decompress(args):
"""Decompresses an image."""
# Read the shape information and compressed string from the binary file.
string = tf.placeholder(tf.string, [1])
x_shape = tf.placeholder(tf.int32, [2])
y_shape = tf.placeholder(tf.int32, [2])
with open(args.input_file, "rb") as f:
packed = tfc.PackedTensors(f.read())
tensors = [string, x_shape, y_shape]
arrays = packed.unpack(tensors)
# Instantiate model.
entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)
synthesis_transform = SynthesisTransform(args.num_filters)
# Decompress and transform the image back.
y_shape = tf.concat([y_shape, [args.num_filters]], axis=0)
y_hat = entropy_bottleneck.decompress(string,
y_shape,
channels=args.num_filters)
x_hat = synthesis_transform(y_hat)
# Remove batch dimension, and crop away any extraneous padding on the bottom
# or right boundaries.
x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]
# Write reconstructed image out as a PNG file.
op = write_png(args.output_file, x_hat)
# Load the latest model checkpoint, and perform the above actions.
with tf.Session() as sess:
latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)
tf.train.Saver().restore(sess, save_path=latest)
sess.run(op, feed_dict=dict(zip(tensors, arrays)))
def parse_args(argv):
"""Parses command line arguments."""
parser = argparse_flags.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# High-level options.
parser.add_argument(
"--verbose",
"-V",
action="store_true",
help="Report bitrate and distortion when training or compressing.")
parser.add_argument("--num_filters",
type=int,
default=128,
help="Number of filters per layer.")
parser.add_argument("--checkpoint_dir",
default="train",
help="Directory where to save/load model checkpoints.")
subparsers = parser.add_subparsers(
title="commands",
dest="command",
help="What to do: 'train' loads training data and trains (or continues "
"to train) a new model. 'compress' reads an image file (lossless "
"PNG format) and writes a compressed binary file. 'decompress' "
"reads a binary file and reconstructs the image (in PNG format). "
"input and output filenames need to be provided for the latter "
"two options. Invoke '<command> -h' for more information.")
# 'train' subcommand.
train_cmd = subparsers.add_parser(
"train",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Trains (or continues to train) a new model.")
train_cmd.add_argument(
"--train_glob",
default="images/*.png",
help="Glob pattern identifying training data. This pattern must expand "
"to a list of RGB images in PNG format.")
train_cmd.add_argument("--batchsize",
type=int,
default=8,
help="Batch size for training.")
train_cmd.add_argument("--patchsize",
type=int,
default=256,
help="Size of image patches for training.")
train_cmd.add_argument("--lambda",
type=float,
default=0.01,
dest="lmbda",
help="Lambda for rate-distortion tradeoff.")
train_cmd.add_argument("--last_step",
type=int,
default=1000000,
help="Train up to this number of steps.")
train_cmd.add_argument(
"--preprocess_threads",
type=int,
default=16,
help="Number of CPU threads to use for parallel decoding of training "
"images.")
# 'compress' subcommand.
compress_cmd = subparsers.add_parser(
"compress",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Reads a PNG file, compresses it, and writes a TFCI file.")
# 'decompress' subcommand.
decompress_cmd = subparsers.add_parser(
"decompress",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Reads a TFCI file, reconstructs the image, and writes back "
"a PNG file.")
# Arguments for both 'compress' and 'decompress'.
for cmd, ext in ((compress_cmd, ".tfci"), (decompress_cmd, ".png")):
cmd.add_argument("input_file", help="Input filename.")
cmd.add_argument(
"output_file",
nargs="?",
help="Output filename (optional). If not provided, appends '{}' to "
"the input filename.".format(ext))
# Parse arguments.
args = parser.parse_args(argv[1:])
if args.command is None:
parser.print_usage()
sys.exit(2)
return args
def main(args):
# Invoke subcommand.
if args.command == "train":
train(args)
elif args.command == "compress":
if not args.output_file:
args.output_file = args.input_file + ".tfci"
compress(args)
elif args.command == "decompress":
if not args.output_file:
args.output_file = args.input_file + ".png"
decompress(args)
if __name__ == "__main__":
app.run(main, flags_parser=parse_args)
|
import time
import subprocess
import socket
from datetime import datetime
def runSikuliScript(path):
filepath = path
p = subprocess.Popen(filepath, shell=True, stdout = subprocess.PIPE)
stdout, stderr = p.communicate()
print("Done Running Sikuli - " + str(datetime.now()))
def internet(host="8.8.8.8", port=53, timeout=3):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain(DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except socket.error as ex:
print(ex)
return False
p = "C:\\Users\\xandmaga\\workspace\\sikuli\\gvt.cmd"
#if not(internet()):
# runSikuliScript(p)
while True:
    if "Conectado" not in str(subprocess.check_output(['netsh', 'interface', 'show', 'interface', 'Wi-Fi'])) or not internet():
        print("No internet - " + str(datetime.now()))
        runSikuliScript(p)
    else:
        print("Wi-Fi connected - " + str(datetime.now()))
    time.sleep(360)
|
#!/usr/bin/env python
import cgi
import cgitb
import re
import fcntl
def print_res():
    print "Content-type: text/html\n\n"
    f = open("../homework/w3/query/w3-query.dat")
    fcntl.flock(f, fcntl.LOCK_SH)
    template = "".join(open("../homework/w3/src/table.html").readlines()).split("<!-- insert point-->")
    data = ""
    lines = f.readlines()
    fcntl.flock(f, fcntl.LOCK_UN)
    f.close()
    i = 0
    for line in lines:
        data += "<tr>\n"
        fields = re.split("\t+", line)
        for field in fields[0:4]:
            data += "<td>" + str(field) + "</td>\n"
        data += """<td>
        <div class="form-check">
        <input class="form-check-input" type="checkbox" name="""
        data += str(i) + """ value="1">
        </td>
        </tr>\n
        """
        i += 1
    if len(lines) == 0:
        data = "<tr>\n<h3>Nothing</h3></tr>"
    print template[0] + data + template[1]
def main():
    try:
        cgitb.enable()
        del_list = []
        form = cgi.FieldStorage()
        for key in form.keys():
            del_list.append(int(key))
        fin = open("../homework/w3/query/w3-query.dat")
        fcntl.flock(fin, fcntl.LOCK_SH)
        lines = fin.readlines()
        fcntl.flock(fin, fcntl.LOCK_UN)
        fin.close()
        fout = open("../homework/w3/query/w3-query.dat", "w")
        fcntl.flock(fout, fcntl.LOCK_EX)
        for i, line in enumerate(lines):
            if i not in del_list:
                fout.write(line)
        fcntl.flock(fout, fcntl.LOCK_UN)
        fout.close()
        print_res()
    except Exception as e:
        print """Content-type: text/html

<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>Internal Server Error</title>
</head><body>
<h1>Internal Server Error</h1>
<p>The server encountered an uncaught exception and was unable to complete
your request.</p>
<p>Please contact the server administrator at
[email protected] to inform them of the time this error occurred,
and the actions you performed just before this error.</p>
<p>More information about this error may be available
in the server error log.</p>
</body></html>
"""
        exit(-1)
main()
|
#
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from neutron_fwaas.services.firewall.drivers import fwaas_base
from networking_odl.common import client as odl_client
from networking_odl.common import config # noqa
LOG = logging.getLogger(__name__)
class OpenDaylightFwaasDriver(fwaas_base.FwaasDriverBase):
"""OpenDaylight FWaaS Driver
This code is the backend implementation for the OpenDaylight FWaaS
driver for OpenStack Neutron.
"""
def __init__(self):
LOG.debug("Initializing OpenDaylight FWaaS driver")
self.client = odl_client.OpenDaylightRestClient.create_client()
def create_firewall(self, apply_list, firewall):
"""Create the Firewall with default (drop all) policy.
The default policy will be applied on all the interfaces of
trusted zone.
"""
pass
def delete_firewall(self, apply_list, firewall):
"""Delete firewall.
Removes all policies created by this instance and frees up
all the resources.
"""
pass
def update_firewall(self, apply_list, firewall):
"""Apply the policy on all trusted interfaces.
Remove previous policy and apply the new policy on all trusted
interfaces.
"""
pass
def apply_default_policy(self, apply_list, firewall):
"""Apply the default policy on all trusted interfaces.
Remove current policy and apply the default policy on all trusted
interfaces.
"""
pass
|
import taichi as ti
import numpy as np
import utils
from engine import mpm_solver
# Try to run on GPU
ti.init(arch=ti.cuda)
mpm = mpm_solver.MPMSolver(res=(24, 24, 24), size=1)
mpm.set_gravity((0, -20, 0))
mpm.add_sphere_collider(center=(0.25, 0.5, 0.5), radius=0.1, surface=mpm.surface_slip)
mpm.add_sphere_collider(center=(0.5, 0.5, 0.5), radius=0.1, surface=mpm.surface_sticky)
mpm.add_sphere_collider(center=(0.75, 0.5, 0.5), radius=0.1, surface=mpm.surface_separate)
for frame in range(5):
mpm.add_cube((0.2, 0.8, 0.45), (0.1, 0.03, 0.1), mpm.material_water, color=0x8888FF)
mpm.add_cube((0.45, 0.8, 0.45), (0.1, 0.03, 0.1), mpm.material_water, color=0xFF8888)
mpm.add_cube((0.7, 0.8, 0.45), (0.1, 0.03, 0.1), mpm.material_water, color=0xFFFFFF)
mpm.step(4e-3)
particles = mpm.particle_info()
np_x = particles['position'] / 1.0
|
from . import views as primer_views
from django.urls import path
from django.contrib.auth.decorators import login_required
urlpatterns = [
path('primerinput/', login_required(primer_views.PrimerFormView.as_view()), name='primerinput'),
path('primer/', primer_views.PrimerFilteredTableView.as_view(), name='primer'),
path('primer/<int:pk>/', primer_views.PrimerDetailView.as_view(), name='primerinfo'),
path('primer/<int:pk>/update', primer_views.PrimerUpdateView.as_view(), name='primerupdate'),
path('primer/<int:pk>/delete/', primer_views.PrimerDeleteView.as_view(), name='primerdelete'),
path('vector/', primer_views.vector_index, name='vector_index'),
path('vector/new/', primer_views.VectorCreateView.as_view(), name='vector_create'),
path('vector/<int:pk>/', primer_views.VectorDetailView.as_view(), name='vector_detail'),
path('vector/<int:pk>/delete/', primer_views.VectorDeleteView.as_view(), name='vector_delete'),
path('vector/<int:pk>/update', primer_views.VectorUpdateView.as_view(), name='vector_update'),
path('seq/', primer_views.calpcr, name='seq'),
path('seq/vector/', primer_views.SelectVector, name='seqvector'),
path('seq_setL/', primer_views.calpcr_setL, name='seq_setL'),
path('seq/showPCR', primer_views.show_pcr, name='showpcr'),
] |
a = float(input("Digite o tamanho de um lado de um triangulo: "))
b = float(input("Digite o valor de um segundo lado do triangulo: "))
c = float(input("Digite o valor do terceiro lado do triangulo: "))
if b - c < a < b + c and a - c < b < a + c and a - b < c < a + b:
    print("These values can form a triangle")
else:
    print("These values cannot form a triangle")
|
# -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
from flask import flash, _request_ctx_stack
from functools import wraps
from flask_jwt import _jwt
import jwt
def jwt_optional(realm=None):
def wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
token = _jwt.request_callback()
try:
payload = _jwt.jwt_decode_callback(token)
except jwt.exceptions.DecodeError:
pass
else:
_request_ctx_stack.top.current_identity = _jwt.identity_callback(payload)
return fn(*args, **kwargs)
return decorator
return wrapper
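# Editor's sketch (not part of the original module): typical use on a Flask view.
# The `app` object and the route are assumptions for illustration.
#
#     from flask_jwt import current_identity
#
#     @app.route('/articles')
#     @jwt_optional()
#     def list_articles():
#         user = current_identity  # resolves to None when no valid token was sent
#         ...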
from conduit.user.models import User # noqa
def jwt_identity(payload):
user_id = payload['identity']
return User.get_by_id(user_id)
def authenticate(email, password):
user = User.query.filter_by(email=email).first()
if user and user.check_password(password):
return user
|
PRONUNCIATION_SEP = "\t"
SYMBOL_SEP = " "
INCLUDE_COUNTER = True
ENCODING = "UTF-8"
UNK_SYMBOL = "None" |
from .student import Student
|
'''
Basic ways of opening files:
r - Read (the default)
w - Write (wipes the file if it already existed, creates it if it did not)
a - Append
The extension is just 'text' added so that other programs recognize the file in the way appropriate for those programs.
'''
try:
    file = open('test2.txt', 'w')  # file HANDLE
    file.write('sample')
    print(0/0)
    file.write('sample2')
finally:
    file.close()
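# Editor's note (not in the original snippet): the idiomatic equivalent uses a
# context manager, which closes the handle even when an exception escapes the
# block (as the ZeroDivisionError above does):
#
#     with open('test2.txt', 'w') as file:
#         file.write('sample')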
|
import asyncio
import base64
import hashlib
import os
import unittest
from unittest import mock
import aiohttp
from aiohttp import errors, hdrs, websocket, websocket_client
class TestWebSocketClient(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.key_data = os.urandom(16)
self.key = base64.b64encode(self.key_data)
self.ws_key = base64.b64encode(
hashlib.sha1(self.key + websocket.WS_KEY).digest()).decode()
def tearDown(self):
self.loop.close()
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
hdrs.SEC_WEBSOCKET_PROTOCOL: 'chat'
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
res = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertIsInstance(res, websocket_client.ClientWebSocketResponse)
self.assertEqual(res.protocol, 'chat')
self.assertNotIn(hdrs.ORIGIN, m_req.call_args[1]["headers"])
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_with_origin(self, m_req, m_os):
resp = mock.Mock()
resp.status = 403
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
origin = 'https://example.org/page.html'
with self.assertRaises(errors.WSServerHandshakeError):
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
loop=self.loop,
origin=origin))
self.assertIn(hdrs.ORIGIN, m_req.call_args[1]["headers"])
self.assertEqual(m_req.call_args[1]["headers"][hdrs.ORIGIN], origin)
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_custom_response(self, m_req, m_os):
class CustomResponse(websocket_client.ClientWebSocketResponse):
def read(self, decode=False):
return 'customized!'
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
res = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
ws_response_class=CustomResponse,
loop=self.loop))
self.assertEqual(res.read(), 'customized!')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_global_loop(self, m_req, m_os):
asyncio.set_event_loop(self.loop)
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
resp = self.loop.run_until_complete(
aiohttp.ws_connect('http://test.org'))
self.assertIs(resp._loop, self.loop)
asyncio.set_event_loop(None)
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_status(self, m_req, m_os):
resp = mock.Mock()
resp.status = 500
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid response status')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_upgrade(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: 'test',
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid upgrade header')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_conn(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: 'close',
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid connection header')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_err_challenge(self, m_req, m_os):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: 'asdfasdfasdfasdfasdfasdf'
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError) as ctx:
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
self.assertEqual(
ctx.exception.message, 'Invalid challenge response')
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_close(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
writer = WebSocketWriter.return_value = mock.Mock()
reader = resp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect('http://test.org', loop=self.loop))
self.assertFalse(resp.closed)
msg = websocket.Message(websocket.MSG_CLOSE, b'', b'')
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_result(msg)
res = self.loop.run_until_complete(resp.close())
writer.close.assert_called_with(1000, b'')
self.assertTrue(resp.closed)
self.assertTrue(res)
self.assertIsNone(resp.exception())
# idempotent
res = self.loop.run_until_complete(resp.close())
self.assertFalse(res)
self.assertEqual(writer.close.call_count, 1)
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_close_exc(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
WebSocketWriter.return_value = mock.Mock()
reader = resp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
self.assertFalse(resp.closed)
exc = ValueError()
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_exception(exc)
self.loop.run_until_complete(resp.close())
self.assertTrue(resp.closed)
self.assertIs(resp.exception(), exc)
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_close_exc2(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
writer = WebSocketWriter.return_value = mock.Mock()
resp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
self.assertFalse(resp.closed)
exc = ValueError()
writer.close.side_effect = exc
self.loop.run_until_complete(resp.close())
self.assertTrue(resp.closed)
self.assertIs(resp.exception(), exc)
resp._closed = False
writer.close.side_effect = asyncio.CancelledError()
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, resp.close())
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_send_data_after_close(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
WebSocketWriter.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
resp._closed = True
self.assertRaises(RuntimeError, resp.ping)
self.assertRaises(RuntimeError, resp.pong)
self.assertRaises(RuntimeError, resp.send_str, 's')
self.assertRaises(RuntimeError, resp.send_bytes, b'b')
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_send_data_type_errors(self, m_req, m_os, WebSocketWriter):
resp = mock.Mock()
resp.status = 101
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
WebSocketWriter.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
self.assertRaises(TypeError, resp.send_str, b's')
self.assertRaises(TypeError, resp.send_bytes, 'b')
@mock.patch('aiohttp.client.WebSocketWriter')
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_reader_read_exception(self, m_req, m_os, WebSocketWriter):
hresp = mock.Mock()
hresp.status = 101
hresp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key,
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(hresp)
WebSocketWriter.return_value = mock.Mock()
reader = hresp.connection.reader.set_parser.return_value = mock.Mock()
resp = self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org', loop=self.loop))
exc = ValueError()
reader.read.return_value = asyncio.Future(loop=self.loop)
reader.read.return_value.set_exception(exc)
msg = self.loop.run_until_complete(resp.receive())
self.assertEqual(msg.tp, aiohttp.MsgType.error)
self.assertIs(resp.exception(), exc)
def test_receive_runtime_err(self):
resp = websocket_client.ClientWebSocketResponse(
mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(), 10.0,
True, True, self.loop)
resp._waiting = True
self.assertRaises(
RuntimeError, self.loop.run_until_complete, resp.receive())
@mock.patch('aiohttp.client.os')
@mock.patch('aiohttp.client.ClientSession.get')
def test_ws_connect_close_resp_on_err(self, m_req, m_os):
resp = mock.Mock()
resp.status = 500
resp.headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_ACCEPT: self.ws_key
}
m_os.urandom.return_value = self.key_data
m_req.return_value = asyncio.Future(loop=self.loop)
m_req.return_value.set_result(resp)
with self.assertRaises(errors.WSServerHandshakeError):
self.loop.run_until_complete(
aiohttp.ws_connect(
'http://test.org',
protocols=('t1', 't2', 'chat'),
loop=self.loop))
resp.close.assert_called_with()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import operator
import warnings
import numpy as np
from nevergrad.parametrization import parameter as p
from nevergrad.parametrization.utils import float_penalty as _float_penalty
from nevergrad.parametrization import _datalayers
import nevergrad.common.typing as tp
from nevergrad.common.tools import OrderedSet
class MultiValue:
"""Estimation of a value based on one or multiple evaluations.
This class provides easy access to:
- count: how many times the point was evaluated
- mean: the mean value.
- square: the mean square value
- variance: the variance
- parameter: the corresponding Parameter
It also provides access to optimistic and pessimistic bounds for the value.
Parameter
---------
parameter: Parameter
the parameter for one of the evaluations
y: float
the first evaluation of the value
"""
def __init__(self, parameter: p.Parameter, y: float, *, reference: p.Parameter) -> None:
self.count = 1
self.mean = y
self._minimum = y
self.square = y * y
# TODO May be safer to use a default variance which depends on y for scale invariance?
self.variance = 1.0e6
parameter.freeze()
self.parameter = parameter
self._ref = reference
@property
def x(self) -> np.ndarray: # for compatibility
return self.parameter.get_standardized_data(reference=self._ref)
@property
def optimistic_confidence_bound(self) -> float:
return float(self.mean - 0.1 * np.sqrt((self.variance) / (1 + self.count)))
@property
def pessimistic_confidence_bound(self) -> float:
return float(self.mean + 0.1 * np.sqrt((self.variance) / (1 + self.count)))
def get_estimation(self, name: str) -> float:
# Note: pruning below relies on the fact than only 3 modes exist. If a new mode is added, update pruning
if name == "optimistic":
return self.optimistic_confidence_bound
elif name == "pessimistic":
return self.pessimistic_confidence_bound
elif name == "minimum":
return self._minimum
elif name == "average":
return self.mean
else:
raise NotImplementedError
def add_evaluation(self, y: float) -> None:
"""Adds a new evaluation of the value
Parameter
---------
y: float
the new evaluation
"""
self._minimum = min(self._minimum, y)
self.mean = (self.count * self.mean + y) / float(self.count + 1)
self.square = (self.count * self.square + y * y) / float(self.count + 1)
self.square = max(self.square, self.mean ** 2)
self.count += 1
factor = math.sqrt(float(self.count) / float(self.count - 1.0))
self.variance = factor * (self.square - self.mean ** 2)
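        # Worked example (editor's note, not in the original): starting from a
        # single evaluation y1 = 2.0 (count=1, mean=2.0, square=4.0), calling
        # add_evaluation(4.0) yields mean = 3.0, square = 10.0, count = 2 and
        # variance = sqrt(2/1) * (10.0 - 3.0**2) ≈ 1.41.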
def as_array(self, reference: p.Parameter) -> np.ndarray:
return self.parameter.get_standardized_data(reference=reference)
def __repr__(self) -> str:
return f"MultiValue<mean: {self.mean}, count: {self.count}, parameter: {self.parameter}>"
def _get_nash(optimizer: tp.Any) -> tp.List[tp.Tuple[tp.Tuple[float, ...], int]]:
"""Returns an empirical distribution. limited using a threshold
equal to max_num_trials^(1/4).
"""
if not optimizer.archive:
return [(optimizer.current_bests["pessimistic"].x, 1)]
max_num_trial = max(p.count for p in optimizer.archive.values())
sum_num_trial = sum(p.count for p in optimizer.archive.values())
threshold = np.power(max_num_trial, 0.5)
if threshold <= np.power(sum_num_trial, 0.25):
return [(optimizer.provide_recommendation(), 1)]
# make deterministic at the price of sort complexity
return sorted(
((np.frombuffer(k), p.count) for k, p in optimizer.archive.bytesdict.items() if p.count >= threshold),
key=operator.itemgetter(1),
)
def sample_nash(optimizer: tp.Any) -> tp.Tuple[float, ...]: # Somehow like fictitious play.
nash = _get_nash(optimizer)
if len(nash) == 1:
return nash[0][0]
prob = [float(n[1]) for n in nash]
prob = [p_ / sum(prob) for p_ in prob]
index: int = np.random.choice(np.arange(len(prob)), p=prob)
return nash[index][0]
class DelayedJob:
"""Future-like object which delays computation"""
def __init__(self, func: tp.Callable[..., tp.Any], *args: tp.Any, **kwargs: tp.Any) -> None:
self.func = func
self.args = args
self.kwargs = kwargs
self._result: tp.Optional[tp.Any] = None
self._computed = False
def done(self) -> bool:
return True
def result(self) -> tp.Any:
if not self._computed:
self._result = self.func(*self.args, **self.kwargs)
self._computed = True
return self._result
class SequentialExecutor:
"""Executor which run sequentially and locally
(just calls the function and returns a FinishedJob)
"""
def submit(self, fn: tp.Callable[..., tp.Any], *args: tp.Any, **kwargs: tp.Any) -> DelayedJob:
return DelayedJob(fn, *args, **kwargs)
def _tobytes(x: tp.ArrayLike) -> bytes:
x = np.array(x, copy=False) # for compatibility
assert x.ndim == 1, f"Input shape: {x.shape}"
assert x.dtype == np.float_, f"Incorrect type {x.dtype} is not float"
return x.tobytes()
_ERROR_STR = (
"Generating numpy arrays from the bytes keys is inefficient, "
"work on archive.bytesdict.<keys,items>() directly and convert with "
"np.frombuffer if you can. You can also use archive.<keys,items>_as_arrays() "
"but it is less efficient."
)
Y = tp.TypeVar("Y")
class Archive(tp.Generic[Y]):
"""A dict-like object with numpy arrays as keys.
The underlying `bytesdict` dict stores the arrays as bytes since arrays are not hashable.
Keys can be converted back with np.frombuffer(key)
"""
def __init__(self) -> None:
self.bytesdict: tp.Dict[bytes, Y] = {}
def __setitem__(self, x: tp.ArrayLike, value: Y) -> None:
self.bytesdict[_tobytes(x)] = value
def __getitem__(self, x: tp.ArrayLike) -> Y:
return self.bytesdict[_tobytes(x)]
def __contains__(self, x: tp.ArrayLike) -> bool:
return _tobytes(x) in self.bytesdict
def get(self, x: tp.ArrayLike, default: tp.Optional[Y] = None) -> tp.Optional[Y]:
return self.bytesdict.get(_tobytes(x), default)
def __len__(self) -> int:
return len(self.bytesdict)
def values(self) -> tp.ValuesView[Y]:
return self.bytesdict.values()
def keys(self) -> None:
raise RuntimeError(_ERROR_STR)
def items(self) -> None:
raise RuntimeError(_ERROR_STR)
def items_as_array(self) -> tp.Iterator[tp.Tuple[np.ndarray, Y]]:
raise RuntimeError("For consistency, items_as_array is renamed to items_as_arrays")
def items_as_arrays(self) -> tp.Iterator[tp.Tuple[np.ndarray, Y]]:
"""Functions that iterates on key-values but transforms keys
to np.ndarray. This is to simplify interactions, but should not
be used in an algorithm since the conversion can be inefficient.
Prefer using self.bytesdict.items() directly, and convert the bytes
to np.ndarray using np.frombuffer(b)
"""
return ((np.frombuffer(b), v) for b, v in self.bytesdict.items())
def keys_as_array(self) -> tp.Iterator[np.ndarray]:
raise RuntimeError("For consistency, keys_as_array is renamed to keys_as_arrays")
def keys_as_arrays(self) -> tp.Iterator[np.ndarray]:
"""Functions that iterates on keys but transforms them
to np.ndarray. This is to simplify interactions, but should not
be used in an algorithm since the conversion can be inefficient.
Prefer using self.bytesdict.keys() directly, and convert the bytes
to np.ndarray using np.frombuffer(b)
"""
return (np.frombuffer(b) for b in self.bytesdict)
def __repr__(self) -> str:
return f"Archive with bytesdict: {self.bytesdict!r}"
def __str__(self) -> str:
return f"Archive with bytesdict: {self.bytesdict}"
def __iter__(self) -> None:
raise RuntimeError(_ERROR_STR)
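def _archive_usage_sketch() -> None:
    """Editor's sketch (not part of the original module): minimal Archive usage.

    Keys are 1-D float arrays; they are stored internally as bytes and can be
    recovered with np.frombuffer, as the class docstring above describes.
    """
    archive: Archive[int] = Archive()
    key = np.array([0.0, 1.5, -2.0])
    archive[key] = 42
    assert key in archive and archive[key] == 42
    # Iterate over the raw bytes keys, converting back only when needed.
    for raw_key, value in archive.bytesdict.items():
        _ = (np.frombuffer(raw_key), value)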
class Pruning:
"""Callable for pruning archives in the optimizer class.
See Optimizer.pruning attribute, called at each "tell".
Parameters
----------
min_len: int
minimum length of the pruned archive.
max_len: int
length at which pruning is activated (maximum allowed length for the archive).
Note
----
For each of the 3 criteria (optimistic, pessimistic and average), the min_len best (lowest)
points will be kept, which can lead to at most 3 * min_len points.
"""
def __init__(self, min_len: int, max_len: int):
self.min_len = min_len
self.max_len = max_len
self._num_prunings = 0 # for testing it is not called too often
def __call__(self, archive: Archive[MultiValue]) -> Archive[MultiValue]:
if len(archive) < self.max_len:
return archive
return self._prune(archive)
def _prune(self, archive: Archive[MultiValue]) -> Archive[MultiValue]:
self._num_prunings += 1
# separate function to ease profiling
quantiles: tp.Dict[str, float] = {}
threshold = float(self.min_len + 1) / len(archive)
names = ["optimistic", "pessimistic", "average"]
for name in names:
quantiles[name] = np.quantile(
[v.get_estimation(name) for v in archive.values()], threshold, interpolation="lower"
)
new_archive: Archive[MultiValue] = Archive()
new_archive.bytesdict = {
b: v
for b, v in archive.bytesdict.items()
if any(v.get_estimation(n) < quantiles[n] for n in names)
} # strict comparison to make sure we prune even for values repeated maaany times
# this may remove all points though, but nevermind for now
return new_archive
@classmethod
def sensible_default(cls, num_workers: int, dimension: int) -> "Pruning":
"""Very conservative pruning
        - keep at least 100 elements, or 7 times num_workers, whichever is biggest
        - keep at least 3 x min_len, or up to 10 x min_len if it does not exceed 1GB of data
Parameters
----------
num_workers: int
number of evaluations which will be run in parallel at once
dimension: int
dimension of the optimization space
"""
        # safer to keep at least 7 times the workers
min_len = max(100, 7 * num_workers)
max_len_1gb = 1024 ** 3 // (dimension * 8 * 2) # stored twice: as key and as Parameter
max_len = max(3 * min_len, min(10 * min_len, max_len_1gb))
return cls(min_len, max_len)
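        # Worked example (editor's note, not in the original): with num_workers=20
        # and dimension=100, min_len = max(100, 140) = 140,
        # max_len_1gb = 1024**3 // 1600 = 671088, so
        # max_len = max(420, min(1400, 671088)) = 1400.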
class UidQueue:
"""Queue of uids to handle a population. This keeps track of:
- told uids
- asked uids
When telling, it removes from the asked queue and adds to the told queue
When asking, it takes from the told queue if not empty, else from the older
asked, and then adds to the asked queue.
"""
def __init__(self) -> None:
self.told = tp.Deque[str]() # this seems to be picklable (this syntax does not always work)
self.asked: OrderedSet[str] = OrderedSet()
def clear(self) -> None:
"""Removes all uids from the queues"""
self.told.clear()
self.asked.clear()
def ask(self) -> str:
"""Takes a uid from the told queue if not empty, else from the older asked,
then adds it to the asked queue.
"""
if self.told:
uid = self.told.popleft()
elif self.asked:
uid = next(iter(self.asked))
else:
raise RuntimeError("Both asked and told queues are empty.")
self.asked.add(uid)
return uid
def tell(self, uid: str) -> None:
"""Removes the uid from the asked queue and adds to the told queue"""
self.told.append(uid)
if uid in self.asked:
self.asked.discard(uid)
def discard(self, uid: str) -> None:
if uid in self.asked:
self.asked.discard(uid)
else:
self.told.remove(uid)
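    # Worked example (editor's note, not in the original):
    #     queue = UidQueue()
    #     queue.tell("u1")   # told: ["u1"], asked: {}
    #     queue.ask()        # -> "u1"; told: [], asked: {"u1"}
    #     queue.ask()        # -> "u1" again (recycled from the asked set)
    #     queue.tell("u1")   # back to told: ["u1"], asked: {}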
class BoundScaler:
"""Hacky way to sample in the space defined by the parametrization.
    Given a vector of values between 0 and 1,
the transform method samples in the bounds if provided,
or using the provided function otherwise.
This is used for samplers.
Code of parametrization and/or this helper should definitely be
updated to make it simpler and more robust
"""
def __init__(self, reference: p.Parameter) -> None:
self.reference = reference.spawn_child()
self.reference.freeze()
# initial check
parameter = self.reference.spawn_child()
parameter.set_standardized_data(np.linspace(-1, 1, self.reference.dimension))
expected = parameter.get_standardized_data(reference=self.reference)
self._ref_arrays = p.helpers.list_data(self.reference)
arrays = p.helpers.list_data(parameter)
check = np.concatenate(
[x.get_standardized_data(reference=y) for x, y in zip(arrays, self._ref_arrays)], axis=0
)
self.working = True
if not np.allclose(check, expected):
self.working = False
self._warn()
def _warn(self) -> None:
warnings.warn(
f"Failed to find bounds for {self.reference}, quasi-random optimizer may be inefficient.\n"
"Please open an issue on Nevergrad github"
)
def transform(
self, x: tp.ArrayLike, unbounded_transform: tp.Callable[[np.ndarray], np.ndarray]
) -> np.ndarray:
"""Transform from [0, 1] to the space between bounds"""
y = np.array(x, copy=True, dtype=float)
if not self.working:
return unbounded_transform(y)
try:
out = self._transform(y, unbounded_transform)
except Exception: # pylint: disable=broad-except
self._warn()
out = unbounded_transform(y)
return out
def _transform(
self, x: np.ndarray, unbounded_transform: tp.Callable[[np.ndarray], np.ndarray]
) -> np.ndarray:
# modifies x in place
start = 0
for ref in self._ref_arrays:
end = start + ref.dimension
layers = _datalayers.BoundLayer.filter_from(ref) # find bound layers
layers = [x for x in layers if x.uniform_sampling] # keep only uniform sampling
if not layers:
x[start:end] = unbounded_transform(x[start:end])
else:
layer_index = layers[-1]._layer_index
array = ref.spawn_child()
normalized = x[start:end].reshape(ref._value.shape)
array._layers[layer_index].set_normalized_value(normalized) # type: ignore
x[start:end] = array.get_standardized_data(reference=ref)
start = end
return x
class ConstraintManager:
"""Try max_constraints_trials random explorations for satisfying constraints.
    The finally chosen point, if it does not satisfy constraints, is penalized as shown in the penalty function,
    using the coefficients mentioned here.
Possibly unstable.
"""
def __init__(self) -> None:
self.max_trials = 1000
self.penalty_factor = 1.0
self.penalty_exponent = 1.001
def __repr__(self) -> str:
return "Constraints:" + ",".join(f"{x}={y}" for x, y in self.__dict__.items())
# pylint: disable=unused-argument
def update(
self,
max_trials: tp.Optional[int] = None,
penalty_factor: tp.Optional[float] = None,
penalty_exponent: tp.Optional[float] = None,
) -> None:
"""
Parameters
----------
max_trials: int
number of random tries for satisfying constraints.
            penalty_factor: float
multiplicative factor on the constraint penalization.
penalty_exponent: float
exponent, usually close to 1 and slightly greater than 1.
"""
for x, y in locals().items():
if y is not None and x != "self" and not x.startswith("_"):
setattr(self, x, y)
if self.penalty_exponent < 1:
raise ValueError("Penalty exponent needs to be equal or greater than 1")
def penalty(self, parameter: p.Parameter, num_ask: int, budget: tp.Optional[int]) -> float:
"""Computes the penalty associated with a Parameter, for constraint management"""
budget = 1 if budget is None else budget
coeff = self.penalty_factor * (self.penalty_exponent ** (num_ask / np.sqrt(budget)))
val = parameter.value
return coeff * sum(_float_penalty(func(val)) for func in parameter._constraint_checkers) # type: ignore
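        # Worked example (editor's note, not in the original): with the defaults
        # (penalty_factor=1.0, penalty_exponent=1.001), num_ask=100 and budget=400,
        #     coeff = 1.0 * 1.001 ** (100 / sqrt(400)) = 1.001 ** 5 ≈ 1.005
        # so the summed float penalties of violated constraints are scaled by ~1.005.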
|
from .bfp import BFP
from .fpn import FPN
from .hrfpn import HRFPN
from .just_one_outs_neck import FPNLast, HRFPNLast, FPNCatLast, MyFPN, MyFPN3
__all__ = ['FPN', 'BFP', 'HRFPN', 'FPNLast', 'HRFPNLast', 'FPNCatLast', 'MyFPN', 'MyFPN3']
|