metadata (dict) | text (string, 60 to 3.49M characters)
---|---
{
"source": "jockium/ert-downloader",
"score": 3
} |
#### File: ert-downloader/downloader/downloader.py
```python
from multiprocessing.pool import ThreadPool
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import requests
import re
import os
import shutil
def generate_download_list(url, chunklist):
download_list = []
for chunk in chunklist:
download_list.append(url + '/' + chunk)
return download_list
def download_process(link):
if not os.path.exists('download_parts'):
os.makedirs('download_parts')
filename = re.split("/", link)[-1]
with open("download_parts/" + filename, 'wb') as f:
file = requests.get(link, verify=False)
print("DOWNLOADING FILE {}". format(link))
f.write(file.content)
f.close()
def create_parts_txt(chunklist):
if not os.path.exists('download_parts'):
os.makedirs('download_parts')
with open("download_parts/parts", 'w+') as file:
for chunk in chunklist:
file.write("file {}\n".format(chunk))
def clean_junk():
if os.path.exists('download_parts'):
shutil.rmtree("download_parts")
def download(stream_data):
title = stream_data["title"]
if os.path.exists( "{}//DLs//{}.mp4".format(os.getcwd(), title)):
print("FILE EXISTS, SKIPPING:{}//DLs//{}.mp4".format(os.getcwd(), title))
else:
stream_url = stream_data["stream_url"]
chunklist = stream_data["chunklist"]
create_parts_txt(chunklist)
print("DOWNLOADING:{}".format(title))
download_list = generate_download_list(stream_url, chunklist)
pool = ThreadPool(5)
pool.map(download_process, download_list)
pool.close()
pool.join() # waiting for the downloads to complete
os.system(('ffmpeg -f concat -i download_parts/parts -acodec copy -vcodec copy "{}//DLs//{}.mp4"'.format(os.getcwd(), title)))
clean_junk()
```
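A minimal usage sketch for the module above. The `stream_data` keys (`title`, `stream_url` and `chunklist`) are the ones `download()` reads; the URL and chunk names are hypothetical, and a `DLs` directory (plus `ffmpeg` on the PATH) is assumed to exist for the final output.
```python
# Hypothetical example; the values are illustrative only.
if __name__ == "__main__":
    stream_data = {
        "title": "example_episode",
        "stream_url": "https://example.com/stream",
        "chunklist": ["media_0.ts", "media_1.ts", "media_2.ts"],
    }
    download(stream_data)  # downloads chunks in parallel, then concatenates with ffmpeg
```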
#### File: ert-downloader/extractors/ertflix_feed_extractor.py
```python
import feedparser
def obtain_list(url):
mylist = []
d = feedparser.parse(url)
for i in range(len(d.entries)):
mylist.append(d.entries[i].link)
print('found '+ str(len(d.entries)) +' links on page 1, continuing...')
mypage = 2
if "paged=" in url:
checknext = 0 # do not try to add page reference if someone already gave a specific page url...
else:
checknext = 1
while (checknext==1):
linksfound = 0
newurl = url+'?paged='+str(mypage)
d = feedparser.parse(newurl)
for z in range(len(d.entries)):
mylist.append(d.entries[z].link)
linksfound = z+1
if (linksfound == 0):
checknext = 0 #stop trying to find pages...
print('no links found on page '+str(mypage))
else:
print('found '+ str(linksfound) +' links on page '+str(mypage)+', continuing...')
mypage +=1
return mylist
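# A minimal usage sketch (the feed URL is hypothetical). It assumes the feed
# paginates with the WordPress-style "?paged=N" query parameter handled above.
if __name__ == "__main__":
    links = obtain_list("https://example.com/feed")
    print("total links collected: {}".format(len(links)))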
``` |
{
"source": "jock-tanner/magicinvoke",
"score": 2
} |
#### File: magicinvoke/invoke/context.py
```python
import os
import re
from contextlib import contextmanager
from .config import Config, DataProxy
from .exceptions import Failure, AuthFailure, ResponseNotAccepted
from .runners import Result
from .util import raise_from, iteritems
from .watchers import FailingResponder
class Context(DataProxy):
"""
Context-aware API wrapper & state-passing object.
`.Context` objects are created during command-line parsing (or, if desired,
by hand) and used to share parser and configuration state with executed
tasks (see :ref:`why-context`).
Specifically, the class offers wrappers for core API calls (such as `.run`)
which take into account CLI parser flags, configuration files, and/or
changes made at runtime. It also acts as a proxy for its `~.Context.config`
attribute - see that attribute's documentation for details.
Instances of `.Context` may be shared between tasks when executing
sub-tasks - either the same context the caller was given, or an altered
copy thereof (or, theoretically, a brand new one).
.. versionadded:: 1.0
"""
def __init__(self, config=None):
"""
:param config:
`.Config` object to use as the base configuration.
Defaults to an anonymous/default `.Config` instance.
"""
#: The fully merged `.Config` object appropriate for this context.
#:
#: `.Config` settings (see their documentation for details) may be
#: accessed like dictionary keys (``c.config['foo']``) or object
#: attributes (``c.config.foo``).
#:
#: As a convenience shorthand, the `.Context` object proxies to its
#: ``config`` attribute in the same way - e.g. ``c['foo']`` or
#: ``c.foo`` returns the same value as ``c.config['foo']``.
config = config if config is not None else Config()
self._set(_config=config)
#: A list of commands to run (via "&&") before the main argument to any
#: `run` or `sudo` calls. Note that the primary API for manipulating
#: this list is `prefix`; see its docs for details.
command_prefixes = list()
self._set(command_prefixes=command_prefixes)
#: A list of directories to 'cd' into before running commands with
#: `run` or `sudo`; intended for management via `cd`, please see its
#: docs for details.
command_cwds = list()
self._set(command_cwds=command_cwds)
@property
def config(self):
# Allows Context to expose a .config attribute even though DataProxy
# otherwise considers it a config key.
return self._config
@config.setter
def config(self, value):
# NOTE: mostly used by client libraries needing to tweak a Context's
# config at execution time; i.e. a Context subclass that bears its own
# unique data may want to be stood up when parameterizing/expanding a
# call list at start of a session, with the final config filled in at
# runtime.
self._set(_config=value)
def run(self, command, **kwargs):
"""
Execute a local shell command, honoring config options.
Specifically, this method instantiates a `.Runner` subclass (according
to the ``runner`` config option; default is `.Local`) and calls its
``.run`` method with ``command`` and ``kwargs``.
See `.Runner.run` for details on ``command`` and the available keyword
arguments.
.. versionadded:: 1.0
"""
runner = self.config.runners.local(self)
return self._run(runner, command, **kwargs)
# NOTE: broken out of run() to allow for runner class injection in
# Fabric/etc, which needs to juggle multiple runner class types (local and
# remote).
def _run(self, runner, command, **kwargs):
command = self._prefix_commands(command)
return runner.run(command, **kwargs)
def sudo(self, command, **kwargs):
"""
Execute a shell command via ``sudo`` with password auto-response.
**Basics**
This method is identical to `run` but adds a handful of
convenient behaviors around invoking the ``sudo`` program. It doesn't
do anything users could not do themselves by wrapping `run`, but the
use case is too common to make users reinvent these wheels themselves.
.. note::
If you intend to respond to sudo's password prompt by hand, just
use ``run("sudo command")`` instead! The autoresponding features in
this method will just get in your way.
Specifically, `sudo`:
* Places a `.FailingResponder` into the ``watchers`` kwarg (see
:doc:`/concepts/watchers`) which:
* searches for the configured ``sudo`` password prompt;
* responds with the configured sudo password (``<PASSWORD>``
from the :doc:`configuration </concepts/configuration>`);
* can tell when that response causes an authentication failure
(e.g. if the system requires a password and one was not
configured), and raises `.AuthFailure` if so.
* Builds a ``sudo`` command string using the supplied ``command``
argument, prefixed by various flags (see below);
* Executes that command via a call to `run`, returning the result.
**Flags used**
``sudo`` flags used under the hood include:
- ``-S`` to allow auto-responding of password via stdin;
- ``-p <prompt>`` to explicitly state the prompt to use, so we can be
sure our auto-responder knows what to look for;
- ``-u <user>`` if ``user`` is not ``None``, to execute the command as
a user other than ``root``;
- When ``-u`` is present, ``-H`` is also added, to ensure the
subprocess has the requested user's ``$HOME`` set properly.
**Configuring behavior**
There are a couple of ways to change how this method behaves:
- Because it wraps `run`, it honors all `run` config parameters and
keyword arguments, in the same way that `run` does.
- Thus, invocations such as ``c.sudo('command', echo=True)`` are
possible, and if a config layer (such as a config file or env
var) specifies that e.g. ``run.warn = True``, that too will take
effect under `sudo`.
- `sudo` has its own set of keyword arguments (see below) and they are
also all controllable via the configuration system, under the
``sudo.*`` tree.
- Thus you could, for example, pre-set a sudo user in a config
file; such as an ``invoke.json`` containing ``{"sudo": {"user":
"someuser"}}``.
:param str password: Runtime override for ``sudo.password``.
:param str user: Runtime override for ``sudo.user``.
.. versionadded:: 1.0
"""
runner = self.config.runners.local(self)
return self._sudo(runner, command, **kwargs)
# NOTE: this is for runner injection; see NOTE above _run().
def _sudo(self, runner, command, **kwargs):
prompt = self.config.sudo.prompt
password = kwargs.pop("password", self.config.sudo.password)
user = kwargs.pop("user", self.config.sudo.user)
# TODO: allow subclassing for 'get the password' so users who REALLY
# want lazy runtime prompting can have it easily implemented.
# TODO: want to print a "cleaner" echo with just 'sudo <command>'; but
# hard to do as-is, obtaining config data from outside a Runner one
# holds is currently messy (could fix that), if instead we manually
# inspect the config ourselves that duplicates logic. NOTE: once we
# figure that out, there is an existing, would-fail-if-not-skipped test
# for this behavior in test/context.py.
# TODO: once that is done, though: how to handle "full debug" output
# exactly (display of actual, real full sudo command w/ -S and -p), in
# terms of API/config? Impl is easy, just go back to passing echo
# through to 'run'...
user_flags = ""
if user is not None:
user_flags = "-H -u {} ".format(user)
command = self._prefix_commands(command)
cmd_str = "sudo -S -p '{}' {}{}".format(prompt, user_flags, command)
watcher = FailingResponder(
pattern=re.escape(prompt),
response="{}\n".format(password),
sentinel="Sorry, try again.\n",
)
# Ensure we merge any user-specified watchers with our own.
# NOTE: If there are config-driven watchers, we pull those up to the
# kwarg level; that lets us merge cleanly without needing complex
# config-driven "override vs merge" semantics.
# TODO: if/when those semantics are implemented, use them instead.
# NOTE: config value for watchers defaults to an empty list; and we
# want to clone it to avoid actually mutating the config.
watchers = kwargs.pop("watchers", list(self.config.run.watchers))
watchers.append(watcher)
try:
return runner.run(cmd_str, watchers=watchers, **kwargs)
except Failure as failure:
# Transmute failures driven by our FailingResponder, into auth
# failures - the command never even ran.
# TODO: wants to be a hook here for users that desire "override a
# bad config value for sudo.password" manual input
# NOTE: as noted in #294 comments, we MAY in future want to update
# this so run() is given ability to raise AuthFailure on its own.
# For now that has been judged unnecessary complexity.
if isinstance(failure.reason, ResponseNotAccepted):
# NOTE: not bothering with 'reason' here, it's pointless.
# NOTE: using raise_from(..., None) to suppress Python 3's
# "helpful" multi-exception output. It's confusing here.
error = AuthFailure(result=failure.result, prompt=prompt)
raise_from(error, None)
# Reraise for any other error so it bubbles up normally.
else:
raise
# TODO: wonder if it makes sense to move this part of things inside Runner,
# which would grow a `prefixes` and `cwd` init kwargs or similar. The less
# that's stuffed into Context, probably the better.
def _prefix_commands(self, command):
"""
Prefixes ``command`` with all prefixes found in ``command_prefixes``.
``command_prefixes`` is a list of strings which is modified by the
`prefix` context manager.
"""
prefixes = list(self.command_prefixes)
current_directory = self.cwd
if current_directory:
prefixes.insert(0, "cd {}".format(current_directory))
return " && ".join(prefixes + [command])
@contextmanager
def prefix(self, command):
"""
Prefix all nested `run`/`sudo` commands with given command plus ``&&``.
Most of the time, you'll want to be using this alongside a shell script
which alters shell state, such as ones which export or alter shell
environment variables.
For example, one of the most common uses of this tool is with the
``workon`` command from `virtualenvwrapper
<https://virtualenvwrapper.readthedocs.io/en/latest/>`_::
with c.prefix('workon myvenv'):
c.run('./manage.py migrate')
In the above snippet, the actual shell command run would be this::
$ workon myvenv && ./manage.py migrate
This context manager is compatible with `cd`, so if your virtualenv
doesn't ``cd`` in its ``postactivate`` script, you could do the
following::
with c.cd('/path/to/app'):
with c.prefix('workon myvenv'):
c.run('./manage.py migrate')
c.run('./manage.py loaddata fixture')
Which would result in executions like so::
$ cd /path/to/app && workon myvenv && ./manage.py migrate
$ cd /path/to/app && workon myvenv && ./manage.py loaddata fixture
Finally, as alluded to above, `prefix` may be nested if desired, e.g.::
with c.prefix('workon myenv'):
c.run('ls')
with c.prefix('source /some/script'):
c.run('touch a_file')
The result::
$ workon myenv && ls
$ workon myenv && source /some/script && touch a_file
Contrived, but hopefully illustrative.
.. versionadded:: 1.0
"""
self.command_prefixes.append(command)
yield
self.command_prefixes.pop()
@property
def cwd(self):
"""
Return the current working directory, accounting for uses of `cd`.
.. versionadded:: 1.0
"""
if not self.command_cwds:
# TODO: should this be None? Feels cleaner, though there may be
# benefits to it being an empty string, such as relying on a no-arg
# `cd` typically being shorthand for "go to user's $HOME".
return ""
# get the index for the subset of paths starting with the last / or ~
for i, path in reversed(list(enumerate(self.command_cwds))):
if path.startswith("~") or path.startswith("/"):
break
# TODO: see if there's a stronger "escape this path" function somewhere
# we can reuse. e.g., escaping tildes or slashes in filenames.
paths = [path.replace(" ", "\ ") for path in self.command_cwds[i:]]
return os.path.join(*paths)
@contextmanager
def cd(self, path):
"""
Context manager that keeps directory state when executing commands.
Any calls to `run`, `sudo`, within the wrapped block will implicitly
have a string similar to ``"cd <path> && "`` prefixed in order to give
the sense that there is actually statefulness involved.
Because use of `cd` affects all such invocations, any code making use
of the `cwd` property will also be affected by use of `cd`.
Like the actual 'cd' shell builtin, `cd` may be called with relative
paths (keep in mind that your default starting directory is your user's
``$HOME``) and may be nested as well.
Below is a "normal" attempt at using the shell 'cd', which doesn't work
since all commands are executed in individual subprocesses -- state is
**not** kept between invocations of `run` or `sudo`::
c.run('cd /var/www')
c.run('ls')
The above snippet will list the contents of the user's ``$HOME``
instead of ``/var/www``. With `cd`, however, it will work as expected::
with c.cd('/var/www'):
c.run('ls') # Turns into "cd /var/www && ls"
Finally, a demonstration (see inline comments) of nesting::
with c.cd('/var/www'):
c.run('ls') # cd /var/www && ls
with c.cd('website1'):
c.run('ls') # cd /var/www/website1 && ls
.. note::
Space characters will be escaped automatically to make dealing with
such directory names easier.
.. versionadded:: 1.0
"""
# cast to str to avoid cryptic error (#583)
self.command_cwds.append(str(path))
try:
yield
finally:
self.command_cwds.pop()
class MockContext(Context):
"""
A `.Context` whose methods' return values can be predetermined.
Primarily useful for testing Invoke-using codebases.
.. note::
Methods not given `Results <.Result>` to yield will raise
``NotImplementedError`` if called (since the alternative is to call the
real underlying method - typically undesirable when mocking.)
.. versionadded:: 1.0
"""
def __init__(self, config=None, **kwargs):
"""
Create a ``Context``-like object whose methods yield `.Result` objects.
:param config:
A Configuration object to use. Identical in behavior to `.Context`.
:param run:
A data structure of `Results <.Result>`, to return from calls to
the instantiated object's `~.Context.run` method (instead of
actually executing the requested shell command).
Specifically, this kwarg accepts:
- A single `.Result` object, which will be returned once.
- An iterable of `Results <.Result>`, which will be returned on
each subsequent call to ``.run``.
- A map of command strings to either of the above, allowing
specific call-and-response semantics instead of assuming a call
order.
:param sudo:
Identical to ``run``, but whose values are yielded from calls to
`~.Context.sudo`.
:raises:
``TypeError``, if the values given to ``run`` or other kwargs
aren't individual `.Result` objects or iterables.
"""
# TODO: would be nice to allow regexen instead of exact string matches
super(MockContext, self).__init__(config)
for method, results in iteritems(kwargs):
# Special convenience case: individual Result -> one-item list
if (
not hasattr(results, "__iter__")
and not isinstance(results, Result)
# No need for explicit dict test; they have __iter__
):
err = "Not sure how to yield results from a {!r}"
raise TypeError(err.format(type(results)))
self._set("__{}".format(method), results)
# TODO: _maybe_ make this more metaprogrammy/flexible (using __call__ etc)?
# Pretty worried it'd cause more hard-to-debug issues than it's presently
# worth. Maybe in situations where Context grows a _lot_ of methods (e.g.
# in Fabric 2; though Fabric could do its own sub-subclass in that case...)
def _yield_result(self, attname, command):
# NOTE: originally had this with a bunch of explicit
# NotImplementedErrors, but it doubled method size, and chance of
# unexpected index/etc errors seems low here.
try:
value = getattr(self, attname)
# TODO: thought there's a 'better' 2x3 DictType or w/e, but can't
# find one offhand
if isinstance(value, dict):
if hasattr(value[command], "__iter__"):
result = value[command].pop(0)
elif isinstance(value[command], Result):
result = value.pop(command)
elif hasattr(value, "__iter__"):
result = value.pop(0)
elif isinstance(value, Result):
result = value
delattr(self, attname)
return result
except (AttributeError, IndexError, KeyError):
raise_from(NotImplementedError, None)
def run(self, command, *args, **kwargs):
# TODO: perform more convenience stuff associating args/kwargs with the
# result? E.g. filling in .command, etc? Possibly useful for debugging
# if one hits unexpected-order problems with what they passed in to
# __init__.
return self._yield_result("__run", command)
def sudo(self, command, *args, **kwargs):
# TODO: this completely nukes the top-level behavior of sudo(), which
# could be good or bad, depending. Most of the time I think it's good.
# No need to supply dummy password config, etc.
# TODO: see the TODO from run() re: injecting arg/kwarg values
return self._yield_result("__sudo", command)
def set_result_for(self, attname, command, result):
"""
Modify the stored mock results for given ``attname`` (e.g. ``run``).
This is similar to how one instantiates `MockContext` with a ``run`` or
``sudo`` dict kwarg. For example, this::
mc = MockContext(run={'mycommand': Result("mystdout")})
assert mc.run('mycommand').stdout == "mystdout"
is functionally equivalent to this::
mc = MockContext()
mc.set_result_for('run', 'mycommand', Result("mystdout"))
assert mc.run('mycommand').stdout == "mystdout"
`set_result_for` is mostly useful for modifying an already-instantiated
`MockContext`, such as one created by test setup or helper methods.
.. versionadded:: 1.0
"""
attname = "__{}".format(attname)
heck = TypeError(
"Can't update results for non-dict or nonexistent mock results!"
)
# Get value & complain if it's not a dict.
# TODO: should we allow this to set non-dict values too? Seems vaguely
# pointless, at that point, just make a new MockContext eh?
try:
value = getattr(self, attname)
except AttributeError:
raise heck
if not isinstance(value, dict):
raise heck
# OK, we're good to modify, so do so.
value[command] = result
```
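A brief sketch of the `MockContext` behaviour described in the docstrings above. The command strings and stdout values are invented, and the imports assume the `invoke`-style package layout this file sits in.
```python
from invoke.context import MockContext
from invoke.runners import Result

# Map specific command strings to canned Results (see MockContext.__init__).
mc = MockContext(run={"ls": Result("file_a file_b")})
assert mc.run("ls").stdout == "file_a file_b"

# Results can also be registered after instantiation.
mc.set_result_for("run", "whoami", Result("someuser"))
assert mc.run("whoami").stdout == "someuser"
```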
#### File: examples/args-kwargs/tasks.py
```python
from __future__ import print_function
from invoke import task
from pprint import pformat
@task
def myfunc(ctx, *args, **kwargs):
"""
Note there is a bug where we couldn't do
def mine(ctx, mypositionalarg, *args, **kwargs):
pass
But something is better than nothing :) Search "TODO 531"
to find the comment describing our options.
Keyword optional args work but they can be filled by positional args
(because they're not KEYWORD_ONLY!) so we don't recommend their use.
"""
print("args: {}".format(args))
print("kwargs: {}".format(pformat(kwargs)))
```
#### File: examples/data-pipeline/tests.py
```python
import pytest, six
from cachepath import CachePath, Path
@pytest.fixture
def ctx():
from invoke import Context
return Context()
# pytest -k test_this --capture=no
def test_this(ctx):
# Includes workarounds for --clean and --force-run not working in PY2.
from textwrap import dedent
from colorama import Style
only_print_expected_stdout = dedent(
"""
print_peoples_ages called
Tom's age is 39
Jerry's age is 39
<NAME>'s age is 39
Done!
"""
)
both_stdout = dedent(
"""
print_peoples_ages called
get_peoples_ages called
Getting age for Tom
Getting age for Jerry
Getting age for <NAME>
Done pulling results!
Tom's age is 39
Jerry's age is 39
<NAME>'s age is 39
Done!
"""
)
def only_print_ran(stdout):
assert only_print_expected_stdout.strip() == stdout.strip()
def both_ran(stdout):
assert both_stdout.strip() == stdout.strip()
# Remove minv cache.
if six.PY2:
CachePath('.minv').rm()
Path('.minv').rm()
else:
st = Path("people.txt").stat().st_mtime if Path("people.txt").exists() else None
ctx.run("inv get-people --clean")
st1 = Path("people.txt").stat().st_mtime
assert st != st1 # Clean should delete and re-run.
def bprint(s):
print(Style.BRIGHT + s + Style.RESET_ALL)
# Now actually start the test. We run get-people just to seed the file,
# it shouldn't show up in actual @skippable testing in a few lines.
bprint("Everything should run from scratch.")
if six.PY2:
Path('people.txt').rm()
assert "Wrote" in ctx.run("invoke get-people").stdout
else:
assert "Wrote" in ctx.run("inv get-people --force-run").stdout.strip()
# If get-people still runs here, it means caching for skippable doesn't work.
both_ran(ctx.run("invoke print-peoples-ages").stdout)
only_print_ran(ctx.run("invoke print-peoples-ages").stdout)
bprint("Since latest outputs were generated by a task with different "
"params, everything should run again when params change.")
both_ran(
ctx.run(
"invoke -D people.important_flag=True print-peoples-ages"
).stdout
)
only_print_ran(
ctx.run(
"invoke -D people.important_flag=True print-peoples-ages"
).stdout
)
bprint("Flags changed _again_! But we still have the return value cached :)")
only_print_ran(ctx.run("invoke print-peoples-ages").stdout)
bprint("Make sure clean actually cleans.")
if six.PY2:
# If you fail here it's because we assume how minv implemented these paths :)
CachePath('.minv', 'tasks.get_peoples_ages').rm()
else:
ctx.run("invoke get-people --clean")
both_ran(ctx.run("invoke print-peoples-ages").stdout)
bprint("We're good!")
```
#### File: examples/skip-if/tasks.py
```python
from invoke import task
import os
@task(skip_ifs=[task(lambda ctx: os.getenv("SKIP_MYTASK", False))])
def mytask(ctx):
print("Didn't skip!")
``` |
{
"source": "JockWang/Graph-based-CTR",
"score": 3
} |
#### File: JockWang/Graph-based-CTR/data.py
```python
from torch.utils.data import Dataset
import torch
import logging
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
class MyDataset(Dataset):
def __init__(self, mode='train', item_size=0, dataset='book'):
super(MyDataset, self).__init__()
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Graph4CTR/data/' + dataset + '/ratings_final.txt',
sep='\t', header=None, index_col=None).values
train, test = train_test_split(df, test_size=0.2, random_state=2019)
self.item_size = item_size
if mode == 'train':
self.data = train
else:
self.data = test
logging.info(mode + ' set size:' + str(self.data.shape[0]))
def __getitem__(self, index):
temp = self.data[index]
item = np.zeros(shape=(1, self.item_size))
item[0, temp[1]] = 1
return torch.tensor(temp[0], dtype=torch.long), torch.tensor(item, dtype=torch.float), torch.tensor(
[temp[2]], dtype=torch.float)
def __len__(self):
return len(self.data)
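# Minimal usage sketch (NUM_ITEMS is a hypothetical item count, and the
# hard-coded ratings path above must exist, so this only runs where that
# Colab file is available):
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    NUM_ITEMS = 1000  # hypothetical number of distinct items
    loader = DataLoader(MyDataset(mode='train', item_size=NUM_ITEMS, dataset='book'),
                        batch_size=32, shuffle=True)
    user_ids, item_vectors, labels = next(iter(loader))
    print(user_ids.shape, item_vectors.shape, labels.shape)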
``` |
{
"source": "jockyuiz/ImageProcessing",
"score": 4
} |
#### File: jockyuiz/ImageProcessing/imaging.py
```python
import math
import numpy as np
from PIL import Image
import tifffile
# *************************************************************
# * From Photography Notebook *
# *************************************************************
# ======================= white_balance =======================
# Input:
# I: an RGB image -- a numpy array of shape (height, width, 3)
# black_level: an RGB offset to be subtracted from all the pixels
# gray: the RGB color of a gray object (includes the black level)
# Output:
# The corrected image: black level subtracted, then color channels scale to make gray come out gray
def white_balance(I, black_level, gray):
# A3TODO: Complete this function
I = I - black_level
I = I / gray
np.clip(I,0,255,out = I)
I = I.astype(np.float32)
return I # Replace this with your implementation
# ======================= color_transform =======================
# Input:
# I: an RGB image -- a numpy array of shape (height, width, 3)
# M: a 3x3 matrix, to be multiplied with each RGB triple in I
# Output:
# The image with each RGB triple multiplied by M
def color_transform(I, M):
# A3TODO: Complete this function
I = np.array([[M @ I[i,j,:] for j in range(I.shape[1])] for i in range(I.shape[0])])
return I # Replace this with your implementation
# *************************************************************
# * From Distortion Notebook *
# *************************************************************
# ======================= shift_image_to_left =======================
# Input:
# img: 2D numpy array of a grayscale image
# k: The number of units/pixels to be shifted to the left (you can assume k < width of image)
# Output:
# A 2D array of img shifted to the left by k pixels
# For points that fall out of range on the right side, repeat the rightmost pixel.
def shift_image_to_left(img, k):
new_img = np.zeros(img.shape, np.uint8)
# A3TODO: Complete this function
for i in range(img.shape[0]):
for j in range(img.shape[1]):
new_img[i,j,:] = img[i,(j+k) % img.shape[1],:]
return new_img
# ======================= rotate_image =======================
# Input:
# img: 2D numpy array of a grayscale image
# k: The angle (in degrees) to be rotated counter-clockwise around the image center
# interp_mode: 0 for nearest neighbor, 1 for bilinear
# Output:
# A 2D array of img rotated around the original image's center by k degrees
def rotate_image(img, k, interp_mode=0):
new_img = np.zeros(img.shape, np.uint8)
# A3TODO: Complete this function
center = np.array([img.shape[0]/2,img.shape[1]/2])
    angle = -math.pi * k / 180
M = np.array([
[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)],
])
if interp_mode == 0:
# nearest neighbor
for i in range(img.shape[0]):
for j in range(img.shape[1]):
position = np.array([i,j])
vector = position - center
newvector = M @ vector
newPos = center + newvector
                ix = int(newPos[0])
                iy = int(newPos[1])
if (ix < img.shape[0] and iy < img.shape[1] and ix>= 0 and iy >= 0):
new_img[i,j,:] = img[ix,iy,:]
else:
# bilinear
for i in range(img.shape[0]):
for j in range(img.shape[1]):
position = np.array([i,j])
vector = position - center
newvector = M @ vector
newPos = center + newvector
if newPos[0]>=0 and newPos[0]<img.shape[0]-1:
x0 = math.trunc(newPos[0])
x1 = x0+1
dx = newPos[0]-x0
elif newPos[0]<0:
x0 = 0
x1 = 0
dx = 0
elif newPos[0]>img.shape[0]-1:
x0 = img.shape[0]-1
x1 = img.shape[0]-1
dx = 1
if newPos[1]>=0 and newPos[1]<img.shape[1]-1:
y0 = math.trunc(newPos[1])
y1 = y0+1
dy = newPos[1]-y0
elif newPos[1]<0:
y0 = 0
y1 = 0
dy = 0
elif newPos[1]>img.shape[1]-1:
y0 = img.shape[1]-1
y1 = img.shape[1]-1
dy = 1
RGBx0 = img[x0,y0,:]
RGBx1 = img[x1,y0,:]
RGBy0 = img[x0,y1,:]
RGBy1 = img[x1,y1,:]
xRGB = (dx * RGBx1) + (1-dx) * RGBx0
yRGB = (dx * RGBy1) + (1-dx) * RGBy0
finalRGB = (dy * yRGB) + (1-dy) * xRGB
new_img[i,j,:] = finalRGB
return new_img
# ======================= undistort_image =======================
# Input:
# img: A distorted image, with coordinates in the distorted space
# k1, k2: distortion model coefficients (see explanation above)
# M: affine transformation from pixel coordinates to distortion-model coordinates
# interp_mode: 0 for nearest neighbor, 1 for bilinear
# Output:
# An undistorted image, with pixels in the image coordinates
# Write down the formula for calculating the distortion model first (see exercise above)
# Put black in for points that fall out of bounds
def undistort_image(img, k1, k2, M, interp_mode=0):
Mi = np.linalg.inv(M)
output = np.zeros_like(img)
# A3TODO: Complete this function
h, w = img.shape[:2]
if interp_mode == 0:
# nearest neighbor
for i in range(h):
for j in range(w):
position = np.array([j,i,1])
x = Mi @ position
vector = np.array([x[0],x[1]])
r = np.linalg.norm(vector)
sr = 1 + (k1 * r**2) + (k2 * r**4)
scale = np.array([
[sr, 0,0],
[0, sr,0],
[0, 0,1]
])
gx = scale @ x
homo = gx[2]
gx /= homo
GPos = M @ gx
                ix = int(round(GPos[1]))
                iy = int(round(GPos[0]))
if (ix < h and iy < w and ix>=0 and iy >= 0):
output[i,j,:] = img[ix,iy,:]
else:
# bilinear
for i in range(h):
for j in range(w):
position = np.array([j,i,1])
x = Mi @ position
vector = np.array([x[0],x[1]])
r = np.linalg.norm(vector)
sr = 1 + (k1 * r**2) + (k2 * r**4)
scale = np.array([
[sr, 0,0],
[0, sr,0],
[0, 0,1]
])
gx = scale @ x
homo = gx[2]
gx /= homo
GPos = M @ gx
newPos = GPos
y0 = math.trunc(newPos[0])
y1 = y0+1
x0 = math.trunc(newPos[1])
x1 = x0+1
dy = newPos[0]-y0
dx = newPos[1]-x0
if (x1 < img.shape[0] and y1 < img.shape[1] and x0>=0 and y0 >= 0):
RGBx0 = img[x0,y0,:]
RGBx1 = img[x1,y0,:]
RGBy0 = img[x0,y1,:]
RGBy1 = img[x1,y1,:]
xRGB = (dx * RGBx1) + (1-dx) * RGBx0
yRGB = (dx * RGBy1) + (1-dx) * RGBy0
finalRGB = (dy * yRGB) + (1-dy) * xRGB
np.clip(finalRGB,0,255,out = finalRGB)
finalRGB = finalRGB.astype(np.uint8)
output[i,j,:] = finalRGB
return output
# *************************************************************
# * From Convolution Notebook *
# *************************************************************
# ======================= gen_gaussian_filter =======================
# Input:
# dim: size of the filter in both x and y direction
# sigma: standard deviation of the gaussian filter
# Output:
# A 2-dimensional numpy array of size dim*dim
# (Note that the array should be normalized)
# Hint: Use linspace or mgrid from numpy
def gen_gaussian_filter(dim, sigma):
# A3 implement
#pass # Replace this line with your implementation
f = np.zeros([dim, dim])
if (dim % 2 ==0):
center = dim/2
else:
center = (dim-1)/2
for i in range(dim):
for j in range(dim):
const = 2 * sigma**2
x = i - center
y = j - center
            f[i,j] = (1 / (const * np.pi)) * math.exp(-(x**2 + y**2) / const)
sum = np.sum(f)
f = f / sum
return f
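# Example (sketch): a 5x5 Gaussian blur kernel with sigma = 1.0. Because the
# kernel is normalized above, its entries sum to ~1.0, so convolving with it
# preserves overall image brightness.
#   f = gen_gaussian_filter(5, 1.0)
#   assert abs(f.sum() - 1.0) < 1e-6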
# ======================= convolve =======================
# Input:
# I: A 2D numpy array containing pixels of an image
# f: A squared/non-squared filter of odd/even-numbered dimensions
# Output:
# A 2D numpy array resulting from applying the convolution filter f to I
# All the entries of the array should be of type uint8, and restricted to [0,255]
# You may use clip and astype in numpy to enforce this
# Note: When convolving, do not operate on the entries outside of the image bound,
# i.e. clamp the ranges to the width and height of the image
# Tie-breaking: If f has an even number of dimensions in some direction (assume the dimension is 2r),
# sweep through [i-r+1, i+r] (i.e. length of left half = length of right half - 1)
# With odd # of dimensions (2r+1), you would sweep through [i-r, i+r].
def convolve(I, f):
# A3TODO: Complete this function
output = np.zeros_like(I)
kernel_h = f.shape[0]
kernel_w = f.shape[1]
h = math.trunc(kernel_h / 2)
w = math.trunc(kernel_w / 2)
image_pad = np.pad(I, pad_width=(
(kernel_h // 2, kernel_h // 2),(kernel_w // 2,
kernel_w // 2),(0,0)), mode='constant',
constant_values=0).astype(np.float32)
print(image_pad.shape)
for i in range(h,image_pad.shape[0]-h):
        for j in range(w, image_pad.shape[1] - w):
            s = np.zeros(I.shape[2])
            for ii in range(-h, kernel_h - h):
                for jj in range(-w, kernel_w - w):
s += f[ii+h,jj+w]*image_pad[i-ii,j-jj,:]
np.clip(s,0,255,out = s)
s = s.astype(np.uint8)
output[i-h,j-w]=s
return output
# ======================= convolve_sep =======================
# Input:
# I: A 2D numpy array containing pixels of an image
# f: A squared/non-squared filter of odd/even-numbered dimensions
# Output:
# A 2D numpy array resulting from applying the convolution filter f to I
# All the entries of the array should be of type uint8, and restricted to [0,255]
# You may use clip and astype in numpy to enforce this
# Note: When convolving, do not operate on the entries outside of the image bound,
# i.e. clamp the ranges to the width and height of the image in the for loop
# Tie-breaking: If f has an even number of dimensions in some direction (assume the dimension is 2r),
# sweep through [i-r+1, i+r] (i.e. length of left half = length of right half - 1)
# With odd # of dimensions (2r+1), you would sweep through [i-r, i+r].
# You will convolve with respect to the direction corresponding to I.shape[0] first, then I.shape[1]
def convolve_sep(I, f):
output = np.zeros_like(I)
# A3TODO: Complete this function
kernal_h = f.shape[0]
kernal_w = f.shape[1]
h = math.trunc(kernal_h / 2)
w = math.trunc(kernal_w / 2)
fh = f[h,:]
fw = f[:,w]
fh = fh / np.sum(fh)
fw = fw / np.sum(fw)
image_pad = np.pad(I, pad_width=(
(kernal_h // 2, kernal_h // 2),(kernal_w // 2,
kernal_w // 2),(0,0)), mode='constant',
constant_values=0).astype(np.float32)
temp = np.zeros_like(image_pad)
for i in range(h,image_pad.shape[0]-h):
for j in range(w,image_pad.shape[1]-w):
            s = np.zeros(I.shape[2])
            for ii in range(-h, kernal_h - h):
s+= fh[ii+h]*image_pad[i-ii,j,:]
temp[i,j]=s
for i in range(h,image_pad.shape[0]-h):
for j in range(w,image_pad.shape[1]-w):
            s = np.zeros(I.shape[2])
            for jj in range(-w, kernal_w - w):
s += fw[jj+w]*temp[i,j-jj,:]
np.clip(s,0,255,out = s)
s = s.astype(np.uint8)
output[i-h,j-w]=s
return output
# ======================= unsharp_mask =======================
# This function essentially subtracts a (scaled) blurred version of an image from (scaled version of) itself
# Input:
# I: A 2D numpy array containing pixels of an image
# sigma: Gassian std.dev. for blurring
# w: Sharpening weight
# Output:
# A sharpened version of I
def unsharp_mask(I, sigma, w):
output = np.zeros_like(I)
# A3TODO: Complete this function
f = gen_gaussian_filter(7, sigma)
blurI = convolve_sep(I, f)
o = (1+w)*I - w * blurI
np.clip(o,0,255,out = o)
o = o.astype(np.uint8)
output = o
return output
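# Minimal end-to-end sketch (the file name "photo.tif" is hypothetical): load an
# RGB image, sharpen it with the unsharp mask above, and save the result.
if __name__ == "__main__":
    img = np.array(Image.open("photo.tif").convert("RGB"))
    sharpened = unsharp_mask(img, sigma=2.0, w=0.8)
    Image.fromarray(sharpened).save("photo_sharpened.tif")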
``` |
{
"source": "Jocoboy/Data-Visualization",
"score": 2
} |
#### File: Jocoboy/Data-Visualization/configurations.py
```python
import os
import geoip2.database
from geoip2.errors import AddressNotFoundError
import folium
import webbrowser
import plotly.graph_objs as go
from plotly.offline.offline import plot
import tkinter as tk
import settings
import tkinter.messagebox
# from main_window import MainWindow
class Configuration:
path = {
# City in China
'Anhui':'an_hui',
'Aomen':'ao_men',
'Beijing':'bei_jing',
'Chongqing':'chong_qing',
'Fujian':'fu_jian',
'Gansu':'gan_su',
'Guangdong':'guang_dong',
'Guangxi':'guang_xi',
'Guizhou':'gui_zhou',
'Hainan':'hai_nan',
'Hebei':'he_bei',
'Henan':'he_nan',
'Heilongjiang':'hei_long_jiang',
'Hubei':'hu_bei',
'Hunan':'hu_nan',
'Jilin':'ji_lin',
'Jiangsu':'jiang_su',
'Jiangxi':'jiang_xi',
'Liaoning':'liao_ning',
'Neimenggu':'nei_meng_gu',
'Ningxia':'ning_xia',
'Qinghai':'qing_hai',
'Shandong':'shan_dong',
'Sichuan':'si_chuan',
'Taiwan':'tai_wan',
'Tianjing':'tian_jing',
'Xizang':'xi_zang',
'Xianggang':'xiang_gang',
'Xinjiang':'xin_jiang',
'Yunnan':'yun_nan',
'Zhejiang':'zhe_jiang',
# City in other country
'America':'America',
'Australia':'Australia',
'Brazil':'Brazil',
'Canada':'Canada',
'England':'England',
'Finland':'Finland',
'France':'France',
'Germany':'Germany',
'India':'India',
'Mexico':'Mexico',
'Mongolia':'Mongolia',
'Russia':'Russia',
'Saudi Arabia':'Saudi Arabia',
'Singapore':'Singapore',
'The Philippines':'The Philippines'
}
def __init__(self):
pass
def __get_response(self):
settings.FULL_PATH = os.path.join(
settings.MEDIA_ROOT, 'GeoLite2-City.mmdb')
reader = geoip2.database.Reader(settings.FULL_PATH)
try:
self.response = reader.city(settings.IP)
except AddressNotFoundError:
return None
return self.response
def __get_location(self):
response = self.__get_response()
return [response.location.longitude, response.location.latitude]
def get_local_map(self):
print(settings.MAP_TYPE)
# print(settings.API_KEY)
try:
# local_map = folium.Map(location=self.__get_location(),
# zoom_start=12, tiles=settings.MAP_TYPE, API_key=settings.API_KEY)
if settings.MAP_TYPE == 'custom tileset':
local_map = folium.Map(location=self.__get_location(),
zoom_start=12, tiles='http://{s}.tiles.yourtiles.com/{z}/{x}/{y}.png'
, attr='My Data Attribution')
else:
if settings.MAP_TYPE == 'Mapbox':
fig = dict(
data = [
go.Scattermapbox(
mode='markers',
marker=dict(
size=9
)
)
],
layout =
go.Layout(
autosize=True,
hovermode='closest',
mapbox=dict(
accesstoken=settings.API_KEY,
bearing=0,
center=dict(
lat=self.__get_location()[1],
lon=self.__get_location()[0]
),
pitch=0,
zoom=10
),
)
)
plot(fig, filename='local_map.html')
return
else:
local_map = folium.Map(location=self.__get_location(),
zoom_start=12, tiles=settings.MAP_TYPE)
except ValueError:
tkinter.messagebox.showinfo('ValueError:','You must pass an API key if using Cloudmade or non-default Mapbox tiles.')
print("Failed to open!")
else:
local_map.save('local_map.html')
webbrowser.open('local_map.html')
print("Successfully open!")
def insert_info(self, var_infos):#,label_left,label_right):
response = self.__get_response()
if response is None:
var_infos[0].set("The address "+settings.IP+" is not in the database.")
return
var_infos[0].set( "IP Info:")
try:
continent_names_zh = response.continent.names["zh-CN"]
country_names_zh = response.country.names["zh-CN"]
sub_names_zh = response.subdivisions.most_specific.names["zh-CN"]
city_names_zh = response.city.names["zh-CN"]
except KeyError:
var_infos[1].set("Continent: {}({})".format(response.continent.names["es"],
"None"))
var_infos[2].set("Country: {}({}) ,iso_code: {}".format(response.country.name,
"None",
response.country.iso_code))
var_infos[3].set("State/Province: {}({})".format(response.subdivisions.most_specific.name,
"None"))
var_infos[4].set("City: {}({})".format(response.city.name,
"None"))
else:
var_infos[1].set("Continent: {}({})".format(response.continent.names["es"],
continent_names_zh))
var_infos[2].set("Country: {}({}) ,iso_code: {}".format(response.country.name,
country_names_zh,
response.country.iso_code))
var_infos[3].set("State/Province: {}({})".format(response.subdivisions.most_specific.name,
sub_names_zh))
var_infos[4].set("City: {}({})".format(response.city.name,
city_names_zh))
var_infos[5].set("Longitude: {} ,Latitude: {}".format(response.location.longitude,
response.location.latitude))
var_infos[6].set("Time_zone: {}".format(
response.location.time_zone))
var_infos[7].set("Postal code: {}".format(response.postal.code))
'''
Addons here.
'''
try:
m_name = self.path[response.subdivisions.most_specific.name]
print(m_name)
except KeyError:
# MainWindow.photo_left = tk.PhotoImage(file='city_scenery/error.png')
m_name = 'error'
print('photo_left error')
else:
# MainWindow.photo_left = tk.PhotoImage(file='city_scenery/'+
# self.path[response.subdivisions.most_specific.name]+'.png')
print('photo_left found')
return m_name
# MainWindow.label_left = tk.Label(image=MainWindow.photo_left).grid(row=6,column=0,rowspan=8,columnspan=1)
# photo_right = tk.PhotoImage(file='city_description/'+
# self.path[response.subdivisions.most_specific.name]+'.png')
``` |
{
"source": "jocode/curso-python",
"score": 3
} |
#### File: curso-python/aplicacion-web/main.py
```python
from flask import Flask, render_template, request, flash, redirect
from ContactModel import ContactModel
app = Flask(__name__)
app.secret_key = "some_secret"
app.debug = True
@app.route(r'/', methods=['GET'])
def contact_book():
contacts = ContactModel.query().fetch()
return render_template('contact_book.html', contacts=contacts)
@app.route(r'/add', methods=['GET', 'POST'])
def add_contact():
if request.form:
contact = ContactModel(
name = request.form.get('name'),
phone = request.form.get('phone'),
email=request.form.get('email')
)
        # Save the contact to the datastore
contact.put()
flash("¡Se añadió el contacto!")
return render_template('add_contact.html')
@app.route(r'/contacts/<uid>', methods=['GET'])
def contact_detail(uid):
contact = ContactModel.get_by_id(int(uid))
return render_template('contact.html', contact=contact)
@app.route(r'/delete', methods=['POST'])
def delete_contact():
contact = ContactModel.get_by_id(int(request.form.get('uid')))
if not contact:
return redirect('/', code=301)
contact.key.delete()
return redirect('/')
if __name__ == "__main__":
app.run()
```
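A quick way to exercise the routes above without a browser, using Flask's built-in test client. It assumes the App Engine-style `ContactModel` backend and the referenced templates are available, so treat it as a sketch rather than a full test.
```python
from main import app

# Flask's test client issues requests directly against the WSGI app.
with app.test_client() as client:
    print(client.get('/').status_code)     # contact book listing
    print(client.get('/add').status_code)  # renders the empty form; nothing is saved
```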
#### File: curso-python/estructuras-datos/temperatura_promedio.py
```python
def average_temps(temps):
sum_of_temps = 0
for temp in temps:
sum_of_temps += float(temp)
return sum_of_temps/len(temps)
if __name__ == "__main__":
print('\nVamos a calcular la temperatura promedio')
temps = [21, 24, 22, 23, 24, 25, 21]
promedio = round(average_temps(temps), 2)
print('La temperatura promedio es: {}'.format(promedio))
```
#### File: curso-python/hilos/thread.py
```python
import threading
import time
class MiHilo(threading.Thread):
#
def run(self):
print("{} inicio".format(self.getName()))
time.sleep(1)
print("{} terminado".format(self.getName()))
if __name__ == "__main__":
for x in range(4):
hilo = MiHilo(name="Thread-{}".format(x+1))
hilo.start()
time.sleep(.5)
```
#### File: jocode/curso-python/turtle_funciones.py
```python
import turtle
def main():
window = turtle.Screen()
tortuga = turtle.Turtle()
make_square(tortuga)
turtle.mainloop()
def make_square(turtle):
    lenght = int(input('Digite el largo del lado: '))
for i in range(4):
make_line_and_turn(turtle, lenght)
def make_line_and_turn(turtle, lenght):
turtle.forward(lenght)
turtle.left(90)
# Tell Python to run the entry point
if __name__ == '__main__':
main()
``` |
{
"source": "jocode/python-mysql",
"score": 3
} |
#### File: python-mysql/src/App.py
```python
from flask import Flask, render_template, request, redirect, url_for, flash
from flask_mysqldb import MySQL
app = Flask(__name__)
# MySQL connection
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = '<PASSWORD>'
app.config['MYSQL_DB'] = 'flaskcontacts'
mysql = MySQL(app)
# Settings
app.secret_key = 'mysecretkey'
# Route decorators
@app.route('/')
def index():
cursor = mysql.connection.cursor()
cursor.execute('SELECT * FROM contacts')
data= cursor.fetchall()
    # By default, Flask looks for templates in the "templates" folder
return render_template('index.html', contacts = data)
@app.route('/add', methods=['POST'])
def add_contact():
if request.method == 'POST':
fullname = request.form['fullname']
phone = request.form['phone']
email = request.form['email']
        # Use the MySQL connection
cursor = mysql.connection.cursor()
cursor.execute('INSERT INTO contacts (fullname, phone, email) VALUES (%s, %s, %s)', (fullname, phone, email))
mysql.connection.commit()
flash('Contacto Agregado')
return redirect(url_for('index'))
@app.route('/edit/<id>')
def getContact(id):
cursor = mysql.connection.cursor()
    cursor.execute('SELECT * FROM contacts WHERE id = %s', (id,))
data = cursor.fetchone()
return render_template('edit_contact.html', contact = data)
@app.route('/update/<id>', methods=['POST'])
def update_contact(id):
if request.method == 'POST':
        # Pack the values into a tuple
data = (
request.form['fullname'],
request.form['phone'],
request.form['email'],
id
)
cursor = mysql.connection.cursor()
cursor.execute("""
UPDATE contacts
SET fullname = %s,
phone = %s,
email = %s
WHERE id = %s
""", data)
mysql.connection.commit()
flash('Contacto Actualizado Satisfactoriamente')
return redirect(url_for('index'))
@app.route('/delete/<string:id>')
def deleteContact(id):
cursor = mysql.connection.cursor()
    cursor.execute('DELETE FROM contacts WHERE id = %s', (id,))
mysql.connection.commit()
flash('Contacto {0} eliminado'.format(id))
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(port = 3000, debug = True)
``` |
{
"source": "jocoder22/Disaster_response",
"score": 4
} |
#### File: Disaster_response/data/process_data.py
```python
import sys
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""The load_data function load the csv file into pandas dataframe
Args:
messages_filepath (filepath): the filepath for the messages
categories_filepath (filepath): the filepath for the categories data
Returns:
DataFrame: The DataFrame for analysis
"""
# load the messages csv
mess = pd.read_csv(messages_filepath)
# load the categories csv
catt = pd.read_csv(categories_filepath)
# merge the datasets
data = mess.merge(catt, on="id")
return data
def clean_data(dataset):
"""The clean_data function will return a clean DataFrame after removing,
replacing and cleaning the DataFrame to a suitable form for further
saving to database for analysis
Args:
dataset (DataFrame): the DataFrame for data wrangling
Returns:
DataFrame: The DataFrame for saving to database and later analysis
"""
# Split the values in the categories column on the ; character so that
# each value becomes a separate column
cat = dataset.categories.str.split(";", expand=True)
# Use the first row of categories dataframe to create column names for the
# categories data.
row = cat.iloc[0]
category_colnames = row.apply(lambda x: x[:-2])
cat.columns = category_colnames
# extract only the digits in categories columns
for column in cat:
# set each value to be the last character of the string
cat[column] = cat[column].str[-1:]
# convert column from string to numeric
cat[column] = cat[column].astype(int)
# drop the original categories column from dataset
dataset.drop(columns=["categories"], inplace=True)
# concatenate the original dataframe with the new `categories`
# dataframe
df_ = pd.concat([dataset, cat], axis=1)
# drop duplicates and columns not essential for further analysis
df_.drop_duplicates(keep="first", inplace=True)
# drop columns not needed
df_.drop(columns=["id", "original"], inplace=True)
return df_
def save_data(dtss, database_filepath):
"""The save_data function save the dataframe to sql database
Args:
dtss (DataFrame): the DataFrame to save to sql
database_filepath (filepath): filepath of the sql database
Returns: None
"""
# create engine
engine = create_engine(f"sqlite:///{database_filepath}", echo=False)
# save to database
dtss.to_sql("disasterTable", engine, index=False, if_exists="replace")
def main():
if len(sys.argv) == 4:
print(" ")
(
messages_filepath,
categories_filepath,
database_filepath,
) = sys.argv[1:]
print(
"Loading data...\n MESSAGES: {}\n CATEGORIES: {}".format(
messages_filepath, categories_filepath
),
end="\n\n",
)
df = load_data(messages_filepath, categories_filepath)
print("Cleaning data...\n\n")
df = clean_data(df)
print(
"Saving data...\n DATABASE: {}".format(database_filepath),
"\n\n",
)
save_data(df, database_filepath)
print("Cleaned data saved to database!")
else:
print(
"Please provide the filepaths of the messages and categories "
"datasets as the first and second argument respectively, as "
"well as the filepath of the database to save the cleaned data "
"to as the third argument. \n\nExample: python process_data.py "
"disaster_messages.csv disaster_categories.csv "
"DisasterResponse.db"
)
if __name__ == "__main__":
main()
```
#### File: Disaster_response/models/train_classifier.py
```python
import sys
import numpy as np
import pandas as pd
import pickle
import joblib
from sqlalchemy import create_engine
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MaxAbsScaler
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
# custom transformer
class dummyTransformer(BaseEstimator, TransformerMixin):
"""dummyTransformer class forms dummies from selected columns"""
def fit(self, X, y=None):
return self
def transform(self, X):
finaldata = pd.get_dummies(X)
return finaldata
# custom transformer
class columnSelector(BaseEstimator, TransformerMixin):
"""columnSelector class select columns"""
def __init__(self, col=0):
self.columnlist = col
def fit(self, X, y=None):
return self
def transform(self, X):
col_ = X[:, self.columnlist]
return col_
def tcolumn(dataframe, col, dtype):
"""The tcolumn function changes the dtype of dataframe column(s)
Args:
dataframe (DataFrame): the DataFrame for data_wrangling
col (list): list for features to select from the DataFrame
dtype (str): dtype to change to
Returns:
dataframe (DataFrame): The DataFrame for analysis
"""
for ele in col:
dataframe.loc[:, ele] = dataframe.loc[:, ele].astype(dtype)
return dataframe
def load_data(database_filepath):
"""The load_data function
Args:
database_filepath (filepath): the sql database filepath
Returns:
X_tokenized (DataFrame): dataframe with text message column
y (DataFrame): dataframe with target classes
category_names(list): list containing the name of the categories
"""
# create sql engine
engine = create_engine(f"sqlite:///{database_filepath}", echo=False)
# read all data in sql table
df = pd.read_sql_table('disasterTable', engine)
# drop duplicates and original text message
df.drop_duplicates(subset = ["message"], keep="first", inplace=True)
# drop nan, na
df.dropna(inplace=True)
# Select categorical data, create strings
catt = df.iloc[:,2:].columns
df = tcolumn(df, catt, "str")
# create new column of multicategories sum
df['total'] = df.iloc[:,2:].sum(axis=1).astype('str')
mask = df['total'].value_counts()
mask2 = mask[mask==1].index.tolist()
# create ones column
df["ones"] = df["total"].apply(lambda x: "Noting" if x in mask2 else x )
mask2 = df['ones'].value_counts()
# drop total column and pop ones
df.drop(columns=['total'], inplace=True)
strata = df.pop("ones")
# convert to integers
df = tcolumn(df, catt, "int")
# Select text and target
messages_ = df.iloc[:, 0].values
# get categories names
categories_ = df.iloc[:, 2:].values
name = df.iloc[:, 2:]
# get categories names
category_names = name.columns.tolist()
return messages_, categories_, category_names, strata
def tokenize(text):
"""The tokenize function will form tokenization for the text messages
to use for model training and testing
Args:
text (DataFrame): the DataFrame with text column for tokenization
Returns:
DataFrame: The DataFrame with words tokens and values for modelling
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
wordporter = SnowballStemmer("english")
stopword = set(stopwords.words("english"))
# Retain alphabetic words: alpha_only
alpha_only = [t.lower() for t in tokens if t.isalpha()]
# Remove all stop words: no_stops
_tokens = [t for t in alpha_only if t not in stopword]
no_stop_tokens = [wordporter.stem(word) for word in _tokens]
clean_tokens = []
# for tok in tokens:
for tok in no_stop_tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
"""The build_model function build a model pipeline
Args: None
Returns:
model(pipeline): model pipeline for fitting, prediction and scoring
"""
# create pipeline
plu = Pipeline([
('cvect', CountVectorizer(tokenizer=tokenize,
max_df=0.86, ngram_range=(1,2))),
('tfidt', TfidfTransformer()),
("mascaler", MaxAbsScaler()),
('rforest', MultiOutputClassifier(RandomForestClassifier()))
])
return plu
def evaluate_model(model, X_text, Y_test, category_names):
"""The evaluate_model function scores the performance of trained model
on test (unseen) text and categories
Args:
model (model): model to evaluate
X_text (numpy arrays): the test (unseen) tokenized text
Y_test (numpy arrays): the test (unseen) target used for evaluation
category_names(list): list containing the name of the categories
Returns: None
        prints the accuracy and per-category classification reports
"""
sp = {"end": "\n\n", "sep": "\n\n"}
# predict using the model
pred = model.predict(X_text)
# Calculate accuracy
accuracy = (pred == Y_test).mean()
accuracyscore = model.score(X_text, Y_test)
print(f"Model Accuracy: {accuracy*100:.02f}%\n")
print(f"Model Accuracy: {accuracyscore*100:.02f}%\n")
for i, label in enumerate(category_names):
print("Printing for ", label)
        print(classification_report(Y_test[:, i], pred[:, i]), **sp)
def save_model(model, model_filepath):
"""The save_model function save the model
Args:
model (model): the model to save
model_filepath (filepath): filepath where to save the modeld
Returns: None
print out: Done saving model!
"""
# # Save the model
joblib.dump(model, f"{model_filepath}")
print("Done saving model!")
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print(
"Loading data...\n DATABASE: {}".format(database_filepath)
)
X, Y, category_names, strata = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.2, stratify=strata
)
print("Building model...")
model = build_model()
print("Training model...")
model.fit(X_train, Y_train)
print("Evaluating model...")
evaluate_model(model, X_test, Y_test, category_names)
print("Saving model...\n MODEL: {}".format(model_filepath))
save_model(model, model_filepath)
print("Trained model saved!")
else:
print(
"Please provide the filepath of the disaster messages database "
"as the first argument and the filepath of the pickle file to "
"save the model to as the second argument. \n\nExample: python "
"train_classifier.py ../data/DisasterResponse.db classifier.pkl")
if __name__ == "__main__":
main()
``` |
{
"source": "jocoder22/Movie-Trailer-Website",
"score": 3
} |
#### File: jocoder22/Movie-Trailer-Website/media.py
```python
class Movie():
""" This class provides a way to store movie related information """
def __init__(self, movie_title, poster_url, trailer_youtube_link):
self.title = movie_title
self.poster_image_url = poster_url
self.trailer_youtube_url = trailer_youtube_link
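# Example usage (illustrative values only):
if __name__ == "__main__":
    toy_story = Movie("Toy Story",
                      "https://example.com/toy_story_poster.jpg",
                      "https://www.youtube.com/watch?v=example")
    print(toy_story.title, toy_story.poster_image_url, toy_story.trailer_youtube_url)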
``` |
{
"source": "jocodoma/coding-interview-prep",
"score": 4
} |
#### File: Problems/0027_Remove_Element/remove_element.py
```python
from typing import List
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
return self.__twoPointers(nums, val)
# return self.__swapValues(nums, val)
# time complexity: O(n), space complexity: O(1)
def __twoPointers(self, nums: List[int], val: int) -> int:
if len(nums) == 0:
return 0
i = 0
for n in nums:
if n != val:
nums[i] = n
i += 1
return i
# time complexity: O(n), space complexity: O(1)
def __swapValues(self, nums: List[int], val: int) -> int:
if len(nums) == 0:
return 0
l = 0
r = len(nums) - 1
while l <= r:
if nums[l] == val:
# nums[l], nums[r] = nums[r], nums[l]
nums[l] = nums[r]
r -= 1
else:
l += 1
return l
def printList(nums: List[int]) -> str:
return ("[" + (', ').join(map(str, nums)) + "]")
nums = [3,2,2,3]
val = 3
print("Input: nums = " + printList(nums) + ", val = " + str(val))
size = Solution().removeElement(nums, val)
print("Output: nums = " + printList(nums[:size]))
nums = [0,1,2,2,3,0,4,2]
val = 2
print("\nInput: nums = " + printList(nums) + ", val = " + str(val))
size = Solution().removeElement(nums, val)
print("Output: nums = " + printList(nums[:size]))
```
#### File: Problems/0217_Contains_Duplicate/contains_duplicate.py
```python
from typing import List
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
# return self.__bruteForce(nums)
# return self.__sorting(nums)
return self.__hashset(nums)
# time complexity: O(n**2), space complexity: O(1)
def __bruteForce(self, nums: List[int]) -> bool:
for i, num in enumerate(nums):
for j in range(i+1, len(nums)):
if num == nums[j]:
return True
return False
# time complexity: O(nlogn) due to sorting
# space complexity: O(1) without considering the space taken by sorting
def __sorting(self, nums: List[int]) -> bool:
nums.sort()
for i in range(1, len(nums)):
if nums[i-1] == nums[i]:
return True
return False
# time complexity: O(n), space complexity: O(n)
def __hashset(self, nums: List[int]) -> bool:
table = set()
for num in nums:
if num in table:
return True
else:
table.add(num)
return False
nums = [1,2,3,1]
print(f'Input: {nums}')
print(f'Output: {Solution().containsDuplicate(nums)}\n')
nums = [1,2,3,4]
print(f'Input: {nums}')
print(f'Output: {Solution().containsDuplicate(nums)}\n')
nums = [1,1,1,3,3,4,3,2,4,2]
print(f'Input: {nums}')
print(f'Output: {Solution().containsDuplicate(nums)}\n')
```
#### File: coding-interview-prep/Python/fibonacci.py
```python
class Solution:
def fibonacci(self, n: int) -> int:
# return self.__recursiveMethod(n)
# return self.__iterativeDynamicProgramming(n)
return self.__iterativeDP2(n)
# time complexity: O(2^n), space complexity: O(n)
def __recursiveMethod(self, n: int) -> int:
if n < 0:
print('Incorrect input')
if n == 0:
return 0
elif n == 1:
return 1
else:
return self.__recursiveMethod(n-1) + self.__recursiveMethod(n-2)
# time complexity: O(n), space complexity: O(n)
def __iterativeDynamicProgramming(self, n: int) -> int:
if n < 0:
print('Incorrect input')
f = [0,1]
for i in range(2, n+1):
f.append(f[i-1] + f[i-2])
return f[n]
# time complexity: O(n), space complexity: O(1)
def __iterativeDP2(self, n: int) -> int:
if n < 0:
print('Incorrect input')
a = 0
b = 1
if n == 0:
return a
if n == 1:
return b
for i in range(2, n+1):
c = a + b
a = b
b = c
return b
for i in range(0, 15):
print(Solution().fibonacci(i))
``` |
{
"source": "jocon15/Voice_Assistant-JARVIS",
"score": 3
} |
#### File: Voice_Assistant-JARVIS/src/weather_peripheral.py
```python
import geocoder
import time
import json
import MasterConfig
from datetime import datetime
from rich.console import Console
from rich.theme import Theme
from rich.text import Text
console = Console(theme=MasterConfig.custom_theme)
def scaled_color(temp):
"""Scale the color value based on whats passed in"""
temp = int(round(float(temp)))
if temp > 110 or temp < 0:
raise Exception('Temp out of bounds')
r = g = b = 0
if temp < 37:
b = 255
g = round((temp / 36) * 255)
elif temp < 74:
g = 255
b = 255 - round((temp/74)*255)
else:
r = round((temp/110)*255)
return "#{0:02x}{1:02x}{2:02x}".format(r, g, b)
def print_hourly_forecast(data):
# the 0 index is the current point, which we don't want
for i in range(1, 13, 1):
point = data['hourly'][i]
# get the unix timestamp
ts = point['dt']
# find out which hour it corresponds to
hour = int(datetime.fromtimestamp(ts).strftime("%H"))
        # convert the 24-hour value to a 12-hour clock with an am/pm suffix
        if hour > 12:
            hour = hour - 12
            sufix = 'pm'
        elif hour > 11:
            sufix = 'pm'
        elif hour < 1:
            hour = hour + 12
            sufix = 'am'  # midnight (hour 0) is 12am, not 12pm
        else:
            sufix = 'am'
spacing = ' '
if hour != 10 and hour != 11 and hour != 12:
spacing = ' '
# print data for that point
# color = scaled_color(point["temp"])
# MasterConfig.colors['temp'] = color
# console = Console(theme=Theme({"temp": color}))
print(
f'{hour}{sufix}{spacing}{point["temp"]:.0f}°F {point["wind_speed"]:.0f}mph {point["weather"][0]["description"]}')
pass
def main():
console = Console(theme=MasterConfig.custom_theme)
with open('data\\weather_data.json', ) as file:
data = json.load(file)
with open(f'data\\forecast_data.json', ) as file:
forecast = json.load(file)
# print(json.dumps(data, indent=4))
# for now, we are going to assume that jarvis will
# do all of the 'fixing' to all values will be in desired format
# before they are written to the json file
# ------header------
print(f'Weather for {data["name"]}:\n')
# ------levels------
color = scaled_color(data["main"]["temp"])
MasterConfig.colors['temp'] = color
console.print(
f'Temperature: [light blue]{data["main"]["temp"]}[/light blue]°F')
print(f'Humidity: {data["main"]["humidity"]}%')
# still need units
print(f'Pressure: {data["main"]["pressure"]} hPa')
print(f'Wind speed: {data["wind"]["speed"]} mph {data["wind"]["deg"]}')
if 'gust' in data['wind'].keys():
print(f'Wind gust: {data["wind"]["gust"]} mph\n')
# ------current state------
print(f'State: {data["weather"][0]["description"].upper()}')
print(f'Visibility: {data["visibility"]} ft\n')
# ------sun stuff------
print(f'Sunrise: {data["sys"]["sunrise"]} am')
print(f'Sunset: {data["sys"]["sunset"]} pm\n')
# ------forecast------
print(f'Hourly Forecast:')
print_hourly_forecast(forecast)
# sleep for a while to keep the window open
time.sleep(100000)
if __name__ == '__main__':
main()
``` |
{
"source": "JoCoSoft/pi",
"score": 3
} |
#### File: JoCoSoft/pi/motor.py
```python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
control_pins = [
7, # GPIO 04 (Pin 7)
11, # GPIO 17 (Pin 11)
13, # GPIO 27 (Pin 13)
15 # GPIO 22 (Pin 15)
]
for pin in control_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
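# Half-step drive sequence for a 4-coil unipolar stepper (e.g. a 28BYJ-48);
# each row lists which of the four control pins is energised at that step.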
halfstep_seq = [
[1, 0, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 1],
[1, 0, 0, 1]
]
def rotateMotor(clockwise, degrees):
fullRotationSteps = 512
fullRotationDegrees = 360
halfstepExecutions = int(fullRotationSteps *
(float(degrees) / float(fullRotationDegrees)))
print "Looping for " + str(halfstepExecutions) + " half step executions"
for _ in range(halfstepExecutions):
for halfstep in range(8) if clockwise else reversed(range(8)):
for pin in range(4) if clockwise else reversed(range(4)):
GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
time.sleep(0.0008)
def openVent(degrees):
print "Rotating " + str(degrees) + " degrees counter-clockwise"
rotateMotor(False, degrees)
def closeVent(degrees):
print "Rotating " + str(degrees) + " degrees clockwise"
rotateMotor(True, degrees)
```
#### File: JoCoSoft/pi/processor.py
```python
import sched
import time
import requests
import motor
delay = 5
priority = 1
scheduler = sched.scheduler(time.time, time.sleep)
def processJobs():
print "--- processJobs() ---"
processJobsUrl = "http://jocosoft-api.herokuapp.com/api/v1/jobs/process" # PROD
# processJobsUrl = "http://localhost:3000/api/v1/jobs/process" # DEV
response = requests.post(processJobsUrl, json={
"serial": "dev-vent", "code": "123456"}) # Pull serial / code from some storage
jsonResponse = response.json()
for job in jsonResponse:
print "Processing job " + job["id"]
jobName = job["name"]
jobDegrees = 90
try:
jobDegrees = job["data"]["degrees"]
except KeyError:
pass
if jobName == "open":
motor.openVent(jobDegrees)
continue
if jobName == "close":
motor.closeVent(jobDegrees)
continue
def processJobScheduler(_scheduler):
print "--- processJobScheduler() ---"
processJobs()
scheduler.enter(delay, priority, processJobScheduler, (_scheduler,))
def main():
print "--- main ----"
processJobs()
scheduler.enter(delay, priority, processJobScheduler, (scheduler,))
scheduler.run()
if __name__ == "__main__":
main()
```
#### File: pi/RPi/GPIO.py
```python
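# Presumably a stub of the RPi.GPIO interface so the code can be imported and
# exercised off the Raspberry Pi; every function below is a no-op.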
BOARD = 1
OUT = 1
IN = 1
def setmode(a):
#print a
return
def setup(a, b):
#print a
return
def output(a, b):
#print a
return
def cleanup():
#print 'a'
return
def setwarnings(flag):
#print 'False'
return
``` |
{
"source": "Jocs/reading-notes",
"score": 3
} |
#### File: begin-python/cp11/chapter11.py
```python
for char in open('./text.txt'):
print char
# #### 11.1.1 File modes
# If open is given only a filename argument, we get a file object that can read the file's contents.
# The open function supports 5 mode characters: `r`, `w`, `a`, `+`, `b`.
# #### 11.1.2 Buffering
# With a buffering argument of 0 there is no buffering; with a non-zero value output is buffered. A value of -1 uses the default buffer size.
# ### 11.2 Basic file methods
# A file object is any object that supports the file-like methods, most importantly read and write.
# #### 11.2.1 Reading and writing
f = open('./text.txt', 'w')
f.write('ransixi\n')
f.write('<EMAIL>\n')
f.close()
f = open('./text.txt', 'r')
print f.read(4)
print f.read()
f.close()
# #### 11.2.2 Piping output
# #### 11.2.3 Reading and writing lines
f = open('./text.txt', 'r')
print f.readline()
f.close()
f = open('./text.txt', 'r')
for line in f.readlines():
print '#' + line
f.close()
f = open('./text.txt', 'w')
lines = [
'hello\n',
'nice to meet you\n',
'my name is ransixi'
]
f.writelines(lines)
f.close()
f = open('./text.txt', 'w')
f.write('nice to meet you\n')
f.write('my name is jocs\n')
f.close()
# #### 11.2.5 Basic file methods
# ### 11.3 Iterating over file contents
def process(string):
print 'Process: ' + string
f = open('./text.txt')
while True:
s = f.read(1)
if not s:
break
process(s)
f.close()
f = open('./text.txt')
while True:
line = f.readline()
if not line:
break
process(line)
f.close()
f = open('./text.txt')
for line in f.readlines():
process(line)
f.close()
print list(open('./text.txt'))
```
#### File: begin-python/cp16/test_chapter16.py
```python
import unittest, chapter16
class SquareTestCase(unittest.TestCase):
def testInterers(self):
for x in xrange(-10, 10):
p = chapter16.square(x)
self.failUnless(p == x * x, 'interger square failed')
def testFloat(self):
for x in xrange(-10, 10):
x = x / 10.0
p = chapter16.square(x)
self.failUnless(p == x * x, 'Float square failed')
if __name__ == '__main__': unittest.main()
``` |
{
"source": "joctaTorres/treeasy",
"score": 4
} |
#### File: treeasy/treeasy/entropy.py
```python
from math import log2
from typing import List
def collection_entropy(collection: List[int]) -> float:
"""
collection: discrete target attribute count
e.g.1: [7, 9, 4]
"""
collection_size = sum(collection)
return entropy([(target / collection_size) for target in collection])
def entropy(probabilities: List[float], log=log2) -> float:
"""
probabilities: e.g.1: [(7/20), (9/20), (4/20)]
e.g.2: [(4/11), (7/11)]
e.g.3: [0.5, 0.2, 0.1, 0.1, 0.1]
"""
if not probabilities:
return 0
entropy = 0
for p in probabilities:
term = p * log(p)
entropy -= term
return entropy
def target_collection_information_gain(
target_collection: List[int], attribute_subsets: List[List[int]]
) -> float:
target_instance_size = sum(target_collection)
target_entropy = collection_entropy(target_collection)
return attribute_information_gain(
target_entropy, target_instance_size, attribute_subsets
)
def attribute_information_gain(
target_entropy: float, target_instance_size: int, attribute_subsets: List[List[int]]
) -> float:
attribute_entropy = 0.0
for subset in attribute_subsets:
        subset_weight = sum(subset) / target_instance_size
        attribute_entropy += subset_weight * collection_entropy(subset)
gain = target_entropy - attribute_entropy
return gain
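# --- Illustrative usage (not part of the original module) ---
if __name__ == "__main__":
    # Entropy of a target attribute with class counts 7, 9 and 4 (20 instances).
    print(collection_entropy([7, 9, 4]))  # roughly 1.51 bits
    # Gain of an attribute splitting those 20 instances into two subsets
    # with class counts [5, 4, 1] and [2, 5, 3].
    print(target_collection_information_gain([7, 9, 4], [[5, 4, 1], [2, 5, 3]]))  # roughly 0.09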
``` |
{
"source": "jocubeit/CDM",
"score": 2
} |
#### File: Python/tests/adls_test_helper.py
```python
import os
from cdm.storage import ADLSAdapter
from cdm.enums import AzureCloudEndpoint
class AdlsTestHelper:
@staticmethod
def create_adapter_with_shared_key(root_relative_path: str = None, test_blob_hostname: bool = False, https_hostname: bool = False):
hostname = os.environ.get("ADLS_HTTPS_HOSTNAME") if https_hostname else os.environ.get("ADLS_HOSTNAME")
root_path = os.environ.get("ADLS_ROOTPATH")
shared_key = os.environ.get("ADLS_SHAREDKEY")
if test_blob_hostname is True:
hostname = hostname.replace('dfs', 'blob')
return ADLSAdapter(hostname=hostname, root=AdlsTestHelper.get_full_root_path(root_path, root_relative_path), shared_key=shared_key)
@staticmethod
def create_adapter_with_client_id(root_relative_path: str = None, specify_endpoint: bool = False, test_blob_hostname: bool = False):
hostname = os.environ.get("ADLS_HOSTNAME")
root_path = os.environ.get("ADLS_ROOTPATH")
tenant = os.environ.get("ADLS_TENANT")
client_id = os.environ.get("ADLS_CLIENTID")
client_secret = os.environ.get("ADLS_CLIENTSECRET")
if test_blob_hostname is True:
            hostname = hostname.replace('dfs', 'blob')
if specify_endpoint:
return ADLSAdapter(hostname=hostname, root=AdlsTestHelper.get_full_root_path(root_path, root_relative_path), tenant=tenant, client_id=client_id, secret=client_secret, endpoint=AzureCloudEndpoint.AZURE_PUBLIC)
return ADLSAdapter(hostname=hostname, root=AdlsTestHelper.get_full_root_path(root_path, root_relative_path), tenant=tenant, client_id=client_id, secret=client_secret)
@staticmethod
def get_full_root_path(first: str, second: str) -> str:
if second is None or second == '':
return first
if first.endswith('/'):
first = first[0:len(first) - 1]
if second.startswith('/'):
second = second[1:]
return first + '/' + second
```
#### File: cdm/projection/test_projection_performance.py
```python
import os
import time
import unittest
from cdm.utilities.resolve_options import ResolveOptions
from tests.common import TestHelper, async_test
from cdm.objectmodel import CdmTypeAttributeDefinition
class ProjectionPerformanceTest(unittest.TestCase):
# The path between TestDataPath and TestName.
tests_subpath = os.path.join('Cdm', 'Projection', 'ProjectionPerformanceTest')
@async_test
async def test_projection_performance_on_load(self):
"""A test class for testing the performance of projection operations"""
corpus = TestHelper.get_local_corpus(self.tests_subpath, 'TestProjectionPerformanceOnLoad')
entity = await corpus.fetch_object_async('largeProjectionEntity.cdm.json/largeProjectionEntity')
operation = entity.attributes[0].entity.explicit_reference.operations[0]
attGroup = operation.new_attribute.explicit_reference
# add a large number of attributes to the projection
for i in range(10000):
attGroup.members.append(CdmTypeAttributeDefinition(corpus.ctx, 'a' + str(i)))
start = time.time()
# reindex the entity to run through the visit function
await entity.in_document._index_if_needed(ResolveOptions(entity.in_document), True)
stop = time.time()
self.assertLess(stop - start, 500)
``` |
{
"source": "jod35/Fast-Food-Fast",
"score": 3
} |
#### File: app/controllers/methods.py
```python
from ..utils.database import db
from ..models.users import User
def check_username_exists(username):
user=User.query.filter_by(username=username).first()
if user:
return True
else:
return False
def check_email_exists(email):
user=User.query.filter_by(email=email).first()
if user:
return True
else:
return False
```
#### File: app/ui/routes.py
```python
from flask import Blueprint,render_template,url_for
from flask_login import login_required,current_user
from ..models.users import Order
ui_bp=Blueprint('ui',__name__,template_folder='templates')
@ui_bp.route('/')
def index():
return render_template('index.html')
@ui_bp.route('/login_fail')
def login_failed():
return render_template('login.html')
# @ui_bp.route must be the outermost decorator so the login check wraps the registered view
@ui_bp.route('/orders')
@login_required
def users_orders():
orders=Order.query.filter_by(sender=current_user).all()
return render_template('orders.html' ,orders=orders)
@ui_bp.route('/admin')
@login_required
def new_orders():
orders=Order.query.filter_by(delivery_complete=False).all()
return render_template('admin.html',orders=orders)
@ui_bp.route('/complete-orders')
@login_required
def admin_complete_orders():
orders=Order.query.filter_by(delivery_complete=True).all()
return render_template('orderscomplete.html',orders=orders)
@ui_bp.route('/myorders')
@login_required
def user_complete_orders():
orders=Order.query.filter_by(sender=current_user).all()
return render_template('myorders.html',orders=orders)
``` |
{
"source": "Joda89/parsedmarc",
"score": 2
} |
#### File: parsedmarc/parsedmarc/cli.py
```python
import json
import logging
import sys
import time
from collections import OrderedDict
from glob import glob
from itertools import repeat
from multiprocessing import Pool, Value
from ssl import CERT_NONE, create_default_context
from prometheus_client import start_http_server
from tqdm import tqdm
from parsedmarc import get_dmarc_reports_from_inbox, watch_inbox, \
parse_report_file, get_dmarc_reports_from_mbox, save_output, email_results, ParserError, InvalidDMARCReport
from parsedmarc.config import Config
from parsedmarc.logger import load_config
from parsedmarc.output import syslog, elastic, s3, splunk, kafkaclient
from parsedmarc.utils import is_mbox
logger = logging.getLogger("parsedmarc")
def cli_parse(file_path, sa, nameservers, dns_timeout,
ip_db_path, offline, parallel=False):
"""Separated this function for multiprocessing"""
try:
file_results = parse_report_file(file_path,
ip_db_path=ip_db_path,
offline=offline,
nameservers=nameservers,
dns_timeout=dns_timeout,
strip_attachment_payloads=sa,
parallel=parallel)
except ParserError as error:
return error, file_path
finally:
global counter
with counter.get_lock():
counter.value += 1
return file_results, file_path
def init(ctr):
global counter
counter = ctr
def _main():
"""Called when the module is executed"""
def process_reports(reports_):
output_str = "{0}\n".format(json.dumps(reports_,
ensure_ascii=False,
indent=2))
if not opts.silent:
print(output_str)
if opts.kafka_hosts:
try:
ssl_context = None
if opts.kafka_skip_certificate_verification:
logger.debug("Skipping Kafka certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
kafka_client = kafkaclient.KafkaClient(
opts.kafka_hosts,
username=opts.kafka_username,
password=opts.kafka_password,
ssl_context=ssl_context
)
except Exception as error_:
logger.error("Kafka Error: {0}".format(error_.__str__()))
if opts.s3_bucket:
try:
s3_client = s3.S3Client(
bucket_name=opts.s3_bucket,
bucket_path=opts.s3_path,
)
except Exception as error_:
logger.error("S3 Error: {0}".format(error_.__str__()))
if opts.syslog_server:
try:
syslog_client = syslog.SyslogClient(
server_name=opts.syslog_server,
server_port=int(opts.syslog_port),
)
except Exception as error_:
logger.error("Syslog Error: {0}".format(error_.__str__()))
if opts.save_aggregate:
for report in reports_["aggregate_reports"]:
try:
if opts.elasticsearch_hosts:
shards = opts.elasticsearch_number_of_shards
replicas = opts.elasticsearch_number_of_replicas
elastic.save_aggregate_report_to_elasticsearch(
report,
index_suffix=opts.elasticsearch_index_suffix,
monthly_indexes=opts.elasticsearch_monthly_indexes,
number_of_shards=shards,
number_of_replicas=replicas
)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if opts.kafka_hosts:
kafka_client.save_aggregate_reports_to_kafka(
report, kafka_aggregate_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
try:
if opts.s3_bucket:
s3_client.save_aggregate_report_to_s3(report)
except Exception as error_:
logger.error("S3 Error: {0}".format(error_.__str__()))
try:
if opts.syslog_server:
syslog_client.save_aggregate_report_to_syslog(report)
except Exception as error_:
logger.error("Syslog Error: {0}".format(error_.__str__()))
if opts.hec:
try:
aggregate_reports_ = reports_["aggregate_reports"]
if len(aggregate_reports_) > 0:
hec_client.save_aggregate_reports_to_splunk(
aggregate_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
if opts.save_forensic:
for report in reports_["forensic_reports"]:
try:
shards = opts.elasticsearch_number_of_shards
replicas = opts.elasticsearch_number_of_replicas
if opts.elasticsearch_hosts:
elastic.save_forensic_report_to_elasticsearch(
report,
index_suffix=opts.elasticsearch_index_suffix,
monthly_indexes=opts.elasticsearch_monthly_indexes,
number_of_shards=shards,
number_of_replicas=replicas)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
except InvalidDMARCReport as error_:
logger.error(error_.__str__())
try:
if opts.kafka_hosts:
kafka_client.save_forensic_reports_to_kafka(
report, kafka_forensic_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
try:
if opts.s3_bucket:
s3_client.save_forensic_report_to_s3(report)
except Exception as error_:
logger.error("S3 Error: {0}".format(error_.__str__()))
try:
if opts.syslog_server:
syslog_client.save_forensic_report_to_syslog(report)
except Exception as error_:
logger.error("Syslog Error: {0}".format(error_.__str__()))
if opts.hec:
try:
forensic_reports_ = reports_["forensic_reports"]
if len(forensic_reports_) > 0:
hec_client.save_forensic_reports_to_splunk(
forensic_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
aggregate_reports = []
forensic_reports = []
config = Config()
config.load_config()
opts = config.get_config()
load_config(opts)
if opts.imap_host is None and len(opts.file_path) == 0:
logger.error("You must supply input files, or an IMAP configuration")
exit(1)
logger.info("Starting dmarcparse")
if opts.save_aggregate or opts.save_forensic:
try:
if opts.elasticsearch_hosts:
es_aggregate_index = "dmarc_aggregate"
es_forensic_index = "dmarc_forensic"
if opts.elasticsearch_index_suffix:
suffix = opts.elasticsearch_index_suffix
es_aggregate_index = "{0}_{1}".format(
es_aggregate_index, suffix)
es_forensic_index = "{0}_{1}".format(
es_forensic_index, suffix)
elastic.set_hosts(opts.elasticsearch_hosts,
opts.elasticsearch_ssl,
opts.elasticsearch_ssl_cert_path,
opts.elasticsearch_username,
opts.elasticsearch_password,
timeout=opts.elasticsearch_timeout)
elastic.migrate_indexes(aggregate_indexes=[es_aggregate_index],
forensic_indexes=[es_forensic_index])
except elastic.ElasticsearchError as error:
logger.error("Elasticsearch Error: {0}".format(error.__str__()))
exit(1)
if opts.hec:
if opts.hec_token is None or opts.hec_index is None:
logger.error("HEC token and HEC index are required when "
"using HEC URL")
exit(1)
verify = True
if opts.hec_skip_certificate_verification:
verify = False
hec_client = splunk.HECClient(opts.hec, opts.hec_token,
opts.hec_index,
verify=verify)
kafka_aggregate_topic = opts.kafka_aggregate_topic
kafka_forensic_topic = opts.kafka_forensic_topic
file_paths = []
mbox_paths = []
for file_path in opts.file_path:
file_paths += glob(file_path)
for file_path in file_paths:
if is_mbox(file_path):
mbox_paths.append(file_path)
file_paths = list(set(file_paths))
mbox_paths = list(set(mbox_paths))
for mbox_path in mbox_paths:
file_paths.remove(mbox_path)
counter = Value('i', 0)
pool = Pool(opts.n_procs, initializer=init, initargs=(counter,))
results = pool.starmap_async(cli_parse,
zip(file_paths,
repeat(opts.strip_attachment_payloads),
repeat(opts.nameservers),
repeat(opts.dns_timeout),
repeat(opts.ip_db_path),
repeat(opts.offline),
repeat(opts.n_procs >= 1)),
opts.chunk_size)
if sys.stdout.isatty():
pbar = tqdm(total=len(file_paths))
while not results.ready():
pbar.update(counter.value - pbar.n)
time.sleep(0.1)
pbar.close()
else:
while not results.ready():
time.sleep(0.1)
results = results.get()
pool.close()
pool.join()
for result in results:
if type(result[0]) is InvalidDMARCReport:
logger.error("Failed to parse {0} - {1}".format(result[1],
result[0]))
else:
if result[0]["report_type"] == "aggregate":
aggregate_reports.append(result[0]["report"])
elif result[0]["report_type"] == "forensic":
forensic_reports.append(result[0]["report"])
for mbox_path in mbox_paths:
strip = opts.strip_attachment_payloads
reports = get_dmarc_reports_from_mbox(mbox_path,
nameservers=opts.nameservers,
dns_timeout=opts.dns_timeout,
strip_attachment_payloads=strip,
ip_db_path=opts.ip_db_path,
offline=opts.offline,
parallel=False)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
if opts.imap_host:
try:
if opts.imap_user is None or opts.imap_password is None:
logger.error("IMAP user and password must be specified if"
"host is specified")
rf = opts.imap_reports_folder
af = opts.imap_archive_folder
ns = opts.nameservers
sa = opts.strip_attachment_payloads
ssl = True
verify = True
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
verify = False
if opts.imap_ssl is False:
ssl = False
reports = get_dmarc_reports_from_inbox(
host=opts.imap_host,
port=opts.imap_port,
ssl=ssl,
verify=verify,
timeout=opts.imap_timeout,
max_retries=opts.imap_max_retries,
user=opts.imap_user,
password=opts.imap_password,
reports_folder=rf,
archive_folder=af,
ip_db_path=opts.ip_db_path,
delete=opts.imap_delete,
offline=opts.offline,
nameservers=ns,
test=opts.imap_test,
strip_attachment_payloads=sa,
batch_size=opts.imap_batch_size
)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
except Exception as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if opts.output:
save_output(results, output_directory=opts.output,
aggregate_json_filename=opts.aggregate_json_filename,
forensic_json_filename=opts.forensic_json_filename,
aggregate_csv_filename=opts.aggregate_csv_filename,
forensic_csv_filename=opts.forensic_csv_filename)
process_reports(results)
if opts.smtp_host:
try:
verify = True
if opts.smtp_skip_certificate_verification:
verify = False
email_results(results, opts.smtp_host, opts.smtp_from,
opts.smtp_to, port=opts.smtp_port, verify=verify,
username=opts.smtp_user,
password=<PASSWORD>,
subject=opts.smtp_subject)
except Exception as error:
logger.error("{0}".format(error.__str__()))
exit(1)
if opts.imap_host and opts.imap_watch:
logger.info("Watching for email - Quit with ctrl-c")
ssl = True
verify = True
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
verify = False
if opts.imap_ssl is False:
ssl = False
try:
sa = opts.strip_attachment_payloads
watch_inbox(
opts.imap_host,
opts.imap_user,
opts.imap_password,
process_reports,
port=opts.imap_port,
ssl=ssl,
verify=verify,
reports_folder=opts.imap_reports_folder,
archive_folder=opts.imap_archive_folder,
delete=opts.imap_delete,
test=opts.imap_test,
nameservers=opts.nameservers,
dns_timeout=opts.dns_timeout,
strip_attachment_payloads=sa,
batch_size=opts.imap_batch_size,
ip_db_path=opts.ip_db_path,
offline=opts.offline)
except FileExistsError as error:
logger.error("{0}".format(error.__str__()))
exit(1)
if __name__ == "__main__":
    start_http_server(8000)  # expose Prometheus metrics on port 8000
    _main()
```
#### File: parsedmarc/parsedmarc/config.py
```python
import logging
import os
from argparse import ArgumentParser
from configparser import ConfigParser
from parsedmarc.utils import str_to_list
logger = logging.getLogger("parsedmarc")
__version__ = "7.1.1"
class Config:
opts = None
def __init__(self):
arg_parser = ArgumentParser(description="Parses DMARC reports")
arg_parser.add_argument("-c", "--config-file",
help="a path to a configuration file "
"(--silent implied)")
arg_parser.add_argument("file_path", nargs="*",
help="one or more paths to aggregate or forensic "
"report files, emails, or mbox files'")
strip_attachment_help = "remove attachment payloads from forensic " \
"report output"
arg_parser.add_argument("--strip-attachment-payloads",
help=strip_attachment_help, action="store_true")
arg_parser.add_argument("-o", "--output",
help="write output files to the given directory")
arg_parser.add_argument("--aggregate-json-filename",
help="filename for the aggregate JSON output file",
default="aggregate.json")
arg_parser.add_argument("--forensic-json-filename",
help="filename for the forensic JSON output file",
default="forensic.json")
arg_parser.add_argument("--aggregate-csv-filename",
help="filename for the aggregate CSV output file",
default="aggregate.csv")
arg_parser.add_argument("--forensic-csv-filename",
help="filename for the forensic CSV output file",
default="forensic.csv")
arg_parser.add_argument("-n", "--nameservers", nargs="+",
help="nameservers to query")
arg_parser.add_argument("-t", "--dns_timeout",
help="number of seconds to wait for an answer "
"from DNS (default: 2.0)",
type=float,
default=2.0)
arg_parser.add_argument("--offline", action="store_true",
help="do not make online queries for geolocation "
" or DNS")
arg_parser.add_argument("-s", "--silent", action="store_true",
help="only print errors and warnings")
arg_parser.add_argument("--verbose", action="store_true",
help="more verbose output")
arg_parser.add_argument("--debug", action="store_true",
help="print debugging information")
arg_parser.add_argument("--log-file", default=None,
help="output logging to a file")
arg_parser.add_argument("-v", "--version", action="version",
version=__version__)
self.opts = arg_parser.parse_args()
self.opts.save_aggregate = False
self.opts.save_forensic = False
self.opts.imap_host = None
self.opts.imap_skip_certificate_verification = False
self.opts.imap_ssl = True
self.opts.imap_port = 993
self.opts.imap_timeout = 30
self.opts.imap_max_retries = 4
self.opts.imap_user = None
self.opts.imap_password = <PASSWORD>
self.opts.imap_reports_folder = "INBOX"
self.opts.imap_archive_folder = "Archive"
self.opts.imap_watch = False
self.opts.imap_delete = False
self.opts.imap_test = False
self.opts.imap_batch_size = None
self.opts.hec = None
self.opts.hec_token = None
self.opts.hec_index = None
self.opts.hec_skip_certificate_verification = False
self.opts.elasticsearch_hosts = None
self.opts.elasticsearch_timeout = 60
self.opts.elasticsearch_number_of_shards = 1
self.opts.elasticsearch_number_of_replicas = 0
self.opts.elasticsearch_index_suffix = None
self.opts.elasticsearch_ssl = True
self.opts.elasticsearch_ssl_cert_path = None
self.opts.elasticsearch_monthly_indexes = False
self.opts.elasticsearch_username = None
self.opts.elasticsearch_password = None
self.opts.kafka_hosts = None
self.opts.kafka_username = None
self.opts.kafka_password = None
self.opts.kafka_aggregate_topic = None
self.opts.kafka_forensic_topic = None
self.opts.kafka_ssl = False
self.opts.kafka_skip_certificate_verification = False
self.opts.smtp_host = None
self.opts.smtp_port = 25
self.opts.smtp_ssl = False
self.opts.smtp_skip_certificate_verification = False
self.opts.smtp_user = None
self.opts.smtp_password = None
self.opts.smtp_from = None
self.opts.smtp_to = []
self.opts.smtp_subject = "parsedmarc report"
self.opts.smtp_message = "Please see the attached DMARC results."
self.opts.s3_bucket = None
self.opts.s3_path = None
self.opts.syslog_server = None
self.opts.syslog_port = None
self.opts.n_procs = 1
self.opts.chunk_size = 1
def get_config(self):
return self.opts
def load_config(self):
if self.opts.config_file:
abs_path = os.path.abspath(self.opts.config_file)
if not os.path.exists(abs_path):
logger.error("A file does not exist at {0}".format(abs_path))
exit(-1)
self.opts.silent = True
config = ConfigParser()
config.read(self.opts.config_file)
if "general" in config.sections():
self._load_config_general(config)
if "imap" in config.sections():
self._load_config_imap(config)
if "elasticsearch" in config.sections():
self._load_config_elasticsearch(config)
if "splunk_hec" in config.sections():
self._load_config_splunk(config)
if "kafka" in config.sections():
self._load_config_kafka(config)
if "smtp" in config.sections():
self._load_config_smtp(config)
if "s3" in config.sections():
self._load_config_s3(config)
if "syslog" in config.sections():
self._load_config_syslog(config)
def _load_config_general(self, config):
general_config = config["general"]
if "offline" in general_config:
self.opts.offline = general_config.getboolean("offline")
if "strip_attachment_payloads" in general_config:
self.opts.strip_attachment_payloads = general_config[
"strip_attachment_payloads"]
if "output" in general_config:
self.opts.output = general_config["output"]
if "aggregate_json_filename" in general_config:
self.opts.aggregate_json_filename = general_config[
"aggregate_json_filename"]
if "forensic_json_filename" in general_config:
self.opts.forensic_json_filename = general_config[
"forensic_json_filename"]
if "aggregate_csv_filename" in general_config:
self.opts.aggregate_csv_filename = general_config[
"aggregate_csv_filename"]
if "forensic_csv_filename" in general_config:
self.opts.forensic_csv_filename = general_config[
"forensic_csv_filename"]
if "nameservers" in general_config:
self.opts.nameservers = str_to_list(general_config["nameservers"])
if "dns_timeout" in general_config:
self.opts.dns_timeout = general_config.getfloat("dns_timeout")
if "save_aggregate" in general_config:
self.opts.save_aggregate = general_config["save_aggregate"]
if "save_forensic" in general_config:
self.opts.save_forensic = general_config["save_forensic"]
if "debug" in general_config:
self.opts.debug = general_config.getboolean("debug")
if "verbose" in general_config:
self.opts.verbose = general_config.getboolean("verbose")
if "silent" in general_config:
self.opts.silent = general_config.getboolean("silent")
if "log_file" in general_config:
self.opts.log_file = general_config["log_file"]
if "n_procs" in general_config:
self.opts.n_procs = general_config.getint("n_procs")
if "chunk_size" in general_config:
self.opts.chunk_size = general_config.getint("chunk_size")
if "ip_db_path" in general_config:
self.opts.ip_db_path = general_config["ip_db_path"]
else:
self.opts.ip_db_path = None
def _load_config_imap(self, config):
imap_config = config["imap"]
if "host" in imap_config:
self.opts.imap_host = imap_config["host"]
else:
logger.error("host setting missing from the "
"imap config section")
exit(-1)
if "port" in imap_config:
self.opts.imap_port = imap_config.getint("port")
if "timeout" in imap_config:
self.opts.imap_timeout = imap_config.getfloat("timeout")
if "max_retries" in imap_config:
self.opts.imap_max_retries = imap_config.getint("max_retries")
if "ssl" in imap_config:
self.opts.imap_ssl = imap_config.getboolean("ssl")
if "skip_certificate_verification" in imap_config:
imap_verify = imap_config.getboolean(
"skip_certificate_verification")
self.opts.imap_skip_certificate_verification = imap_verify
if "user" in imap_config:
self.opts.imap_user = imap_config["user"]
else:
logger.critical("user setting missing from the "
"imap config section")
exit(-1)
if "password" in imap_config:
self.opts.imap_password = imap_config["password"]
else:
logger.critical("password setting missing from the "
"imap config section")
exit(-1)
if "reports_folder" in imap_config:
self.opts.imap_reports_folder = imap_config["reports_folder"]
if "archive_folder" in imap_config:
self.opts.imap_archive_folder = imap_config["archive_folder"]
if "watch" in imap_config:
self.opts.imap_watch = imap_config.getboolean("watch")
if "delete" in imap_config:
self.opts.imap_delete = imap_config.getboolean("delete")
if "test" in imap_config:
self.opts.imap_test = imap_config.getboolean("test")
if "batch_size" in imap_config:
self.opts.imap_batch_size = imap_config.getint("batch_size")
else:
self.opts.imap_batch_size = None
def _load_config_elasticsearch(self, config):
elasticsearch_config = config["elasticsearch"]
if "hosts" in elasticsearch_config:
self.opts.elasticsearch_hosts = str_to_list(elasticsearch_config["hosts"])
else:
logger.critical("hosts setting missing from the elasticsearch config section")
exit(-1)
if "timeout" in elasticsearch_config:
timeout = elasticsearch_config.getfloat("timeout")
self.opts.elasticsearch_timeout = timeout
if "number_of_shards" in elasticsearch_config:
number_of_shards = elasticsearch_config.getint(
"number_of_shards")
self.opts.elasticsearch_number_of_shards = number_of_shards
if "number_of_replicas" in elasticsearch_config:
number_of_replicas = elasticsearch_config.getint(
"number_of_replicas")
self.opts.elasticsearch_number_of_replicas = number_of_replicas
if "index_suffix" in elasticsearch_config:
self.opts.elasticsearch_index_suffix = elasticsearch_config[
"index_suffix"]
if "monthly_indexes" in elasticsearch_config:
monthly = elasticsearch_config.getboolean("monthly_indexes")
self.opts.elasticsearch_monthly_indexes = monthly
if "ssl" in elasticsearch_config:
self.opts.elasticsearch_ssl = elasticsearch_config.getboolean("ssl")
if "cert_path" in elasticsearch_config:
self.opts.elasticsearch_ssl_cert_path = elasticsearch_config["cert_path"]
if "user" in elasticsearch_config:
self.opts.elasticsearch_username = elasticsearch_config["user"]
if "password" in elasticsearch_config:
self.opts.elasticsearch_password = elasticsearch_config["password"]
def _load_config_splunk(self, config):
hec_config = config["splunk_hec"]
if "url" in hec_config:
self.opts.hec = hec_config["url"]
else:
logger.critical("url setting missing from the splunk_hec config section")
exit(-1)
if "token" in hec_config:
self.opts.hec_token = hec_config["token"]
else:
logger.critical("token setting missing from the splunk_hec config section")
exit(-1)
if "index" in hec_config:
self.opts.hec_index = hec_config["index"]
else:
logger.critical("index setting missing from the splunk_hec config section")
exit(-1)
if "skip_certificate_verification" in hec_config:
self.opts.hec_skip_certificate_verification = hec_config["skip_certificate_verification"]
def _load_config_kafka(self, config):
kafka_config = config["kafka"]
if "hosts" in kafka_config:
self.opts.kafka_hosts = str_to_list(kafka_config["hosts"])
else:
logger.critical("hosts setting missing from the kafka config section")
exit(-1)
if "user" in kafka_config:
self.opts.kafka_username = kafka_config["user"]
else:
logger.critical("user setting missing from the kafka config section")
exit(-1)
if "password" in kafka_config:
self.opts.kafka_password = kafka_config["password"]
else:
logger.critical("password setting missing from the kafka config section")
exit(-1)
if "ssl" in kafka_config:
            self.opts.kafka_ssl = kafka_config.getboolean("ssl")
if "skip_certificate_verification" in kafka_config:
kafka_verify = kafka_config.getboolean("skip_certificate_verification")
self.opts.kafka_skip_certificate_verification = kafka_verify
if "aggregate_topic" in kafka_config:
            self.opts.kafka_aggregate_topic = kafka_config["aggregate_topic"]
else:
logger.critical("aggregate_topic setting missing from the kafka config section")
exit(-1)
if "forensic_topic" in kafka_config:
            self.opts.kafka_forensic_topic = kafka_config["forensic_topic"]
        else:
            logger.critical("forensic_topic setting missing from the kafka config section")
def _load_config_smtp(self, config):
smtp_config = config["smtp"]
if "host" in smtp_config:
self.opts.smtp_host = smtp_config["host"]
else:
logger.critical("host setting missing from the smtp config section")
exit(-1)
if "port" in smtp_config:
self.opts.smtp_port = smtp_config["port"]
if "ssl" in smtp_config:
self.opts.smtp_ssl = smtp_config.getboolean("ssl")
if "skip_certificate_verification" in smtp_config:
smtp_verify = smtp_config.getboolean(
"skip_certificate_verification")
self.opts.smtp_skip_certificate_verification = smtp_verify
if "user" in smtp_config:
self.opts.smtp_user = smtp_config["user"]
else:
logger.critical("user setting missing from the smtp config section")
exit(-1)
if "password" in smtp_config:
self.opts.smtp_password = smtp_config["password"]
else:
logger.critical("password setting missing from the smtp config section")
exit(-1)
if "from" in smtp_config:
self.opts.smtp_from = smtp_config["from"]
else:
logger.critical("from setting missing from the smtp config section")
if "to" in smtp_config:
self.opts.smtp_to = str_to_list(smtp_config["to"])
else:
logger.critical("to setting missing from the smtp config section")
if "subject" in smtp_config:
self.opts.smtp_subject = smtp_config["subject"]
if "attachment" in smtp_config:
self.opts.smtp_attachment = smtp_config["attachment"]
if "message" in smtp_config:
self.opts.smtp_message = smtp_config["message"]
def _load_config_s3(self, config):
s3_config = config["s3"]
if "bucket" in s3_config:
self.opts.s3_bucket = s3_config["bucket"]
else:
logger.critical("bucket setting missing from the s3 config section")
exit(-1)
if "path" in s3_config:
self.opts.s3_path = s3_config["path"]
if self.opts.s3_path.startswith("/"):
self.opts.s3_path = self.opts.s3_path[1:]
if self.opts.s3_path.endswith("/"):
self.opts.s3_path = self.opts.s3_path[:-1]
else:
self.opts.s3_path = ""
def _load_config_syslog(self, config):
syslog_config = config["syslog"]
if "server" in syslog_config:
self.opts.syslog_server = syslog_config["server"]
else:
logger.critical("server setting missing from the syslog config section")
exit(-1)
if "port" in syslog_config:
self.opts.syslog_port = syslog_config["port"]
else:
self.opts.syslog_port = 514
```
#### File: parsedmarc/parsedmarc/logger.py
```python
import logging
logger = logging.getLogger("parsedmarc")
def load_config(config):
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.WARNING)
if config.verbose:
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
if config.debug:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
if config.log_file:
fh = logging.FileHandler(config.log_file)
formatter = logging.Formatter(
'%(asctime)s - '
'%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
``` |
{
"source": "jodabyte/bifrost",
"score": 3
} |
#### File: apa102_pi/colorschemes/colorschemes.py
```python
from apa102_pi.driver import colorcycletemplate
class StrandTest(colorcycletemplate.ColorCycleTemplate):
"""Runs a simple strand test (9 LEDs wander through the strip)."""
color = None
def init(self, strip, num_led):
self.color = 0x000000 # Initialize with black
def update(self, strip, num_led, num_steps_per_cycle, current_step,
current_cycle):
# One cycle = The 9 Test-LEDs wander through numStepsPerCycle LEDs.
if current_step == 0:
self.color >>= 8 # Red->green->blue->black
if self.color == 0:
self.color = 0xFF0000 # If black, reset to red
bloblen = 9
if num_led - 1 < bloblen:
bloblen = num_led - 3
if num_led <= 0:
bloblen = 1
# The head pixel that will be turned on in this cycle
head = (current_step + bloblen) % num_steps_per_cycle
tail = current_step # The tail pixel that will be turned off
strip.set_pixel_rgb(head, self.color) # Paint head
strip.set_pixel_rgb(tail, 0) # Clear tail
return 1 # Repaint is necessary
class TheaterChase(colorcycletemplate.ColorCycleTemplate):
"""Runs a 'marquee' effect around the strip."""
def update(self, strip, num_led, num_steps_per_cycle, current_step,
current_cycle):
# One cycle = One trip through the color wheel, 0..254
# Few cycles = quick transition, lots of cycles = slow transition
# Note: For a smooth transition between cycles, numStepsPerCycle must
# be a multiple of 7
start_index = current_step % 7 # One segment is 2 blank, and 5 filled
color_index = strip.wheel(int(round(255 / num_steps_per_cycle *
current_step, 0)))
for pixel in range(num_led):
# Two LEDs out of 7 are blank. At each step, the blank
# ones move one pixel ahead.
if ((pixel + start_index) % 7 == 0) or ((pixel + start_index) % 7 == 1):
strip.set_pixel_rgb(pixel, 0)
else:
strip.set_pixel_rgb(pixel, color_index)
return 1
class RoundAndRound(colorcycletemplate.ColorCycleTemplate):
"""Runs three LEDs around the strip."""
def init(self, strip, num_led):
strip.set_pixel_rgb(0, 0xFF0000)
strip.set_pixel_rgb(1, 0xFF0000, 5) # Only 5% brightness
strip.set_pixel_rgb(2, 0xFF0000)
def update(self, strip, num_led, num_steps_per_cycle, current_step,
current_cycle):
# Simple class to demonstrate the "rotate" method
strip.rotate()
return 1
class Solid(colorcycletemplate.ColorCycleTemplate):
"""Paints the strip with one colour."""
def update(self, strip, num_led, num_steps_per_cycle, current_step, current_cycle):
stripcolour = 0xFFFFFF
if current_step == 1:
stripcolour = 0xFF0000
if current_step == 2:
stripcolour = 0x00FF00
if current_step == 3:
stripcolour = 0x0000FF
for led in range(0, num_led):
strip.set_pixel_rgb(led, stripcolour, 5) # Paint 5% white
return 1
class Rainbow(colorcycletemplate.ColorCycleTemplate):
"""Paints a rainbow effect across the entire strip."""
def update(self, strip, num_led, num_steps_per_cycle, current_step,
current_cycle):
# One cycle = One trip through the color wheel, 0..254
# Few cycles = quick transition, lots of cycles = slow transition
# -> LED 0 goes from index 0 to 254 in numStepsPerCycle cycles.
# So it might have to step up more or less than one index
# depending on numStepsPerCycle.
# -> The other LEDs go up to 254, then wrap around to zero and go up
# again until the last one is just below LED 0. This way, the
# strip always shows one full rainbow, regardless of the
# number of LEDs
scale_factor = 255 / num_led # Index change between two neighboring LEDs
start_index = 255 / num_steps_per_cycle * current_step # LED 0
for i in range(num_led):
# Index of LED i, not rounded and not wrapped at 255
led_index = start_index + i * scale_factor
# Now rounded and wrapped
led_index_rounded_wrapped = int(round(led_index, 0)) % 255
# Get the actual color out of the wheel
pixel_color = strip.wheel(led_index_rounded_wrapped)
strip.set_pixel_rgb(i, pixel_color)
return 1 # All pixels are set in the buffer, so repaint the strip now
``` |
{
"source": "Jodagito/Diamonds",
"score": 4
} |
#### File: diamonds/game/board.py
```python
from random import randint
board = []
def board_creation():
while len(board) < 8:
row = []
while len(row) < 8:
cell = randint(1, 5)
row.append(cell)
board.append(row)
return board
def show_board():
for row in range(len(board)):
for column in range(len(board[row])):
print(board[row][column], end=" ")
print()
print()
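# --- Illustrative usage (not part of the original module) ---
if __name__ == "__main__":
    board_creation()  # fills the module-level 8x8 board with values 1..5
    show_board()      # prints the board row by row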
```
#### File: diamonds/game/neighbors.py
```python
board = []
def get_number_position(game_board, player_position, player_number, player):
    global board  # get_orientation reads the module-level board
    board = game_board
position = ""
player_row = player_position[0]
player_column = player_position[1]
if player_row in [0, len(board) - 1] and player_column in [0, len(board) - 1]:
position = "corners"
neighbors = get_orientation(
player_number, player_position, position)
elif (player_row not in [0, len(board) - 1] and
player_column not in [0, len(board) - 1]):
position = "middle"
neighbors = get_neighbors(player_number, player_position, position)
else:
position = "bounds"
neighbors = get_orientation(
player_number, player_position, position)
return neighbors
def get_orientation(player_number, player_position, position):
player_row = player_position[0]
player_column = player_position[1]
direction = ""
orientation = ""
location = ""
if player_row == 0:
direction = "up"
elif player_row == len(board) - 1:
direction = "down"
if player_column == 0:
orientation = "left"
elif player_column == len(board) - 1:
orientation = "right"
location += direction + " " + orientation
return get_neighbors(player_number, player_position,
position, location)
def get_neighbors(player_number, player_position,
position, number_location=0):
neighbors = []
formulas = {'middle': [[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 1], [1, -1], [1, 0], [1, 1]],
'corners': {'up left': [[0, 1], [1, 0], [1, 1]],
'down left': [[-1, 0], [-1, 1], [0, 1]],
'down right': [[-1, -1], [0, -1], [-1, 0]],
'up right': [[0, -1], [1, -1], [1, 0]]},
'bounds': {'up ': [[0, -1], [1, -1], [1, 0], [1, 1], [0, 1]],
' left': [[-1, 0], [-1, 1], [0, 1], [1, 0], [1, 1]],
'down ': [[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 1]],
' right': [[-1, -1], [-1, 0], [0, -1],
[1, -1], [1, 0]]}}
formula_to_use = formulas[position]
if isinstance(formula_to_use, type(dict())):
formula_to_use = formula_to_use[number_location]
for step in formula_to_use:
neighbors.append([axis[1] + step[axis[0]] for
axis in enumerate(player_position)])
return neighbors
``` |
{
"source": "Jodagito/Pandomit",
"score": 3
} |
#### File: Jodagito/Pandomit/main.py
```python
import os
import parser
import pandas as pd
def file_converter(filename, expected_format):
"""Given a file returns a converted file to a preferred format"""
read_methods = [method for method in dir(pd) if method[:4] == 'read']
i = 0
while os.path.exists("converted filename {}.".format(i) + expected_format.replace("to_", "") + ""):
i += 1
try:
for method in read_methods[1:]:
try:
df = getattr(pd, method)(filename)
df_converted = getattr(pd.DataFrame, expected_format)(df)
if df_converted:
with open("converted filename {}.".format(i) + expected_format.replace("to_", "") + "", 'w') as converted_file:
converted_file.write(df_converted)
break
except:
continue
except ValueError:
print("This format can't be converted.")
if __name__ == "__main__":
args = parser.arguments_parser()
file_converter(args.filename, args.expectedformat)
``` |
{
"source": "Jodagito/YoutubeDownloader",
"score": 3
} |
#### File: YoutubeDownloader/YouTubeDownloader/__init__.py
```python
import inspect
import json
import os
from pytube import YouTube, Playlist
from pytube.exceptions import RegexMatchError, PytubeError
CONFIGURATIONS = {'destination_path': '', 'video_quality': '',
'audio_quality': '', 'when_unavailable': ''}
CONFIGS_FILE = 'configs.json'
def create_config_file():
with open(CONFIGS_FILE, 'w') as config_file:
config_data = {'destination_path': '', 'video_quality': '',
'audio_quality': '', 'when_unavailable': 'Highest'}
json.dump(config_data, config_file)
def load_config_file():
with open(CONFIGS_FILE) as config_file:
config_data = json.load(config_file)
CONFIGURATIONS['destination_path'] = config_data['destination_path']
CONFIGURATIONS['video_quality'] = config_data['video_quality']
CONFIGURATIONS['audio_quality'] = config_data['audio_quality']
CONFIGURATIONS['when_unavailable'] = config_data['when_unavailable']
if not os.path.exists(CONFIGS_FILE):
create_config_file()
load_config_file()
def main():
    global destination_path  # the download helpers read this at module level
    try:
        if not CONFIGURATIONS['destination_path']:
            print("A default path can be set on the settings menu.")
            destination_path = input(
                "\nInsert a destination path for the downloaded media ")
            if not destination_path:
                destination_path = "./"
        else:
            destination_path = CONFIGURATIONS['destination_path']
        start()
    except KeyboardInterrupt:
        exit()
def start():
clear_terminal()
print("\tYouTube Downloader\n\n")
menu_option = input(
"Select and option to continue\n\n\t1) Start Downloading\n\t2) Settings\n\t3) Help\n\t4) Exit\n").lower()
if menu_option in ['1', '1)', 'start downloading']:
return downloads_menu()
elif menu_option in ['2', '2)', 'settings']:
return settings_menu()
elif menu_option in ['3', '3)', 'help']:
return help_menu()
elif menu_option in ['4', '4)', 'exit']:
return exit()
else:
return handle_invalid_input()
def downloads_menu():
clear_terminal()
print("\tDownloads Menu\n\n")
download_source_url = ""
try:
download_source_url = input("Input the download source url ")
if not download_source_url:
return handle_invalid_input()
except KeyboardInterrupt:
return start()
if not validate_youtube_url(download_source_url):
return start()
pytube_object = YouTube(download_source_url)
playlist_videos = look_for_playlist(pytube_object)
format_selection = input(
"\n\nSelect a download option\n\t1) Audio only\n\t2) Video and audio\n")
if format_selection in ['1', '1)']:
for element in playlist_videos:
download_audio(element)
else:
download_audio(pytube_object)
elif format_selection in ['2', '2)']:
for element in playlist_videos:
download_video(element)
else:
download_video(pytube_object)
else:
return handle_invalid_input()
input("\nPress enter to continue...")
return start()
def look_for_playlist(pytube_object):
if validate_playlist(pytube_object.watch_url):
pytube_object = Playlist(pytube_object.watch_url)
return pytube_object.videos
return []
def validate_youtube_url(url):
try:
YouTube(url)
return True
except RegexMatchError as e:
input(
f"Error: An invalid URL has been inserted.\n{e}\n\nPress enter to continue...")
return False
def validate_playlist(url):
try:
Playlist(url)
return True
except KeyError:
return False
def download_audio(pytube_object):
print(f"\nDownloading {pytube_object.title}")
try:
if not CONFIGURATIONS['audio_quality']:
unavailable_audio(pytube_object)
else:
default_quality = CONFIGURATIONS['audio_quality'] + 'kbps'
filtered_pytube_object = pytube_object.streams.filter(
type='audio', abr=default_quality,
mime_type='audio/mp4').order_by('abr').desc()
if not filtered_pytube_object:
when_unavailable = CONFIGURATIONS['when_unavailable']
print(
f"\nDefault quality isn't available. {when_unavailable}" +
" quality will be downloaded.")
return unavailable_audio(pytube_object)
filtered_pytube_object = filtered_pytube_object[0]
name_with_resolution = filtered_pytube_object.title + \
" " + filtered_pytube_object.abr + ".mp4"
if os.path.isfile(destination_path + name_with_resolution):
print(
f"\nWarning: {name_with_resolution} already exists on this path.")
return
filtered_pytube_object.download(destination_path)
os.rename(destination_path + filtered_pytube_object.title +
".mp4", name_with_resolution)
print(f"\n{pytube_object.title} downloaded succesfully.")
except (IOError, OSError, PytubeError) as e:
print(f"{pytube_object.title} couldn't be downloaded.\n{e}\n")
return
def unavailable_audio(pytube_object):
if CONFIGURATIONS['when_unavailable'] == "Highest":
pytube_object = pytube_object.streams.filter(type='audio', mime_type='audio/mp4')
pytube_object = pytube_object.order_by('abr').desc()[0]
else:
pytube_object = pytube_object.streams.filter(type='audio', mime_type='audio/mp4')
pytube_object = pytube_object.order_by('abr')[0]
name_with_resolution = pytube_object.title + " " + pytube_object.abr + ".mp4"
if os.path.isfile(destination_path + name_with_resolution):
print(
f"\nWarning: {name_with_resolution} already exists on this path.")
return
pytube_object.download(destination_path)
os.rename(destination_path + pytube_object.title +
".mp4", name_with_resolution)
print(f"\n{pytube_object.title} downloaded succesfully.")
def download_video(pytube_object):
print(f"\nDownloading {pytube_object.title}")
try:
if not CONFIGURATIONS['video_quality']:
unavailable_video(pytube_object)
else:
default_quality = CONFIGURATIONS['video_quality'] + 'p'
filtered_pytube_object = pytube_object.streams.filter(
type='video', res=default_quality,
mime_type='video/mp4',
progressive='True').order_by('resolution').desc()
if not filtered_pytube_object:
when_unavailable = CONFIGURATIONS['when_unavailable']
print(
f"\nDefault quality isn't available. {when_unavailable}" +
" quality will be downloaded.")
return unavailable_video(pytube_object)
filtered_pytube_object = filtered_pytube_object[0]
name_with_resolution = filtered_pytube_object.title + \
" " + filtered_pytube_object.resolution + ".mp4"
if os.path.isfile(destination_path + name_with_resolution):
print(
f"\nWarning: {name_with_resolution} already exists on this path.")
return
filtered_pytube_object.download(destination_path)
os.rename(destination_path + filtered_pytube_object.title +
".mp4", name_with_resolution)
print(f"\n{pytube_object.title} downloaded succesfully.")
except (IOError, OSError, PytubeError) as e:
print(f"{pytube_object.title} couldn't be downloaded.\n{e}\n")
return
def unavailable_video(pytube_object):
if CONFIGURATIONS['when_unavailable'] == "Highest":
pytube_object = pytube_object.streams.filter(type='video',
mime_type='video/mp4',
progressive='True')
pytube_object = pytube_object.order_by('resolution').desc()[0]
else:
pytube_object = pytube_object.streams.filter(type='video',
mime_type='video/mp4',
progressive='True')
pytube_object = pytube_object.order_by('resolution')[0]
name_with_resolution = pytube_object.title + \
" " + pytube_object.resolution + ".mp4"
if os.path.isfile(destination_path + name_with_resolution):
print(
f"\nWarning: {name_with_resolution} already exists on this path.")
return
pytube_object.download(destination_path)
os.rename(destination_path + pytube_object.title +
".mp4", name_with_resolution)
print(f"\n{pytube_object.title} downloaded succesfully.")
def settings_menu():
clear_terminal()
selected_option = input(
f"\tSettings Menu\n\nSelect an option to continue" +
"\n\n\t1) List actual settings" +
"\n\t2) Set destination path\n\t3) Set qualities\n\t4) Go back\n").lower().replace(" ", "")
if selected_option in ["1", "listactualsettings"]:
return list_settings()
elif selected_option in ["2", "setdestinationpath"]:
set_destination_path()
elif selected_option in ["3", "setqualities"]:
set_qualities()
elif selected_option in ["4", "goback"]:
return start()
else:
return handle_invalid_input()
def set_destination_path():
clear_terminal()
default_destination_path = input(
"\n\nInsert the default destination path ")
if not default_destination_path:
default_destination_path = "./"
if (os.path.exists(default_destination_path) or
os.access(os.path.dirname(default_destination_path), os.W_OK)):
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['destination_path'] = default_destination_path
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
else:
return handle_invalid_input()
return settings_menu()
def set_qualities():
clear_terminal()
video_qualities = ["1080", "720", "480", "360", "144"]
audio_qualities = ["160", "128", "70", "50"]
print("\n\n\t\tTo go back leave both in blank.")
default_video_quality = input(
"\n\tSelect the default video quality \n1) 1080px\n2) 720px\n3) 480px\n4) 360px\n5) 144px\n")
default_audio_quality = input(
"\n\tSelect the default audio quality \n1) 160kbps\n2) 128kbps\n3) 70kbps\n4) 50kbps\n")
if default_video_quality in ["1", "2", "3", "4", "5"]:
default_video_quality = video_qualities[int(default_video_quality) - 1]
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['video_quality'] = default_video_quality
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
if default_audio_quality in ["1", "2", "3", "4"]:
default_audio_quality = audio_qualities[int(
default_audio_quality) - 1]
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['audio_quality'] = default_audio_quality
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
elif default_video_quality == "" and default_audio_quality == "":
return settings_menu()
else:
return handle_invalid_input()
set_default_when_unavailable()
return settings_menu()
def set_default_when_unavailable():
clear_terminal()
print(f"\t\tIf the default quality selected isn't " +
"available then the highest quality will be downloaded.")
change_default = input(
f"\n\nSet lowest quality as default if" +
" default one is unavailable\n\n\tYes\n\tNo\n").lower()
if change_default in ["yes", "y"]:
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
config_data['when_unavailable'] = 'Lowest'
config_file.seek(0)
config_file.write(json.dumps(config_data))
config_file.truncate()
elif change_default in ["no", "n"]:
return
else:
return handle_invalid_input()
def list_settings():
clear_terminal()
with open(CONFIGS_FILE, 'r+') as config_file:
config_data = json.load(config_file)
for setting, value in config_data.items():
print(f"{setting.capitalize().replace('_', ' ')} = {value}")
input("\n\nPress enter to continue...")
return settings_menu()
def help_menu():
clear_terminal()
input("Sorry, this menu is being developed\nPress enter to continue...")
return start()
def exit():
clear_terminal()
print("YouTube Downloader has been closed.")
def handle_invalid_input():
input("\n\nError: Invalid input.\nPress enter to continue...")
clear_terminal()
return globals()[inspect.stack()[1][3]]()
def clear_terminal():
return os.system('cls' if os.name == 'nt' else 'clear')
if __name__ == '__main__':
main()
``` |
{
"source": "jodahoney/pyinterview",
"score": 4
} |
#### File: pyinterview/pyinterview/tries.py
```python
class TrieNode:
def __init__(self, letter=None):
self.letter = letter
self.children = {}
self.is_end_of_word = False
class Trie:
def __init__(self):
self.root = TrieNode("*")
def add_word(self, word: str) -> None:
itr = self.root
for letter in word:
if letter not in itr.children:
itr.children[letter] = TrieNode(letter)
itr = itr.children[letter]
itr.is_end_of_word = True
def search(self, word: str) -> bool:
itr = self.root
for letter in word:
if letter not in itr.children:
return False
itr = itr.children[letter]
return itr.is_end_of_word
def starts_with(self, word: str) -> bool:
itr = self.root
for letter in word:
if letter not in itr.children:
return False
itr = itr.children[letter]
return True
``` |
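A minimal usage sketch for the `Trie` above (the import path is inferred from the file layout; the expected results follow directly from the three methods shown):
```python
from pyinterview.tries import Trie

trie = Trie()
trie.add_word("cat")
trie.add_word("car")

assert trie.search("cat") is True        # complete word that was added
assert trie.search("ca") is False        # "ca" was never added as a word
assert trie.starts_with("ca") is True    # ...but it is a valid prefix
assert trie.starts_with("dog") is False  # no word starts with "dog"
```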
{
"source": "jodaiber/semantic_compound_splitting",
"score": 2
} |
#### File: jodaiber/semantic_compound_splitting/compound.py
```python
from lattice import *
class Compound:
def __init__(self, string, gold_splits, predicted_lattice):
self.gold_splits = gold_splits
self.predicted_lattice = predicted_lattice
self.string = string
def get_gold_splits(self):
return self.gold_splits
```
#### File: semantic_compound_splitting/training/train_word2vec.py
```python
import gensim
import sys
import glob
import codecs
from nltk.tokenize import RegexpTokenizer
class CorpusReader():
"""
Reads a corpus from plain text files.
"""
def __init__(self, files):
if isinstance(files, str):
self.files = [files]
else:
self.files = files
self.tokenizer = RegexpTokenizer(r'\w+')
def __iter__(self):
"""
Generator that returns a list of tokens for each sentence.
:return: list of tokens
"""
for f in self.files:
print "Processing ", f
for line in open(f, "r"):
try:
yield self.tokenizer.tokenize(line.decode("utf-8"))
except:
pass
print "Starting W2V training..."
files = glob.glob(sys.argv[1])
outfile_name = sys.argv[2]
dataset = CorpusReader(files)
model = gensim.models.Word2Vec(dataset, size=500, window=5, min_count=3, negative=5, workers=15)
model.save(outfile_name)
```
#### File: semantic_compound_splitting/visualization_and_test/evaluate_prototypes.py
```python
__author__ = 'rwechsler'
import datetime
import time
import cPickle as pickle
from annoy import AnnoyIndex
import gensim
import argparse
import numpy as np
import sys
import random
from scipy import spatial
import multiprocessing as mp
from collections import defaultdict
import codecs
def timestamp():
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
def load_candidate_dump(file_name):
return pickle.load(open(file_name, "rb"))
def load_annoy_tree(model_file_name, vector_dims):
tree = AnnoyIndex(vector_dims)
tree.load(model_file_name)
return tree
def load_prototype_dump(file_name):
return pickle.load(open(file_name, "rb"))
def load_word2vecmodel(file_name):
return gensim.models.Word2Vec.load_word2vec_format(file_name, binary=True)
def get_rank_annoy_knn(annoy_tree, vector, true_index, k=100):
neighbours = annoy_tree.get_nns_by_vector(list(vector), k)
try:
return neighbours.index(true_index) + 1
except ValueError:
return 0
def get_rank_word2vec_knn(word2vec_model, vector, true_index, k=100):
neighbours, _ = zip(*word2vec_model.most_similar(positive=[vector], topn=k))
try:
return neighbours.index(word2vec_model.index2word[true_index]) + 1
except ValueError:
return 0
def candidate_generator(evaluation_set, rank_threshold, sim_threshold):
for prefix_prototype_pair in evaluation_set:
yield (prefix_prototype_pair, evaluation_set[prefix_prototype_pair], rank_threshold, sim_threshold)
def mp_wrapper_evaluate_set(argument):
return evaluate_set(*argument)
def get_nn_hitrate(ranks):
return (len(ranks) - ranks.count(0)) / float(len(ranks))
def get_sim_hitrate(similarities, threshold):
return np.sum([1 for s in similarities if s >= threshold]) / float(len(similarities))
def get_average_rank(ranks):
return np.mean([r for r in ranks if r > 0] or 0)
def get_average_similarity(similarities):
return np.mean(similarities)
def get_hitrate(ranks, similarities, threshold):
count = 0
for i, r in enumerate(ranks):
if r > 0 and similarities[i] >= threshold:
count += 1
return count / float(len(ranks))
def get_word_representation(prefix, comp_index, tail_index, word2vec_model):
comp = word2vec_model.index2word[comp_index]
tail = word2vec_model.index2word[tail_index]
fl = comp[len(prefix):-len(tail)]
if fl:
fl = "[" + fl + "]"
return fl + tail
if __name__ == "__main__":
#### Default Parameters-------------------------------------------####
rank_threshold = 30
vector_dims = 500
sim_threshold = 0.5
sample_set_size = np.inf
n_processes = 2
####End-Parametes-------------------------------------------------####
parser = argparse.ArgumentParser(description='Evaluate candidates')
parser.add_argument('-w', action='store', dest="word2vec_file", required=True)
parser.add_argument('-v', action="store", dest="prototypes_file", required=True)
parser.add_argument('-d', action="store", dest="vector_dims", type=int, default=vector_dims)
parser.add_argument('-t', action="store", dest="annoy_tree_file")
parser.add_argument('-c', action="store", dest="candidates_index_file")
parser.add_argument('-o', action="store", dest="result_output_file", required=True)
parser.add_argument('-p', action="store", dest="n_processes", type=int, default=n_processes)
parser.add_argument('-s', action="store", dest="sample_set_size", type=int, default=sample_set_size)
parser.add_argument('-r', action="store", dest="rank_threshold", type=int, default=rank_threshold)
parser.add_argument('-z', action="store", dest="sim_threshold", type=float, default=sim_threshold)
arguments = parser.parse_args(sys.argv[1:])
print timestamp(), "loading word2vec model"
word2vec_model = load_word2vecmodel(arguments.word2vec_file)
print timestamp(), "loading prototypes"
prototypes = load_prototype_dump(arguments.prototypes_file)
if arguments.candidates_index_file:
print timestamp(), "loading candidates"
candidates = load_candidate_dump(arguments.candidates_index_file)
evaluation_set = dict()
# keys are (prefix, prototype_pair)
for prefix in prototypes:
for prototype, evidence_set in prototypes[prefix]:
if arguments.candidates_index_file:
evaluation_set[(prefix, prototype)] = candidates[prefix]
else:
evaluation_set[(prefix, prototype)] = evidence_set
print timestamp(), "preprocess candidates"
# only store vectors that we need. And sample already.
word2vec_vectors = dict()
for prototype_tup in evaluation_set:
if len(evaluation_set[prototype_tup]) > arguments.sample_set_size:
evaluation_set[prototype_tup] = set(random.sample(evaluation_set[prototype_tup], arguments.sample_set_size))
for (i,j) in evaluation_set[prototype_tup]:
word2vec_vectors[i] = np.array(word2vec_model.syn0[i])
word2vec_vectors[j] = np.array(word2vec_model.syn0[j])
word2vec_vectors[prototype_tup[1][0]] = np.array(word2vec_model.syn0[prototype_tup[1][0]])
word2vec_vectors[prototype_tup[1][1]] = np.array(word2vec_model.syn0[prototype_tup[1][1]])
print timestamp(), "number of vectors: ", len(word2vec_vectors)
if arguments.annoy_tree_file and arguments.vector_dims:
del word2vec_model
print timestamp(), "loading annoy tree"
# global annoy_tree
model = load_annoy_tree(arguments.annoy_tree_file, arguments.vector_dims)
knn_method = get_rank_annoy_knn
else:
print timestamp(), "using word2vec model"
model = word2vec_model
knn_method = get_rank_word2vec_knn
def evaluate_set(prefix_prototype_pair, evidence_set, rank_threshold=100, sim_threshold=0.5):
global model
global word2vec_vectors
ranks = []
similarities = []
prefix, vector_pair = prefix_prototype_pair
diff = word2vec_vectors[vector_pair[0]]- word2vec_vectors[vector_pair[1]]
for comp, tail in evidence_set:
predicted = word2vec_vectors[tail] + diff
true_vector = word2vec_vectors[comp]
rank = knn_method(model, predicted, comp, rank_threshold)
ranks.append(rank)
sim = spatial.distance.cosine(predicted, true_vector)
similarities.append(sim)
# returns hitrate, hitrate_nn, hitrate_sim, average_rank_if_found, average_similarity_if_found
results = get_hitrate(ranks, similarities, threshold=sim_threshold), get_nn_hitrate(ranks), get_sim_hitrate(similarities, threshold=sim_threshold), get_average_rank(ranks), get_average_similarity(similarities)
return (prefix_prototype_pair,results)
print timestamp(), "evaluating candidates"
pool = mp.Pool(processes=arguments.n_processes)
params = candidate_generator(evaluation_set, arguments.rank_threshold, arguments.sim_threshold)
results = pool.map(mp_wrapper_evaluate_set, params)
pool.close()
pool.join()
del pool
print timestamp(), "pickling"
pickle.dump(results, open(arguments.result_output_file, "wb"))
if arguments.annoy_tree_file:
print timestamp(), "loading word2vec model"
word2vec_model = load_word2vecmodel(arguments.word2vec_file)
else:
word2vec_model = model
print timestamp(), "mapping indices to word"
scores = defaultdict(dict)
for ((prefix, vector), eval_scores) in results:
vector_repr = get_word_representation(prefix, vector[0], vector[1], word2vec_model)
scores[prefix][vector_repr] = eval_scores
print timestamp(), "writing result file"
outfile = codecs.open(arguments.result_output_file, "w", "utf-8")
for prefix in scores:
for vector in scores[prefix]:
outfile.write("\t".join([prefix, vector] + map(str, scores[prefix][vector])) + "\n")
outfile.close()
print timestamp(), "done"
```
#### File: semantic_compound_splitting/visualization_and_test/map_prototypes_to_words.py
```python
__author__ = 'rwechsler'
import gensim
import cPickle as pickle
import argparse
import sys
import codecs
def load_word2vecmodel(file_name):
return gensim.models.Word2Vec.load_word2vec_format(file_name, binary=True)
def load_prototype_dump(file_name):
return pickle.load(open(file_name, "rb"))
def get_word_representation(prefix, comp_index, tail_index, word2vec_model):
comp = word2vec_model.index2word[comp_index]
tail = word2vec_model.index2word[tail_index]
fl = comp[len(prefix):-len(tail)]
if fl:
fl = "[" + fl + "]"
return fl + tail
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Evaluate candidates')
parser.add_argument('-w', action='store', dest="word2vec_file", required=True)
parser.add_argument('-p', action='store', dest='prototype_file', required=True)
parser.add_argument('-o', action="store", dest='output_file', required=True)
arguments = parser.parse_args(sys.argv[1:])
word2vec_model = load_word2vecmodel(arguments.word2vec_file)
prototype_set = load_prototype_dump(arguments.prototype_file)
outfile = codecs.open(arguments.output_file, "w", "utf-8")
for prefix in prototype_set:
for prototype, evidence_set in prototype_set[prefix]:
outfile.write(prefix + "\t" + get_word_representation(prefix, prototype[0], prototype[1], word2vec_model)
+ "\t" + " ".join([get_word_representation(prefix, t[0], t[1], word2vec_model)for t in evidence_set]) + "\n")
outfile.close()
``` |
{
"source": "jodal/biip",
"score": 2
} |
#### File: biip/gs1/_element_strings.py
```python
import calendar
import datetime
import re
from dataclasses import dataclass
from decimal import Decimal
from typing import Iterable, List, Optional
from biip import ParseError
from biip.gln import Gln
from biip.gs1 import DEFAULT_SEPARATOR_CHARS, GS1ApplicationIdentifier
from biip.gtin import Gtin, RcnRegion
from biip.sscc import Sscc
try:
import moneyed
except ImportError: # pragma: no cover
moneyed = None # type: ignore
@dataclass
class GS1ElementString:
"""GS1 Element String.
An Element String consists of a GS1 Application Identifier (AI) and its data field.
A single barcode can contain multiple Element Strings. Together these are
called a "message."
Example:
>>> from biip.gs1 import GS1ElementString
>>> element_string = GS1ElementString.extract("0107032069804988")
>>> element_string
GS1ElementString(ai=GS1ApplicationIdentifier(ai='01',
description='Global Trade Item Number (GTIN)', data_title='GTIN',
fnc1_required=False, format='N2+N14'), value='07032069804988',
pattern_groups=['07032069804988'], gln=None,
gtin=Gtin(value='07032069804988', format=GtinFormat.GTIN_13,
prefix=GS1Prefix(value='703', usage='GS1 Norway'),
payload='703206980498', check_digit=8, packaging_level=None), sscc=None,
date=None, decimal=None, money=None)
>>> element_string.as_hri()
'(01)07032069804988'
"""
#: The element's Application Identifier (AI).
ai: GS1ApplicationIdentifier
#: Raw data field of the Element String. Does not include the AI.
value: str
#: List of pattern groups extracted from the Element String.
pattern_groups: List[str]
#: A GLN created from the element string, if the AI represents a GLN.
gln: Optional[Gln] = None
#: A GTIN created from the element string, if the AI represents a GTIN.
gtin: Optional[Gtin] = None
#: An SSCC created from the element string, if the AI represents a SSCC.
sscc: Optional[Sscc] = None
#: A date created from the element string, if the AI represents a date.
date: Optional[datetime.date] = None
#: A decimal value created from the element string, if the AI represents a number.
decimal: Optional[Decimal] = None
#: A Money value created from the element string, if the AI represents a
#: currency and an amount. Only set if py-moneyed is installed.
money: Optional["moneyed.Money"] = None
@classmethod
def extract(
cls,
value: str,
*,
rcn_region: Optional[RcnRegion] = None,
separator_chars: Iterable[str] = DEFAULT_SEPARATOR_CHARS,
) -> "GS1ElementString":
"""Extract the first GS1 Element String from the given value.
Args:
value: The string to extract an Element String from. May contain
more than one Element String.
rcn_region: The geographical region whose rules should be used to
interpret Restricted Circulation Numbers (RCN).
Needed to extract e.g. variable weight/price from GTIN.
separator_chars: Characters used in place of the FNC1 symbol.
Defaults to `<GS>` (ASCII value 29).
If variable-length fields are not terminated with a separator
character, the parser might greedily consume later fields.
Returns:
A data class with the Element String's parts and data extracted from it.
Raises:
ValueError: If the ``separator_char`` isn't exactly 1 character long.
ParseError: If the parsing fails.
"""
if any(len(char) != 1 for char in separator_chars):
raise ValueError(
"All separator characters must be exactly 1 character long, "
f"got {list(separator_chars)!r}."
)
ai = GS1ApplicationIdentifier.extract(value)
for separator_char in separator_chars:
value = value.split(separator_char, maxsplit=1)[0]
pattern = ai.pattern[:-1] if ai.pattern.endswith("$") else ai.pattern
matches = re.match(pattern, value)
if not matches:
raise ParseError(
f"Failed to match {value!r} with GS1 AI {ai} pattern '{ai.pattern}'."
)
pattern_groups = list(matches.groups())
value = "".join(pattern_groups)
element = cls(ai=ai, value=value, pattern_groups=pattern_groups)
element._set_gln()
element._set_gtin(rcn_region=rcn_region)
element._set_sscc()
element._set_date()
element._set_decimal()
return element
def _set_gln(self) -> None:
if self.ai.ai[:2] != "41":
return
self.gln = Gln.parse(self.value)
def _set_gtin(self, *, rcn_region: Optional[RcnRegion] = None) -> None:
if self.ai.ai not in ("01", "02"):
return
self.gtin = Gtin.parse(self.value, rcn_region=rcn_region)
def _set_sscc(self) -> None:
if self.ai.ai != "00":
return
self.sscc = Sscc.parse(self.value)
def _set_date(self) -> None:
if self.ai.ai not in ("11", "12", "13", "15", "16", "17"):
return
try:
self.date = _parse_date(self.value)
except ValueError:
raise ParseError(
f"Failed to parse GS1 AI {self.ai} date from {self.value!r}."
)
def _set_decimal(self) -> None:
variable_measure = self.ai.ai[:2] in (
"31",
"32",
"33",
"34",
"35",
"36",
)
amount_payable = self.ai.ai[:3] in ("390", "392")
amount_payable_with_currency = self.ai.ai[:3] in ("391", "393")
percentage = self.ai.ai[:3] in ("394",)
if (
variable_measure
or amount_payable
or amount_payable_with_currency
or percentage
):
# See GS1 General Specifications, chapter 3.6 for details.
# Only group for variable_measure, amount_payable, and percentage.
# Second and last group for amount_payable_with_currency.
value = self.pattern_groups[-1]
num_decimals = int(self.ai.ai[3])
num_units = len(value) - num_decimals
units = value[:num_units]
decimals = value[num_units:]
self.decimal = Decimal(f"{units}.{decimals}")
if amount_payable_with_currency and moneyed is not None:
currency = moneyed.get_currency(iso=self.pattern_groups[0])
self.money = moneyed.Money(amount=self.decimal, currency=currency)
def __len__(self) -> int:
"""Get the length of the element string."""
return len(self.ai) + len(self.value)
def as_hri(self) -> str:
"""Render as a human readable interpretation (HRI).
The HRI is often printed directly below the barcode.
Returns:
A human-readable string where the AI is wrapped in parenthesis.
"""
return f"{self.ai}{self.value}"
def _parse_date(value: str) -> datetime.date:
year, month, day = int(value[0:2]), int(value[2:4]), int(value[4:6])
year += _get_century(year)
if day == 0:
day = _get_last_day_of_month(year, month)
return datetime.date(year, month, day)
def _get_century(two_digit_year: int) -> int:
"""Get century from two-digit year.
The two-digit year refers to a year in the range between 49 years past
and 50 years into the future.
Args:
two_digit_year: A two-digit year, e.g. without century specified.
Returns:
The century the year is in.
References:
GS1 General Specifications, section 7.12
"""
current_year = datetime.date.today().year
current_century = current_year - current_year % 100
two_digit_current_year = current_year - current_century
if 51 <= two_digit_year - two_digit_current_year <= 99:
return current_century - 100 # Previous century
elif -99 <= two_digit_year - two_digit_current_year <= -50:
# Next century
# Skipping coverage as this code won't run until year 2051
return current_century + 100 # pragma: no cover
else:
return current_century # Current century
def _get_last_day_of_month(year: int, month: int) -> int:
"""Get the last day of the given month."""
return calendar.monthrange(year, month)[1]
```
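The module-private date helpers above implement GS1's sliding-century rule and the "day 00 means last day of the month" convention. A small sketch of the expected behaviour (century resolution depends on today's date, so the concrete results assume the code runs in the 2020s):
```python
import datetime

from biip.gs1._element_strings import _get_century, _parse_date  # module-private helpers

# YY=25, MM=12, DD=00: day 00 is mapped to the last day of the month.
assert _parse_date("251200") == datetime.date(2025, 12, 31)

# A two-digit year more than 50 years "ahead" of today is read as the
# previous century, so 99 resolves to 1999 when run in the 2020s.
assert _get_century(99) == 1900
```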
#### File: biip/gtin/_gtin.py
```python
from dataclasses import dataclass
from typing import Optional, Type, Union
from biip import EncodeError, ParseError
from biip.gs1 import GS1Prefix
from biip.gs1.checksums import numeric_check_digit
from biip.gtin import GtinFormat, RcnRegion
@dataclass
class Gtin:
"""Data class containing a GTIN."""
#: Raw unprocessed value.
#:
#: May include leading zeros.
value: str
#: GTIN format, either GTIN-8, GTIN-12, GTIN-13, or GTIN-14.
#:
#: Classification is done after stripping leading zeros.
format: GtinFormat
#: The GS1 prefix, indicating what GS1 country organization that assigned
#: code range.
prefix: Optional[GS1Prefix]
#: The actual payload, including packaging level if any, company prefix,
#: and item reference. Excludes the check digit.
payload: str
#: Check digit used to check if the GTIN as a whole is valid.
check_digit: int
#: Packaging level is the first digit in GTIN-14 codes.
#:
#: This digit is used for wholesale shipments, e.g. the GTIN-14 product
#: identifier in GS1-128 barcodes, but not in the GTIN-13 barcodes used for
#: retail products.
packaging_level: Optional[int] = None
@classmethod
def parse(cls, value: str, *, rcn_region: Optional[RcnRegion] = None) -> "Gtin":
"""Parse the given value into a :class:`Gtin` object.
Both GTIN-8, GTIN-12, GTIN-13, and GTIN-14 are supported.
Args:
value: The value to parse.
rcn_region: The geographical region whose rules should be used to
interpret Restricted Circulation Numbers (RCN).
Needed to extract e.g. variable weight/price from GTIN.
Returns:
GTIN data structure with the successfully extracted data.
The checksum is guaranteed to be valid if a GTIN object is returned.
Raises:
ParseError: If the parsing fails.
"""
from biip.gtin import Rcn
value = value.strip()
if len(value) not in (8, 12, 13, 14):
raise ParseError(
f"Failed to parse {value!r} as GTIN: "
f"Expected 8, 12, 13, or 14 digits, got {len(value)}."
)
if not value.isnumeric():
raise ParseError(
f"Failed to parse {value!r} as GTIN: Expected a numerical value."
)
stripped_value = _strip_leading_zeros(value)
assert len(stripped_value) in (8, 12, 13, 14)
num_significant_digits = len(stripped_value)
gtin_format = GtinFormat(num_significant_digits)
payload = stripped_value[:-1]
check_digit = int(stripped_value[-1])
packaging_level: Optional[int] = None
if gtin_format == GtinFormat.GTIN_14:
packaging_level = int(stripped_value[0])
value_without_packaging_level = stripped_value[1:]
prefix = GS1Prefix.extract(value_without_packaging_level)
elif gtin_format == GtinFormat.GTIN_12:
# Add a zero to convert U.P.C. Company Prefix to GS1 Company Prefix
prefix = GS1Prefix.extract(stripped_value.zfill(13))
elif gtin_format == GtinFormat.GTIN_8:
prefix = GS1Prefix.extract(stripped_value.zfill(12))
else:
prefix = GS1Prefix.extract(stripped_value)
calculated_check_digit = numeric_check_digit(payload)
if check_digit != calculated_check_digit:
raise ParseError(
f"Invalid GTIN check digit for {value!r}: "
f"Expected {calculated_check_digit!r}, got {check_digit!r}."
)
gtin_type: Type[Union[Gtin, Rcn]]
if (
gtin_format <= GtinFormat.GTIN_13
and prefix is not None
and "Restricted Circulation Number" in prefix.usage
):
gtin_type = Rcn
else:
gtin_type = Gtin
gtin = gtin_type(
value=value,
format=gtin_format,
prefix=prefix,
payload=payload,
check_digit=check_digit,
packaging_level=packaging_level,
)
if isinstance(gtin, Rcn) and rcn_region is not None:
gtin._parse_with_regional_rules(rcn_region)
return gtin
def as_gtin_8(self) -> str:
"""Format as a GTIN-8."""
return self._as_format(GtinFormat.GTIN_8)
def as_gtin_12(self) -> str:
"""Format as a GTIN-12."""
return self._as_format(GtinFormat.GTIN_12)
def as_gtin_13(self) -> str:
"""Format as a GTIN-13."""
return self._as_format(GtinFormat.GTIN_13)
def as_gtin_14(self) -> str:
"""Format as a GTIN-14."""
return self._as_format(GtinFormat.GTIN_14)
def _as_format(self, gtin_format: GtinFormat) -> str:
if self.format.length > gtin_format.length:
raise EncodeError(f"Failed encoding {self.value!r} as {gtin_format!s}.")
return f"{self.payload}{self.check_digit}".zfill(gtin_format.length)
def without_variable_measure(self) -> "Gtin":
"""Create a new GTIN where the variable measure is zeroed out.
This method is a no-op for proper GTINs. For RCNs, see the method on the
`Rcn` subclass.
Returns:
A GTIN instance with zeros in the variable measure places.
Raises:
EncodeError: If the rules for variable measures in the region are unknown.
"""
return self
def _strip_leading_zeros(value: str) -> str:
if len(value) in (12, 13, 14) and len(value.lstrip("0")) in (9, 10, 11, 12):
# Keep up to three leading zeros in GTIN-12
num_zeros_before_gtin_12 = len(value) - 12
return value[num_zeros_before_gtin_12:]
if len(value) >= 8 and len(value.lstrip("0")) <= 8:
# Keep all leading zeros in GTIN-8
num_zeros_before_gtin_8 = len(value) - 8
return value[num_zeros_before_gtin_8:]
return value.lstrip("0")
```
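A short sketch of the parse/format round-trip implemented above, using the GS1 example number 96385074; `as_gtin_14()` simply zero-pads the payload plus check digit, while shortening to a smaller format raises `EncodeError`:
```python
from biip.gtin import Gtin, GtinFormat

gtin = Gtin.parse("96385074")
assert gtin.format == GtinFormat.GTIN_8
assert gtin.as_gtin_14() == "00000096385074"

# Downscaling is rejected: a GTIN-13 cannot be expressed as a GTIN-8.
# Gtin.parse("5901234123457").as_gtin_8()  # raises EncodeError
```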
#### File: src/biip/_parser.py
```python
from dataclasses import dataclass
from typing import Callable, Iterable, List, Optional, Tuple
from biip import ParseError
from biip.gs1 import DEFAULT_SEPARATOR_CHARS, GS1Message, GS1Symbology
from biip.gtin import Gtin, GtinFormat, RcnRegion
from biip.sscc import Sscc
from biip.symbology import SymbologyIdentifier
from biip.upc import Upc
ParseQueue = List[Tuple[Callable, str]]
def parse(
value: str,
*,
rcn_region: Optional[RcnRegion] = None,
separator_chars: Iterable[str] = DEFAULT_SEPARATOR_CHARS,
) -> "ParseResult":
"""Identify data format and parse data.
The current strategy is:
1. If Symbology Identifier prefix indicates a GTIN or GS1 Message,
attempt to parse and validate as that.
2. Else, if not Symbology Identifier, attempt to parse with all parsers.
Args:
value: The data to classify and parse.
rcn_region: The geographical region whose rules should be used to
interpret Restricted Circulation Numbers (RCN).
Needed to extract e.g. variable weight/price from GTIN.
separator_chars: Characters used in place of the FNC1 symbol.
Defaults to `<GS>` (ASCII value 29).
If variable-length fields in the middle of the message are
not terminated with a separator character, the parser might
greedily consume the rest of the message.
Returns:
A data class depending upon what type of data is parsed.
Raises:
ParseError: If parsing of the data fails.
"""
value = value.strip()
config = ParseConfig(
rcn_region=rcn_region,
separator_chars=separator_chars,
)
result = ParseResult(value=value)
# Extract Symbology Identifier
if value.startswith("]"):
result.symbology_identifier = SymbologyIdentifier.extract(value)
value = value[len(result.symbology_identifier) :]
# Select parsers
queue: ParseQueue = []
if result.symbology_identifier is not None:
if result.symbology_identifier.gs1_symbology in GS1Symbology.with_gtin():
queue.append((_parse_gtin, value))
if (
result.symbology_identifier.gs1_symbology
in GS1Symbology.with_ai_element_strings()
):
queue.append((_parse_gs1_message, value))
if not queue:
# If we're not able to select a subset based on Symbology Identifiers,
# run all parsers on the full value.
queue = [
(_parse_gs1_message, value),
(_parse_gtin, value),
(_parse_sscc, value),
(_parse_upc, value),
]
# Work through queue of parsers and the values to run them on. Any parser may
# add additional work to the queue. Only the first result for a field is kept.
while queue:
(parse_func, val) = queue.pop(0)
parse_func(val, config=config, queue=queue, result=result)
if result._has_result():
return result
else:
raise ParseError(f"Failed to parse {value!r}:\n{result._get_errors_list()}")
@dataclass
class ParseConfig:
"""Configuration options for parsers."""
rcn_region: Optional[RcnRegion]
separator_chars: Iterable[str]
@dataclass
class ParseResult:
"""Results from a successful barcode parsing."""
#: The raw value. Only stripped of surrounding whitespace.
value: str
#: The Symbology Identifier, if any.
symbology_identifier: Optional[SymbologyIdentifier] = None
#: The extracted GTIN, if any.
#: Is also set if a GS1 Message containing a GTIN was successfully parsed.
gtin: Optional[Gtin] = None
#: The GTIN parse error, if parsing as a GTIN was attempted and failed.
gtin_error: Optional[str] = None
#: The extracted UPC, if any.
upc: Optional[Upc] = None
#: The UPC parse error, if parsing as an UPC was attempted and failed.
upc_error: Optional[str] = None
#: The extracted SSCC, if any.
#: Is also set if a GS1 Message containing an SSCC was successfully parsed.
sscc: Optional[Sscc] = None
#: The SSCC parse error, if parsing as an SSCC was attempted and failed.
sscc_error: Optional[str] = None
#: The extracted GS1 Message, if any.
gs1_message: Optional[GS1Message] = None
#: The GS1 Message parse error,
#: if parsing as a GS1 Message was attempted and failed.
gs1_message_error: Optional[str] = None
def _has_result(self) -> bool:
return any([self.gtin, self.upc, self.sscc, self.gs1_message])
def _get_errors_list(self) -> str:
return "\n".join(
f"- {parser_name}: {error}"
for parser_name, error in [
("GTIN", self.gtin_error),
("UPC", self.upc_error),
("SSCC", self.sscc_error),
("GS1", self.gs1_message_error),
]
if error is not None
)
def _parse_gtin(
value: str,
*,
config: ParseConfig,
queue: ParseQueue,
result: ParseResult,
) -> None:
if result.gtin is not None:
return # pragma: no cover
try:
result.gtin = Gtin.parse(value, rcn_region=config.rcn_region)
result.gtin_error = None
except ParseError as exc:
result.gtin = None
result.gtin_error = str(exc)
else:
# If GTIN is a GTIN-12, set UPC on the top-level result.
if result.gtin.format == GtinFormat.GTIN_12:
queue.append((_parse_upc, result.gtin.as_gtin_12()))
def _parse_upc(
value: str,
*,
config: ParseConfig,
queue: ParseQueue,
result: ParseResult,
) -> None:
if result.upc is not None:
return # pragma: no cover
try:
result.upc = Upc.parse(value)
result.upc_error = None
except ParseError as exc:
result.upc = None
result.upc_error = str(exc)
else:
# If UPC, expand and set GTIN on the top-level result.
queue.append((_parse_gtin, result.upc.as_upc_a()))
def _parse_sscc(
value: str,
*,
config: ParseConfig,
queue: ParseQueue,
result: ParseResult,
) -> None:
if result.sscc is not None:
return # pragma: no cover
try:
result.sscc = Sscc.parse(value)
result.sscc_error = None
except ParseError as exc:
result.sscc = None
result.sscc_error = str(exc)
def _parse_gs1_message(
value: str,
*,
config: ParseConfig,
queue: ParseQueue,
result: ParseResult,
) -> None:
if result.gs1_message is not None:
return # pragma: no cover
try:
result.gs1_message = GS1Message.parse(
value,
rcn_region=config.rcn_region,
separator_chars=config.separator_chars,
)
result.gs1_message_error = None
except ParseError as exc:
result.gs1_message = None
result.gs1_message_error = str(exc)
else:
# If the GS1 Message contains an SSCC, set SSCC on the top-level result.
ai_00 = result.gs1_message.get(ai="00")
if ai_00 is not None and ai_00.sscc is not None:
queue.append((_parse_sscc, ai_00.sscc.value))
# If the GS1 Message contains an GTIN, set GTIN on the top-level result.
ai_01 = result.gs1_message.get(ai="01")
if ai_01 is not None and ai_01.gtin is not None:
queue.append((_parse_gtin, ai_01.gtin.value))
```
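A quick sketch of the top-level flow above: with no Symbology Identifier, all four parsers are queued, and each one either fills in its field or records an error on the `ParseResult`. This assumes `parse` is re-exported from the package root, as the `_parser` module name suggests:
```python
from biip import parse

result = parse("96385074")            # a valid GTIN-8
assert result.gtin is not None        # the GTIN parser succeeded
assert result.sscc is None            # an 8-digit value is not an SSCC...
assert result.sscc_error is not None  # ...so its error is recorded instead
```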
#### File: tests/gs1/test_prefixes.py
```python
import pytest
from biip import ParseError
from biip.gs1 import GS1Prefix
@pytest.mark.parametrize("bad_value", ["abcdef", "1a2b3c"])
def test_invalid_gs1_prefix(bad_value: str) -> None:
with pytest.raises(ParseError) as exc_info:
GS1Prefix.extract(bad_value)
assert str(exc_info.value) == f"Failed to get GS1 Prefix from {bad_value!r}."
@pytest.mark.parametrize(
"value, expected",
[
(
"0000001999",
GS1Prefix(value="0000001", usage="Unused to avoid collision with GTIN-8"),
),
("060999", GS1Prefix(value="060", usage="GS1 US")),
("139999", GS1Prefix(value="139", usage="GS1 US")),
("6712670000276", None), # Unassigned prefix
("701999", GS1Prefix(value="701", usage="GS1 Norway")),
("978-1-492-05374-3", GS1Prefix(value="978", usage="Bookland (ISBN)")),
],
)
def test_gs1_prefix(value: str, expected: GS1Prefix) -> None:
assert GS1Prefix.extract(value) == expected
def test_is_hashable() -> None:
prefix = GS1Prefix.extract("978")
assert hash(prefix) is not None
```
#### File: tests/gtin/test_parse.py
```python
import pytest
from biip import ParseError
from biip.gs1 import GS1Prefix
from biip.gtin import Gtin, GtinFormat
def test_parse_value_with_invalid_length() -> None:
with pytest.raises(ParseError) as exc_info:
Gtin.parse("123")
assert (
str(exc_info.value)
== "Failed to parse '123' as GTIN: Expected 8, 12, 13, or 14 digits, got 3."
)
def test_parse_nonnumeric_value() -> None:
with pytest.raises(ParseError) as exc_info:
Gtin.parse("0123456789abc")
assert (
str(exc_info.value)
== "Failed to parse '0123456789abc' as GTIN: Expected a numerical value."
)
def test_parse_gtin_13_with_invalid_check_digit() -> None:
with pytest.raises(ParseError) as exc_info:
Gtin.parse("5901234123458")
assert (
str(exc_info.value)
== "Invalid GTIN check digit for '5901234123458': Expected 7, got 8."
)
def test_parse_strips_surrounding_whitespace() -> None:
gtin = Gtin.parse(" \t 5901234123457 \n ")
assert gtin.value == "5901234123457"
@pytest.mark.parametrize(
"value",
[
# GTIN-8
"96385074",
# 0-padded to GTIN-12
"000096385074",
# 0-padded to GTIN-13
"0000096385074",
# 0-padded to GTIN-14
"00000096385074",
],
)
def test_parse_gtin_8(value: str) -> None:
assert Gtin.parse(value) == Gtin(
value=value,
format=GtinFormat.GTIN_8,
prefix=GS1Prefix(value="00009", usage="GS1 US"),
payload="9638507",
check_digit=4,
)
@pytest.mark.parametrize(
"value",
[
"00000017",
"00000123",
"00001236",
"00012348",
"00123457",
"01234565",
"07038013",
],
)
def test_parse_gtin_8_with_leading_zeros(value: str) -> None:
gtin = Gtin.parse(value)
assert gtin.value == value
assert gtin.format == GtinFormat.GTIN_8
@pytest.mark.parametrize(
"value",
[
# GTIN-12
"614141000036",
# 0-padded to GTIN-13
"0614141000036",
# 0-padded to GTIN-14
"00614141000036",
],
)
def test_parse_gtin_12_without_leading_zero(value: str) -> None:
assert Gtin.parse(value) == Gtin(
value=value,
format=GtinFormat.GTIN_12,
prefix=GS1Prefix(value="061", usage="GS1 US"),
payload="61414100003",
check_digit=6,
)
@pytest.mark.parametrize(
"value",
[
# GTIN-12
"036000291452",
# 0-padded to GTIN-13
"0036000291452",
# 0-padded to GTIN-14
"00036000291452",
],
)
def test_parse_gtin_12_with_1_leading_zero(value: str) -> None:
assert Gtin.parse(value) == Gtin(
value=value,
format=GtinFormat.GTIN_12,
prefix=GS1Prefix(value="003", usage="GS1 US"),
payload="03600029145",
check_digit=2,
)
@pytest.mark.parametrize(
"value",
[
# GTIN-12
"006000291455",
# 0-padded to GTIN-13
"00006000291455",
# 0-padded to GTIN-14
"00006000291455",
],
)
def test_parse_gtin_12_with_2_leading_zero(value: str) -> None:
assert Gtin.parse(value) == Gtin(
value=value,
format=GtinFormat.GTIN_12,
prefix=GS1Prefix(value="0006", usage="GS1 US"),
payload="00600029145",
check_digit=5,
)
@pytest.mark.parametrize(
"value",
[
# GTIN-12
"000902914511",
# 0-padded to GTIN-13
"0000902914511",
# 0-padded to GTIN-14
"00000902914511",
],
)
def test_parse_gtin_12_with_3_leading_zero(value: str) -> None:
assert Gtin.parse(value) == Gtin(
value=value,
format=GtinFormat.GTIN_12,
prefix=GS1Prefix(value="00009", usage="GS1 US"),
payload="00090291451",
check_digit=1,
)
@pytest.mark.parametrize(
"value",
[
# GTIN-13
"5901234123457",
# 0-padded to GTIN-14
"05901234123457",
],
)
def test_parse_gtin_13(value: str) -> None:
assert Gtin.parse(value) == Gtin(
value=value,
format=GtinFormat.GTIN_13,
prefix=GS1Prefix(value="590", usage="GS1 Poland"),
payload="590123412345",
check_digit=7,
)
def test_parse_gtin_14() -> None:
assert Gtin.parse("98765432109213") == Gtin(
value="98765432109213",
format=GtinFormat.GTIN_14,
prefix=GS1Prefix(value="876", usage="GS1 Netherlands"),
payload="9876543210921",
check_digit=3,
packaging_level=9,
)
def test_parse_gtin_with_unknown_gs1_prefix() -> None:
assert Gtin.parse("6712670000276") == Gtin(
value="6712670000276",
format=GtinFormat.GTIN_13,
prefix=None,
payload="671267000027",
check_digit=6,
)
```
#### File: tests/gtin/test_without_variable_measure.py
```python
from typing import List
import pytest
from biip import EncodeError
from biip.gs1.checksums import numeric_check_digit
from biip.gtin import Gtin, Rcn, RcnRegion
@pytest.mark.parametrize(
"rcn_region, value, expected",
[
(RcnRegion.ESTONIA, "2311111112345", "2311111100007"),
(RcnRegion.FINLAND, "2311111112345", "2311111100007"),
(RcnRegion.GREAT_BRITAIN, "2011122912346", "2011122000005"),
(RcnRegion.LATVIA, "2311111112345", "2311111100007"),
(RcnRegion.LITHUANIA, "2311111112345", "2311111100007"),
(RcnRegion.NORWAY, "2302148210869", "2302148200006"),
(RcnRegion.SWEDEN, "2088060112343", "2088060100005"),
],
)
def test_without_variable_measure_strips_variable_parts(
rcn_region: RcnRegion, value: str, expected: str
) -> None:
original_rcn = Gtin.parse(value, rcn_region=rcn_region)
assert isinstance(original_rcn, Rcn)
stripped_rcn = original_rcn.without_variable_measure()
assert isinstance(stripped_rcn, Rcn)
assert stripped_rcn.value == expected
assert stripped_rcn.region == original_rcn.region
@pytest.mark.parametrize(
"rcn_region, nonvariable_prefixes",
[
(
RcnRegion.ESTONIA,
["02", "20", "21", "22", "26", "27", "28", "29"],
),
(
RcnRegion.FINLAND,
["02", "20", "21", "22", "26", "27", "28", "29"],
),
(
RcnRegion.GREAT_BRITAIN,
["21", "22", "23", "24", "25", "26", "27", "28", "29"],
),
(
RcnRegion.LATVIA,
["02", "20", "21", "22", "26", "27", "28", "29"],
),
(
RcnRegion.LITHUANIA,
["02", "20", "21", "22", "26", "27", "28", "29"],
),
(
RcnRegion.NORWAY,
["02", "26", "27", "28", "29"],
),
(
RcnRegion.SWEDEN,
["02", "26", "27", "28", "29"],
),
],
)
def test_without_variable_measure_keeps_nonvariable_rcn_unchanged(
rcn_region: RcnRegion, nonvariable_prefixes: List[str]
) -> None:
for prefix in nonvariable_prefixes:
payload = f"{prefix}1111111111"
value = f"{payload}{numeric_check_digit(payload)}"
original_rcn = Gtin.parse(value, rcn_region=rcn_region)
assert isinstance(original_rcn, Rcn)
stripped_rcn = original_rcn.without_variable_measure()
assert isinstance(stripped_rcn, Rcn)
assert stripped_rcn.value == original_rcn.value
assert stripped_rcn.region == original_rcn.region
@pytest.mark.parametrize(
"rcn_region, value",
[
(RcnRegion.NORWAY, "00012348"),
(RcnRegion.NORWAY, "0412345678903"),
],
)
def test_without_variable_measure_keeps_company_rcn_unchanged(
rcn_region: RcnRegion, value: str
) -> None:
original_rcn = Gtin.parse(value, rcn_region=rcn_region)
assert isinstance(original_rcn, Rcn)
stripped_rcn = original_rcn.without_variable_measure()
assert isinstance(stripped_rcn, Rcn)
assert stripped_rcn.value == original_rcn.value
assert stripped_rcn.region == original_rcn.region
@pytest.mark.parametrize(
"value",
[
"96385074", # GTIN-8
"614141000036", # GTIN-12
"5901234123457", # GTIN-13
"98765432109213", # GTIN-14
],
)
def test_without_variable_measure_keeps_gtin_unchanged(value: str) -> None:
original_gtin = Gtin.parse(value)
assert isinstance(original_gtin, Gtin)
assert not isinstance(original_gtin, Rcn)
stripped_gtin = original_gtin.without_variable_measure()
assert isinstance(stripped_gtin, Gtin)
assert not isinstance(stripped_gtin, Rcn)
assert stripped_gtin.value == original_gtin.value
def test_without_variable_measure_fails_if_rules_are_unknown() -> None:
rcn = Gtin.parse("2302148210869", rcn_region=None)
assert isinstance(rcn, Rcn)
with pytest.raises(EncodeError) as exc_info:
rcn.without_variable_measure()
assert str(exc_info.value) == (
"Cannot zero out the variable measure part of '2302148210869' "
"as the RCN rules for the geographical region None are unknown."
)
```
#### File: biip/tests/test_gln.py
```python
import pytest
from biip import ParseError
from biip.gln import Gln
from biip.gs1 import GS1Prefix
def test_parse() -> None:
gln = Gln.parse("1234567890128")
assert gln == Gln(
value="1234567890128",
prefix=GS1Prefix(value="123", usage="GS1 US"),
payload="123456789012",
check_digit=8,
)
def test_parse_strips_surrounding_whitespace() -> None:
gln = Gln.parse(" \t 1234567890128 \n ")
assert gln.value == "1234567890128"
def test_parse_value_with_invalid_length() -> None:
with pytest.raises(ParseError) as exc_info:
Gln.parse("123")
assert (
str(exc_info.value)
== "Failed to parse '123' as GLN: Expected 13 digits, got 3."
)
def test_parse_nonnumeric_value() -> None:
with pytest.raises(ParseError) as exc_info:
Gln.parse("123456789o128")
assert (
str(exc_info.value)
== "Failed to parse '123456789o128' as GLN: Expected a numerical value."
)
def test_parse_with_invalid_check_digit() -> None:
with pytest.raises(ParseError) as exc_info:
Gln.parse("1234567890127")
assert (
str(exc_info.value)
== "Invalid GLN check digit for '1234567890127': Expected 8, got 7."
)
def test_as_gln() -> None:
gln = Gln.parse(" \t 1234567890128 \n ")
assert gln.as_gln() == "1234567890128"
```
#### File: biip/tests/test_sscc.py
```python
from typing import Optional
import pytest
from biip import ParseError
from biip.gs1 import GS1Prefix
from biip.sscc import Sscc
def test_parse() -> None:
sscc = Sscc.parse("376130321109103420")
assert sscc == Sscc(
value="376130321109103420",
prefix=GS1Prefix(value="761", usage="GS1 Schweiz, Suisse, Svizzera"),
extension_digit=3,
payload="37613032110910342",
check_digit=0,
)
def test_parse_strips_surrounding_whitespace() -> None:
sscc = Sscc.parse(" \t 376130321109103420 \n ")
assert sscc.value == "376130321109103420"
def test_parse_value_with_invalid_length() -> None:
with pytest.raises(ParseError) as exc_info:
Sscc.parse("123")
assert (
str(exc_info.value)
== "Failed to parse '123' as SSCC: Expected 18 digits, got 3."
)
def test_parse_nonnumeric_value() -> None:
with pytest.raises(ParseError) as exc_info:
Sscc.parse("012345678901234abc")
assert (
str(exc_info.value)
== "Failed to parse '012345678901234abc' as SSCC: Expected a numerical value."
)
def test_parse_with_invalid_check_digit() -> None:
with pytest.raises(ParseError) as exc_info:
Sscc.parse("376130321109103421")
assert (
str(exc_info.value)
== "Invalid SSCC check digit for '376130321109103421': Expected 0, got 1."
)
@pytest.mark.parametrize(
"prefix_length, expected",
[
(None, "3 761 3032110910342 0"),
(7, "3 761 3032 110910342 0"),
(8, "3 761 30321 10910342 0"),
(9, "3 761 303211 0910342 0"),
(10, "3 761 3032110 910342 0"),
],
)
def test_as_hri(prefix_length: Optional[int], expected: str) -> None:
sscc = Sscc.parse("376130321109103420")
assert sscc.as_hri(company_prefix_length=prefix_length) == expected
def test_as_hri_with_unknown_gs1_prefix() -> None:
# GS1 prefix 671 is currently unassigned.
sscc = Sscc.parse("367130321109103428")
assert sscc.as_hri() == "3 6713032110910342 8"
def test_as_hri_with_too_low_company_prefix_length() -> None:
sscc = Sscc.parse("376130321109103420")
with pytest.raises(ValueError) as exc_info:
sscc.as_hri(company_prefix_length=6)
assert (
str(exc_info.value) == "Expected company prefix length between 7 and 10, got 6."
)
def test_as_hri_with_too_high_company_prefix_length() -> None:
sscc = Sscc.parse("376130321109103420")
with pytest.raises(ValueError) as exc_info:
sscc.as_hri(company_prefix_length=11)
assert (
str(exc_info.value)
== "Expected company prefix length between 7 and 10, got 11."
)
``` |
{
"source": "jodal/mopidy-beets",
"score": 3
} |
#### File: mopidy_beets/browsers/__init__.py
```python
class GenericBrowserBase:
def __init__(self, ref, api):
self.ref = ref
self.api = api
def get_toplevel(self):
"""deliver the top level directories or tracks for this browser
The result is a list of ``mopidy.models.Ref`` objects.
Usually this list contains entries like "genre" or other categories.
"""
raise NotImplementedError
def get_directory(self, key):
"""deliver the corresponding sub items for a given category key
The result is a list of ``mopidy.models.Ref`` objects.
Usually this list contains tracks or albums belonging to the given
category 'key'.
"""
raise NotImplementedError
```
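A hypothetical subclass sketch showing how the two hooks are meant to be filled in. Only the base-class contract above comes from the source; the `beets:genre:` URI scheme and the `api.get_sorted_unique_genres()` / `api.get_tracks_by_genre()` helpers are invented for illustration:
```python
from mopidy.models import Ref

from mopidy_beets.browsers import GenericBrowserBase


class GenreBrowser(GenericBrowserBase):
    def get_toplevel(self):
        # One directory Ref per genre reported by the (hypothetical) API helper.
        return [
            Ref.directory(uri=f"beets:genre:{genre}", name=genre)
            for genre in self.api.get_sorted_unique_genres()
        ]

    def get_directory(self, key):
        # Track Refs for the selected genre (hypothetical API helper).
        return [
            Ref.track(uri=track.uri, name=track.name)
            for track in self.api.get_tracks_by_genre(key)
        ]
```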
#### File: mopidy-beets/mopidy_beets/__init__.py
```python
import os
import pkg_resources
from mopidy import config, ext
__version__ = pkg_resources.get_distribution("Mopidy-Beets").version
class BeetsExtension(ext.Extension):
dist_name = "Mopidy-Beets"
ext_name = "beets"
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), "ext.conf")
return config.read(conf_file)
def get_config_schema(self):
schema = super(BeetsExtension, self).get_config_schema()
schema["hostname"] = config.Hostname()
schema["port"] = config.Port()
return schema
def setup(self, registry):
from .actor import BeetsBackend
registry.add("backend", BeetsBackend)
``` |
{
"source": "jodal/mopidy-local",
"score": 2
} |
#### File: mopidy-local/mopidy_local/actor.py
```python
import logging
import pykka
from mopidy import backend
from mopidy_local import storage
from mopidy_local.library import LocalLibraryProvider
from mopidy_local.playback import LocalPlaybackProvider
logger = logging.getLogger(__name__)
class LocalBackend(pykka.ThreadingActor, backend.Backend):
uri_schemes = ["local"]
def __init__(self, config, audio):
super().__init__()
self.config = config
storage.check_dirs_and_files(config)
self.playback = LocalPlaybackProvider(audio=audio, backend=self)
self.library = LocalLibraryProvider(backend=self, config=config)
```
#### File: mopidy-local/mopidy_local/translator.py
```python
from __future__ import annotations
import logging
import os
import urllib
from pathlib import Path
from typing import Union
logger = logging.getLogger(__name__)
def local_uri_to_file_uri(local_uri: str, media_dir: Path) -> str:
"""Convert local track or directory URI to file URI."""
path = local_uri_to_path(local_uri, media_dir)
return path.as_uri()
def local_uri_to_path(local_uri: str, media_dir: Path) -> Path:
"""Convert local track or directory URI to absolute path."""
if not local_uri.startswith(("local:directory:", "local:track:")):
raise ValueError("Invalid URI.")
uri_path = urllib.parse.urlsplit(local_uri.split(":", 2)[2]).path
file_bytes = urllib.parse.unquote_to_bytes(uri_path)
file_path = Path(os.fsdecode(file_bytes))
return media_dir / file_path
def path_to_file_uri(path: Union[str, bytes, Path]) -> str:
"""Convert absolute path to file URI."""
ppath = Path(os.fsdecode(path))
assert ppath.is_absolute()
return ppath.as_uri()
def path_to_local_track_uri(path: Union[str, bytes, Path], media_dir: Path) -> str:
"""Convert path to local track URI."""
ppath = Path(os.fsdecode(path))
if ppath.is_absolute():
ppath = ppath.relative_to(media_dir)
quoted_path = urllib.parse.quote(bytes(ppath))
return f"local:track:{quoted_path}"
```
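A small sketch of the URI/path round-trip implemented above (POSIX-style absolute paths assumed so the quoting is predictable):
```python
from pathlib import Path

from mopidy_local.translator import local_uri_to_path, path_to_local_track_uri

media_dir = Path("/media/music")

uri = path_to_local_track_uri(media_dir / "Artist/Song.flac", media_dir)
assert uri == "local:track:Artist/Song.flac"

# And back again, resolved relative to the media directory.
assert local_uri_to_path(uri, media_dir) == media_dir / "Artist/Song.flac"
```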
#### File: mopidy-local/tests/__init__.py
```python
import pathlib
from mopidy.internal import deprecation
def path_to_data_dir(name):
path = pathlib.Path(__file__).parent / "data" / name
return path.resolve()
def generate_song(i):
return "local:track:song%s.wav" % i
def populate_tracklist(func):
def wrapper(self):
with deprecation.ignore("core.tracklist.add:tracks_arg"):
self.tl_tracks = self.core.tracklist.add(self.tracks)
return func(self)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
class IsA:
def __init__(self, klass):
self.klass = klass
def __eq__(self, rhs):
try:
return isinstance(rhs, self.klass)
except TypeError:
return type(rhs) == type(self.klass) # noqa
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __repr__(self):
return str(self.klass)
``` |
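The `IsA` helper above is a small matcher for use in test assertions; a quick sketch of how it reads in practice (import path taken from this repo's tests package):
```python
from tests import IsA

# IsA compares equal to any instance of the wrapped class.
assert 5 == IsA(int)
assert "song1.wav" == IsA(str)
assert not ("song1.wav" == IsA(int))
```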
{
"source": "jodal/mopidy-nad",
"score": 2
} |
#### File: mopidy-nad/tests/test_mixer.py
```python
import mopidy.mixer
from mopidy_nad import mixer
def test_is_a_mopidy_mixer():
assert issubclass(mixer.NadMixer, mopidy.mixer.Mixer)
# TODO Add more tests
``` |
{
"source": "jodal/mopidy-orfradio",
"score": 2
} |
#### File: mopidy-orfradio/mopidy_orfradio/playback.py
```python
import logging
from mopidy import backend
from mopidy_orfradio.library import InvalidORFUri, ORFLibraryUri, ORFUriType
from .client import ORFClient
logger = logging.getLogger(__name__)
class ORFPlaybackProvider(backend.PlaybackProvider):
def __init__(self, audio, backend, client=None):
super().__init__(audio, backend)
self.client = client or ORFClient(backend=self.backend)
def translate_uri(self, uri):
try:
library_uri = ORFLibraryUri.parse(uri)
except InvalidORFUri:
return None
if library_uri.uri_type == ORFUriType.LIVE:
return self.client.get_live_url(library_uri.shoutcast)
if library_uri.uri_type == ORFUriType.ARCHIVE_ITEM:
return self.client.get_item_url(
library_uri.station,
library_uri.shoutcast,
library_uri.day_id,
library_uri.show_id,
library_uri.item_id,
)
``` |
{
"source": "jodal/mopidy-pandora",
"score": 2
} |
#### File: mopidy-pandora/mopidy_pandora/client.py
```python
import logging
import time
import requests
from cachetools import TTLCache
from pandora.client import APIClient, BaseAPIClient
from pandora.clientbuilder import (
DEFAULT_API_HOST,
APITransport,
Encryptor,
SettingsDictBuilder,
)
logger = logging.getLogger(__name__)
class MopidySettingsDictBuilder(SettingsDictBuilder):
def build_from_settings_dict(self, settings):
enc = Encryptor(settings["DECRYPTION_KEY"], settings["ENCRYPTION_KEY"])
trans = APITransport(
enc,
settings.get("API_HOST", DEFAULT_API_HOST),
settings.get("PROXY", None),
)
quality = settings.get(
"AUDIO_QUALITY", self.client_class.MED_AUDIO_QUALITY
)
return self.client_class(
settings["CACHE_TTL"],
trans,
settings["PARTNER_USER"],
settings["PARTNER_PASSWORD"],
settings["DEVICE"],
quality,
)
class MopidyAPIClient(APIClient):
"""Pydora API Client for Mopidy-Pandora
This API client implements caching of the station list.
"""
def __init__(
self,
cache_ttl,
transport,
partner_user,
partner_password,
device,
default_audio_quality=BaseAPIClient.MED_AUDIO_QUALITY,
):
super().__init__(
transport,
partner_user,
partner_password,
device,
default_audio_quality,
)
self.station_list_cache = TTLCache(1, cache_ttl)
self.genre_stations_cache = TTLCache(1, cache_ttl)
def get_station_list(self, force_refresh=False):
station_list = []
try:
if self.station_list_cache.currsize == 0 or (
force_refresh
and next(iter(self.station_list_cache.values())).has_changed()
):
station_list = super().get_station_list()
self.station_list_cache[time.time()] = station_list
except requests.exceptions.RequestException:
logger.exception("Error retrieving Pandora station list.")
station_list = []
try:
return next(iter(self.station_list_cache.values()))
except StopIteration:
# Cache disabled
return station_list
def get_station(self, station_token):
try:
return self.get_station_list()[station_token]
except TypeError:
# Could not find station_token in cached list, try retrieving from
# Pandora server.
return super().get_station(station_token)
def get_genre_stations(self, force_refresh=False):
genre_stations = []
try:
if self.genre_stations_cache.currsize == 0 or (
force_refresh
and next(iter(self.genre_stations_cache.values())).has_changed()
):
genre_stations = super().get_genre_stations()
self.genre_stations_cache[time.time()] = genre_stations
except requests.exceptions.RequestException:
logger.exception("Error retrieving Pandora genre stations.")
return genre_stations
try:
return next(iter(self.genre_stations_cache.values()))
except StopIteration:
# Cache disabled
return genre_stations
``` |
{
"source": "jodal/pykka",
"score": 2
} |
#### File: tests/proxy/test_mocking.py
```python
from collections.abc import Callable
import pytest
@pytest.fixture
def actor_class(runtime):
class ActorForMocking(runtime.actor_class):
_a_rw_property = "a_rw_property"
@property
def a_rw_property(self):
return self._a_rw_property
@a_rw_property.setter
def a_rw_property(self, value):
self._a_rw_property = value
def a_method(self):
raise Exception("This method should be mocked")
return ActorForMocking
@pytest.fixture
def proxy(actor_class):
proxy = actor_class.start().proxy()
yield proxy
proxy.stop()
def test_actor_with_noncallable_mock_property_works(actor_class, stop_all, mocker):
mock = mocker.NonCallableMock()
mock.__get__ = mocker.Mock(return_value="mocked property value")
assert not isinstance(mock, Callable)
actor_class.a_rw_property = mock
proxy = actor_class.start().proxy()
# When using NonCallableMock to fake the property, the value still behaves
# as a property when access through the proxy.
assert proxy.a_rw_property.get() == "mocked property value"
assert mock.__get__.call_count == 1
def test_actor_with_callable_mock_property_does_not_work(actor_class, stop_all, mocker):
mock = mocker.Mock()
mock.__get__ = mocker.Mock(return_value="mocked property value")
assert isinstance(mock, Callable)
actor_class.a_rw_property = mock
proxy = actor_class.start().proxy()
# XXX Because Mock and MagicMock are callable by default, they cause the
# property to be wrapped in a `CallableProxy`. Thus, the property no
# longer behaves as a property when mocked and accessed through a proxy.
with pytest.raises(AttributeError) as exc_info:
assert proxy.a_rw_property.get()
assert "'CallableProxy' object has no attribute 'get'" in str(exc_info.value)
def test_actor_with_mocked_method_works(actor_class, stop_all, mocker):
mock = mocker.MagicMock(return_value="mocked method return")
mocker.patch.object(actor_class, "a_method", new=mock)
proxy = actor_class.start().proxy()
assert proxy.a_method().get() == "mocked method return"
assert mock.call_count == 1
``` |
{
"source": "jodal/python-netsgiro",
"score": 3
} |
#### File: python-netsgiro/netsgiro/objects.py
```python
import collections
import datetime
from decimal import Decimal
from typing import Iterable, List, Mapping, Optional, Union
import attr
from attr.validators import instance_of, optional
import netsgiro
import netsgiro.records
from netsgiro.records import Record
from netsgiro.validators import str_of_length
__all__ = [
'Transmission',
'Assignment',
'Agreement',
'PaymentRequest',
'Transaction',
'parse',
]
@attr.s
class Transmission:
"""Transmission is the top-level object.
An OCR file contains a single transmission. The transmission can contain
multiple :class:`~netsgiro.Assignment` objects of various types.
"""
#: Data transmitter's unique enumeration of the transmission. String of 7
#: digits.
number = attr.ib(validator=str_of_length(7))
#: Data transmitter's Nets ID. String of 8 digits.
data_transmitter = attr.ib(validator=str_of_length(8))
#: Data recipient's Nets ID. String of 8 digits.
data_recipient = attr.ib(validator=str_of_length(8))
#: For OCR Giro files from Nets, this is Nets' processing date.
#:
#: For AvtaleGiro payment request, the earliest due date in the
#: transmission is automatically used.
date = attr.ib(default=None, validator=optional(instance_of(datetime.date)))
#: List of assignments.
assignments = attr.ib(default=attr.Factory(list), repr=False)
@classmethod
def from_records(cls, records: List[Record]) -> 'Transmission':
"""Build a Transmission object from a list of record objects."""
if len(records) < 2:
raise ValueError(
'At least 2 records required, got {}'.format(len(records))
)
start, body, end = records[0], records[1:-1], records[-1]
assert isinstance(start, netsgiro.records.TransmissionStart)
assert isinstance(end, netsgiro.records.TransmissionEnd)
return cls(
number=start.transmission_number,
data_transmitter=start.data_transmitter,
data_recipient=start.data_recipient,
date=end.nets_date,
assignments=cls._get_assignments(body),
)
@staticmethod
def _get_assignments(records: List[Record]) -> List['Assignment']:
assignments = collections.OrderedDict()
current_assignment_number = None
for record in records:
if isinstance(record, netsgiro.records.AssignmentStart):
current_assignment_number = record.assignment_number
assignments[current_assignment_number] = []
if current_assignment_number is None:
raise ValueError(
'Expected AssignmentStart record, got {!r}'.format(record)
)
assignments[current_assignment_number].append(record)
if isinstance(record, netsgiro.records.AssignmentEnd):
current_assignment_number = None
return [Assignment.from_records(rs) for rs in assignments.values()]
def to_ocr(self) -> str:
"""Convert the transmission to an OCR string."""
lines = [record.to_ocr() for record in self.to_records()]
return '\n'.join(lines)
def to_records(self) -> Iterable[Record]:
"""Convert the transmission to a list of records."""
yield self._get_start_record()
for assignment in self.assignments:
yield from assignment.to_records()
yield self._get_end_record()
def _get_start_record(self) -> Record:
return netsgiro.records.TransmissionStart(
service_code=netsgiro.ServiceCode.NONE,
transmission_number=self.number,
data_transmitter=self.data_transmitter,
data_recipient=self.data_recipient,
)
def _get_end_record(self) -> Record:
avtalegiro_payment_request = all(
assignment.service_code == netsgiro.ServiceCode.AVTALEGIRO
and assignment.type
in (
netsgiro.AssignmentType.TRANSACTIONS,
netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS,
)
for assignment in self.assignments
)
if self.assignments and avtalegiro_payment_request:
date = min(
assignment.get_earliest_transaction_date()
for assignment in self.assignments
)
else:
date = self.date
return netsgiro.records.TransmissionEnd(
service_code=netsgiro.ServiceCode.NONE,
num_transactions=self.get_num_transactions(),
num_records=self.get_num_records(),
total_amount=int(self.get_total_amount() * 100),
nets_date=date,
)
def add_assignment(
self,
*,
service_code: netsgiro.ServiceCode,
assignment_type: netsgiro.AssignmentType,
agreement_id: Optional[str] = None,
number: str,
account: str,
date: Optional[datetime.date] = None
) -> 'Assignment':
"""Add an assignment to the tranmission."""
assignment = Assignment(
service_code=service_code,
type=assignment_type,
agreement_id=agreement_id,
number=number,
account=account,
date=date,
)
self.assignments.append(assignment)
return assignment
def get_num_transactions(self) -> int:
"""Get number of transactions in the transmission."""
return sum(
assignment.get_num_transactions() for assignment in self.assignments
)
def get_num_records(self) -> int:
"""Get number of records in the transmission.
Includes the transmission's start and end record.
"""
return 2 + sum(
assignment.get_num_records() for assignment in self.assignments
)
def get_total_amount(self) -> Decimal:
"""Get the total amount from all transactions in the transmission."""
return sum(
assignment.get_total_amount() for assignment in self.assignments
)
@attr.s
class Assignment:
"""An Assignment groups multiple transactions within a transmission.
Use :meth:`netsgiro.Transmission.add_assignment` to create assignments.
"""
#: The service code. One of :class:`~netsgiro.ServiceCode`.
service_code = attr.ib(converter=netsgiro.ServiceCode)
#: The transaction type. One of :class:`~netsgiro.TransactionType`.
type = attr.ib(converter=netsgiro.AssignmentType)
#: The assignment number. String of 7 digits.
number = attr.ib(validator=str_of_length(7))
#: The payee's bank account. String of 11 digits.
account = attr.ib(validator=str_of_length(11))
#: Used for OCR Giro.
#:
#: The payee's agreement ID with Nets. String of 9 digits.
agreement_id = attr.ib(default=None, validator=optional(str_of_length(9)))
#: Used for OCR Giro.
#:
#: The date the assignment was generated by Nets.
date = attr.ib(default=None, validator=optional(instance_of(datetime.date)))
#: List of transaction objects, like :class:`~netsgiro.Agreement`,
#: :class:`~netsgiro.PaymentRequest`, :class:`~netsgiro.Transaction`.
transactions = attr.ib(default=attr.Factory(list), repr=False)
_next_transaction_number = 1
@classmethod
def from_records(cls, records: List[Record]) -> 'Assignment':
"""Build an Assignment object from a list of record objects."""
if len(records) < 2:
raise ValueError(
'At least 2 records required, got {}'.format(len(records))
)
start, body, end = records[0], records[1:-1], records[-1]
assert isinstance(start, netsgiro.records.AssignmentStart)
assert isinstance(end, netsgiro.records.AssignmentEnd)
if start.service_code == netsgiro.ServiceCode.AVTALEGIRO:
if (
start.assignment_type
== netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
):
transactions = cls._get_agreements(body)
else:
transactions = cls._get_payment_requests(body)
elif start.service_code == netsgiro.ServiceCode.OCR_GIRO:
transactions = cls._get_transactions(body)
else:
raise ValueError(
'Unknown service code: {}'.format(start.service_code)
)
return cls(
service_code=start.service_code,
type=start.assignment_type,
agreement_id=start.agreement_id,
number=start.assignment_number,
account=start.assignment_account,
date=end.nets_date,
transactions=transactions,
)
@staticmethod
def _get_agreements(records: List[Record]) -> List['Agreement']:
return [Agreement.from_records([r]) for r in records]
@classmethod
def _get_payment_requests(
cls, records: List[Record]
) -> List['PaymentRequest']:
transactions = cls._group_by_transaction_number(records)
return [PaymentRequest.from_records(rs) for rs in transactions.values()]
@classmethod
def _get_transactions(cls, records: List[Record]) -> List['Transaction']:
transactions = cls._group_by_transaction_number(records)
return [Transaction.from_records(rs) for rs in transactions.values()]
@staticmethod
def _group_by_transaction_number(
records: List[Record],
) -> Mapping[int, List[Record]]:
transactions = collections.OrderedDict()
for record in records:
if record.transaction_number not in transactions:
transactions[record.transaction_number] = []
transactions[record.transaction_number].append(record)
return transactions
def to_records(self) -> Iterable[Record]:
"""Convert the assignment to a list of records."""
yield self._get_start_record()
for transaction in self.transactions:
yield from transaction.to_records()
yield self._get_end_record()
def _get_start_record(self) -> Record:
return netsgiro.records.AssignmentStart(
service_code=self.service_code,
assignment_type=self.type,
assignment_number=self.number,
assignment_account=self.account,
agreement_id=self.agreement_id,
)
def _get_end_record(self) -> Record:
if self.service_code == netsgiro.ServiceCode.OCR_GIRO:
dates = {
'nets_date_1': self.date,
'nets_date_2': self.get_earliest_transaction_date(),
'nets_date_3': self.get_latest_transaction_date(),
}
elif self.service_code == netsgiro.ServiceCode.AVTALEGIRO:
dates = {
'nets_date_1': self.get_earliest_transaction_date(),
'nets_date_2': self.get_latest_transaction_date(),
}
else:
raise ValueError(
'Unhandled service code: {}'.format(self.service_code)
)
return netsgiro.records.AssignmentEnd(
service_code=self.service_code,
assignment_type=self.type,
num_transactions=self.get_num_transactions(),
num_records=self.get_num_records(),
total_amount=int(self.get_total_amount() * 100),
**dates
)
def add_payment_request(
self,
*,
kid: str,
due_date: datetime.date,
amount: Decimal,
reference: Optional[str] = None,
payer_name: Optional[str] = None,
bank_notification: Union[bool, str] = False
) -> 'Transaction':
"""Add an AvtaleGiro payment request to the assignment.
The assignment must have service code
:attr:`~netsgiro.ServiceCode.AVTALEGIRO` and assignment type
:attr:`~netsgiro.AssignmentType.TRANSACTIONS`.
"""
assert (
self.service_code == netsgiro.ServiceCode.AVTALEGIRO
), 'Can only add payment requests to AvtaleGiro assignments'
assert (
self.type == netsgiro.AssignmentType.TRANSACTIONS
), 'Can only add payment requests to transaction assignments'
if bank_notification:
transaction_type = (
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
)
else:
transaction_type = (
netsgiro.TransactionType.AVTALEGIRO_WITH_PAYEE_NOTIFICATION
)
return self._add_avtalegiro_transaction(
transaction_type=transaction_type,
kid=kid,
due_date=due_date,
amount=amount,
reference=reference,
payer_name=payer_name,
bank_notification=bank_notification,
)
def add_payment_cancellation(
self,
*,
kid: str,
due_date: datetime.date,
amount: Decimal,
reference: Optional[str] = None,
payer_name: Optional[str] = None,
bank_notification: Union[bool, str] = False
) -> 'Transaction':
"""Add an AvtaleGiro cancellation to the assignment.
The assignment must have service code
:attr:`~netsgiro.ServiceCode.AVTALEGIRO` and assignment type
:attr:`~netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS`.
Otherwise, the cancellation must be identical to the payment request it
is cancelling.
"""
assert (
self.service_code == netsgiro.ServiceCode.AVTALEGIRO
), 'Can only add cancellation to AvtaleGiro assignments'
assert (
self.type == netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
), 'Can only add cancellation to cancellation assignments'
return self._add_avtalegiro_transaction(
transaction_type=netsgiro.TransactionType.AVTALEGIRO_CANCELLATION,
kid=kid,
due_date=due_date,
amount=amount,
reference=reference,
payer_name=payer_name,
bank_notification=bank_notification,
)
def _add_avtalegiro_transaction(
self,
*,
transaction_type,
kid,
due_date,
amount,
reference=None,
payer_name=None,
bank_notification=None
) -> 'Transaction':
if isinstance(bank_notification, str):
text = bank_notification
else:
text = ''
number = self._next_transaction_number
self._next_transaction_number += 1
transaction = PaymentRequest(
service_code=self.service_code,
type=transaction_type,
number=number,
date=due_date,
amount=amount,
kid=kid,
reference=reference,
text=text,
payer_name=payer_name,
)
self.transactions.append(transaction)
return transaction
def get_num_transactions(self) -> int:
"""Get number of transactions in the assignment."""
return len(self.transactions)
def get_num_records(self) -> int:
"""Get number of records in the assignment.
Includes the assignment's start and end record.
"""
return 2 + sum(
len(list(transaction.to_records()))
for transaction in self.transactions
)
def get_total_amount(self) -> Decimal:
"""Get the total amount from all transactions in the assignment."""
transactions = [
transaction
for transaction in self.transactions
if hasattr(transaction, 'amount')
]
if not transactions:
return Decimal(0)
return sum(transaction.amount for transaction in transactions)
def get_earliest_transaction_date(self) -> Optional[datetime.date]:
"""Get earliest date from the assignment's transactions."""
transactions = [
transaction
for transaction in self.transactions
if hasattr(transaction, 'date')
]
if not transactions:
return None
return min(transaction.date for transaction in transactions)
def get_latest_transaction_date(self) -> Optional[datetime.date]:
"""Get latest date from the assignment's transactions."""
transactions = [
transaction
for transaction in self.transactions
if hasattr(transaction, 'date')
]
if not transactions:
return None
return max(transaction.date for transaction in transactions)
@attr.s
class Agreement:
"""Agreement contains an AvtaleGiro agreement update.
Agreements are only found in assignments of the
:attr:`~netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS` type, which are only
created by Nets.
"""
#: The service code. One of :class:`~netsgiro.ServiceCode`.
service_code = attr.ib(converter=netsgiro.ServiceCode)
#: Transaction number. Unique and ordered within an assignment.
number = attr.ib(validator=instance_of(int))
#: Type of agreement registration update.
#: One of :class:`~netsgiro.AvtaleGiroRegistrationType`.
registration_type = attr.ib(converter=netsgiro.AvtaleGiroRegistrationType)
#: KID number to identify the customer and invoice.
kid = attr.ib(validator=optional(instance_of(str)))
#: Whether the payer wants notification about payment requests.
notify = attr.ib(validator=instance_of(bool))
TRANSACTION_TYPE = netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
@classmethod
def from_records(cls, records: List[Record]) -> 'Agreement':
"""Build an Agreement object from a list of record objects."""
assert len(records) == 1
record = records[0]
assert isinstance(record, netsgiro.records.AvtaleGiroAgreement)
assert (
record.transaction_type
== netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
)
return cls(
service_code=record.service_code,
number=record.transaction_number,
registration_type=record.registration_type,
kid=record.kid,
notify=record.notify,
)
def to_records(self) -> Iterable[Record]:
"""Convert the agreement to a list of records."""
yield netsgiro.records.AvtaleGiroAgreement(
service_code=self.service_code,
transaction_type=self.TRANSACTION_TYPE,
transaction_number=self.number,
registration_type=self.registration_type,
kid=self.kid,
notify=self.notify,
)
@attr.s
class PaymentRequest:
"""PaymentRequest contains an AvtaleGiro payment request or cancellation.
To create a transaction, you will normally use the helper methods on
:class:`~netsgiro.Assignment`:
:meth:`~netsgiro.Assignment.add_payment_request` and
:meth:`~netsgiro.Assignment.add_payment_cancellation`.
"""
#: The service code. One of :class:`~netsgiro.ServiceCode`.
service_code = attr.ib(converter=netsgiro.ServiceCode)
#: The transaction type. One of :class:`~netsgiro.TransactionType`.
type = attr.ib(converter=netsgiro.TransactionType)
#: Transaction number. Unique and ordered within an assignment.
number = attr.ib(validator=instance_of(int))
#: The due date.
date = attr.ib(validator=instance_of(datetime.date))
#: Transaction amount in NOK with two decimals.
amount = attr.ib(converter=Decimal)
#: KID number to identify the customer and invoice.
kid = attr.ib(validator=optional(instance_of(str)))
    #: This is a specification line that will, if set, be displayed on the
    #: payer's account statement. Alphanumeric, max 25 chars.
reference = attr.ib(validator=optional(instance_of(str)))
#: This is up to 42 lines of 80 chars each of free text used by the bank to
#: notify the payer about the payment request. It is not used if the payee
#: is responsible for notifying the payer.
text = attr.ib(validator=optional(instance_of(str)))
#: The value is only used to help the payee cross-reference reports from
#: Nets with their own records. It is not visible to the payer.
payer_name = attr.ib(validator=optional(instance_of(str)))
@property
def amount_in_cents(self) -> int:
"""Transaction amount in NOK cents."""
return int(self.amount * 100)
@classmethod
    def from_records(cls, records: List[Record]) -> 'PaymentRequest':
        """Build a PaymentRequest object from a list of record objects."""
amount_item_1 = records.pop(0)
assert isinstance(
amount_item_1, netsgiro.records.TransactionAmountItem1
)
amount_item_2 = records.pop(0)
assert isinstance(
amount_item_2, netsgiro.records.TransactionAmountItem2
)
text = netsgiro.records.TransactionSpecification.to_text(records)
return cls(
service_code=amount_item_1.service_code,
type=amount_item_1.transaction_type,
number=amount_item_1.transaction_number,
date=amount_item_1.nets_date,
amount=Decimal(amount_item_1.amount) / 100,
kid=amount_item_1.kid,
reference=amount_item_2.reference,
text=text,
payer_name=amount_item_2.payer_name,
)
def to_records(self) -> Iterable[Record]:
"""Convert the transaction to a list of records."""
yield netsgiro.records.TransactionAmountItem1(
service_code=self.service_code,
transaction_type=self.type,
transaction_number=self.number,
nets_date=self.date,
amount=self.amount_in_cents,
kid=self.kid,
)
yield netsgiro.records.TransactionAmountItem2(
service_code=self.service_code,
transaction_type=self.type,
transaction_number=self.number,
reference=self.reference,
payer_name=self.payer_name,
)
if self.type == (
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
):
yield from netsgiro.records.TransactionSpecification.from_text(
service_code=self.service_code,
transaction_type=self.type,
transaction_number=self.number,
text=self.text,
)
@attr.s
class Transaction:
"""Transaction contains an OCR Giro transaction.
    Transactions are found in assignments with the
    :attr:`~netsgiro.ServiceCode.OCR_GIRO` service code, which are only
    created by Nets.
"""
#: The service code. One of :class:`~netsgiro.ServiceCode`.
service_code = attr.ib(converter=netsgiro.ServiceCode)
#: The transaction type. One of :class:`~netsgiro.TransactionType`.
type = attr.ib(converter=netsgiro.TransactionType)
#: Transaction number. Unique and ordered within an assignment.
number = attr.ib(validator=instance_of(int))
#: Nets' processing date.
date = attr.ib(validator=instance_of(datetime.date))
#: Transaction amount in NOK with two decimals.
amount = attr.ib(converter=Decimal)
#: KID number to identify the customer and invoice.
kid = attr.ib(validator=optional(instance_of(str)))
#: The value depends on the payment method.
reference = attr.ib(validator=optional(instance_of(str)))
#: Up to 40 chars of free text from the payment terminal.
text = attr.ib(validator=optional(instance_of(str)))
#: Used for OCR Giro.
centre_id = attr.ib(validator=optional(str_of_length(2)))
#: Used for OCR Giro.
day_code = attr.ib(validator=optional(instance_of(int)))
#: Used for OCR Giro.
partial_settlement_number = attr.ib(validator=optional(instance_of(int)))
#: Used for OCR Giro.
partial_settlement_serial_number = attr.ib(
validator=optional(str_of_length(5))
)
#: Used for OCR Giro.
sign = attr.ib(validator=optional(str_of_length(1)))
#: Used for OCR Giro.
form_number = attr.ib(validator=optional(str_of_length(10)))
#: Used for OCR Giro.
bank_date = attr.ib(validator=optional(instance_of(datetime.date)))
#: Used for OCR Giro.
debit_account = attr.ib(validator=optional(str_of_length(11)))
_filler = attr.ib(validator=optional(str_of_length(7)))
@property
def amount_in_cents(self) -> int:
"""Transaction amount in NOK cents."""
return int(self.amount * 100)
@classmethod
def from_records(cls, records: List[Record]) -> 'Transaction':
"""Build a Transaction object from a list of record objects."""
amount_item_1 = records.pop(0)
assert isinstance(
amount_item_1, netsgiro.records.TransactionAmountItem1
)
amount_item_2 = records.pop(0)
assert isinstance(
amount_item_2, netsgiro.records.TransactionAmountItem2
)
if len(records) == 1 and isinstance(
records[0], netsgiro.records.TransactionAmountItem3
):
text = records[0].text
else:
text = None
return cls(
service_code=amount_item_1.service_code,
type=amount_item_1.transaction_type,
number=amount_item_1.transaction_number,
date=amount_item_1.nets_date,
amount=Decimal(amount_item_1.amount) / 100,
kid=amount_item_1.kid,
reference=amount_item_2.reference,
text=text,
centre_id=amount_item_1.centre_id,
day_code=amount_item_1.day_code,
partial_settlement_number=amount_item_1.partial_settlement_number,
partial_settlement_serial_number=(
amount_item_1.partial_settlement_serial_number
),
sign=amount_item_1.sign,
form_number=amount_item_2.form_number,
bank_date=amount_item_2.bank_date,
debit_account=amount_item_2.debit_account,
filler=amount_item_2._filler,
)
def to_records(self) -> Iterable[Record]:
"""Convert the transaction to a list of records."""
yield netsgiro.records.TransactionAmountItem1(
service_code=self.service_code,
transaction_type=self.type,
transaction_number=self.number,
nets_date=self.date,
amount=self.amount_in_cents,
kid=self.kid,
centre_id=self.centre_id,
day_code=self.day_code,
partial_settlement_number=self.partial_settlement_number,
partial_settlement_serial_number=(
self.partial_settlement_serial_number
),
sign=self.sign,
)
yield netsgiro.records.TransactionAmountItem2(
service_code=self.service_code,
transaction_type=self.type,
transaction_number=self.number,
reference=self.reference,
form_number=self.form_number,
bank_date=self.bank_date,
debit_account=self.debit_account,
filler=self._filler,
)
if self.type in (
netsgiro.TransactionType.REVERSING_WITH_TEXT,
netsgiro.TransactionType.PURCHASE_WITH_TEXT,
):
yield netsgiro.records.TransactionAmountItem3(
service_code=self.service_code,
transaction_type=self.type,
transaction_number=self.number,
text=self.text,
)
def parse(data: str) -> Transmission:
"""Parse an OCR file into a Transmission object."""
records = netsgiro.records.parse(data)
return Transmission.from_records(records)
```
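The classes above compose into a small builder-style API. The following is a minimal usage sketch assembled from the classes and helper methods defined in this file (not an excerpt from the package's documentation); the transmission number, account, KID, and payer name are made-up placeholder values.
```python
import datetime
from decimal import Decimal

import netsgiro

transmission = netsgiro.Transmission(
    number='0000001',
    data_transmitter='12341234',
    data_recipient=netsgiro.NETS_ID,
)
assignment = transmission.add_assignment(
    service_code=netsgiro.ServiceCode.AVTALEGIRO,
    assignment_type=netsgiro.AssignmentType.TRANSACTIONS,
    agreement_id='000000000',   # placeholder agreement ID, as in the test data
    number='0000001',
    account='99998877777',
)
assignment.add_payment_request(
    kid='000133700501645',
    due_date=datetime.date(2017, 4, 20),
    amount=Decimal('190.50'),
    payer_name='Wonderland',
)

ocr_data = transmission.to_ocr()       # fixed-width OCR text ready for Nets
roundtrip = netsgiro.parse(ocr_data)   # parse() rebuilds the object tree
assert roundtrip.get_total_amount() == Decimal('190.50')
```
The round trip through `to_ocr()` and `parse()` relies on the record classes in `netsgiro.records`, which the two test files below exercise directly.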
#### File: python-netsgiro/tests/test_object_parsing.py
```python
from datetime import date
from decimal import Decimal
import netsgiro
def test_parse_agreements(agreements_data):
transmission = netsgiro.parse(agreements_data)
assert isinstance(transmission, netsgiro.Transmission)
assert transmission.number == '1091949'
assert transmission.data_transmitter == netsgiro.NETS_ID
assert transmission.data_recipient == '00010200'
assert transmission.date == date(2017, 4, 19)
assert len(transmission.assignments) == 1
assignment = transmission.assignments[0]
assert isinstance(assignment, netsgiro.Assignment)
assert assignment.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert assignment.type == netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
assert assignment.agreement_id is None
assert assignment.number == '0000002'
assert assignment.account == '99991042764'
assert len(assignment.transactions) == 16
agreement_1 = assignment.transactions[0]
assert isinstance(agreement_1, netsgiro.Agreement)
assert agreement_1.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert agreement_1.TRANSACTION_TYPE == (
netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
)
assert agreement_1.number == 1
assert agreement_1.registration_type == (
netsgiro.AvtaleGiroRegistrationType.NEW_OR_UPDATED_AGREEMENT
)
assert agreement_1.kid == '000112000507155'
assert agreement_1.notify is True
agreement_2 = assignment.transactions[1]
assert isinstance(agreement_2, netsgiro.Agreement)
assert agreement_2.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert agreement_2.TRANSACTION_TYPE == (
netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
)
assert agreement_2.number == 2
assert agreement_2.registration_type == (
netsgiro.AvtaleGiroRegistrationType.NEW_OR_UPDATED_AGREEMENT
)
assert agreement_2.kid == '001006300507304'
assert agreement_2.notify is False
def test_parse_payment_request(payment_request_data):
transmission = netsgiro.parse(payment_request_data)
assert isinstance(transmission, netsgiro.Transmission)
assert transmission.number == '1000081'
assert transmission.data_transmitter == '55555555'
assert transmission.data_recipient == netsgiro.NETS_ID
assert transmission.date == date(2004, 6, 17)
assert len(transmission.assignments) == 1
assignment = transmission.assignments[0]
assert isinstance(assignment, netsgiro.Assignment)
assert assignment.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert assignment.type == netsgiro.AssignmentType.TRANSACTIONS
assert assignment.agreement_id == '000000000'
assert assignment.number == '4000086'
assert assignment.account == '88888888888'
assert len(assignment.transactions) == 6
transaction = assignment.transactions[0]
assert isinstance(transaction, netsgiro.PaymentRequest)
assert transaction.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert transaction.type == (
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
)
assert transaction.number == 1
assert transaction.date == date(2004, 6, 17)
assert transaction.amount == Decimal('1.00')
assert transaction.amount_in_cents == 100
assert transaction.kid == '008000011688373'
assert transaction.reference is None
assert transaction.text == (
' Gjelder Faktura: 168837 Dato: 19/03/04'
' ForfallsDato: 17/06/04\n'
)
# Specific to AvtaleGiro
assert transaction.payer_name == 'NAVN'
def test_parse_ocr_giro_transactions(ocr_giro_transactions_data):
transmission = netsgiro.parse(ocr_giro_transactions_data)
assert isinstance(transmission, netsgiro.Transmission)
assert transmission.number == '0170031'
assert transmission.data_transmitter == netsgiro.NETS_ID
assert transmission.data_recipient == '00010200'
assert transmission.date == date(1992, 1, 20)
assert len(transmission.assignments) == 1
assignment = transmission.assignments[0]
assert isinstance(assignment, netsgiro.Assignment)
assert assignment.service_code == netsgiro.ServiceCode.OCR_GIRO
assert assignment.type == netsgiro.AssignmentType.TRANSACTIONS
assert assignment.agreement_id == '001008566'
assert assignment.number == '0000002'
assert assignment.account == '99991042764'
assert len(assignment.transactions) == 20
transaction = assignment.transactions[0]
assert isinstance(transaction, netsgiro.Transaction)
assert transaction.service_code == netsgiro.ServiceCode.OCR_GIRO
assert transaction.type == (netsgiro.TransactionType.PURCHASE_WITH_TEXT)
assert transaction.number == 1
assert transaction.date == date(1992, 1, 20)
assert transaction.amount == Decimal('1020')
assert transaction.amount_in_cents == 102000
assert transaction.kid == '0000531'
assert transaction.reference == '099038562'
assert transaction.text == 'Foo bar baz'
# Specific to OCR Giro
assert transaction.centre_id == '13'
assert transaction.day_code == 20
assert transaction.partial_settlement_number == 1
assert transaction.partial_settlement_serial_number == '01464'
assert transaction.sign == '0'
assert transaction.form_number == '9636827194'
assert transaction.bank_date == date(1992, 1, 16)
assert transaction.debit_account == '99990512341'
```
#### File: python-netsgiro/tests/test_record_parsing.py
```python
from datetime import date
import pytest
import netsgiro
import netsgiro.records
def test_transmission_start():
record = netsgiro.records.TransmissionStart.from_string(
'NY00001055555555100008100008080000000000'
'0000000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.NONE
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSMISSION_START
assert record.data_transmitter == '55555555'
assert record.transmission_number == '1000081'
assert record.data_recipient == '00008080'
def test_transmission_start_fails_when_invalid_format():
line = 'XX' + ('0' * 78)
with pytest.raises(
ValueError,
match='{!r} did not match TransmissionStart record format'.format(line),
):
netsgiro.records.TransmissionStart.from_string(line)
def test_transmission_end():
record = netsgiro.records.TransmissionEnd.from_string(
'NY00008900000006000000220000000000000060'
'0170604000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.NONE
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSMISSION_END
assert record.num_transactions == 6
assert record.num_records == 22
assert record.total_amount == 600
assert record.nets_date == date(2004, 6, 17)
def test_assignment_start_for_avtalegiro_payment_requests():
record = netsgiro.records.AssignmentStart.from_string(
'NY21002000000000040000868888888888800000'
'0000000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
assert record.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
assert record.agreement_id == '000000000'
assert record.assignment_number == '4000086'
assert record.assignment_account == '88888888888'
def test_assignment_start_for_avtalegiro_agreements():
record = netsgiro.records.AssignmentStart.from_string(
'NY21242000000000040000868888888888800000'
'0000000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
assert record.assignment_type == (
netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
)
assert record.agreement_id is None
assert record.assignment_number == '4000086'
assert record.assignment_account == '88888888888'
def test_assignment_start_for_avtalegiro_cancellation():
record = netsgiro.records.AssignmentStart.from_string(
'NY21362000000000040000868888888888800000'
'0000000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
assert record.assignment_type == (
netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
)
assert record.agreement_id is None
assert record.assignment_number == '4000086'
assert record.assignment_account == '88888888888'
def test_assignment_start_for_ocr_giro_transactions():
record = netsgiro.records.AssignmentStart.from_string(
'NY09002000100856600000029999104276400000'
'0000000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
assert record.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
assert record.agreement_id == '001008566'
assert record.assignment_number == '0000002'
assert record.assignment_account == '99991042764'
def test_assignment_end_for_avtalegiro_payment_requests():
record = netsgiro.records.AssignmentEnd.from_string(
'NY21008800000006000000200000000000000060'
'0170604170604000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
assert record.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
assert record.num_transactions == 6
assert record.num_records == 20
assert record.total_amount == 600
assert record.nets_date_earliest == date(2004, 6, 17)
assert record.nets_date_latest == date(2004, 6, 17)
def test_assignment_end_for_avtalegiro_agreements():
record = netsgiro.records.AssignmentEnd.from_string(
'NY21248800000006000000200000000000000000'
'0000000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
assert record.assignment_type == (
netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
)
assert record.num_transactions == 6
assert record.num_records == 20
assert record.total_amount is None
assert record.nets_date_earliest is None
assert record.nets_date_latest is None
def test_assignment_end_for_avtalegiro_cancellations():
record = netsgiro.records.AssignmentEnd.from_string(
'NY21368800000006000000200000000000000060'
'0170604170604000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
assert record.assignment_type == (
netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
)
assert record.num_transactions == 6
assert record.num_records == 20
assert record.total_amount == 600
assert record.nets_date_latest == date(2004, 6, 17)
assert record.nets_date_earliest == date(2004, 6, 17)
def test_assignment_end_for_ocr_giro_transactions():
record = netsgiro.records.AssignmentEnd.from_string(
'NY09008800000020000000420000000000514490'
'0200192200192200192000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
assert record.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
assert record.num_transactions == 20
assert record.num_records == 42
assert record.total_amount == 5144900
assert record.nets_date == date(1992, 1, 20)
assert record.nets_date_earliest == date(1992, 1, 20)
assert record.nets_date_latest == date(1992, 1, 20)
def test_transaction_amount_item_1_for_avtalegiro_payment_request():
record = netsgiro.records.TransactionAmountItem1.from_string(
'NY2121300000001170604 00000000'
'000000100 008000011688373000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
assert record.transaction_type == (
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
)
assert record.transaction_number == 1
assert record.nets_date == date(2004, 6, 17)
assert record.amount == 100
assert record.kid == '008000011688373'
def test_transaction_amount_item_1_for_avtalegiro_cancellation():
record = netsgiro.records.TransactionAmountItem1.from_string(
'NY2193300000001170604 00000000'
'000000100 008000011688373000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
assert record.transaction_type == (
netsgiro.TransactionType.AVTALEGIRO_CANCELLATION
)
assert record.transaction_number == 1
assert record.nets_date == date(2004, 6, 17)
assert record.amount == 100
assert record.kid == '008000011688373'
def test_transaction_amount_item_1_for_ocr_giro_transactions():
record = netsgiro.records.TransactionAmountItem1.from_string(
'NY09103000000012001921320101464000000000'
'000102000 0000531000000'
)
assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
assert record.transaction_type == (
netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
)
assert record.transaction_number == 1
assert record.nets_date == date(1992, 1, 20)
assert record.centre_id == '13'
assert record.day_code == 20
assert record.partial_settlement_number == 1
assert record.partial_settlement_serial_number == '01464'
assert record.sign == '0'
assert record.amount == 102000
assert record.kid == '0000531'
def test_transaction_amount_item_2_for_avtalegiro_payment_request():
record = netsgiro.records.TransactionAmountItem2.from_string(
'NY2121310000001NAVN '
' 00000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
assert record.transaction_type == (
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
)
assert record.transaction_number == 1
assert record.payer_name == 'NAVN'
assert record.reference is None
def test_transaction_amount_item_2_for_ocr_giro_transactions():
record = netsgiro.records.TransactionAmountItem2.from_string(
'NY09103100000019636827194099038562000000'
'0160192999905123410000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
assert record.transaction_type == (
netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
)
assert record.transaction_number == 1
assert record.form_number == '9636827194'
assert record.payer_name is None
assert record.reference == '099038562'
assert record.bank_date == date(1992, 1, 16)
assert record.debit_account == '99990512341'
def test_transaction_amount_item_2_for_ocr_giro_with_data_in_filler_field():
record = netsgiro.records.TransactionAmountItem2.from_string(
'NY09103100000029797596016097596016188320'
'6160192999910055240000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
assert record.transaction_type == (
netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
)
assert record.transaction_number == 2
assert record.form_number == '9797596016'
assert record.payer_name is None
assert record.reference == '097596016'
assert record.bank_date == date(1992, 1, 16)
assert record.debit_account == '99991005524'
assert record._filler == '1883206'
def test_transaction_amount_item_3_for_ocr_giro_transactions():
record = netsgiro.records.TransactionAmountItem3.from_string(
'NY0921320000001Foo bar baz '
' 0000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_3
assert record.transaction_type == (
netsgiro.TransactionType.PURCHASE_WITH_TEXT
)
assert record.transaction_number == 1
assert record.text == 'Foo bar baz'
def test_transaction_specification_for_avtalegiro_payment_request():
record = netsgiro.records.TransactionSpecification.from_string(
'NY212149000000140011 Gjelder Faktura: 16'
'8837 Dato: 19/03/0400000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_SPECIFICATION
assert record.transaction_type == (
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
)
assert record.transaction_number == 1
assert record.line_number == 1
assert record.column_number == 1
assert record.text == ' Gjelder Faktura: 168837 Dato: 19/03/04'
def make_specification_records(num_lines, num_columns=2):
return [
netsgiro.records.TransactionSpecification(
service_code=netsgiro.ServiceCode.AVTALEGIRO,
transaction_type=(
netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
),
transaction_number=1,
line_number=line,
column_number=column,
text='Line {}, column {}'.format(line, column),
)
for line in range(1, num_lines + 1)
for column in range(1, num_columns + 1)
]
def test_transaction_specification_to_text_with_max_number_of_records():
records = make_specification_records(42)
result = netsgiro.records.TransactionSpecification.to_text(records)
assert len(result.splitlines()) == 42
assert 'Line 1, column 1' in result
assert 'Line 42, column 2' in result
def test_transaction_specification_to_text_with_too_many_records():
records = make_specification_records(43)
with pytest.raises(
ValueError, match='Max 84 specification records allowed, got 86'
):
netsgiro.records.TransactionSpecification.to_text(records)
def test_avtalegiro_active_agreement():
record = netsgiro.records.AvtaleGiroAgreement.from_string(
'NY21947000000010 00800001168837'
'3J00000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AGREEMENTS
assert record.transaction_type == (
netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
)
assert record.transaction_number == 1
assert record.registration_type == (
netsgiro.AvtaleGiroRegistrationType.ACTIVE_AGREEMENT
)
assert record.kid == '008000011688373'
assert record.notify is True
def test_avtalegiro_new_or_updated_agreement():
record = netsgiro.records.AvtaleGiroAgreement.from_string(
'NY21947000000011 00800001168837'
'3N00000000000000000000000000000000000000'
)
assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AGREEMENTS
assert record.transaction_type == (
netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
)
assert record.transaction_number == 1
assert record.registration_type == (
netsgiro.AvtaleGiroRegistrationType.NEW_OR_UPDATED_AGREEMENT
)
assert record.kid == '008000011688373'
assert record.notify is False
``` |
{
"source": "jodalyst/6302_spring18_sandbox",
"score": 2
} |
#### File: examples/ESP32_Streaming/base.py
```python
import time
import math
from flask import Flask, render_template, session, request
from flask_cors import CORS, cross_origin
#Start up Flask server:
app = Flask(__name__, template_folder = './',static_folder='../../src')
#app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
#socketio = SocketIO(app, async_mode = async_mode)
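#Allow cross-origin requests (any origin; Content-Type and Authorization headers accepted)
#so a front end served from a different origin/port can reach this server.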
CORS(app,resources={
r'/*/*': {
'origins': '*',
'allow_headers': ['Content-Type', 'Authorization']
}
})
@app.route('/')
def index():
print ("A user connected")
#if thread is None:
# thread = Thread(target=dataThread)
# thread.daemon = True
# thread.start()
return render_template('base.html')
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True)
``` |
{
"source": "jodalyst/jinstrument",
"score": 2
} |
#### File: div_render/interface_dev/breadboard_1v9.py
```python
from collections import OrderedDict
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.models import HoverTool
from bokeh.embed import components
import bokeh
import random
import math
import sys
import glob
import serial
import json
import struct
import time
import matplotlib.pyplot as plt
import sys
import time
import math
from threading import Thread, Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
from datetime import datetime
def serial_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in list(range(256))]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
#print("checking port "+port)
s = serial.Serial(port)
#print("closing port "+port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
#-------------------
# ports = serial_ports() #generate list of currently connected serial ports
# print (ports)
# ser = ports[0]
# s = serial.Serial(ser)
# print(s)
left_nodes = 62
right_nodes = 62
left_bus = 2
right_bus = 2
node_v_spacing = 0.18
bus_h_spacing = 0.18
midline_gap = 0.25
node_to_bus_gap = 0.15
node_length = 1.0
node_height = 0.15
bus_height = node_v_spacing*left_nodes
bb_node = [[0,0,node_length,node_length],[0,node_height,node_height,0]]
bb_bus = [[0,0,node_height,node_height],[0,bus_height,bus_height,0]]
image_height = 0.3+bus_height
image_width = 0.3+midline_gap + 2*node_length+2*node_to_bus_gap+4*node_height
BB_x = []
BB_y = []
orientation='horizontal'
if orientation =='vertical':
for q in range(left_nodes):
BB_x.append([t for t in bb_node[0]])
BB_y.append([t+q*node_v_spacing for t in bb_node[1]])
for q in range(right_nodes):
BB_x.append([t+node_length+midline_gap for t in bb_node[0]])
BB_y.append([t+q*node_v_spacing for t in bb_node[1]])
BB_x.append([t-node_to_bus_gap-node_height - bus_h_spacing for t in bb_bus[0]])
BB_x.append([t-node_to_bus_gap-node_height for t in bb_bus[0]])
BB_x.append([t+node_to_bus_gap+2*node_length + midline_gap for t in bb_bus[0]])
BB_x.append([t+bus_h_spacing+node_to_bus_gap+2*node_length + midline_gap for t in bb_bus[0]])
for y in range(4):
BB_y.append(bb_bus[1])
image_height = 0.3+bus_height
image_width = 0.3+midline_gap + 2*node_length+2*node_to_bus_gap+4*node_height
pixel_scaler = 500/6
else:
for q in range(left_nodes):
BB_y.append([t for t in bb_node[0]])
BB_x.append([t+q*node_v_spacing for t in bb_node[1]])
for q in range(right_nodes):
BB_y.append([t+node_length+midline_gap for t in bb_node[0]])
BB_x.append([t+q*node_v_spacing for t in bb_node[1]])
BB_y.append([t-node_to_bus_gap -node_height- bus_h_spacing for t in bb_bus[0]])
BB_y.append([t-node_to_bus_gap-node_height for t in bb_bus[0]])
BB_y.append([t+node_to_bus_gap+2*node_length + midline_gap for t in bb_bus[0]])
BB_y.append([t+bus_h_spacing+node_to_bus_gap+2*node_length + midline_gap for t in bb_bus[0]])
for y in range(4):
BB_x.append(bb_bus[1])
image_width = 0.3+bus_height
image_height = 0.3+midline_gap + 2*node_length+2*node_to_bus_gap+4*node_height
pixel_scaler = 500/5
def color_getter(value,maximum):
integer = int(math.floor(value*255/maximum))
#print (integer)
hexval = hex(integer)[2:]
#print (hexval)
if len(str(hexval))==1:
return "#" +"0" +hexval+"0000"
else:
return "#" +hexval+"0000"
async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
#Start up Flask server:
app = Flask(__name__, template_folder = './',static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode = async_mode)
thread = None
def dataThread():
unique = 456
ports = serial_ports() #generate list of currently connected serial ports
print (ports)
ser = ports[1]
s = serial.Serial(ser)
print(s)
print("ALL GOOD")
while True:
#current = datetime.now().isoformat()
#current = current.replace(":","_")
#string_to_write = input() #7,3;single\n"
string_to_write = "all*"
print(string_to_write)
s.write(bytes(string_to_write,'UTF-8'))
print("sleeping now")
time.sleep(4) #time running in the arduino code. Modify if needed
print("post_sleep")
no_more_data = False
        #this is a serious kludge:
all_data = ""
while not no_more_data:
#print("going")
time.sleep(0.1)
data_left = s.inWaiting()
if (data_left >0):
all_data += s.read(data_left).decode()
else:
no_more_data = True
print(all_data)
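        #The reply is a stream of "nodeIndex:rawADC&" pairs (e.g. "0:512&37:1023&");
        #the raw 10-bit ADC counts are scaled to volts further down via 3.3*value/1023.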
x = all_data
#x = x[1]
#print(x)
x = x.split("&")
x = x[:-1]
bins=[]
for y in x:
parts = y.split(":")
#print(parts[0])
#print(parts[1])
bins.append((int(parts[0]),int(parts[1])))
#for random testing:
#voltage = [3.3*random.random() for x in range(len(names))]
print (bins)
node_voltage = list()
time_x = list()
count = 0
#Create place holders when all is not called
#organizing to operate different modes
old_voltage = [0]*128 #create a list of zeros of 128 elements
for y in bins:
old_voltage[y[0]] = 3.3*y[1]/1023
old_names = list(range(128))
#print (old_names)
#Reorganizing orders to match with the breadboard layout
names = list()
voltage = list()
for i in old_names:
index = old_names.index(i)
if index >= 62:
if index == 125:
names.append(63)
voltage.append(old_voltage[63])
elif index == 124:
names.append(62)
voltage.append(old_voltage[62])
elif (index == 126):
names.append(127)
voltage.append(old_voltage[127])
elif (index == 127):
names.append(126)
voltage.append(old_voltage[126])
else:
names.append(i + 2)
voltage.append(old_voltage[index + 2])
else:
names.append(i)
voltage.append(old_voltage[index])
#print (names)
#print (voltage)
#colors = ["#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043"]
colors = [color_getter(v,3.3) for v in voltage]
#print (colors)
source = ColumnDataSource(
data = dict(
x=BB_x,
y=BB_y,
color=colors,
name=names,
voltage=voltage,
)
)
#output_file("bb_test_{}.html".format(current), title="Breadboard Visualizer v1.0")
TOOLS="hover,save"
p = figure(title="Breadboard Voltages", tools=TOOLS)
p.toolbar.logo=None
#print(pixel_scaler*image_width)
#print(pixel_scaler*image_height)
p.patches('x', 'y',
fill_color='color', fill_alpha=0.7,
line_color="white", line_width=0.0,
source=source)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.plot_height=int(pixel_scaler*image_height)
p.plot_width=int(pixel_scaler*image_width)
p.axis.visible = False
hover = p.select(dict(type=HoverTool))
hover.point_policy = "follow_mouse"
hover.tooltips = OrderedDict([
("Name", "@name"),
("Voltage)", "@voltage V"),
])
script, div = components(p)
prep = script + div
#val1 = amp1*math.sin(omega1*time.time())
#val2 = amp2*math.sin(omega2*time.time())
socketio.emit('update_{}'.format(unique),prep,broadcast =True)
print('sending')
@app.route('/')
def index():
global thread
print ("A user connected")
if thread is None:
thread = Thread(target=dataThread)
thread.daemon = True
thread.start()
return render_template('div_render.example_1.html')
if __name__ == '__main__':
socketio.run(app, port=3000, debug=True)
```
#### File: jinstrument/py/primary.py
```python
async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
import time
from threading import Thread, Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
import sys
import glob
import serial
import json
import struct
import csv
#Version 2.7 or Above?
if sys.version_info[0] >2:
version3 = True
kwargs = {'newline':''}
else:
version3 = False
kwargs = {}
##import logging
##log = logging.getLogger('werkzeug')
##log.setLevel(logging.ERROR)
serialConnected = False #global flag for whether or not the serial port should be connected
serialPort = 0 # junk placeholder until a serial.Serial object is assigned when in use...access protected by serialLock below
serialLock = Lock() #serial permission lock (protects shared resource of serial port)
print (serialLock)
#Taken from here on StackExchange: http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
#Want to give credit where credit is due!
def serial_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in list(range(256))]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
#print("checking port "+port)
s = serial.Serial(port)
#print("closing port "+port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
#-------------------
#serial variables:
serialselection = ''
baudselection = 115200
mcuMessage = []
'''system_parameters: dictionary where keys are user-variable parameter names and each entry is a list consisting of the current value (index 0) and a single-character comm term for conveying that value back to the micro...for example you could have system_parameters['K_d']=[1.4,'D']
'''
system_parameters = {}
#params_and_values: an ordered list of the names of parameters, headroom, and values to be plotted
#Used in generating CSV header list in order
params_and_values = []
#A list pointing to parameter values for quick plotting (rather than rebuilding it with a list comprehension every time)
param_vals = []
command_terms = ['HIHI']
#expected_length...how long each full message from Micro should be
expected_length = 0
#function that will be stored for chopping up message into appropriate signed/unsignedness/float, etc... makes this processing arbitrarily expandable as needed...must obviously agree with encoding scheme on micro
parseFunction = lambda x: [0]
'''Kp = 0.0
Kd = 0.0
Ki = 0.0
direct = 0.0
desired = 0.0
alternate = 0.0 # global flag of whether or not we're alternating...
'''
#ALTERNATING DATA STRUCTURE:
# timer and state are used for storing/remembering the switching action
# period is how often to switch (in seconds)
# param is the user input that is switched (determined during initialization)
alt_data = {'timer': time.time(), 'state':-1.0, 'period': 5, 'param': None} #data structure used to implement alternating behavior
#Start up Flask server:
app = Flask(__name__, template_folder = './',static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode = async_mode)
thread = None
#csv variables:
global csv_default
global csv_recent
global current
global archive
csv_st = time.time()
#variable which determines whether a csv is being generated or not.
csv_yn = False #start out not writing csv files
csvLock = Lock()
keepRunning = True #set to True for default
#global setup variables:
#used during initialization of comms/building GUI
isSetup = False
setupString = ""
allGoodFromGUI = False
#Function run in parallel on an infinite loop in a background thread;
#serves as the serial listener, separate from the web request handling.
def serialThread():
print ("Starting serial background thread.")
global desired
global serialLock
global csvLock
global serialPort
global system_parameters
global params_and_values
global expected_length
global parseFunction
global param_vals
global csv_default
global csv_recent
global alt_data
global alternate
global isSetup
global setupString
global command_terms
while True:
if serialConnected:
writeUpdates('~',0)
time.sleep(2.0)
serialLock.acquire()
try:
new_setupString = serialPort.readline()
serialPort.flushInput()
except:
print ("initi string reading issue")
serialLock.release()
new_setupString = strip_until_marker(new_setupString)
temp_commands = new_setupString.split('&')
temp_commands = temp_commands[1:-1]
if temp_commands != command_terms: #only reload the gui if the configuration setup string has changed!
command_terms = temp_commands
setupString = new_setupString
temp = setupString.split('&',1)[1]
temp = temp.rsplit('&',1)[0]
setupString = temp
try:#send up to javascript to sort its part out
socketio.emit('startup',setupString,broadcast =True)
except:
print ("failed socket")
                #build structures based on setupString's contents and order
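                #Example (assumed) setup string fragment this block would parse:
                #  "&S~Kp~P&T~Angle~S2&H~2&" -> one slider "Kp" written back with comm char 'P',
                #  one 2-byte signed temporal plot "Angle", and a 2-byte signed headroom value.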
plot_count =0 #used for tallying plots
spaces = [] #used for determining how to chop data string (bytes per var)
s=[] #list of sliders
t=[] #list of temporal plots
h = [] #contains headroom value if that is being plotted
for x in command_terms:
if len(x)>0 and x[0] =='S': #is a slider
slider_vals = x.split('~') #chop string
#next: add key to system_parameters dict of slider name
#entry is starting val (0) and one char value used for comms
system_parameters[slider_vals[1]]=[0,slider_vals[2]]
s.append(slider_vals[1]) #add name of param to s list
#next is to fill in the param_vals list with the current value
param_vals.append(system_parameters[slider_vals[1]][0])
if len(x)>0 and x[0] == 'A': #we are alternating
vals = x.split('~') #split substring
alt_data['period'] = float(vals[2]) #period unpacked
alt_data['param'] = vals[1] #link alternate to selected parameter
if len(x)>0 and x[0]=='T': #we have a temporal plot
plot_vals = x.split('~') #split substring
t.append(plot_vals[1]) #add name to t list
#next line: append list: [num_bytes,signed/unsigned/float,etc..]
spaces.append([int(plot_vals[2][1]),plot_vals[2][0]])
plot_count +=1 #increment plot count
if len(x)>0 and x[0]=='H':
head_vals = x.split('~')
h.append("Headroom")
plot_count +=1 #headroom isn't a "plot" but treated same
if head_vals[1] =='2':
spaces.append([2,'S']) #needed since 16bit int on Arduino
elif head_vals[1] =='4':
spaces.append([4,'F']) #needed since ARM32 Teensy
params_and_values = t+h+s #in order plots, headroom, sliders
expected_length = sum(x[0] for x in spaces)+2 #2 from open/closing byte
#parse_prototype is function that will chop up incoming bytes for sending up to the GUI
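                #e.g. with spaces = [[2,'S'],[4,'F']], payload bytes 1-2 of a frame are
                #unpacked as a little-endian signed 16-bit int and bytes 3-6 as a float32.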
def parse_prototype(listo):
new_out = []
current_index=1 #start 1 up because of start byte
for x in range(plot_count):
val = 0
if spaces[x][0] == 1:
if spaces[x][1] == 'S':
val = struct.unpack('b',listo[current_index:current_index+1])[0]
elif spaces[x][1] =='U':
val = struct.unpack('B',listo[current_index:current_index+1])[0]
elif spaces[x][0] == 2:
if spaces[x][1] == 'S':
val = struct.unpack('<h',listo[current_index:current_index+2])[0]
elif spaces[x][1] == 'U':
val = struct.unpack('H',listo[current_index:current_index+2])[0]
elif spaces[x][0] == 4:
if spaces[x][1] == 'F':
val = struct.unpack('f',listo[current_index:current_index+4])[0]
elif spaces[x][1] == 'S':
val = struct.unpack('i',listo[current_index:current_index+4])[0]
new_out.append(val)
current_index += spaces[x][0]
return new_out
parseFunction = parse_prototype
while not allGoodFromGUI:
time.sleep(1.0)
isSetup = True
else:
inform_dev() #just tell device that we are good
serialLock.acquire()
try:
serialPort.flushInput()
except:
serialLock.release()
for x in s: #reload gui and device
socketio.emit('setup slider',{0:x,1:str(system_parameters[x][0])}, broadcast=True)
#print("Writing %s to be %0.4f" %(system_parameters[x][1],system_parameters[x][0]))
writeUpdates(system_parameters[x][1],system_parameters[x][0])
time.sleep(0.1)
writeUpdates(system_parameters[x][1],system_parameters[x][0])
time.sleep(0.1)
time.sleep(1)
while serialConnected:
serialLock.acquire()
b = serialPort.read(expected_length)
if len(b) != expected_length:
print("expected=%d, actual=%d\n",len(b),expected_length)
new_data = None
if len(b) > 0 and messageRead(b,expected_length):
new_data = parseFunction(b)
if new_data != None:
try:
socketio.emit('note',new_data,broadcast =True)
except:
print ("failed socket")
if csv_yn:
temp_time = [time.time()-csv_st] #time since recording started
csvLock.acquire()
newb_list = temp_time+new_data+[system_parameters[x][0] for x in s]
csv_default.writerow(newb_list)
csv_recent.writerow(newb_list)
csvLock.release()
#elif bytesThere > expected_length:
# try:
# serialPort.flushInput()
# except:
# print ("failure to flush input")
serialLock.release()
time.sleep(0.01)
if alternate == 1:
if time.time()-alt_data['timer'] > alt_data['period']:
print ('Switch to :')
alt_data['timer'] = time.time() #reset timer
poi = alt_data['param'] #param of interest
print(type(system_parameters[poi][0]))
print(system_parameters[poi][0])
system_parameters[poi][0] = system_parameters[poi][0]*-1.0
alt_data['state'] = alt_data.get('state')*-1
writeUpdates(system_parameters[poi][1],system_parameters[poi][0])
try:
socketio.emit('state toggle', system_parameters[poi][0], broadcast=True) #tell the GUI that the desired has changed
except:
print('failed toggle socket')
print ("Stopping serial read. Returning to idle state")
time.sleep(0.01)
def strip_until_marker(input_string):
#return only text after last non-ascii character has been found
#should *always* work...closing byte of plot package is \xff which is non-ascii and
#should get caught in this scheme...there are of course ways to break this but they
#require breaking the communication contract we have setup.
new_string = ''
for x in range(len(input_string)):
poss = input_string[x:x+1]
try:
if version3:
if type(poss)==type("hi"):
poss = str.encode(poss,'ascii') #fail here possibly
char = poss.decode('ascii')
new_string+=char
except:
new_string=""
return new_string
#runtime variables...
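#Each binary frame from the micro is expected_length bytes long: a 0x00 start byte,
#the packed plot/headroom values (byte widths and types given by `spaces` above),
#and a 0xff closing byte (which unpacks to -1 as a signed byte in messageRead below).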
def messageRead(buff,exp):
first = struct.unpack('b',buff[0:1])[0]
last = struct.unpack('b',buff[exp-1:exp])[0]
if first == 0 and last == -1:
return True
else:
return False
# if not version3:
# newb = buff
# buff = [ord(q) for q in newb] #converts yucky binary/string abominations of python 2.* into list of ascii numbers essentially...not issue in 3
# mcuMessage=list(range(expected))
# if buff[0] == 0 and buff[expected-1] == 255: #likely correct message
# errorF = False
# mcuMessage[0] = buff[0]
# mcuMessage[expected-1] = buff[expected-1]
# for i in range(1,expected-1):
# bufI = buff[i]
# if bufI ==0 or bufI == 255:
# errorF = True;
# mcuMessage[i] = bufI
# if not errorF:
# return mcuMessage
# return None
@app.route('/')
def index():
global thread
print ("A user connected")
if thread is None:
thread = Thread(target=serialThread)
thread.daemon = True
thread.start()
return render_template('index.html')
@socketio.on('connect')
def test_connect():
print ('hey someone connected')
ports = serial_ports() #generate list of currently connected serial ports
print (ports)
newb=[]
for p in ports:
newb.append({"comName": p})
print (json.dumps(newb))
#emit('serial list display', {'data': ports}) #emit socket with serial ports in it
emit('serial list display', newb) #emit socket with serial ports in it
#emit('my response', {'data': 'Connected'})
@socketio.on('disconnect')
def test_disconnect():
global csv_yn
global csvLock
emit('serial disconnect request',broadcast=True)
csv_yn = 0
#if current is not None and archive is not None:
csvLock.acquire()
try:
current.close()
archive.close()
except NameError:
pass #if didn't exist yet, don't try...
csvLock.release()
print('Client disconnected. Hopefully that was for the best.')
writeUpdates('~',0)#for non-autoreset devices must tell it to enter child state again
def writeUpdates(tag,val):
global serialPort
global serialLock
string_to_write = tag+' %0.2f\n' %(float(val))
print(string_to_write)
if serialConnected:
serialLock.acquire() #claim serial resource
if version3:
b = bytes(string_to_write,'UTF-8')
print(b)
serialPort.write(bytes(string_to_write,'UTF-8'))
else:
serialPort.write(string_to_write.encode('utf-8'))
#serialPort.write(string_to_write)
serialLock.release() #release serial resource back out into big scary world
else:
print ("Change in %s to value %s not written since no live serial comm exists yet" %(tag,val))
# Specs
@socketio.on('serial select')
def action(port):
global serialselection
print ('serial port changed to %s' %(port))
serialselection = port
@socketio.on('baud select')
def action(baud):
global baudselection
print ('baud changed to %s' %(baud))
baudselection = baud
@socketio.on('csv state')
def csver(csv_val):
global csv_default
global csv_recent
global current
global archive
global csv_yn
global csvLock
global csv_st
if int(csv_val) == 0:
print('closing csv files')
csv_yn = 0
csvLock.acquire()
try:
current.close()
archive.close()
except NameError:
pass #did not exist yet...totes fine
csvLock.release()
else: #do other thing
print('Trying opening csv files up!')
csv_st = time.time()
#current = open('./csv_files/current.csv',"w",encoding='utf8',newline='')
#archive = open('./csv_files/'+str(int(time.time()))+'.csv',"w",encoding='utf8',newline='')
try:
current = open('./csv_files/current.csv',"w",**kwargs)
archive = open('./csv_files/'+str(int(time.time()))+'.csv',"w",**kwargs)
csv_default = csv.writer(archive)
csv_recent = csv.writer(current)
csvLock.acquire()
csv_default.writerow(['Time']+params_and_values)
csv_recent.writerow(['Time']+params_and_values)
csvLock.release()
csv_yn = 1
print ('CSV File Open successful')
except:
print("Failed to open CSV Files")
@socketio.on('serial connect request')
def connection(already_built):
global serialConnected
global serialPort
global serialLock
global alternate
global isSetup
already_built = eval(str(already_built))
print("state of gui")
print(already_built)
isSetup = already_built['state'] #use this
print(isSetup)
alternate = 0
print ('Trying to connect to: ' + serialselection + ' ' + str(baudselection))
print (serialLock)
print (serialConnected)
try:
serialLock.acquire()
print ("Lock acquired")
serialPort = serial.Serial(serialselection, int(baudselection),timeout=4)
print ('SerialPort')
print ('Connected to ' + str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
emit('serial connected', broadcast=True) #tells page to indicate connection (in button)
serialPort.flushInput()
#serialPort.flushOutput()
serialLock.release()
serialConnected = True #set global flag
except:
print ("Failed to connect with "+str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
@socketio.on('serial disconnect request')
def discon():
global serialConnected
global serialLock
global serialPort
print ('Trying to disconnect...')
serialLock.acquire()
serialPort.close()
serialLock.release()
serialConnected = False
emit('serial disconnected',broadcast=True)
print ('Disconnected...good riddance' )
@socketio.on("disconnected")
def ending_it():
print ("We're done")
@socketio.on('change')
def action(data):
global system_parameters
data = eval(str(data))
system_parameters[data['id']][0]=float(data['val'])
writeUpdates(system_parameters[data['id']][1],system_parameters[data['id']][0])
@socketio.on('all set from gui')
def action():
global allGoodFromGUI
allGoodFromGUI = True
print("we are done from GUI Side")
inform_dev()
def inform_dev():
global serialPort
global serialLock
string_to_write = "SET\n"
if serialConnected:
serialLock.acquire() #claim serial resource
if version3:
serialPort.write(bytes(string_to_write,'UTF-8'))
else:
print(string_to_write)
serialPort.write(string_to_write)
serialPort.flushInput()
serialLock.release() #release serial resource back out into big scary world
else:
print ("can't inform device since it isn't connected...what does this even mean")
@socketio.on('alternate state')
def action(alt):
alt = int(alt)
global alternate
global alt_data
if alt == 1:
print ('%s changed to alternating at +/- %0.2f ' %(alt_data['param'],float(system_parameters[alt_data['param']][0])))
alt_data['timer'] = time.time()
alt_data['state'] = 1.0
alternate = 1
else:
print ('%s changed to fixed at %0.2f' %(alt_data['param'],float(system_parameters[alt_data['param']][0])))
alternate = 0
if __name__ == '__main__':
socketio.run(app, port=3000, debug=True)
``` |
{
"source": "jodalyst/riscemu",
"score": 2
} |
#### File: riscemu/instructions/RV32I.py
```python
from .InstructionSet import *
from ..helpers import int_from_bytes, int_to_bytes, to_unsigned, to_signed
from ..colors import FMT_DEBUG, FMT_NONE
from ..debug import launch_debug_session
from ..Exceptions import LaunchDebuggerException
from ..Syscall import Syscall
from ..Executable import LoadedInstruction
class RV32I(InstructionSet):
"""
The RV32I instruction set. Some instructions are missing, such as
fence, fence.i, rdcycle, rdcycleh, rdtime, rdtimeh, rdinstret, rdinstreth
All atomic read/writes are also not implemented yet
See https://maxvytech.com/images/RV32I-11-2018.pdf for a more detailed overview
"""
def instruction_lb(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.regs.set(rd, int_from_bytes(self.mmu.read(addr, 1)))
def instruction_lh(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.regs.set(rd, int_from_bytes(self.mmu.read(addr, 2)))
def instruction_lw(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.regs.set(rd, int_from_bytes(self.mmu.read(addr, 4)))
def instruction_lbu(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.regs.set(rd, int_from_bytes(self.mmu.read(addr, 1), unsigned=True))
def instruction_lhu(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.regs.set(rd, int_from_bytes(self.mmu.read(addr, 2), unsigned=True))
def instruction_sb(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.mmu.write(addr, 1, int_to_bytes(self.regs.get(rd), 1))
def instruction_sh(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.mmu.write(addr, 2, int_to_bytes(self.regs.get(rd), 2))
def instruction_sw(self, ins: 'LoadedInstruction'):
rd, addr = self.parse_mem_ins(ins)
self.mmu.write(addr, 4, int_to_bytes(self.regs.get(rd), 4))
def instruction_sll(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 3)
dst = ins.get_reg(0)
src1 = ins.get_reg(1)
src2 = ins.get_reg(2)
self.regs.set(
dst,
to_signed(to_unsigned(self.regs.get(src1)) << (self.regs.get(src2) & 0b11111))
)
def instruction_slli(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 3)
dst = ins.get_reg(0)
src1 = ins.get_reg(1)
imm = ins.get_imm(2)
self.regs.set(
dst,
to_signed(to_unsigned(self.regs.get(src1)) << (imm & 0b11111))
)
def instruction_srl(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 3)
dst = ins.get_reg(0)
src1 = ins.get_reg(1)
src2 = ins.get_reg(2)
self.regs.set(
dst,
to_signed(to_unsigned(self.regs.get(src1)) >> (self.regs.get(src2) & 0b11111))
)
def instruction_srli(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 3)
dst = ins.get_reg(0)
src1 = ins.get_reg(1)
imm = ins.get_imm(2)
self.regs.set(
dst,
to_signed(to_unsigned(self.regs.get(src1)) >> (imm & 0b11111))
)
def instruction_sra(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 3)
dst = ins.get_reg(0)
src1 = ins.get_reg(1)
src2 = ins.get_reg(2)
self.regs.set(
dst,
self.regs.get(src1) >> (self.regs.get(src2) & 0b11111)
)
def instruction_srai(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 3)
dst = ins.get_reg(0)
src1 = ins.get_reg(1)
imm = ins.get_imm(2)
self.regs.set(
dst,
self.regs.get(src1) >> (imm & 0b11111)
)
def instruction_add(self, ins: 'LoadedInstruction'):
dst = ""
if self.cpu.conf.add_accept_imm:
try:
dst, rs1, rs2 = self.parse_rd_rs_imm(ins)
except:
pass
if not dst:
dst, rs1, rs2 = self.parse_rd_rs_rs(ins)
self.regs.set(
dst,
rs1 + rs2
)
def instruction_addi(self, ins: 'LoadedInstruction'):
dst, rs1, imm = self.parse_rd_rs_imm(ins)
self.regs.set(
dst,
rs1 + imm
)
def instruction_sub(self, ins: 'LoadedInstruction'):
dst, rs1, rs2 = self.parse_rd_rs_rs(ins)
self.regs.set(
dst,
rs1 - rs2
)
def instruction_lui(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
reg = ins.get_reg(0)
imm = ins.get_imm(1)
self.regs.set(reg, imm << 12)
def instruction_auipc(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
reg = ins.get_reg(0)
imm = to_unsigned(ins.get_imm(1))
self.regs.set(reg, self.pc + (imm << 12))
def instruction_xor(self, ins: 'LoadedInstruction'):
rd, rs1, rs2 = self.parse_rd_rs_rs(ins)
self.regs.set(
rd,
rs1 ^ rs2
)
def instruction_xori(self, ins: 'LoadedInstruction'):
rd, rs1, imm = self.parse_rd_rs_imm(ins)
self.regs.set(
rd,
rs1 ^ imm
)
def instruction_or(self, ins: 'LoadedInstruction'):
rd, rs1, rs2 = self.parse_rd_rs_rs(ins)
self.regs.set(
rd,
rs1 | rs2
)
def instruction_ori(self, ins: 'LoadedInstruction'):
rd, rs1, imm = self.parse_rd_rs_imm(ins)
self.regs.set(
rd,
rs1 | imm
)
def instruction_and(self, ins: 'LoadedInstruction'):
rd, rs1, rs2 = self.parse_rd_rs_rs(ins)
self.regs.set(
rd,
rs1 & rs2
)
def instruction_andi(self, ins: 'LoadedInstruction'):
rd, rs1, imm = self.parse_rd_rs_imm(ins)
self.regs.set(
rd,
rs1 & imm
)
def instruction_slt(self, ins: 'LoadedInstruction'):
rd, rs1, rs2 = self.parse_rd_rs_rs(ins)
self.regs.set(
rd,
int(rs1 < rs2)
)
def instruction_slti(self, ins: 'LoadedInstruction'):
rd, rs1, imm = self.parse_rd_rs_imm(ins)
self.regs.set(
rd,
int(rs1 < imm)
)
def instruction_sltu(self, ins: 'LoadedInstruction'):
dst, rs1, rs2 = self.parse_rd_rs_rs(ins, signed=False)
self.regs.set(
dst,
int(rs1 < rs2)
)
def instruction_sltiu(self, ins: 'LoadedInstruction'):
dst, rs1, imm = self.parse_rd_rs_imm(ins, signed=False)
self.regs.set(
dst,
int(rs1 < imm)
)
def instruction_beq(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins)
if rs1 == rs2:
self.pc = dst
def instruction_bne(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins)
if rs1 != rs2:
self.pc = dst
def instruction_blt(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins)
if rs1 < rs2:
self.pc = dst
#jds pseudo-op
def instruction_bgt(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins)
if rs1 > rs2:
self.pc = dst
def instruction_bge(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins)
if rs1 >= rs2:
self.pc = dst
#jds pseudo-op
def instruction_ble(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins)
if rs1 <= rs2:
self.pc = dst
def instruction_bltu(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins, signed=False)
if rs1 < rs2:
self.pc = dst
#jds pseudo-op
def instruction_bgtu(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins, signed=False)
if rs1 > rs2:
self.pc = dst
def instruction_bgeu(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins, signed=False)
if rs1 >= rs2:
self.pc = dst
#jds pseudo-op
def instruction_bleu(self, ins: 'LoadedInstruction'):
rs1, rs2, dst = self.parse_rs_rs_imm(ins, signed=False)
if rs1 <= rs2:
self.pc = dst
# zero pseudo-ops:
#beqz, bnez, bltz, bgez, bgtz, blez
def instruction_beqz(self, ins: 'LoadedInstruction'):
#rs1, dst = self.parse_rs_rs_imm(ins)
ASSERT_LEN(ins.args, 2)
rs1 = ins.get_reg(0)
dst = ins.get_imm(1)
if self.regs.get(rs1) == 0:
self.pc = dst
def instruction_bnez(self, ins: 'LoadedInstruction'):
#rs1, dst = self.parse_rs_rs_imm(ins)
ASSERT_LEN(ins.args, 2)
rs1 = ins.get_reg(0)
dst = ins.get_imm(1)
if self.regs.get(rs1) != 0:
self.pc = dst
def instruction_bltz(self, ins: 'LoadedInstruction'):
#rs1, dst = self.parse_rs_rs_imm(ins)
ASSERT_LEN(ins.args, 2)
rs1 = ins.get_reg(0)
dst = ins.get_imm(1)
if self.regs.get(rs1) < 0:
self.pc = dst
def instruction_bgtz(self, ins: 'LoadedInstruction'):
#rs1, dst = self.parse_rs_rs_imm(ins)
ASSERT_LEN(ins.args, 2)
rs1 = ins.get_reg(0)
dst = ins.get_imm(1)
if self.regs.get(rs1) > 0:
self.pc = dst
def instruction_bgez(self, ins: 'LoadedInstruction'):
#rs1, dst = self.parse_rs_rs_imm(ins)
ASSERT_LEN(ins.args, 2)
rs1 = ins.get_reg(0)
dst = ins.get_imm(1)
if self.regs.get(rs1) >= 0:
self.pc = dst
def instruction_blez(self, ins: 'LoadedInstruction'):
#rs1, dst = self.parse_rs_rs_imm(ins)
ASSERT_LEN(ins.args, 2)
rs1 = ins.get_reg(0)
dst = ins.get_imm(1)
if self.regs.get(rs1) <= 0:
self.pc = dst
# technically deprecated
def instruction_j(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 1)
addr = ins.get_imm(0)
self.pc = addr
def instruction_jal(self, ins: 'LoadedInstruction'):
reg = 'ra' # default register is ra
if len(ins.args) == 1:
addr = ins.get_imm(0)
else:
ASSERT_LEN(ins.args, 2)
reg = ins.get_reg(0)
addr = ins.get_imm(1)
self.regs.set(reg, self.pc)
self.pc = addr
#jds pseudo-op
def instruction_call(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 1)
addr = ins.get_imm(0)
self.regs.set('ra', self.pc)
self.pc = addr
#jds pseudo-op
def instruction_jr(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 1)
reg = ins.get_reg(0)
val = self.regs.get(reg)
thing = val&(0xFFFFFFFE)
#self.regs.set(reg, self.pc)
self.pc = thing
#modified by jds
def instruction_jalr(self, ins: 'LoadedInstruction'):
reg = 'ra' # default register is ra
if len(ins.args) == 1:
addr = ins.get_reg(0)
thing = self.regs.get(addr)
addr = thing #&(0xFFFFFFFE)
else:
ASSERT_LEN(ins.args, 2)
reg, addr = self.parse_mem_ins(ins)
#reg = ins.get_reg(0)
#addr = ins.get_imm(1)
#thing = self.regs.get(addr)
#addr = addr&(0xFFFFFFFE)
#reg = ins.get_reg(0)
#addr = ins.get_imm(1)
self.regs.set(reg, self.pc)
self.pc = addr
'''
def instruction_jalr(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
reg = ins.get_reg(0)
addr = ins.get_imm(1)
self.regs.set(reg, self.pc)
self.pc = addr
'''
def instruction_ret(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 0)
self.pc = self.regs.get('ra')
def instruction_ecall(self, ins: 'LoadedInstruction'):
self.instruction_scall(ins)
def instruction_ebreak(self, ins: 'LoadedInstruction'):
self.instruction_sbreak(ins)
def instruction_scall(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 0)
syscall = Syscall(self.regs.get('a7'), self.cpu, self.mmu)
self.cpu.syscall_int.handle_syscall(syscall)
def instruction_sbreak(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 0)
if self.cpu.active_debug:
print(FMT_DEBUG + "Debug instruction encountered at 0x{:08X}".format(self.pc - 1) + FMT_NONE)
raise LaunchDebuggerException()
launch_debug_session(
self.cpu,
self.mmu,
self.regs,
"Debug instruction encountered at 0x{:08X}".format(self.pc - 1)
)
def instruction_nop(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 0)
pass
def instruction_li(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
reg = ins.get_reg(0)
immediate = ins.get_imm(1)
self.regs.set(reg, immediate)
def instruction_la(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
reg = ins.get_reg(0)
immediate = ins.get_imm(1)
self.regs.set(reg, immediate)
def instruction_mv(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
rd, rs = ins.get_reg(0), ins.get_reg(1)
self.regs.set(rd, self.regs.get(rs))
def instruction_not(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
rd, rs = ins.get_reg(0), ins.get_reg(1)
val = (-1)^self.regs.get(rs)
self.regs.set(rd, val)
def instruction_neg(self, ins: 'LoadedInstruction'):
ASSERT_LEN(ins.args, 2)
rd, rs = ins.get_reg(0), ins.get_reg(1)
val = 0-self.regs.get(rs)
self.regs.set(rd, val)
"""Need to add:
* not, neg, call?
* jr, bgt, ble, bgtu, bleu
* beqz, bnez, bltz, bgez, bgtz, blez
"""
```
#### File: riscemu/priv/ImageLoader.py
```python
import json
from functools import lru_cache
from typing import Dict, List, Optional, TYPE_CHECKING
from .ElfLoader import ElfInstruction, ElfLoadedMemorySection, InstructionAccessFault, InstructionAddressMisalignedTrap
from .PrivMMU import PrivMMU
from ..Config import RunConfig
from ..Executable import LoadedMemorySection, MemoryFlags
from ..IO.IOModule import IOModule
from ..colors import FMT_ERROR, FMT_NONE, FMT_MEM
from ..decoder import decode
if TYPE_CHECKING:
pass
class MemoryImageMMU(PrivMMU):
io: List[IOModule]
data: bytearray
io_start: int
debug_info: Dict[str, Dict[str, Dict[str, str]]]
def __init__(self, file_name: str, io_start: int = 0xFF0000):
super(MemoryImageMMU, self).__init__(conf=RunConfig())
with open(file_name, 'rb') as memf:
data = memf.read()
with open(file_name + '.dbg', 'r') as dbgf:
debug_info: Dict = json.load(dbgf)
self.data = bytearray(data)
# TODO: super wasteful memory allocation happening here
if len(data) < io_start:
self.data += bytearray(io_start - len(data))
self.debug_info = debug_info
self.io_start = io_start
self.io = list()
def get_entrypoint(self):
try:
start = self.debug_info['symbols']['kernel'].get('_start', None)
if start is not None:
return start
return self.debug_info['symbols']['kernel'].get('_ftext')
except KeyError:
print(FMT_ERROR + '[MMU] cannot find kernel entry in debug information! Falling back to 0x100' + FMT_NONE)
return 0x100
@lru_cache(maxsize=2048)
def read_ins(self, addr: int) -> ElfInstruction:
if addr >= self.io_start:
raise InstructionAccessFault(addr)
if addr % 4 != 0:
raise InstructionAddressMisalignedTrap(addr)
return ElfInstruction(*decode(self.data[addr:addr + 4]))
def read(self, addr: int, size: int) -> bytearray:
if addr < 0x100:
pc = self.cpu.pc
text_sec = self.get_sec_containing(pc)
print(FMT_ERROR + "[MMU] possible null dereference (read {:x}) from (pc={:x},sec={},rel={:x})".format(
addr, pc, text_sec.owner + ':' + text_sec.name, pc - text_sec.base
) + FMT_NONE)
if addr >= self.io_start:
return self.io_at(addr).read(addr, size)
return self.data[addr: addr + size]
def write(self, addr: int, size: int, data):
if addr < 0x100:
pc = self.cpu.pc
text_sec = self.get_sec_containing(pc)
print(FMT_ERROR + "[MMU] possible null dereference (write {:x}) from (pc={:x},sec={},rel={:x})".format(
addr, pc, text_sec.owner + ':' + text_sec.name, pc - text_sec.base
) + FMT_NONE)
if addr >= self.io_start:
return self.io_at(addr).write(addr, data, size)
self.data[addr:addr + size] = data[0:size]
def io_at(self, addr) -> IOModule:
for mod in self.io:
if mod.contains(addr):
return mod
raise InstructionAccessFault(addr)
def add_io(self, io: IOModule):
self.io.append(io)
def __repr__(self):
return "MemoryImageMMU()"
@lru_cache(maxsize=32)
def get_sec_containing(self, addr: int) -> Optional[LoadedMemorySection]:
next_sec = len(self.data)
for sec_addr, name in reversed(self.debug_info['sections'].items()):
if addr >= int(sec_addr):
owner, name = name.split(':')
base = int(sec_addr)
size = next_sec - base
flags = MemoryFlags('.text' in name, '.text' in name)
return ElfLoadedMemorySection(name, base, size, self.data[base:next_sec], flags, owner)
else:
next_sec = int(sec_addr)
def translate_address(self, addr: int):
sec = self.get_sec_containing(addr)
if sec.name == '.empty':
return "<empty>"
symbs = self.debug_info['symbols'][sec.owner]
for sym, val in reversed(symbs.items()):
if addr >= val:
return "{}{:+x} ({}:{})".format(sym, addr - val, sec.owner, sec.name)
return "{}:{}{:+x}".format(sec.owner, sec.name, addr - sec.base)
def symbol(self, symb: str):
print(FMT_MEM + "Looking up symbol {}".format(symb))
for owner, symbs in self.debug_info['symbols'].items():
if symb in symbs:
print(" Hit in {}: {} = {}".format(owner, symb, symbs[symb]))
print(FMT_NONE, end="")
``` |
{
"source": "JodanGalas/Projeto_Integrador",
"score": 3
} |
#### File: src/controllers/usuario_controller.py
```python
from flask import jsonify, request
from flask_pymongo import ObjectId
from bson.json_util import dumps
from app import app, mongo
from email_controller import Cadastro
import pandas as pd
import random
import string
@app.route('/create/user', methods=['POST'])
def create():
if 'arq' in request.files:
arquivo = request.files['arq']
df = pd.read_csv(arquivo)
data = df.to_dict(orient="records")
cod = 6
df['id'] = 0
df['senha'] = 0
df['status'] = 0
df['cod'] = 0
df['atividade'] = 1
df.reset_index(inplace=True)
x=0
df = pd.DataFrame(data)
while x < (len(data)):
data[x]['id'] = mongo.db.usuarios.count()
data[x]['senha'] = ''.join(random.choice(string.digits) for x in range(cod))
data[x]['status'] = 0
data[x]['cod'] = 0
data[x]['atividade'] = 0
email = data[x]['email']
senha = data[x]['senha']
print(email)
print(senha)
Cadastro.sender_email(email, senha)
mongo.db.usuarios.insert_one(data[x])
mongo.db.dadosbkp.insert_one(data[x])
x+=1
mongo.db.dadosbkp.update_many(
{},
{ "$unset": { 'nome': "", 'telefone': "",'endereco': "", 'senha': "", 'status': "", 'cod': "",
'atividade': "", 'cpf': "" }}
)
return 'Arquivo enviado com sucesso!'
# list all users
@app.route('/listar/usuarios', methods = ["GET"])
def users():
users = []
for usuario in mongo.db.usuarios.find():
if usuario['cod'] == 0:
users.append({
'_id' : str(ObjectId(usuario['_id'])),
'nome' : usuario['nome'],
'cpf' : usuario['cpf'],
'email' : usuario['email'],
'telefone' : usuario['telefone'],
'endereco' : usuario['endereco'],
'id': usuario['id'],
'senha' : usuario['senha'],
'status' : usuario['status'],
'cod' : usuario['cod'],
'atividade' : usuario['atividade']})
return jsonify(users)
# total number of users
@app.route('/quantos/usuarios', methods = ["GET"])
def quantosUsers():
users = mongo.db.usuarios.find({"cod": 0}).count()
return jsonify(users)
# list a user by id
@app.route('/listar/usuario/<id>', methods = ["GET"])
def user(id):
usuario = mongo.db.usuarios.find_one({'id':int(id)})
return ({
'_id' : str(ObjectId(usuario['_id'])),
'nome' : usuario['nome'],
'cpf' : usuario['cpf'],
'email' : usuario['email'],
'telefone' : usuario['telefone'],
'endereco' : usuario['endereco'],
'id': usuario['id'],
'senha' : usuario['senha'],
'status' : usuario['status'],
'cod' : usuario['cod'],
'atividade' : usuario['atividade']})
# update a user
@app.route('/atualizar/usuario/<id>', methods=["PUT"])
def update_user(id):
_json = request.json
_nome = _json['nome']
_cpf = _json['cpf']
_email = _json['email']
_telefone = _json['telefone']
_endereco = _json['endereco']
find_user =mongo.db.usuarios.find_one_and_update(
{'id':int(id)}, {"$set":{
'nome' : _nome,
'cpf': _cpf,
'email': _email,
'telefone': _telefone,
'endereco': _endereco,}})
resp = dumps(find_user)
print(resp)
return resp
# delete a user
@app.route('/deletar/usuario/<id>', methods=["PUT"])
def delete_user(id):
mongo.db.usuarios.find_one_and_delete({'id':int(id)})
resp = jsonify("usuario deletado ")
return resp
``` |
{
"source": "JodanGalas/PyQt5",
"score": 3
} |
#### File: PyQt5/Exemplo 02/Controle.py
```python
from PyQt5 import uic, QtWidgets
def listar_dados():
dado_lido = lista.lineEdit.text()
lista.listWidget.addItem(dado_lido)
lista.lineEdit.setText("")
def deletar():
lista.listWidget.clear()
app = QtWidgets.QApplication([])
lista = uic.loadUi("Pratica.ui") #chama o arquivo pratica
lista.pushButton.clicked.connect(listar_dados)
lista.pushButton_2.clicked.connect(deletar)
lista.show()
app.exec()
``` |
{
"source": "jodavaho/highfleet-ship-opt",
"score": 3
} |
#### File: py/hf/opt.py
```python
import hfopt_lib
def version():
return hfopt_lib.version()
def is_module(name):
return hfopt_lib.is_module(name)
def module_names():
return hfopt_lib.get_module_names()
def example():
d= Design()
print("Create something like a Lightning")
# Set a module requirement for 2 ak100s and some safety equipment
d.require("100mm",2)
d.require("pod",2)
d.require("fss",2)
# and landing gear b/c we're not a savage
d.require("chassis_m",4)
# and minimum range:
d.set_min_range(800)
# and make it fast
d.set_min_spd(650)
d.dump_modules()
d.dump_constraints()
d.fill_constraints()
# Now we have a filled module list
print("============================")
print("Here's the list of modules: ")
d.dump_modules()
class Module:
def __init__(self, name=None):
# Default every attribute, then overwrite from the module stats when a module name is given.
self.cost=0
self.name=""
self.energy=0
self.ammo=0
self.crew=0
self.weight=0
self.thrust=0
if name is None:
return
attrdict = hfopt_lib.module_stats(name)
if (attrdict is None):
return
self.cost=attrdict["cost"]
self.name=attrdict["name"]
self.energy=attrdict["energy"]
self.ammo=attrdict["ammo"]
self.crew=attrdict["crew"]
self.weight=attrdict["weight"]
self.thrust=attrdict["thrust"]
class Design:
def __init__(self):
self.min_range =0
self.min_twr =1
self.min_spd =0
self.modules ={}
def require(self, name, count):
if is_module(name):
self.modules[name]=count
def dump_modules(self):
for mc in self.modules.keys():
if self.modules[mc] > 0:
print("{}={}".format(mc,self.modules[mc]))
def dump_constraints(self):
print('min_range={}'.format(self.min_range))
print('min_spd={}'.format(self.min_spd))
print('min_twr={}'.format(self.min_twr))
def set_min_range(self,r):
self.min_range = r
def set_min_spd(self,s):
"""
This will add the constraint that speed > s, which will impact min_twr.
The solver will sort it out.
"""
self.min_spd = s
def set_min_twr(self,t):
"""
This will add the constraint that T/W > T, which may also affect min_spd
The solver will sort it out.
"""
self.min_twr = t
def fill_constraints(self):
"""
This is the main method, which calls out to the C libraries to do
optimization / fill-in of the missing modules. If you have not specified
any required modules (set_req) or constraints (set_min_X), then it will
return a trivial empty solution.
"""
self.modules = hfopt_lib.solve_fill(
self.modules,
self.min_range,
self.min_spd,
self.min_twr)
``` |
{
"source": "joddiy/LipNet",
"score": 3
} |
#### File: lipnet/lipreading/helpers.py
```python
def text_to_labels(text, align_map):
ret = []
for char in text:
if char in align_map:
ret.append(align_map[char])
elif char == ' ':
ret.append(0)
return ret
def labels_to_text(labels, align_map):
reverse_map = {v: k for k, v in align_map.items()}
text = ''
for c in labels:
if c in reverse_map:
text += reverse_map[c]
elif c == 0:
text += ' '
return text
``` |
{
"source": "joddiy/singa-auto",
"score": 2
} |
#### File: models/image_classification/test.py
```python
import sys, os
import json
from singa import singa_wrap as singa
from singa import opt
from singa import device
from singa import tensor
from singa import sonnx
from singa import layer
from singa import autograd
import numpy as np
import time
import argparse
from PIL import Image
import onnx
import logging
from tqdm import tqdm
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
sys.path.append(os.path.dirname(__file__) + '/../../cnn')
sys.path.append(os.path.dirname(__file__) + '/..')
from utils import download_model
# Data Augmentation
def augmentation(x, batch_size):
xpad = np.pad(x, [[0, 0], [0, 0], [4, 4], [4, 4]], 'symmetric')
for data_num in range(0, batch_size):
offset = np.random.randint(8, size=2)
x[data_num, :, :, :] = xpad[data_num, :,
offset[0]:offset[0] + x.shape[2],
offset[1]:offset[1] + x.shape[2]]
if_flip = np.random.randint(2)
if (if_flip):
x[data_num, :, :, :] = x[data_num, :, :, ::-1]
return x
# Calculate Accuracy
def accuracy(pred, target):
# y is network output to be compared with ground truth (int)
y = np.argmax(pred, axis=1)
a = y == target
correct = np.array(a, "int").sum()
# print(correct)
return correct
# Data partition according to the rank
def partition(global_rank, world_size, train_x, train_y, val_x, val_y):
# Partition training data
data_per_rank = train_x.shape[0] // world_size
idx_start = global_rank * data_per_rank
idx_end = (global_rank + 1) * data_per_rank
train_x = train_x[idx_start:idx_end]
train_y = train_y[idx_start:idx_end]
# Partition evaluation data
data_per_rank = val_x.shape[0] // world_size
idx_start = global_rank * data_per_rank
idx_end = (global_rank + 1) * data_per_rank
val_x = val_x[idx_start:idx_end]
val_y = val_y[idx_start:idx_end]
return train_x, train_y, val_x, val_y
# Function to all reduce NUMPY Accuracy and Loss from Multiple Devices
def reduce_variable(variable, dist_opt, reducer):
reducer.copy_from_numpy(variable)
dist_opt.all_reduce(reducer.data)
dist_opt.wait()
output = tensor.to_numpy(reducer)
return output
def resize_dataset(x, image_size):
num_data = x.shape[0]
dim = x.shape[1]
X = np.zeros(shape=(num_data, dim, image_size, image_size),
dtype=np.float32)
for n in range(0, num_data):
for d in range(0, dim):
X[n, d, :, :] = np.array(Image.fromarray(x[n, d, :, :]).resize(
(image_size, image_size), Image.BILINEAR),
dtype=np.float32)
return X
class MyModel(sonnx.SONNXModel):
def __init__(self, onnx_model, num_classes=10, num_channels=3):
super(MyModel, self).__init__(onnx_model)
self.num_classes = num_classes
self.input_size = 224
self.dimension = 4
self.num_channels = num_channels
self.num_classes = num_classes
self.linear = layer.Linear(512, num_classes)
def forward(self, *x):
# if you change to other models, please update the output name here
y = super(MyModel, self).forward(*x, aux_output=['flatten_170'])[1]
y = self.linear(y)
return y
def train_one_batch(self, x, y, dist_option, spars):
out = self.forward(x)
loss = autograd.softmax_cross_entropy(out, y)
if dist_option == 'fp32':
self.optimizer.backward_and_update(loss)
elif dist_option == 'fp16':
self.optimizer.backward_and_update_half(loss)
elif dist_option == 'partialUpdate':
self.optimizer.backward_and_partial_update(loss)
elif dist_option == 'sparseTopK':
self.optimizer.backward_and_sparse_update(loss,
topK=True,
spars=spars)
elif dist_option == 'sparseThreshold':
self.optimizer.backward_and_sparse_update(loss,
topK=False,
spars=spars)
return out, loss
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def run(global_rank,
world_size,
local_rank,
max_epoch,
batch_size,
model_config,
data,
sgd,
graph,
verbosity,
dist_option='fp32',
spars=None):
dev = device.create_cuda_gpu_on(local_rank)
dev.SetRandSeed(0)
np.random.seed(0)
if data == 'cifar10':
from data import cifar10
train_x, train_y, val_x, val_y = cifar10.load()
elif data == 'cifar100':
from data import cifar100
train_x, train_y, val_x, val_y = cifar100.load()
num_channels = train_x.shape[1]
image_size = train_x.shape[2]
data_size = np.prod(train_x.shape[1:train_x.ndim]).item()
num_classes = (np.max(train_y) + 1).item()
# read and make onnx model
download_model(model_config['url'])
onnx_model = onnx.load(os.path.join('/tmp', model_config['path']))
model = MyModel(onnx_model,
num_channels=num_channels,
num_classes=num_classes)
# For distributed training, sequential gives better performance
if hasattr(sgd, "communicator"):
DIST = True
sequential = True
else:
DIST = False
sequential = False
if DIST:
train_x, train_y, val_x, val_y = partition(global_rank, world_size,
train_x, train_y, val_x,
val_y)
'''
# check dataset shape correctness
if global_rank == 0:
print("Check the shape of dataset:")
print(train_x.shape)
print(train_y.shape)
'''
if model.dimension == 4:
tx = tensor.Tensor(
(batch_size, num_channels, model.input_size, model.input_size), dev,
tensor.float32)
elif model.dimension == 2:
tx = tensor.Tensor((batch_size, data_size), dev, tensor.float32)
train_x = np.reshape(train_x, (train_x.shape[0], -1))
val_x = np.reshape(val_x, (val_x.shape[0], -1))
ty = tensor.Tensor((batch_size,), dev, tensor.int32)
num_train_batch = train_x.shape[0] // batch_size
num_val_batch = val_x.shape[0] // batch_size
idx = np.arange(train_x.shape[0], dtype=np.int32)
# attached model to graph
model.set_optimizer(sgd)
model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
dev.SetVerbosity(verbosity)
# Training and Evaluation Loop
for epoch in range(max_epoch):
start_time = time.time()
np.random.shuffle(idx)
if global_rank == 0:
print('Starting Epoch %d:' % (epoch))
# Training Phase
train_correct = np.zeros(shape=[1], dtype=np.float32)
test_correct = np.zeros(shape=[1], dtype=np.float32)
train_loss = np.zeros(shape=[1], dtype=np.float32)
model.train()
for b in tqdm(range(num_train_batch)):
# Generate the patch data in this iteration
x = train_x[idx[b * batch_size:(b + 1) * batch_size]]
if model.dimension == 4:
x = augmentation(x, batch_size)
if (image_size != model.input_size):
x = resize_dataset(x, model.input_size)
y = train_y[idx[b * batch_size:(b + 1) * batch_size]]
# Copy the patch data into input tensors
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
# Train the model
out, loss = model(tx, ty, dist_option, spars)
train_correct += accuracy(tensor.to_numpy(out), y)
train_loss += tensor.to_numpy(loss)[0]
if DIST:
# Reduce the Evaluation Accuracy and Loss from Multiple Devices
reducer = tensor.Tensor((1,), dev, tensor.float32)
train_correct = reduce_variable(train_correct, sgd, reducer)
train_loss = reduce_variable(train_loss, sgd, reducer)
if global_rank == 0:
print('Training loss = %f, training accuracy = %f' %
(train_loss, train_correct /
(num_train_batch * batch_size * world_size)),
flush=True)
# Evaluation Phase
model.eval()
for b in tqdm(range(num_val_batch)):
x = val_x[b * batch_size:(b + 1) * batch_size]
if model.dimension == 4:
if (image_size != model.input_size):
x = resize_dataset(x, model.input_size)
y = val_y[b * batch_size:(b + 1) * batch_size]
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
out_test = model(tx)
test_correct += accuracy(tensor.to_numpy(out_test), y)
if DIST:
# Reduce the Evaluation Accuracy from Multiple Devices
test_correct = reduce_variable(test_correct, sgd, reducer)
# Output the Evaluation Accuracy
if global_rank == 0:
print('Evaluation accuracy = %f, Elapsed Time = %fs' %
(test_correct / (num_val_batch * batch_size * world_size),
time.time() - start_time),
flush=True)
dev.PrintTimeProfiling()
def loss(out, y):
return autograd.softmax_cross_entropy(out, y)
if __name__ == '__main__':
with open(os.path.join(os.path.dirname(__file__),
'model.json')) as json_file:
model_config = json.load(json_file)
# use argparse to get command config: max_epoch, model, data, etc. for single gpu training
parser = argparse.ArgumentParser(
description='Training using the autograd and graph.')
parser.add_argument('--model',
choices=list(model_config.keys()),
help='please refer to the models.json for more details',
default='resnet18v1')
parser.add_argument('--data',
choices=['cifar10', 'cifar100'],
default='cifar10')
parser.add_argument('--epoch',
'--max-epoch',
default=10,
type=int,
help='maximum epochs',
dest='max_epoch')
parser.add_argument('--bs',
'--batch-size',
default=32,
type=int,
help='batch size',
dest='batch_size')
parser.add_argument('--lr',
'--learning-rate',
default=0.005,
type=float,
help='initial learning rate',
dest='lr')
# determine which gpu to use
parser.add_argument('--id',
'--device-id',
default=0,
type=int,
help='which GPU to use',
dest='device_id')
parser.add_argument('--no-graph',
'--disable-graph',
default='True',
action='store_false',
help='disable graph',
dest='graph')
parser.add_argument('--verbosity',
'--log-verbosity',
default=1,
type=int,
help='logging verbosity',
dest='verbosity')
args = parser.parse_args()
sgd = opt.SGD(lr=args.lr, momentum=0.9, weight_decay=1e-5)
run(0, 1, args.device_id, args.max_epoch, args.batch_size, model_config[args.model],
args.data, sgd, args.graph, args.verbosity)
``` |
{
"source": "Jodecir/exercicios_python",
"score": 4
} |
#### File: exercicios_python/array/contador de letras.py
```python
def letters_count(words_list):
count = []
for x in words_list:
quantity = len(x)
count.append(quantity)
return count
fechado = False
while fechado == False:
if __name__ == '__main__':
try:
animal_list = ['cachorro', 'gato', 'elefante']
animal = input('Digite um animal a lista: \n')
animal_list.append(animal)
total_letters = letters_count(animal_list)
print('Animais da lista:', animal_list)
print('Total de letras por palavra da lista: {}'.format(total_letters))
except IndexError:
print('Não foi possível acessar o index pois ele não existe na lista')
except Exception:
print('Ocorreu um erro desconhecido')
finally:
repetir = input("Deseja continuar (s/n): ")
if repetir == "n":
fechado = True
``` |
{
"source": "Jodeee/fasttest_selenium",
"score": 2
} |
#### File: fasttest_selenium/utils/testcast_utils.py
```python
import os
from fasttest_selenium.common.logging import log_error
class TestCaseUtils(object):
def __init__(self):
self.__testcase_list = []
def __traversal_dir(self,path):
for rt, dirs, files in os.walk(path):
files.sort()
for f in files:
file_path = os.path.join(rt, f)
if os.path.isfile(file_path):
if not file_path.endswith('.yaml'):
continue
self.__testcase_list.append(file_path)
def testcase_path(self,dirname,paths):
if not paths:
raise Exception('test case is empty.')
for path in paths:
file_path = os.path.join(dirname,path)
if os.path.isdir(file_path):
self.__traversal_dir(os.path.join(dirname, path))
elif os.path.isfile(file_path):
if not file_path.endswith('.yaml'):
continue
self.__testcase_list.append(file_path)
else:
log_error(' No such file or directory: {}'.format(path), False)
if not self.__testcase_list:
raise Exception('test case is empty: {}'.format(paths))
return self.__testcase_list
``` |
{
"source": "JoDehli/LoxGui",
"score": 2
} |
#### File: JoDehli/LoxGui/main.py
```python
import asyncio
import json
import logging
import sys
import traceback
import qasync
from PyQt5.QtWidgets import QMainWindow, QApplication
from qasync import asyncSlot, asyncClose
from Ui_Main import Ui_LoxQtGui
from api import LoxApp, LoxWs
logging.getLogger('asyncio').setLevel(logging.ERROR)
logging.getLogger('asyncio.coroutines').setLevel(logging.ERROR)
logging.getLogger('websockets.server').setLevel(logging.ERROR)
logging.getLogger('websockets.protocol').setLevel(logging.ERROR)
class LoxoneConnecionGui(QMainWindow):
def __init__(self):
super(LoxoneConnecionGui, self).__init__()
# uic.loadUi('Main.ui', self)
self.ui = Ui_LoxQtGui()
self.ui.setupUi(self)
self.api = None
self.show()
@asyncSlot()
async def message_callback(self, data):
print(data)
if isinstance(data, dict):
for k, v in data.items():
self.ui.log.appendPlainText("{} : {}".format(k, v))
else:
self.ui.log.appendPlainText(str(data))
@asyncClose
async def closeEvent(self, event):
try:
await self.api.stop()
except:
pass
loop = asyncio.get_running_loop()
loop.stop()
loop.close()
@asyncSlot()
async def disconnect_from_loxone(self):
if self.api is not None and self.api.state != "CLOSED":
await self.api.stop()
@asyncSlot()
async def connect_to_loxone(self):
if self.api is not None and self.api.state == "CONNECTED":
self.ui.log.appendPlainText("Already connected...")
return True
username = self.ui.user.text()
password = self.ui.password.text()
ip = self.ui.ip.text()
port = self.ui.port.text()
lox_config = LoxApp()
lox_config.lox_user = username
lox_config.lox_pass = password
lox_config.host = ip
lox_config.port = port
self.ui.json_txt.clear()
self.ui.log.clear()
self.ui.log.appendPlainText("Try to connect...")
try:
request_code = await lox_config.getJson()
if request_code == 200 or request_code == "200":
self.ui.log.appendPlainText("Got Config from Loxone. Port and Host ok.")
self.ui.json_txt.setText(json.dumps(lox_config.json, indent=4, sort_keys=False, ensure_ascii=False))
self.api = LoxWs(user=username,
password=password,
host=ip,
port=port,
loxconfig=lox_config.json,
loxone_url=lox_config.url)
res = await self.api.async_init()
self.ui.log.appendPlainText("Res {}".format(res))
if not res or res == -1:
self.ui.log.appendPlainText("Error connecting to loxone miniserver #1")
return False
self.api.message_call_back = self.message_callback
await self.api.start()
else:
self.ui.log.appendPlainText(f"Request Code {request_code}. Could not connect to Loxone.")
except:
traceback.print_exc()
t = traceback.format_exc()
self.ui.log.appendPlainText(str(t))
async def main():
loop = asyncio.get_running_loop()
app = QApplication(sys.argv)
# dark_palette = QPalette()
# dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))
# dark_palette.setColor(QPalette.WindowText, Qt.white)
# dark_palette.setColor(QPalette.Base, QColor(25, 25, 25))
# dark_palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
# dark_palette.setColor(QPalette.ToolTipBase, Qt.white)
# dark_palette.setColor(QPalette.ToolTipText, Qt.white)
# dark_palette.setColor(QPalette.Text, Qt.white)
# dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))
# dark_palette.setColor(QPalette.ButtonText, Qt.white)
# dark_palette.setColor(QPalette.BrightText, Qt.red)
# dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))
# dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
# dark_palette.setColor(QPalette.HighlightedText, Qt.black)
# app.setPalette(dark_palette)
window = LoxoneConnecionGui()
window.show()
await loop.create_future()
app.exec_()
if __name__ == '__main__':
qasync.run(main())
``` |
{
"source": "JoDehli/pyloxone-api",
"score": 3
} |
#### File: pyloxone-api/pyloxone_api/message.py
```python
from __future__ import annotations
import json
import math
import struct
import uuid
from enum import IntEnum
from pyloxone_api.exceptions import LoxoneException
class LLResponse:
"""A class for parsing LL Responses from the miniserver
An LL Response is a json object often returned by a miniserver in response
to a command. It begins "{"LL": {..." and has a control, code and value
keys. This class provides easy access to code (as an integer) and control
attributes (as a string), which should be present in every case.
LLResponse.value is a string containing the value of the response.
LLResponse.as_dict is guaranteed to be a dict, with at least a value key,
and if value has sub-values, keys for the sub-values as well.
Raises ValueError if the response cannot be parsed.
"""
def __init__(self, response: str | bytes):
try:
self._parsed: dict = json.loads(response)
# Sometimes, Loxone uses "Code", and sometimes "code"
self.code: int = int(
self._parsed.get("LL", {}).get("code", "")
or self._parsed.get("LL", {}).get("Code", "")
)
self.control: str = self._parsed["LL"]["control"]
self.value: str = str(self._parsed["LL"]["value"])
except (ValueError, KeyError, TypeError) as exc:
raise ValueError(exc)
@property
def value_as_dict(self) -> dict:
d = self._parsed["LL"]["value"]
retval = {"value": self.value}
if isinstance(d, dict):
return {**retval, **d}
return retval
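# Illustrative usage sketch (added for clarity; the JSON below is an assumed example
# shaped like a typical miniserver reply, not taken from the original test suite):
# resp = LLResponse('{"LL": {"control": "jdev/cfg/api", "code": "200", "value": "1.0"}}')
# resp.code -> 200
# resp.control -> 'jdev/cfg/api'
# resp.value_as_dict -> {'value': '1.0'}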
class MessageType(IntEnum):
"""The different types of message which the miniserver might send"""
TEXT = 0
BINARY = 1
VALUE_STATES = 2
TEXT_STATES = 3
DAYTIMER_STATES = 4
OUT_OF_SERVICE = 5
KEEPALIVE = 6
WEATHER_STATES = 7
UNKNOWN = -1
class MessageHeader:
def __init__(self, header: bytes):
# From the Loxone API docs, the header is as follows
# typedef struct {
# BYTE cBinType; // fix 0x03
# BYTE cIdentifier; // 8-Bit Unsigned Integer (little endian)
# BYTE cInfo; // Info
# BYTE cReserved; // reserved
# UINT nLen; // 32-Bit Unsigned Integer (little endian)
# } PACKED WsBinHdr;
self.header = header
if not header[0] == 3:
raise LoxoneException(r"Invalid header received: first byte is not \0x03")
try:
unpacked_data = struct.unpack("<cBccI", header)
except (struct.error, TypeError) as exc:
raise LoxoneException(f"Invalid header received: {exc}")
self.message_type: MessageType = MessageType(unpacked_data[1])
# First bit indicates that length is only estimated
self.estimated: bool = ord(unpacked_data[2]) >> 7 == 1
self.payload_length: int = int(unpacked_data[4])
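# Illustrative sketch (added for clarity, not part of the original module): the 8-byte
# WsBinHdr documented above could be built and parsed like this; the field values are
# assumptions chosen for the example only.
# raw = struct.pack("<cBBBI", b"\x03", 6, 0, 0, 9) # type 6 = keepalive, 9-byte payload
# hdr = MessageHeader(raw)
# hdr.message_type -> MessageType.KEEPALIVE
# hdr.estimated -> False
# hdr.payload_length -> 9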
class BaseMessage:
"""The base class for all messages from the miniserver"""
message_type = MessageType.UNKNOWN
def __init__(self, message: bytes | str):
self.message = message
# The base message carries no structured content, so return an empty dict
def as_dict(self) -> dict:
"""Return the contents of the message as a dict"""
return {}
class TextMessage(BaseMessage):
message_type = MessageType.TEXT
def __init__(self, message: bytes | str):
super().__init__(message)
ll_message = LLResponse(message)
self.code = ll_message.code
self.control = ll_message.control
self.value = ll_message.value
self.value_as_dict = ll_message.value_as_dict
class BinaryFile(BaseMessage):
message_type = MessageType.BINARY
# The message is a binary file. There is nothing parse
def as_dict(self):
return {}
class ValueStatesTable(BaseMessage):
message_type = MessageType.VALUE_STATES
# A value state is as follows:
# typedef struct {
# PUUID uuid; // 128-Bit uuid
# double dVal; // 64-Bit Float (little endian) value
# } PACKED EvData;
def as_dict(self):
event_dict = {}
length = len(self.message)
num = length / 24
start = 0
end = 24
for _ in range(int(num)):
packet = self.message[start:end]
event_uuid = uuid.UUID(bytes_le=packet[0:16])
fields = event_uuid.urn.replace("urn:uuid:", "").split("-")
uuidstr = f"{fields[0]}-{fields[1]}-{fields[2]}-{fields[3]}{fields[4]}"
value = struct.unpack("d", packet[16:24])[0]
event_dict[uuidstr] = value
start += 24
end += 24
return event_dict
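# Illustrative sketch (added for clarity, not part of the original module): one
# value-state entry is 24 bytes, a 16-byte little-endian UUID followed by a
# little-endian double, so on a little-endian machine:
# packet = uuid.UUID(int=0).bytes_le + struct.pack("<d", 21.5)
# ValueStatesTable(packet).as_dict() -> {'00000000-0000-0000-0000000000000000': 21.5}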
class TextStatesTable(BaseMessage):
message_type = MessageType.TEXT_STATES
# A text event state is as follows:
# typedef struct { // starts at multiple of 4
# PUUID uuid; // 128-Bit uuid
# PUUID uuidIcon; // 128-Bit uuid of icon
# unsigned long textLength; // 32-Bit Unsigned Integer (little endian)
# // text follows here
# } PACKED EvDataText;
def as_dict(self):
event_dict = {}
start = 0
def get_text(message: bytes, start: int, offset: int) -> int:
first = start
second = start + offset
event_uuid = uuid.UUID(bytes_le=self.message[first:second]) # type: ignore
first += offset
second += offset
icon_uuid_fields = event_uuid.urn.replace("urn:uuid:", "").split("-")
uuidstr = "{}-{}-{}-{}{}".format(
icon_uuid_fields[0],
icon_uuid_fields[1],
icon_uuid_fields[2],
icon_uuid_fields[3],
icon_uuid_fields[4],
)
icon_uuid = uuid.UUID(bytes_le=self.message[first:second]) # type: ignore
icon_uuid_fields = icon_uuid.urn.replace("urn:uuid:", "").split("-")
first = second
second += 4
text_length = struct.unpack("<I", message[first:second])[0]
first = second
second = first + text_length
message_str = struct.unpack(f"{text_length}s", message[first:second])[0]
start += (math.floor((4 + text_length + 16 + 16 - 1) / 4) + 1) * 4
event_dict[uuidstr] = message_str.decode("utf-8")
return start
if not isinstance(self.message, bytes):
raise LoxoneException("Expected bytes table, got str")
while start < len(self.message):
start = get_text(self.message, start, 16)
return event_dict
class DaytimerStatesTable(BaseMessage):
message_type = MessageType.DAYTIMER_STATES
# We dont currently handle this.
def as_dict(self):
return {}
class OutOfServiceIndicator(BaseMessage):
message_type = MessageType.OUT_OF_SERVICE
# There can be no such message. If an out-of-service header is sent, the
# miniserver will close the connection before sending a message.
class Keepalive(BaseMessage):
message_type = MessageType.KEEPALIVE
# Nothing to do. The dict is the message (which is b'keepalive')
def as_dict(self):
return {"keep_alive": "received"}
class WeatherStatesTable(BaseMessage):
message_type = MessageType.WEATHER_STATES
def as_dict(self):
return {}
def parse_header(header: bytes) -> MessageHeader:
return MessageHeader(header)
def parse_message(message: bytes | str, message_type: int) -> BaseMessage:
"""Return an instance of the appropriate BaseMessage subclass"""
for klass in BaseMessage.__subclasses__():
if klass.message_type == message_type:
return klass(message)
raise LoxoneException(f"Unknown message type {message_type}")
``` |
{
"source": "JoDehli/PyLoxone_beta",
"score": 2
} |
#### File: custom_components/loxone/cover.py
```python
import logging
from typing import Any
import random
from homeassistant.components.cover import (ATTR_POSITION, ATTR_TILT_POSITION,
DEVICE_CLASS_AWNING,
DEVICE_CLASS_BLIND,
DEVICE_CLASS_CURTAIN,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE,
DEVICE_CLASS_SHUTTER,
DEVICE_CLASS_WINDOW, SUPPORT_CLOSE,
SUPPORT_OPEN, CoverEntity)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import track_utc_time_change
from . import LoxoneEntity, get_miniserver_from_config_entry
from .const import (DOMAIN, SENDDOMAIN, SUPPORT_CLOSE_TILT, SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION, SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP, SUPPORT_STOP_TILT)
from .helpers import (get_all_covers, get_cat_name_from_cat_uuid,
get_room_name_from_room_uuid, map_range)
_LOGGER = logging.getLogger(__name__)
NEW_COVERS = "covers"
async def async_setup_platform(hass, config, async_add_devices, discovery_info={}):
"""Set up the Loxone covers."""
return True
async def async_setup_entry(hass, config_entry, async_add_entites):
"""Set Loxone covers."""
miniserver = get_miniserver_from_config_entry(hass, config_entry)
loxconfig = miniserver.loxone_config
covers = []
for cover in get_all_covers(loxconfig):
cover.update(
{
"hass": hass,
"room": get_room_name_from_room_uuid(loxconfig, cover.get("room", "")),
"cat": get_cat_name_from_cat_uuid(loxconfig, cover.get("cat", "")),
}
)
if cover["type"] == "Gate":
new_gate = LoxoneGate(**cover)
covers.append(new_gate)
elif cover["type"] == "Window":
new_window = LoxoneWindow(**cover)
covers.append(new_window)
else:
new_jalousie = LoxoneJalousie(**cover)
covers.append(new_jalousie)
@callback
def async_add_covers(_):
async_add_entites(_)
# miniserver.listeners.append(
# async_dispatcher_connect(
# hass, miniserver.async_signal_new_device(NEW_COVERS), async_add_entites
# )
# )
async_add_entites(covers)
class LoxoneGate(LoxoneEntity, CoverEntity):
"""Loxone Gate"""
def __init__(self, **kwargs):
LoxoneEntity.__init__(self, **kwargs)
self.hass = kwargs["hass"]
self._position_uuid = kwargs["states"]["position"]
self._state_uuid = kwargs["states"]["active"]
self._position = None
self._is_opening = False
self._is_closing = False
if self._position is None:
self._closed = True
else:
self._closed = self.current_cover_position <= 0
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.animation == 0:
return DEVICE_CLASS_GARAGE
elif self.animation in [1, 2, 3, 4, 5]:
return DEVICE_CLASS_DOOR
return self.type
@property
def animation(self):
return self.details["animation"]
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._closed
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
def open_cover(self, **kwargs):
"""Open the cover."""
if self._position == 100.0:
return
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value="open"))
self.schedule_update_ha_state()
def close_cover(self, **kwargs):
"""Close the cover."""
if self._position == 0:
return
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value="close"))
self.schedule_update_ha_state()
def stop_cover(self, **kwargs):
"""Stop the cover."""
if self.is_closing:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="open")
)
return
if self.is_opening:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="close")
)
return
async def event_handler(self, event):
if self.states["position"] in event.data or self._state_uuid in event.data:
if self.states["position"] in event.data:
self._position = float(event.data[self.states["position"]]) * 100.0
if self._position == 0:
self._closed = True
else:
self._closed = False
if self._state_uuid in event.data:
self._is_closing = False
self._is_opening = False
# assuming the Loxone convention that -1 means the gate is closing and 1 means opening
if event.data[self._state_uuid] == -1:
self._is_closing = True
elif event.data[self._state_uuid] == 1:
self._is_opening = True
self.schedule_update_ha_state()
@property
def extra_state_attributes(self):
"""Return device specific state attributes.
Implemented by platform classes.
"""
return {
"uuid": self.uuidAction,
"device_typ": self.type,
"category": self.cat,
"platform": "loxone",
}
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Loxone",
"model": "Gate",
"type": self.type,
"suggested_area": self.room,
}
class LoxoneWindow(LoxoneEntity, CoverEntity):
# pylint: disable=no-self-use
def __init__(self, **kwargs):
LoxoneEntity.__init__(self, **kwargs)
self.hass = kwargs["hass"]
self._position = None
self._closed = True
self._direction = 0
async def event_handler(self, e):
if self.states["position"] in e.data or self.states["direction"] in e.data:
if self.states["position"] in e.data:
self._position = float(e.data[self.states["position"]]) * 100.0
if self._position == 0:
self._closed = True
else:
self._closed = False
if self.states["direction"] in e.data:
self._direction = e.data[self.states["direction"]]
self.schedule_update_ha_state()
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._position
@property
def extra_state_attributes(self):
"""
Return device specific state attributes.
Implemented by platform classes.
"""
device_att = {
"uuid": self.uuidAction,
"device_typ": self.type,
"platform": "loxone",
"room": self.room,
"category": self.cat,
}
return device_att
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_WINDOW
@property
def is_closing(self):
"""Return if the cover is closing."""
if self._direction == -1:
return True
return False
@property
def is_opening(self):
"""Return if the cover is opening."""
if self._direction == 1:
return True
return False
@property
def is_closed(self):
return self._closed
def open_cover(self, **kwargs: Any) -> None:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullopen")
)
def close_cover(self, **kwargs: Any) -> None:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullclose")
)
def stop_cover(self, **kwargs):
"""Stop the cover."""
if self.is_closing:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullopen")
)
elif self.is_opening:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullclose")
)
def set_cover_position(self, **kwargs):
"""Return the current tilt position of the cover."""
position = kwargs.get(ATTR_POSITION)
self.hass.bus.async_fire(
SENDDOMAIN,
dict(uuid=self.uuidAction, value="moveToPosition/{}".format(position)),
)
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Loxone",
"model": "Window",
"suggested_area": self.room,
}
class LoxoneJalousie(LoxoneEntity, CoverEntity):
"""Loxone Jalousie"""
# pylint: disable=no-self-use
def __init__(self, **kwargs):
LoxoneEntity.__init__(self, **kwargs)
self.hass = kwargs["hass"]
if "autoInfoText" not in self.states:
self.states["autoInfoText"] = ""
if "autoState" not in self.states:
self.states["autoState"] = ""
self._position = 0
self._position_loxone = -1
self._tilt_position_loxone = 1
self._set_position = None
self._set_tilt_position = None
self._tilt_position = 0
self._requested_closing = True
self._unsub_listener_cover = None
self._unsub_listener_cover_tilt = None
self._is_opening = False
self._is_closing = False
self._animation = 0
self._is_automatic = False
self._auto_text = ""
self._auto_state = 0
if "isAutomatic" in self.details:
self._is_automatic = self.details["isAutomatic"]
if "animation" in self.details:
self._animation = self.details["animation"]
if self._position is None:
self._closed = True
else:
self._closed = self.current_cover_position <= 0
@property
def name(self):
return self._name
@name.setter
def name(self, n):
self._name = n
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self.current_cover_position is not None:
supported_features |= SUPPORT_SET_POSITION
if self.current_cover_tilt_position is not None:
supported_features |= (
SUPPORT_OPEN_TILT
| SUPPORT_CLOSE_TILT
| SUPPORT_SET_TILT_POSITION
)
return supported_features
async def event_handler(self, e):
if (
self.states["position"] in e.data
or self.states["shadePosition"] in e.data
or self.states["up"] in e.data
or self.states["down"] in e.data
or self.states["autoInfoText"] in e.data
or self.states["autoState"] in e.data
):
if self.states["position"] in e.data:
self._position_loxone = float(e.data[self.states["position"]]) * 100.0
self._position = map_range(self._position_loxone, 0, 100, 100, 0)
if self._position == 0:
self._closed = True
else:
self._closed = False
if self.states["shadePosition"] in e.data:
self._tilt_position_loxone = float(e.data[self.states["shadePosition"]]) * 100.0
self._tilt_position = map_range(self._tilt_position_loxone, 0, 100, 100, 0)
if self.states["up"] in e.data:
self._is_opening = e.data[self.states["up"]]
if self.states["down"] in e.data:
self._is_closing = e.data[self.states["down"]]
if self.states["autoInfoText"] in e.data:
self._auto_text = e.data[self.states["autoInfoText"]]
if self.states["autoState"] in e.data:
self._auto_state = e.data[self.states["autoState"]]
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def current_cover_tilt_position(self):
"""Return the current tilt position of the cover."""
return self._tilt_position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._closed
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.animation in [0, 1]:
return DEVICE_CLASS_BLIND
elif self.animation in [2, 4, 5]:
return DEVICE_CLASS_CURTAIN
elif self.animation == 3:
return DEVICE_CLASS_SHUTTER
elif self.animation == 6:
return DEVICE_CLASS_AWNING
@property
def animation(self):
return self.details["animation"]
@property
def is_automatic(self):
return self._is_automatic
@property
def auto(self):
if self._is_automatic and self._auto_state:
return STATE_ON
else:
return STATE_OFF
@property
def shade_postion_as_text(self):
"""Returns shade postionn as text"""
if self.current_cover_tilt_position == 100 and self.current_cover_position < 10:
return "shading on"
else:
return " "
@property
def extra_state_attributes(self):
"""
Return device specific state attributes.
Implemented by platform classes.
"""
device_att = {
"uuid": self.uuidAction,
"device_typ": self.type,
"platform": "loxone",
"room": self.room,
"category": self.cat,
"current_position": self.current_cover_position,
"current_shade_mode": self.shade_postion_as_text,
"current_position_loxone_style": round(self._position_loxone, 0),
}
if self._is_automatic:
device_att.update(
{"automatic_text": self._auto_text, "auto_state": self.auto}
)
return device_att
def close_cover(self, **kwargs):
"""Close the cover."""
if self._position == 0:
return
elif self._position is None:
self._closed = True
self.schedule_update_ha_state()
return
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="FullDown")
)
self.schedule_update_ha_state()
def open_cover(self, **kwargs):
"""Open the cover."""
if self._position == 100.0:
return
elif self._position is None:
self._closed = False
self.schedule_update_ha_state()
return
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value="FullUp"))
self.schedule_update_ha_state()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="stop")
)
def set_cover_position(self, **kwargs):
"""Return the current tilt position of the cover."""
position = kwargs.get(ATTR_POSITION)
mapped_pos = map_range(position, 0, 100, 100, 0)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualPosition/{mapped_pos}"))
def open_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
position = 0.0 + random.uniform(0.000000001, 0.00900000)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualLamelle/{position}"))
def stop_cover_tilt(self, **kwargs):
"""Stop the cover."""
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="stop")
)
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
position = 100.0 + random.uniform(0.000000001, 0.00900000)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualLamelle/{position}"))
def set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
tilt_position = kwargs.get(ATTR_TILT_POSITION)
mapped_pos = map_range(tilt_position, 0, 100, 100, 0)
position = mapped_pos + random.uniform(0.000000001, 0.00900000)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualLamelle/{position}"))
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Loxone",
"model": "Jalousie",
"type": self.type,
"suggested_area": self.room,
}
```
#### File: custom_components/loxone/helpers.py
```python
import numpy as np
def map_range(value, in_min, in_max, out_min, out_max):
return out_min + (((value - in_min) / (in_max - in_min)) * (out_max - out_min))
def hass_to_lox(level):
"""Convert the given HASS light level (0-255) to Loxone (0.0-100.0)."""
return (level * 100.0) / 255.0
def lox_to_hass(lox_val):
"""Convert the given Loxone (0.0-100.0) light level to HASS (0-255)."""
return (lox_val / 100.0) * 255.0
def lox2lox_mapped(x, min_v, max_v):
if x <= min_v:
return 0
if x >= max_v:
return max_v
return x
def lox2hass_mapped(x, min_v, max_v):
if x <= min_v:
return 0
if x >= max_v:
return lox_to_hass(max_v)
return lox_to_hass(x)
def to_hass_color_temp(temp):
"""Linear interpolation between Loxone values from 2700 to 6500"""
return np.interp(temp, [2700, 6500], [500, 153])
def to_loxone_color_temp(temp):
"""Linear interpolation between HASS values from 153 to 500"""
return np.interp(temp, [153, 500], [6500, 2700])
def get_room_name_from_room_uuid(lox_config, room_uuid):
if "rooms" in lox_config:
if room_uuid in lox_config["rooms"]:
return lox_config["rooms"][room_uuid]["name"]
return ""
def get_cat_name_from_cat_uuid(lox_config, cat_uuid):
if "cats" in lox_config:
if cat_uuid in lox_config["cats"]:
return lox_config["cats"][cat_uuid]["name"]
return ""
def get_all_roomcontroller_entities(json_data):
return get_all(json_data, "IRoomControllerV2")
def get_all_switch_entities(json_data):
return get_all(json_data, ["Pushbutton", "Switch", "TimedSwitch", "Intercom"])
def get_all_covers(json_data):
return get_all(json_data, ["Jalousie", "Gate", "Window"])
def get_all_analog_info(json_data):
return get_all(json_data, "InfoOnlyAnalog")
def get_all_digital_info(json_data):
return get_all(json_data, "InfoOnlyDigital")
def get_all_light_controller(json_data):
return get_all(json_data, "LightControllerV2")
def get_all_alarm(json_data):
return get_all(json_data, "Alarm")
def get_all_dimmer(json_data):
return get_all(json_data, "Dimmer")
def get_miniserver_type(t):
if t == 0:
return "Miniserver Gen 1"
elif t == 1:
return "Miniserver Go"
elif t == 2:
return "Miniserver"
return "Unknown Typ"
def get_all(json_data, name):
controls = []
if isinstance(name, list):
for c in json_data["controls"].keys():
if json_data["controls"][c]["type"] in name:
controls.append(json_data["controls"][c])
else:
for c in json_data["controls"].keys():
if json_data["controls"][c]["type"] == name:
controls.append(json_data["controls"][c])
return controls
```
#### File: custom_components/loxone/scene.py
```python
import logging
from homeassistant.components.scene import Scene
from homeassistant.helpers.entity_platform import async_call_later
from . import get_miniserver_from_config
from .const import (CONF_SCENE_GEN, CONF_SCENE_GEN_DELAY, DEFAULT_DELAY_SCENE,
DOMAIN, SENDDOMAIN)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Set up Scenes."""
delay_scene = config_entry.options.get(CONF_SCENE_GEN_DELAY, DEFAULT_DELAY_SCENE)
create_scene = config_entry.options.get(CONF_SCENE_GEN, False)
async def gen_scenes(_):
devices = []
entity_ids = hass.states.async_entity_ids("LIGHT")
for _ in entity_ids:
state = hass.states.get(_)
att = state.attributes
if "platform" in att and att["platform"] == DOMAIN:
entity = hass.data["light"].get_entity(state.entity_id)
if entity.device_class == "LightControllerV2":
for effect in entity.effect_list:
mood_id = entity.get_id_by_moodname(effect)
uuid = entity.uuidAction
devices.append(
Loxonelightscene(
"{}-{}".format(entity.name, effect),
mood_id,
uuid,
entity.unique_id,
)
)
async_add_devices(devices)
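    # Generate the scenes only after a configurable delay, presumably so the
    # Loxone light entities have been registered first.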
if create_scene:
async_call_later(hass, delay_scene, gen_scenes)
return True
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up Scenes."""
return True
class Loxonelightscene(Scene):
def __init__(self, name, mood_id, uuid, light_controller_id):
self._name = name
self.mood_id = mood_id
self.uuidAction = uuid
self._light_controller_id = light_controller_id
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._light_controller_id}-{self.mood_id}"
@property
def name(self):
"""Return the name of the scene."""
return self._name
def activate(self):
"""Activate scene. Try to get entities into requested state."""
self.hass.bus.async_fire(
SENDDOMAIN,
dict(uuid=self.uuidAction, value="changeTo/{}".format(self.mood_id)),
)
``` |
{
"source": "jodemaey/climetlab",
"score": 2
} |
#### File: readers/grib/codes.py
```python
import datetime
import json
import logging
import os
import eccodes
from climetlab.core import Base
from climetlab.core.caching import auxiliary_cache_file
from climetlab.profiling import call_counter
from climetlab.utils.bbox import BoundingBox
LOG = logging.getLogger(__name__)
def missing_is_none(x):
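    # 2147483647 is the eccodes/GRIB "missing" marker for integer keys.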
return None if x == 2147483647 else x
# This does not belong here, should be in the C library
def _get_message_offsets(path):
fd = os.open(path, os.O_RDONLY)
try:
def get(count):
buf = os.read(fd, count)
assert len(buf) == count
return int.from_bytes(
buf,
byteorder="big",
signed=False,
)
offset = 0
while True:
code = os.read(fd, 4)
if len(code) < 4:
break
if code != b"GRIB":
offset = os.lseek(fd, offset + 1, os.SEEK_SET)
continue
length = get(3)
edition = get(1)
if edition == 1:
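                # Large-GRIB1 workaround: when the top bit of the 24-bit total length
                # is set, the real message length is reconstructed below from the
                # section lengths.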
if length & 0x800000:
sec1len = get(3)
os.lseek(fd, 4, os.SEEK_CUR)
flags = get(1)
os.lseek(fd, sec1len - 8, os.SEEK_CUR)
if flags & (1 << 7):
sec2len = get(3)
os.lseek(fd, sec2len - 3, os.SEEK_CUR)
if flags & (1 << 6):
sec3len = get(3)
os.lseek(fd, sec3len - 3, os.SEEK_CUR)
sec4len = get(3)
if sec4len < 120:
length &= 0x7FFFFF
length *= 120
length -= sec4len
length += 4
if edition == 2:
length = get(8)
yield offset, length
offset = os.lseek(fd, offset + length, os.SEEK_SET)
finally:
os.close(fd)
eccodes_codes_release = call_counter(eccodes.codes_release)
eccodes_codes_new_from_file = call_counter(eccodes.codes_new_from_file)
class CodesHandle:
def __init__(self, handle, path, offset):
self.handle = handle
self.path = path
self.offset = offset
def __del__(self):
eccodes_codes_release(self.handle)
def get(self, name):
try:
if name == "values":
return eccodes.codes_get_values(self.handle)
size = eccodes.codes_get_size(self.handle, name)
if size and size > 1:
return eccodes.codes_get_array(self.handle, name)
return eccodes.codes_get(self.handle, name)
except eccodes.KeyValueNotFoundError:
return None
def get_long(self, name):
try:
return eccodes.codes_get_long(self.handle, name)
except eccodes.KeyValueNotFoundError:
return None
def get_string(self, name):
try:
return eccodes.codes_get_string(self.handle, name)
except eccodes.KeyValueNotFoundError:
return None
def get_double(self, name):
try:
return eccodes.codes_get_double(self.handle, name)
except eccodes.KeyValueNotFoundError:
return None
class CodesReader:
def __init__(self, path):
self.path = path
self.file = open(self.path, "rb")
def __del__(self):
try:
self.file.close()
except Exception:
pass
def at_offset(self, offset):
self.file.seek(offset, 0)
return next(self)
def __iter__(self):
return self
def __next__(self):
handle = self._next_handle()
if handle is None:
raise StopIteration()
return handle
def _next_handle(self):
offset = self.file.tell()
handle = eccodes_codes_new_from_file(self.file, eccodes.CODES_PRODUCT_GRIB)
if not handle:
return None
return CodesHandle(handle, self.path, offset)
@property
def offset(self):
return self.file.tell()
def read(self, offset, length):
self.file.seek(offset, 0)
return self.file.read(length)
class GribField(Base):
def __init__(self, reader, offset, length):
self._reader = reader
self._offset = offset
self._length = length
self._handle = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def path(self):
return self.handle.path
@property
def handle(self):
if self._handle is None:
assert self._offset is not None
assert self._reader is not None
self._handle = self._reader.at_offset(self._offset)
return self._handle
@property
def values(self):
return self.handle.get("values")
@property
def offset(self):
if self._offset is None:
self._offset = int(self.handle.get("offset"))
return self._offset
@property
def shape(self):
return (
missing_is_none(self.handle.get("Nj")),
missing_is_none(self.handle.get("Ni")),
)
def plot_map(self, backend):
backend.bounding_box(
north=self.handle.get("latitudeOfFirstGridPointInDegrees"),
south=self.handle.get("latitudeOfLastGridPointInDegrees"),
west=self.handle.get("longitudeOfFirstGridPointInDegrees"),
east=self.handle.get("longitudeOfLastGridPointInDegrees"),
)
backend.plot_grib(self.path, self.handle.get("offset"))
@call_counter
def to_numpy(self, normalise=False):
shape = self.shape
if shape[0] is None or shape[1] is None:
return self.values
if normalise:
return self.values.reshape(self.shape)
return self.values.reshape(self.shape)
def __repr__(self):
return "GribField(%s,%s,%s,%s,%s,%s)" % (
self.handle.get("shortName"),
self.handle.get("levelist"),
self.handle.get("date"),
self.handle.get("time"),
self.handle.get("step"),
self.handle.get("number"),
)
def _grid_definition(self):
return dict(
north=self.handle.get("latitudeOfFirstGridPointInDegrees"),
south=self.handle.get("latitudeOfLastGridPointInDegrees"),
west=self.handle.get("longitudeOfFirstGridPointInDegrees"),
east=self.handle.get("longitudeOfLastGridPointInDegrees"),
south_north_increment=self.handle.get("jDirectionIncrementInDegrees"),
west_east_increment=self.handle.get("iDirectionIncrementInDegrees"),
)
def field_metadata(self):
m = self._grid_definition()
for n in ("shortName", "units", "paramId"):
p = self.handle.get(n)
if p is not None:
m[n] = str(p)
m["shape"] = self.shape
return m
def datetime(self):
date = self.handle.get("date")
time = self.handle.get("time")
return datetime.datetime(
date // 10000,
date % 10000 // 100,
date % 100,
time // 100,
time % 100,
)
def valid_datetime(self):
step = self.handle.get("endStep")
return self.datetime() + datetime.timedelta(hours=step)
def to_datetime_list(self):
return [self.valid_datetime()]
def to_bounding_box(self):
return BoundingBox(
north=self.handle.get("latitudeOfFirstGridPointInDegrees"),
south=self.handle.get("latitudeOfLastGridPointInDegrees"),
west=self.handle.get("longitudeOfFirstGridPointInDegrees"),
east=self.handle.get("longitudeOfLastGridPointInDegrees"),
)
def _attributes(self, names):
result = {}
for name in names:
result[name] = self.handle.get(name)
return result
def _get(self, name):
"""Private, for testing only"""
# paramId is renamed as param to get rid of the
# additional '.128' (in climetlab/scripts/grib.py)
if name == "param":
name = "paramId"
return self.handle.get(name)
def __getitem__(self, name):
"""For cfgrib"""
# print(name)
proc = self.handle.get
if ":" in name:
try:
name, kind = name.split(":")
proc = dict(
str=self.handle.get_string,
int=self.handle.get_long,
float=self.handle.get_double,
)[kind]
except Exception:
LOG.exception(f"Unsupported kind '{kind}'")
raise ValueError(f"Unsupported kind '{kind}'")
return proc(name)
def write(self, f):
f.write(self._reader.read(self._offset, self._length))
class GribIndex:
VERSION = 1
def __init__(self, path):
assert isinstance(path, str), path
self.path = path
self.offsets = None
self.lengths = None
self.cache = auxiliary_cache_file(
"grib-index",
path,
content="null",
extension=".json",
)
if not self._load_cache():
self._build_index()
def _build_index(self):
offsets = []
lengths = []
for offset, length in _get_message_offsets(self.path):
offsets.append(offset)
lengths.append(length)
self.offsets = offsets
self.lengths = lengths
self._save_cache()
def _save_cache(self):
try:
with open(self.cache, "w") as f:
json.dump(
dict(
version=self.VERSION,
offsets=self.offsets,
lengths=self.lengths,
),
f,
)
except Exception:
LOG.exception("Write to cache failed %s", self.cache)
def _load_cache(self):
try:
with open(self.cache) as f:
c = json.load(f)
if not isinstance(c, dict):
return False
assert c["version"] == self.VERSION
self.offsets = c["offsets"]
self.lengths = c["lengths"]
return True
except Exception:
LOG.exception("Load from cache failed %s", self.cache)
return False
``` |
{
"source": "joders/logging_tqdm",
"score": 2
} |
#### File: joders/logging_tqdm/logging_tqdm.py
```python
import logging
import sys
import types
from typing import Union, Any, Callable
from tqdm import tqdm
logging.basicConfig(level=logging.INFO,
format='%(asctime)s: %(message)s')
LOGGER = logging.getLogger()
class LoggingTqdm(tqdm):
class PreserveLastOutputStreamInterceptor():
def __init__(self, output_stream: Any, last_output_initial_value: str = "(no output yet)") -> None:
self.output_stream = output_stream
if hasattr(output_stream, 'encoding'):
self.encoding = output_stream.encoding
self.last_output = last_output_initial_value
def write(self, string: str) -> None:
if string != '\n' and string != '': # don't store or print the end of line after the last iteration
self.last_output = string
self.output_stream.write(string)
else:
# get rid of the status bar line so that it is
# effectively replaced by the log of the last output
self.output_stream.write('\r')
def flush(self) -> None:
self.output_stream.flush()
def __init__(self, *args, **kwargs) -> None:
if 'file' not in kwargs:
kwargs['file'] = LoggingTqdm.PreserveLastOutputStreamInterceptor(sys.stderr)
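        # NOTE: the module-level name `tqdm` is rebound to LoggingTqdm at the bottom of
        # this file, so at call time `super(tqdm, self)` resolves to the original tqdm class.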
super(tqdm, self).__init__(*args, **kwargs)
def __iter__(self) -> types.GeneratorType:
iterable = super(tqdm, self).__iter__()
def iter_logging_wrapper():
count = 0
item = None
try:
for count, item in enumerate(iterable):
yield item
finally:
if self.n != count + 1:
print('') # kinda dirty but ensures that the next log line is not appended to the last output line
LOGGER.info(f'failed on item: {item}')
self.n = count
self.refresh()
LOGGER.info(self.fp.last_output[1:])
return iter_logging_wrapper()
@classmethod
def pandas(tclass: type, *targs, **tkwargs) -> None: # pylint: disable=C0202
# adhering to tqdm's definition
if 'file' in tkwargs:
super(tqdm, tclass).pandas(*targs, **tkwargs)
else:
tkwargs['file']=LoggingTqdm.PreserveLastOutputStreamInterceptor(sys.stderr)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
from pandas import Panel
tqdm.pandas(tclass, *targs, **tkwargs)
def wrap_progress_apply(original_progress_apply) -> Callable[[Any, Callable, Any, Any], Any]:
def progress_apply(df: Union[DataFrame, Series, DataFrameGroupBy,
SeriesGroupBy, GroupBy, PanelGroupBy, Panel],
func: Callable, *args, **kwargs
) -> Union[DataFrame, Series, DataFrameGroupBy,
SeriesGroupBy, GroupBy, PanelGroupBy, Panel]:
# error handling in progress_apply is difficult because
# the instance of the tqdm object only exists within
# the tqdm progress_apply classmethod
# we can still keep track of the invocations by wrapping
# the apply function but we won't be able to produce an
# updated version of the status bar in the case of
# failure because at that point the tqdm object
# does not exist anymore (this can only be changed
# inside the tqdm code itself which we leave untouched)
count = 1
def counter_wrapped_func(item, *args, **kwargs):
nonlocal count
try:
res = func(item, *args, **kwargs)
except Exception: # pylint: disable=W0703
# needs to be general
print('') # kinda dirty but ensures that the next log line
# is not appended to the last output line
LOGGER.info(f'Failed on pandas apply during the {count}. invocation of the ' +
f'provided apply function processing item: \n{item}')
count+=1
return res
progress_apply_res = original_progress_apply(df, counter_wrapped_func, *args, **kwargs)
LOGGER.info(tkwargs['file'].last_output[1:])
return progress_apply_res
return progress_apply
for datatype in [DataFrame, Series, DataFrameGroupBy, SeriesGroupBy, GroupBy, PanelGroupBy, Panel]:
datatype.progress_apply = wrap_progress_apply(datatype.progress_apply)
tqdm = LoggingTqdm # pylint: disable=C0103
# we want users to be able to use logging.tqdm just like tqdm.tqdm
```
#### File: joders/logging_tqdm/test.py
```python
from logging_tqdm import tqdm
import time
print("Testing tqdm-logging:")
for i in tqdm(range(100)):
time.sleep(.02)
print("\n\n\n")
print("Testing tqdm-logging with exception:")
try:
for i in tqdm(range(100)):
if i == 75:
raise Exception("Some Exception")
time.sleep(.02)
except Exception:
    pass
print("\n\n\n")
import pandas as pd
import numpy as np
tqdm.pandas()
print("Testing pandas tqdm-logging:")
print("Processing 10 groups (needs 11 pandas operations):")
df=pd.DataFrame({'classes':np.random.randint(0,10,size=1000), \
'data':np.random.randn(1000)})
df.groupby('classes').progress_apply(lambda x:(time.sleep(.2),x.mean)[1])
print("\n\n\n")
print("Testing pandas tqdm-logging with exception:")
c=0
def exceptionMean(groupData):
global c
c+=1
if c==7:
raise Exception("")
return groupData.mean()
df.groupby('classes').progress_apply(exceptionMean)
``` |
{
"source": "JodesL/catboost",
"score": 2
} |
#### File: build/scripts/vcs_info.py
```python
import sys
import os
def merge_java_mf(out, infile):
manifest = os.path.join(infile, 'META-INF', 'MANIFEST.MF')
if not os.path.isfile(manifest):
cont = 'Manifest-Version: 1.0'
else:
with open(manifest, 'r') as f:
cont = f.read().rstrip()
with open(out, 'w') as f:
f.write(cont + '\n')
append_vca_info_to_java_mf(out)
def append_vca_info_to_java_mf(manifest):
with open(manifest, 'a') as f:
f.write('Vcs-Placeholder: 123\n\n')
if __name__ == "__main__":
merge_java_mf(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else os.curdir)
``` |
{
"source": "jodevsa/redash",
"score": 2
} |
#### File: redash/handlers/favorites.py
```python
from flask import request
from redash import models
from redash.permissions import require_access, view_only
from redash.handlers.base import BaseResource, get_object_or_404, filter_by_tags, paginate
from redash.handlers.queries import order_results
from redash.serializers import QuerySerializer, serialize_dashboard
from sqlalchemy.exc import IntegrityError
class QueryFavoriteListResource(BaseResource):
def get(self):
search_term = request.args.get('q')
if search_term:
base_query = models.Query.search(search_term, self.current_user.group_ids, include_drafts=True, limit=None)
favorites = models.Query.favorites(self.current_user, base_query=base_query)
else:
favorites = models.Query.favorites(self.current_user)
favorites = filter_by_tags(favorites, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_favorites = order_results(favorites, fallback=bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(
ordered_favorites,
page,
page_size,
QuerySerializer,
with_stats=True,
with_last_modified_by=False,
)
self.record_event({
'action': 'load_favorites',
'object_type': 'query',
'params': {
'q': search_term,
'tags': request.args.getlist('tags'),
'page': page
}
})
return response
class QueryFavoriteResource(BaseResource):
def post(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.groups, self.current_user, view_only)
fav = models.Favorite(org_id=self.current_org.id, object=query, user=self.current_user)
models.db.session.add(fav)
try:
models.db.session.commit()
except IntegrityError as e:
if 'unique_favorite' in e.message:
models.db.session.rollback()
else:
raise e
self.record_event({
'action': 'favorite',
'object_id': query.id,
'object_type': 'query'
})
def delete(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.groups, self.current_user, view_only)
models.Favorite.query.filter(
models.Favorite.object_id == query_id,
models.Favorite.object_type == u'Query',
models.Favorite.user==self.current_user,
).delete()
models.db.session.commit()
self.record_event({
            'action': 'unfavorite',
'object_id': query.id,
'object_type': 'query'
})
class DashboardFavoriteListResource(BaseResource):
def get(self):
search_term = request.args.get('q')
if search_term:
base_query = models.Dashboard.search(self.current_org, self.current_user.group_ids, self.current_user.id, search_term)
favorites = models.Dashboard.favorites(self.current_user, base_query=base_query)
else:
favorites = models.Dashboard.favorites(self.current_user)
favorites = filter_by_tags(favorites, models.Dashboard.tags)
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(favorites, page, page_size, serialize_dashboard)
self.record_event({
'action': 'load_favorites',
'object_type': 'dashboard',
'params': {
'q': search_term,
'tags': request.args.getlist('tags'),
'page': page
}
})
return response
class DashboardFavoriteResource(BaseResource):
def post(self, object_id):
dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
fav = models.Favorite(org_id=self.current_org.id, object=dashboard, user=self.current_user)
models.db.session.add(fav)
try:
models.db.session.commit()
except IntegrityError as e:
if 'unique_favorite' in e.message:
models.db.session.rollback()
else:
raise e
self.record_event({
'action': 'favorite',
'object_id': dashboard.id,
'object_type': 'dashboard'
})
def delete(self, object_id):
dashboard = get_object_or_404(models.Dashboard.get_by_slug_and_org, object_id, self.current_org)
models.Favorite.query.filter(models.Favorite.object==dashboard, models.Favorite.user==self.current_user).delete()
models.db.session.commit()
self.record_event({
'action': 'unfavorite',
'object_id': dashboard.id,
'object_type': 'dashboard'
})
``` |
{
"source": "JodeZer/loss-landscape",
"score": 3
} |
#### File: JodeZer/loss-landscape/model_loader.py
```python
import os
import cifar10.model_loader
import rnn.model_loader
def load(dataset, model_name, model_file, data_parallel=False):
if dataset == 'cifar10':
net = cifar10.model_loader.load(model_name, model_file, data_parallel)
elif dataset == "binaryAdd":
net = rnn.model_loader.load(model_name, model_file, data_parallel)
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return net
```
#### File: loss-landscape/rnn/model_loader.py
```python
import os
import torch, torchvision
import rnn.models.addRnn as rnn
# map between model name and function
models = {
'binaryAddRnn8_1' : rnn.BinaryAddRNN8_1,
'binaryAddRnn8_2' : rnn.BinaryAddRNN8_2,
'binaryAddRnn16_2' : rnn.BinaryAddRNN16_2,
'binaryAddRnn16_3' : rnn.BinaryAddRNN16_3,
}
def load(model_name, model_file=None, data_parallel=False):
net = models[model_name]()
if data_parallel: # the model is saved in data paralle mode
net = torch.nn.DataParallel(net)
if model_file:
assert os.path.exists(model_file), model_file + " does not exist."
stored = torch.load(model_file, map_location=lambda storage, loc: storage)
if 'state_dict' in stored.keys():
net.load_state_dict(stored['state_dict'])
else:
net.load_state_dict(stored)
if data_parallel: # convert the model back to the single GPU version
net = net.module
net.eval()
return net
``` |
{
"source": "jodfedlet/Seguran-a-De-Sistemas",
"score": 4
} |
#### File: Seguran-a-De-Sistemas/Tasks/RSA.py
```python
def divisor(a):
i = 2
liste = []
while(a >= i):
if a % i == 0:
res = a // i
liste.append(i)
a = res
else:
i+=1
return liste
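# Brute-force search for d such that (e * d) % fiN == 1, i.e. the modular inverse of e mod fi(N).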
def calcularD(e,fiN, d):
i = 2
while(d >= i):
dd = d
if e * dd % fiN == 1:
return d
else:
d+=1
def mdc(fin, l):
listFin = divisor(fin)
listE = divisor(l)
a = [ e for e in listE if e in listFin]
md = 1
for h in a:
md *=h
return md
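# Find the smallest e, starting from the given value, that is coprime with fi(N).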
def calcularE(fiN,e):
efin = e
if mdc(fiN, efin) == 1:
return efin
else:
e += 1
return calcularE(fiN,e)
def isPrime(n):
prim = 0
for i in range(2,n+1):
if n % i == 0:
prim+=1
if prim == 1:
return True
else:
return False
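# Encrypt: for each character, compute c = ord(char)^e mod n and collect the results.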
def cifrarMes(n, e, messLower):
cifrar = []
for let in messLower:
asc = ord(let)
cryp = pow(asc,e,n)
cifrar.append(cryp)
print('Mensagem Normal: ', messLower.capitalize())
print('Mensagem Cifrada: ', end="")
for cif in cifrar:
print('{}'.format(cif), end="")
print()
return cifrar
def decifrarMes(e,n,d, mess):
while True:
try:
ddd,dn = input('Digite a chave privada: ').split(' ')
dn = int(dn)
ddd = int(ddd)
if n != dn or ddd != d:
print('Chave invalida')
else:
decifrar = []
for num in mess:
decryp = pow(num,d,n)
decifrar.append(decryp)
print(55*'-')
print('Mensagem Cifrada: ', mess)
print('Mensagem Decifrada: ', end="")
for cif in decifrar:
print('{}'.format(cif), end="")
print()
print(55*'-')
texto = input('Quer ver o texto da mensagem? (S para aceitar): ')
if texto == 'S' or texto == 's':
print('Mensagem Decifrada em texto: ', end="")
for cif in decifrar:
print('{}'.format(chr(cif)), end="")
print()
exit(1)
else:
exit(1)
except ValueError:
print('Voce deve digitar 2 numeros na mesma linha!')
while True:
try:
p,q = input('Digite os 2 numeros primos: ').split(' ')
p = int(p)
q = int(q)
if(isPrime(p) and isPrime(q)):
n = p * q
fiN = (p - 1) * (q - 1)
max = 0
max = p if p > q else q
maxNUm = max+1
e = calcularE(fiN, maxNUm)
print(55*'*')
print('Chave publica: ({} , {})'.format(e,n))
print(55*'*')
print()
message = input('Enter a message: ')
messLower = message.lower()
mess = cifrarMes(n,e,messLower)
d = calcularD(e,fiN, maxNUm)
print()
print(55*'*')
print('Chave privada: ({} , {})'.format(d,n))
print(55*'*')
print()
decifrarMes(e,n,d, mess)
else:
print('Os numeros devem ser primos!')
except ValueError:
print('Voce deve digitar 2 numeros na mesma linha!')
``` |
{
"source": "jodfie/bank2ynab",
"score": 3
} |
#### File: bank2ynab/bank2ynab/transactionfile_reader.py
```python
import codecs
import logging
import os
import re
from os import path
import chardet
def get_files(
name: str,
file_pattern: str,
try_path: str,
regex_active: bool,
ext: str,
prefix: str,
) -> list[str]:
"""
Returns list of files matching the specified search parameters.
:param name: Bank format name
:type name: str
:param file_pattern: filename or regex pattern to match
:type file_pattern: str
:param try_path: provided path to search initially
:type try_path: str
:param regex_active: whether or not to use regex in file name check
:type regex_active: bool
:param ext: file extension
:type ext: str
:param prefix: prefix attached to processed files
:type prefix: str
:return: list of matching files
:rtype: list
"""
files: list[str] = list()
missing_dir = False
fpath = ""
if file_pattern != "":
try:
fpath = find_directory(try_path)
except FileNotFoundError:
missing_dir = True
fpath = find_directory("")
fpath = path.abspath(fpath)
try:
directory_list = os.listdir(fpath)
except FileNotFoundError:
directory_list = os.listdir(".")
if regex_active is True:
files = [
path.join(fpath, f)
for f in directory_list
if f.endswith(ext)
if re.match(file_pattern + r".*\.", f)
if prefix not in f
]
else:
files = [
path.join(fpath, f)
for f in directory_list
if f.endswith(ext)
if f.startswith(file_pattern)
if prefix not in f
]
if not files and missing_dir:
logging.error(
f"\nFormat: {name}\n\n"
+ "Error: Can't find download path:"
+ f"{try_path}\nTrying default path instead:\t {fpath}"
)
return files
def find_directory(filepath: str) -> str:
"""
Finds the downloads directory for active user if filepath is not set.
:param filepath: Filepath specified by the configuration file.
:type filepath: str
:raises FileNotFoundError: Error raised if the filepath is invalid.
:return: The desired directory to use.
:rtype: str
"""
if filepath == "":
if os.name == "nt":
# Windows
import winreg
shell_path = (
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion"
"\\Explorer\\Shell Folders"
)
dl_key = "{374DE290-123F-4565-9164-39C4925E467B}"
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, shell_path) as key:
input_dir = winreg.QueryValueEx(key, dl_key)[0]
else:
# Linux, OSX
userhome = os.path.expanduser("~")
input_dir = os.path.join(userhome, "Downloads")
else:
if not os.path.exists(filepath):
s = "Error: Input directory not found: {}"
raise FileNotFoundError(s.format(filepath))
input_dir = filepath
return input_dir
# TODO add check of config to see if we have encoding specified
def detect_encoding(filepath: str) -> str:
"""
Utility to detect file encoding. This is imperfect, but
should work for the most common cases.
:param filepath: string path to a given file
:return: encoding alias that can be used with open()
"""
# First try to guess the encoding with chardet. Take it if the
# confidence is >60% (randomly chosen)
with open(filepath, "rb") as f:
file_content = f.read()
rslt = chardet.detect(file_content)
conf, enc = rslt["confidence"], rslt["encoding"]
if conf > 0.6:
logging.info(
f"\tOpening file using encoding {enc} (confidence {conf})"
)
return enc
# because some encodings will happily encode anything even if wrong,
# keeping the most common near the top should make it more likely that
# we're doing the right thing.
encodings = [
"ascii",
"utf-8",
"utf-16",
"cp1251",
"utf_32",
"utf_32_be",
"utf_32_le",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8_sig",
"cp850",
"cp852",
"latin_1",
"big5",
"big5hkscs",
"cp037",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"euc_jp",
"euc_jis_2004",
"euc_jisx0213",
"euc_kr",
"gb2312",
"gbk",
"gb18030",
"hz",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"latin_1",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"johab",
"koi8_r",
"koi8_u",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"ptcp154",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
]
result = ""
error = (
ValueError,
UnicodeError,
UnicodeDecodeError,
UnicodeEncodeError,
)
for enc in encodings:
try:
logging.info(f"\tAttempting to open file using {enc} encoding...")
with codecs.open(filepath, "r", encoding=enc) as f:
for line in f:
line.encode("utf-8")
return enc
except error:
continue
return result
``` |
{
"source": "jodhanijanki/django-dashboard-light",
"score": 2
} |
#### File: django-dashboard-light/authentication/models.py
```python
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
class UserType(models.Model):
user_type = models.CharField(max_length=10,blank=True, null=True)
user=models.ForeignKey(User,on_delete=models.DO_NOTHING,null=True,blank=True,related_name='user_type')
# user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return self.user_type
class BankDetails(models.Model):
AccountType = [
(1, '--------------'),
(2, 'Saving Account'),
(3, 'Current Account'),
(4, 'Overdraft Account')
]
user=models.ForeignKey(User,on_delete=models.DO_NOTHING,null=True,blank=True,related_name='bank_detail')
bank_name = models.CharField(max_length=200, null=True, blank=False)
branch_name = models.CharField(max_length=20, null=True, blank=False)
ifsc_code = models.CharField(max_length=20, null=True, blank=False)
account_number = models.CharField(max_length=20,null=True, blank=False)
account_type = models.IntegerField(choices=AccountType, blank=True,null=True)
def __str__(self):
return f'{self.account_number}'
class BankStatement(models.Model):
user=models.ForeignKey(User,on_delete=models.DO_NOTHING,null=True,blank=True,related_name='bank_data')
bankdetails=models.ForeignKey(BankDetails,on_delete=models.DO_NOTHING,null=True,blank=True,related_name='bank_statements')
date = models.DateField(null=True, blank=False)
narration = models.CharField(max_length=100, null=True, blank=False)
ref_no = models.CharField(max_length=40, null=True, blank=False)
value_dt = models.DateField(null=True, blank=False)
withdrawal_amt = models.FloatField(blank=True,null=True)
deposite_amt = models.FloatField(blank=True,null=True)
closing_balance = models.FloatField(blank=True,null=True)
category =models.CharField(max_length=40, null=True, blank=False)
def __str__(self):
return f'{self.narration}'
class Category(models.Model):
name = models.CharField(max_length=255, null=True, blank=False)
def __str__(self):
return f'{self.name}'
class Keyword(models.Model):
name = models.CharField(max_length=255, null=True, blank=False)
category=models.ForeignKey(Category,on_delete=models.DO_NOTHING,null=True,blank=True,related_name='keywords')
def __str__(self):
return f'{self.name}'
class CompanyUser(models.Model):
CompanyType = [
(1, "Partnership"),
(2, "LLP"),
(3, "Pvt Ltd"),
(4, "Ltd"),
(5, "others")
]
BusinessType = [
(1, "FMCG"),
(2, "Manufacturer"),
(3, "Trader"),
(4, "IMPEX"),
(5, "Retailer"),
(6, "IT"),
(7, "Diversified")
]
BusinessSize = [
(1, "Startup"),
(2, "Small"),
(3, "Medium"),
(4, "Large"),
(5, "Corporate"),
(6, "Institution"),
]
user=models.ForeignKey(User,on_delete=models.DO_NOTHING,null=True,blank=True,related_name='company_profile')
first_name = models.CharField(max_length=200, null=True, blank=False)
last_name = models.CharField(max_length=200, null=True, blank=False)
contact_person_name = models.CharField(max_length=200, null=True, blank=False)
contact_person_mobile = models.CharField(max_length=200, null=True,blank=False)
brand_name = models.CharField(max_length=200, null=True, blank=False)
address = models.TextField(null=True, blank=False)
city = models.CharField(max_length=200, null=True, blank=False)
state = models.CharField(max_length=200, null=True, blank=False)
country = models.CharField(max_length=200, null=True, blank=False)
pin_code = models.CharField(max_length=200, null=True, blank=False)
logo = models.ImageField(upload_to='media/', null=True, blank=True)
started_in_year = models.DateField(auto_now=False, auto_now_add=False, blank=True,null=True)
website_address = models.URLField(null=True, blank=True)
company_type = models.IntegerField(choices=CompanyType, blank=True,null=True)
business_type = models.IntegerField(choices=BusinessType, blank=True, null=True)
business_size = models.IntegerField(choices=BusinessSize, blank=True, null=True)
annual_business_turnover = models.DecimalField(blank=True, max_digits=20, null=True, decimal_places=2)
number_of_employees = models.IntegerField(blank=True, null=True)
core_team_members = models.IntegerField(blank=True, null=True)
GST_number = models.CharField(max_length=15, blank=True, null=True)
PAN_number = models.CharField(max_length=10, blank=True, null=True)
description = models.TextField(max_length=10, blank=True, null=True)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
def __str__(self):
return self.first_name
class IndividualUser(models.Model):
CA='CA'
CS='CS'
ProfessionType = [
(CA, "CA"),
(CS, "CS"),
("Tax_Adviser", "Tax Adviser"),
("Legal_Adviser", "Legal Adviser"),
("Analyst", "Analyst"),
("Business_Adviser", "Business Adviser"),
("Finance_Adviser", "Finance Adviser"),
("Diversified", "Diversified")
]
SpecialityType = [
("FMCG", "FMCG"),
("Manufacturer", "Manufacturer"),
("Trader", "Trader"),
("IMPEX", "IMPEX"),
("Retailer", "Retailer"),
("IT", "IT"),
("Diversified", "Diversified")
]
BusinessCater = [
("Startup", "Startup"),
("Small", "Small"),
("Medium", "Medium"),
("Large", "Large"),
("Corporate", "Corporate"),
("Institution", "Institution")
]
Experience = [
(1, "1"),
(2, "2"),
(3, "3"),
(4, "4"),
(5, "5+")
]
# user_id = models.OneToOneField(User, on_delete=models.DO_NOTHING,related_name='individual')
user=models.ForeignKey(User,on_delete=models.DO_NOTHING,null=True,related_name='individual_profile')
first_name = models.CharField(max_length=200, null=True, blank=False)
last_name = models.CharField(max_length=200, null=True, blank=False)
brand_name = models.CharField(max_length=200, null=True, blank=False)
address = models.TextField(null=True, blank=False)
city = models.CharField(max_length=200, null=True, blank=False)
state = models.CharField(max_length=200, null=True, blank=False)
country = models.CharField(max_length=200, null=True, blank=False)
pin_code = models.CharField(max_length=200, null=True, blank=False)
logo = models.ImageField(upload_to='media/', null=True, blank=True)
started_in_year = models.DateField(auto_now=False, auto_now_add=False, blank=True,null=True)
website_address = models.URLField(null=True, blank=True)
profession_type = models.CharField(max_length=20 ,choices=ProfessionType, blank=True,null=True)
speciality_type = models.CharField(max_length=20 ,choices=SpecialityType, blank=True, null=True)
business_cater = models.CharField(max_length=20 ,choices=BusinessCater, blank=True, null=True)
experience = models.IntegerField(choices=Experience, blank=True, null=True)
annual_business_turnover = models.DecimalField(blank=True, max_digits=20, null=True, decimal_places=2)
number_of_employees = models.IntegerField(blank=True, null=True)
core_team_members = models.IntegerField(blank=True, null=True)
GST_number = models.CharField(max_length=15, blank=True, null=True)
PAN_number = models.CharField(max_length=10, blank=True, null=True)
description = models.TextField(max_length=10, blank=True, null=True)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# bank_details = models.ForeignKey(BankDetails, null=True)
# user_type = models.ForeignKey(UserType, null=True)
def __str__(self):
return self.first_name
``` |
{
"source": "jodhernandezbe/standardizedinventories",
"score": 3
} |
#### File: standardizedinventories/stewi/egrid.py
```python
import pandas as pd
import numpy as np
from stewi.globals import *
# Set the year
eGRIDyear = '2016'
year_last2 = eGRIDyear[2:]
#filepath
eGRIDfilepath = '../eGRID/'
egrid_file_begin = {"2014":"eGRID2014", "2016":"egrid2016"}
egrid_file_version = {"2014":"_v2","2016":""}
#filename for 2014
eGRIDfile = eGRIDfilepath + egrid_file_begin[eGRIDyear] + '_Data' + egrid_file_version[eGRIDyear] + '.xlsx'
pltsheetname = 'PLNT'+ year_last2
untsheetname = 'UNT' + year_last2
# Import list of fields from egrid that are desired for LCI
def imp_fields(fields_txt):
egrid_req_fields_df = pd.read_csv(fields_txt, header=None)
egrid_req_fields = list(egrid_req_fields_df[0])
return egrid_req_fields
egrid_required_fields = (imp_fields(data_dir+'egrid_required_fields.txt'))
# Import egrid file
egrid = pd.read_excel(eGRIDfile, sheet_name=pltsheetname, skipinitialspace = True)
#drop first row which are column name abbreviations
egrid = egrid.drop([0])
#use_cols not working so drop them after import
#get list of columns not in the required fields and drop them
colstodrop = list(set(list(egrid.columns)) - set(egrid_required_fields))
egrid2 = egrid.drop(colstodrop,axis=1)
def egrid_unit_convert(value,factor):
new_val = value*factor;
return new_val;
#Read in unit sheet to get comment fields related to source of heat,NOx,SO2, and CO2 emission estimates
unit_egrid_required_fields = (imp_fields(data_dir+'egrid_unit_level_required_fields.txt')) #@author: Wes
unit_egrid = pd.read_excel(eGRIDfile, sheet_name=untsheetname, skipinitialspace = True)
#drop first row which are column name abbreviations
unit_egrid = unit_egrid.drop([0])
#Correction for 2014 unit sheet in column name
if eGRIDyear == '2014':
unit_egrid = unit_egrid.rename(columns={'Unit unadjusted annual heat input \n(MMBtu)':'Unit unadjusted annual heat input (MMBtu)'})
#get list of columns not in the required fields and drop them
colstodrop = list(set(list(unit_egrid.columns)) - set(unit_egrid_required_fields))
unit_egrid2 = unit_egrid.drop(colstodrop,axis=1)
#Import mapping between heat,NOx,SO2, and CO2 emissions source comments and reliability scores. Merge one by one.
rel_scores_heat_SO2_CO2_NOx = pd.read_csv(data_dir+'eGRID_unit_level_reliability_scores.csv')
unit_egrid2 = unit_egrid2.merge(rel_scores_heat_SO2_CO2_NOx, left_on =['Unit unadjusted annual heat input source'], right_on =['Source'], how = 'left')
unit_egrid2 = unit_egrid2.rename(columns= {'ReliabilityScore':'ReliabilityScore_heat'})
del unit_egrid2['Source']
unit_egrid2 = unit_egrid2.merge(rel_scores_heat_SO2_CO2_NOx, left_on =['Unit unadjusted annual NOx emissions source'], right_on =['Source'], how = 'left')
unit_egrid2 = unit_egrid2.rename(columns= {'ReliabilityScore':'ReliabilityScore_NOx'})
del unit_egrid2['Source']
unit_egrid2 = unit_egrid2.merge(rel_scores_heat_SO2_CO2_NOx, left_on =['Unit unadjusted annual SO2 emissions source'], right_on =['Source'], how = 'left')
unit_egrid2 = unit_egrid2.rename(columns= {'ReliabilityScore':'ReliabilityScore_SO2'})
del unit_egrid2['Source']
unit_egrid2 = unit_egrid2.merge(rel_scores_heat_SO2_CO2_NOx, left_on =['Unit unadjusted annual CO2 emissions source'], right_on =['Source'], how = 'left')
unit_egrid2 = unit_egrid2.rename(columns= {'ReliabilityScore':'ReliabilityScore_CO2'})
del unit_egrid2['Source']
unit_emissions_with_rel_scores = ['Heat','Nitrogen oxides','Sulfur dioxide','Carbon dioxide']
#Calculate reliability scores at plant level using flow-weighted average.
rel_score_cols = ['ReliabilityScore_heat','ReliabilityScore_NOx','ReliabilityScore_SO2','ReliabilityScore_CO2']
flows_used_for_weighting = ['Unit unadjusted annual heat input (MMBtu)',
'Unit unadjusted annual NOx emissions (tons)',
'Unit unadjusted annual SO2 emissions (tons)',
'Unit unadjusted annual CO2 emissions (tons)']
#First multiply by flows
unit_egrid2[rel_score_cols] = np.multiply(unit_egrid2[rel_score_cols],unit_egrid2[flows_used_for_weighting])
#Aggregate the multiplied scores at the plant level
unit_egrid3 = unit_egrid2.groupby(['DOE/EIA ORIS plant or facility code'])['ReliabilityScore_heat','ReliabilityScore_NOx','ReliabilityScore_SO2','ReliabilityScore_CO2'].sum().reset_index()
unit_egrid4 = unit_egrid2.groupby(['DOE/EIA ORIS plant or facility code'])['Unit unadjusted annual heat input (MMBtu)','Unit unadjusted annual NOx emissions (tons)','Unit unadjusted annual SO2 emissions (tons)','Unit unadjusted annual CO2 emissions (tons)'].sum().reset_index()
unit_egrid5 = unit_egrid3.merge(unit_egrid4, left_on = ['DOE/EIA ORIS plant or facility code'],right_on = ['DOE/EIA ORIS plant or facility code'], how = 'inner')
unit_egrid5[rel_score_cols] = np.divide(unit_egrid5[rel_score_cols],unit_egrid5[flows_used_for_weighting])
#Throws a RuntimeWarning about true_divide
unit_egrid5[unit_emissions_with_rel_scores] = unit_egrid5[rel_score_cols]
unit_egrid5['FacilityID'] = unit_egrid5['DOE/EIA ORIS plant or facility code']
rel_scores_heat_SO2_CO2_NOx_by_facility = pd.melt(unit_egrid5, id_vars=['FacilityID'], value_vars=unit_emissions_with_rel_scores, var_name='FlowName', value_name='ReliabilityScore')
##Create FLOWBYFACILITY output
flowbyfac_prelim = egrid2[['DOE/EIA ORIS plant or facility code',
'Plant primary fuel',
'Plant total annual heat input (MMBtu)',
'Plant annual net generation (MWh)',
'Plant annual NOx emissions (tons)',
'Plant annual SO2 emissions (tons)',
'Plant annual CO2 emissions (tons)',
'Plant annual CH4 emissions (lbs)',
'Plant annual N2O emissions (lbs)',
'CHP plant useful thermal output (MMBtu)']]
flowbyfac_prelim = flowbyfac_prelim.rename(columns={'DOE/EIA ORIS plant or facility code':'FacilityID',
'Plant total annual heat input (MMBtu)':'Heat',
'Plant annual net generation (MWh)':'Electricity',
'Plant annual NOx emissions (tons)':'Nitrogen oxides',
'Plant annual SO2 emissions (tons)':'Sulfur dioxide',
'Plant annual CO2 emissions (tons)':'Carbon dioxide',
'Plant annual CH4 emissions (lbs)':'Methane',
'Plant annual N2O emissions (lbs)':'Nitrous oxide',
'CHP plant useful thermal output (MMBtu)':'Steam'})
nox_so2_co2 = egrid_unit_convert(flowbyfac_prelim[['Nitrogen oxides','Sulfur dioxide','Carbon dioxide']],USton_kg)
ch4_n2o = egrid_unit_convert(flowbyfac_prelim[['Methane','Nitrous oxide']],lb_kg)
heat_steam = egrid_unit_convert(flowbyfac_prelim[['Heat','Steam']],MMBtu_MJ)
electricity = egrid_unit_convert(flowbyfac_prelim[['Electricity']],MWh_MJ)
facilityid = flowbyfac_prelim[['FacilityID','Plant primary fuel']]
frames = [facilityid,nox_so2_co2,ch4_n2o,heat_steam,electricity]
flowbyfac_stacked = pd.concat(frames,axis = 1)
#Create flowbyfac
flowbyfac = pd.melt(flowbyfac_stacked, id_vars=['FacilityID','Plant primary fuel'], value_vars=list(flowbyfac_stacked.columns[2:]),
var_name='FlowName', value_name='FlowAmount')
#Dropping zero emissions by changing name to NA
#Do not drop zeroes - WI 1/16/2019
#flowbyfac['FlowAmount'] = flowbyfac['FlowAmount'].replace({0:None})
#Dropping na emissions
flowbyfac = flowbyfac.dropna(subset=['FlowAmount'])
flowbyfac = flowbyfac.sort_values(by = ['FacilityID'], axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
#Merge in heat_SO2_CO2_NOx reliability scores calculated from unit sheet
flowbyfac = flowbyfac.merge(rel_scores_heat_SO2_CO2_NOx_by_facility,left_on = ['FacilityID','FlowName'],right_on = ['FacilityID','FlowName'], how = 'left')
#Assign electricity to a reliabilty score of 1
flowbyfac['ReliabilityScore'].loc[flowbyfac['FlowName']=='Electricity'] = 1
#Replace NaNs with 5
flowbyfac['ReliabilityScore']=flowbyfac['ReliabilityScore'].replace({None:5})
#Methane and nitrous oxide reliability scores
#Assign 3 to all facilities except for certain fuel types where measurements are taken
flowbyfac.loc[(flowbyfac['FlowName']=='Methane') | (flowbyfac['FlowName']=='Nitrous oxide')
,'ReliabilityScore'] = 3
#For all but selected fuel types, change it to 2
flowbyfac.loc[((flowbyfac['FlowName']=='Methane') | (flowbyfac['FlowName']=='Nitrous oxide')) &
((flowbyfac['Plant primary fuel'] != 'PG') | (flowbyfac['Plant primary fuel'] != 'RC') |
(flowbyfac['Plant primary fuel'] != 'WC') | (flowbyfac['Plant primary fuel'] != 'SLW'))
,'ReliabilityScore'] = 2
#Now the plant primary fuel is no longer needed
flowbyfac = flowbyfac.drop(columns = ['Plant primary fuel'])
#Import flow compartments
flow_compartments = pd.read_csv(data_dir+'eGRID_flow_compartments.csv',header=0)
#Merge in with flowbyfacility
flowbyfac = pd.merge(flowbyfac,flow_compartments,on='FlowName',how='left')
#Drop original name
flowbyfac = flowbyfac.drop(columns='OriginalName')
#Write flowbyfacility file to output
flowbyfac.to_csv(output_dir + 'flowbyfacility/eGRID_'+ eGRIDyear +'.csv', index=False)
##Creation of the facility file
#Need to change column names manually
facility=egrid2[['Plant name','Plant operator name','DOE/EIA ORIS plant or facility code',
'Plant state abbreviation','eGRID subregion acronym','Plant county name',
'Plant latitude', 'Plant longitude','Plant primary fuel',
'Plant primary coal/oil/gas/ other fossil fuel category','NERC region acronym',
'Balancing Authority Name','Balancing Authority Code',
'Plant coal generation percent (resource mix)',
'Plant oil generation percent (resource mix)',
'Plant gas generation percent (resource mix)',
'Plant nuclear generation percent (resource mix)',
'Plant hydro generation percent (resource mix)',
'Plant biomass generation percent (resource mix)',
'Plant wind generation percent (resource mix)',
'Plant solar generation percent (resource mix)',
'Plant geothermal generation percent (resource mix)',
'Plant other fossil generation percent (resource mix)',
'Plant other unknown / purchased fuel generation percent (resource mix)']]
facility = facility.rename(columns={'Plant name':'FacilityName',
'DOE/EIA ORIS plant or facility code':'FacilityID',
'Plant state abbreviation':'State'})
len(facility)
#2016: 9709
#2014: 8503
facility.to_csv(output_dir + '/facility/eGRID_' + eGRIDyear + '.csv', index=False)
##Write flows file
flows = flowbyfac[['FlowName','Compartment','Unit']]
flows = flows.drop_duplicates()
flows.to_csv(output_dir + '/flow/eGRID_' + eGRIDyear + '.csv', index=False)
#Write metadata
eGRID_meta = globals.inventory_metadata
#Set time manually for now
eGRID_meta['SourceAquisitionTime'] = 'Wed May 10 10:00:01 2018'
eGRID_meta['SourceType'] = 'Static File'
eGRID_meta['SourceFileName'] = eGRIDfile
eGRID_meta['SourceURL'] = url
eGRID_meta['SourceVersion'] = egrid_file_version[eGRIDyear]
write_metadata('eGRID',eGRIDyear, eGRID_meta)
#VALIDATE
egrid_national_totals = pd.read_csv(data_dir + 'eGRID_'+ eGRIDyear + '_NationalTotals.csv',header=0,dtype={"FlowAmount":np.float})
egrid_national_totals = unit_convert(egrid_national_totals, 'FlowAmount', 'Unit', 'lbs', lb_kg, 'FlowAmount')
egrid_national_totals = unit_convert(egrid_national_totals, 'FlowAmount', 'Unit', 'tons', USton_kg, 'FlowAmount')
egrid_national_totals = unit_convert(egrid_national_totals, 'FlowAmount', 'Unit', 'MMBtu', MMBtu_MJ, 'FlowAmount')
egrid_national_totals = unit_convert(egrid_national_totals, 'FlowAmount', 'Unit', 'MWh', MWh_MJ, 'FlowAmount')
# drop old unit
egrid_national_totals.drop('Unit',axis=1,inplace=True)
validation_result = validate_inventory(flowbyfac, egrid_national_totals, group_by='flow', tolerance=5.0)
write_validation_result('eGRID',eGRIDyear,validation_result)
``` |
{
"source": "jodhus/pyebur128",
"score": 2
} |
#### File: jodhus/pyebur128/setup.py
```python
import platform
from setuptools import setup, find_packages, Extension
from distutils.ccompiler import new_compiler
from distutils.msvccompiler import MSVCCompiler
def is_msvc():
'''Checks to see if the detected C compiler is MSVC.'''
try:
# This depends on _winreg, which is not available on not-Windows.
from distutils.msvc9compiler import MSVCCompiler as MSVC9Compiler
except ImportError:
MSVC9Compiler = None
try:
from distutils._msvccompiler import MSVCCompiler as MSVC14Compiler
except ImportError:
MSVC14Compiler = None
msvc_classes = tuple(filter(None, (MSVCCompiler,
MSVC9Compiler,
MSVC14Compiler)))
cc = new_compiler()
return isinstance(cc, msvc_classes)
macros = []
# MSVC won't use <math.h> unless this is defined.
if platform.system() == 'Windows' and is_msvc():
macros.append(('_USE_MATH_DEFINES', None))
extensions = [
Extension(
name='pyebur128.pyebur128',
sources=[
"src/pyebur128/pyebur128.pyx",
"src/lib/ebur128/ebur128.c",
],
include_dirs=[
'.',
'src/lib/ebur128',
'src/lib/ebur128/queue',
],
define_macros=macros,
),
]
if __name__ == '__main__':
from Cython.Build import cythonize
setup(
ext_modules=cythonize(
extensions,
compiler_directives={'language_level': 3, 'embedsignature': True},
),
)
```
#### File: pyebur128/tests/test_loudness_shortterm.py
```python
import pytest
from pyebur128 import (
ChannelType, MeasurementMode, R128State, get_loudness_shortterm
)
import soundfile as sf
def get_max_loudness_shortterm(filename):
'''Open the WAV file and get the loudness in short-term (3s) chunks.'''
with sf.SoundFile(filename) as wav:
state = R128State(wav.channels,
wav.samplerate,
MeasurementMode.MODE_S)
if wav.channels == 5:
state.set_channel(0, ChannelType.LEFT)
state.set_channel(1, ChannelType.RIGHT)
state.set_channel(2, ChannelType.CENTER)
state.set_channel(3, ChannelType.LEFT_SURROUND)
state.set_channel(4, ChannelType.RIGHT_SURROUND)
        # 100 ms buffer / 10 Hz refresh rate.
max_shortterm = float('-inf')
total_frames_read = 0
for block in wav.blocks(blocksize=int(wav.samplerate / 10)):
frames_read = len(block)
total_frames_read += frames_read
for sample in block:
state.add_frames(sample, 1)
# Invalid results before the first 3 seconds.
if total_frames_read >= 3 * wav.samplerate:
shortterm = get_loudness_shortterm(state)
max_shortterm = max(shortterm, max_shortterm)
del state
return max_shortterm
def test_max_loudness_shortterm(r128_test_data):
'''Test for the loudness value of a single file in short-term (3s)
chunks.
'''
expected = [
('seq-3341-10-1-24bit.wav', -23.0),
('seq-3341-10-2-24bit.wav', -23.0),
('seq-3341-10-3-24bit.wav', -23.0),
('seq-3341-10-4-24bit.wav', -23.0),
('seq-3341-10-5-24bit.wav', -23.0),
('seq-3341-10-6-24bit.wav', -23.0),
('seq-3341-10-7-24bit.wav', -23.0),
('seq-3341-10-8-24bit.wav', -23.0),
('seq-3341-10-9-24bit.wav', -23.0),
('seq-3341-10-10-24bit.wav', -23.0),
('seq-3341-10-11-24bit.wav', -23.0),
('seq-3341-10-12-24bit.wav', -23.0),
('seq-3341-10-13-24bit.wav', -23.0),
('seq-3341-10-14-24bit.wav', -23.0),
('seq-3341-10-15-24bit.wav', -23.0),
('seq-3341-10-16-24bit.wav', -23.0),
('seq-3341-10-17-24bit.wav', -23.0),
('seq-3341-10-18-24bit.wav', -23.0),
('seq-3341-10-19-24bit.wav', -23.0),
('seq-3341-10-20-24bit.wav', -23.0),
]
tolerance = 0.1
status_msg = '==== \'{}\': want {} \u00b1 {} ---> '
print('\n')
for test in expected:
print(status_msg.format(test[0], test[1], tolerance), end='')
result = get_max_loudness_shortterm(r128_test_data / test[0])
print('got {} '.format(round(result, 1)), end='')
assert (round(result, 1) <= test[1] + tolerance and
round(result, 1) >= test[1] - tolerance)
print('---> PASSED!')
``` |
{
"source": "jodiefostersarmy/T4A2",
"score": 4
} |
#### File: T4A2/src/commands.py
```python
from main import db # This is the db instance created by SQLAlchemy
from flask import Blueprint # Use blueprints instead of passing the app object around
db_commands = Blueprint("db-custom", __name__) # Creates the blueprint
"""
A Blueprint is essentially a constructor, and is similar to a Flask application object, but is not an application.
It is a set of operations which can be registered on an application, even multiple times.
They provide separation at the Flask level, share app config, and record operations that are applied to the application object when the blueprint is registered.
However, you cannot unregister a blueprint once the application has been created, hence the reason why we drop the tables
in testing if we add a new column or change the validations.
"""
@db_commands.cli.command("create")
def create_db():
db.create_all()
print("Tables created")
@db_commands.cli.command("drop") # this function will run when "flask db-custom drop" is run"
def drop_db():
db.drop_all() # Drop all tables
db.engine.execute("DROP TABLE IF EXISTS alembic_version;") # Drop table for migrations
print("Tables deleted") # Print message to indicate tables are dropped
@db_commands.cli.command("seed") # this fronction will run when "flask db-custom seed" is run"
def seed_db():
"""Create arbitrary data for testing"""
from models.User import User # Importing the User model
from models.Word import Word # Importing the Profile model
from models.SavedWord import SavedWord
from main import bcrypt # Hashing module for the passwords
from faker import Faker # Importing the faker module for fake data
import random # Importing random from the python standard library
faker = Faker()
users = []
words = []
folders = []
for i in range(5): # Do this 5 times
user = User() # Create an user object from the User model
user.name = faker.name() # assigns fake name for user object from faker module
user.email = f"<EMAIL>" # Assign an email to the user object
user.password = <PASSWORD>.generate_password_hash("<PASSWORD>").decode("utf-8") # Assign a hashed password to the user object
user.mobile_number = faker.msisdn()
user.join_date = faker.msisdn()
db.session.add(user) # Add the user to the db session
users.append(user) # Append the user to the users list
db.session.commit() # Commit the session to the db
for i in range(5):
word = Word() # Create a word object from the Word model
word.word = f"Word {i+1}" # Add the word text to the word object
word.definition = f"Definition {i+1}" # Add a definition to the word object
word.pronunciation = f"Pronunciation {i+1}" # Add a pronunciation to the word object
word.user_id = users[i].id # Add a user_id to the word object. This comes from real ids from the users list
words.append(word)
db.session.add(word) # Add the word to the session
db.session.commit() # Commit the session to the database
for i in range(5):
savedWord = SavedWord()
savedWord.user_id = users[i].id
savedWord.word_id = words[i].id
db.session.add(savedWord)
db.session.commit()
print("Tables seeded") # Print a message to let the user know they
```
#### File: src/controllers/auth_controller.py
```python
from flask import Blueprint, request, jsonify, abort, render_template, flash, redirect, url_for
from main import db, bcrypt
from models.User import User
from schemas.UserSchema import user_schema, users_schema
from flask_jwt_extended import create_access_token
import time
from datetime import timedelta
from flask_login import login_user, current_user, logout_user
auth = Blueprint('auth', __name__)
@auth.route("/create-account", methods=["GET","POST"])
def auth_register():
error = None
if request.method == "GET":
return render_template('register.html')
name = request.form.get('name')
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
# user_number = User.query.filter_by(mobile_number=user_fields["mobile_number"]).first()
if user:
error='This email has already been registered'
return render_template('register.html', error=error)
# if user_number:
# return abort(400, description="Mobile already registered") # kept this here, for when I fix up the app post bootcamp
user = User()
user.name = name
# user.mobile_number = user_fields["mobile_number"] # kept this here, for when I fix up the app post bootcamp
user.email = email
# if admin:
# user.is_admin = admin # kept this here, for when I fix up the app post bootcamp
user.password = <PASSWORD>.generate_password_hash(password).decode("utf-8")
user.join_date = time.time()
db.session.add(user)
db.session.commit()
return redirect(url_for('user.all_users'))
@auth.route("/login", methods=["GET","POST"])
def auth_login():
if request.method == "GET":
return render_template('login.html')
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if not user or not bcrypt.check_password_hash(user.password, password):
return abort(401, description="Incorrect username or password")
print(current_user)
login_user(user)
print(current_user.id)
return redirect(url_for('user.search'))
@auth.route("/", methods=["GET"])
def index():
return render_template('search.html')
@auth.route("/logout", methods=["GET"])
def logout():
logout_user()
return redirect(url_for('auth.auth_register'))
```
#### File: src/controllers/user_controller.py
```python
from models.User import User
from schemas.UserSchema import users_schema, user_schema
from models.Word import Word
from schemas.WordSchema import word_schema, words_schema
from models.SavedWord import SavedWord
from schemas.SavedWordSchema import savedword_schema, savedwords_schema
from main import db, bcrypt
from datetime import timedelta
from flask import Blueprint, request, jsonify, abort, render_template, Response
import json
import requests
from services.auth_service import verify_user
user = Blueprint('user', __name__, url_prefix="/user")
@user.route("/", methods=["GET"])
def all_users():
"""Return all users"""
users = User.query.all()
return render_template("users_index.html", users = users)
# return jsonify(users_schema.dump(users))
@user.route("/<int:id>", methods=["GET"])
def get_user(id):
"""Return single user"""
# if user.id != id: # note to educator: yes, I know this isn't dry, I should have turned this into a service. Will fix this post bootcamp.
# return abort(401, description="You are not authorized to view this database")
user = User.query.get(id)
if user:
return render_template("account_details.html", user=user) # we assign the variable we want to access for our html template to the SQLalchemy query we have just defined.
else:
return "This user does not exist!" # turn this into an error page
@user.route("/<int:id>", methods=["DELETE"])
def delete_user(id):
"""Delete single user"""
user = User.query.get(id)
db.session.delete(user)
db.session.commit()
return "User deleted"
@user.route("/<int:id>", methods=["PUT", "PATCH"]) # when browser hits this endpoint
def update_user(id): # it will run user update method
"""Update single user""" # i want to update a single user
user_fields = user_schema.load(request.json) # I will load all the attributes for the User model
user = User.query.filter_by(id=id) # I want to select the user with id = <int:id> from the URI
user.update(user_fields) # I want you to update the User account with fields input in the JSON body from insomnia
db.session.commit() # commit session to db with updated details
return "Updated User" # Return if successful
@user.route("/<int:id>/words", methods=["GET"])
def saved_words(id, user=None):
"""Return words saved by specific user"""
# if user.id != id:
# return abort(401, description="You are not authorized to view this database")
saved_word = SavedWord.query.filter_by(user_id=id)
is_there_a_word = SavedWord.query.filter_by(user_id=id).first()
if not is_there_a_word:
return render_template("no_words.html")
else:
return render_template("user_words.html", saved=saved_word)
@user.route("/<int:id>/save", methods=["POST"])
def save_user_word(id):
"""Save word by user"""
saveword_fields = savedword_schema.load(request.json)
save_word = SavedWord.query.filter_by(word_id=saveword_fields["word_id"], user_id=id).first()
"""
word_id is equal to post input.
user_id is equal to id in uri
first() accesses first element in list return as a query is always returned as a list.
:return: filtered query data
:rtype: list
"""
if save_word: # checks saved words to see if save_word exists in db
return abort(400, description='Word is already saved') # if exist, throw error and return message
new_save = SavedWord()
new_save.user_id = id
new_save.word_id = saveword_fields["word_id"]
new_save.date_added = 0
new_save.notification = saveword_fields["notification"]
db.session.add(new_save)
db.session.commit()
return jsonify(savedword_schema.dump(new_save))
@user.route("/<int:user_id>/words/<int:word_id>", methods=["DELETE"])
def delete_user_word(user_id, word_id):
"delete a user saved word"
saved_word = SavedWord.query.filter_by(user_id=user_id, word_id=word_id).first()
if saved_word:
db.session.delete(saved_word)
db.session.commit()
return "Word deleted"
else:
return abort(400, description='This word does not exist in your saved words!')
@user.route("/search", methods=["GET", "POST"])
def search():
if request.method == "GET":
return render_template('search.html')
else:
word = request.form.get('word')
saved_word = Word.query.filter_by(word=word).first()
if saved_word:
return render_template("search_results.html", saved=saved_word, word=word)
else:
r = requests.get(f'https://dictionaryapi.com/api/v3/references/collegiate/json/{word}?key=<KEY>')
return render_template("search_results.html", request=r.json(), word_searched=word)
``` |
{
"source": "jodiemorton/4th-umpire",
"score": 3
} |
#### File: 4th-umpire/ML/api_data.py
```python
import pandas as pd
import numpy as np
delivery_data = pd.read_csv('data/deliveries.csv')
match_data = pd.read_csv('data/matches.csv')
team_data = pd.read_csv('data/teams.csv')
city_data = pd.read_csv('data/city_id.csv')
def get_team(id):
return team_data[(team_data["team_id"] == id)]
def get_match(id):
return match_data[(match_data["id"] == id)]
def get_winner(match):
match_info = get_match(match).values
winner = match_info[0,10]
by_run = match_info[0,11]
by_wicket = match_info[0,12]
print(winner,by_run,by_wicket)
return (winner,by_run,by_wicket)
def get_inning_match(inning):
data = delivery_data[(delivery_data["inning"]==inning)]
inn = delivery_data[(delivery_data["inning"]==1)]
return_data = []
for match in range(1,636):
match_info = get_match(match)
winner = get_winner(match)
print("iter:",match)
for_run = inn[(inn["match_id"]==match)]
for_run["run"] = for_run.total_runs.cumsum()
run = for_run['run'].max()
print("max_run : " ,run)
match = data[(data["match_id"]==match)]
match["run"] = match.total_runs.cumsum()
match["balls"] = 6 * (match["over"] - 1 ) + match["ball"]
match["player_dismissed"] = np.where(match["player_dismissed"].isnull(),0,1)
match["wicket"] = match.player_dismissed.cumsum()
match["winner"] = winner[0]
match["win_by_runs"] = winner[1]
match["win_by_wickets"] = winner[2]
match["1st_inning_run"] = run
city = match_info["city"].astype(str)
city_id = np.nan
for index, row in city_data.iterrows():
#print(row['id'], row['name'])
if row['name'] in str(city):
city_id = row['id']
break
match["city"] = city_id
return_data.append(match)
my_data = pd.concat(return_data)
my_data = pd.DataFrame(my_data)
return my_data
#data = get_1st_inning_match()
#data.to_csv("new.csv")
def get_1st_inning_total_run():
data = delivery_data[(delivery_data["inning"]==2)]
return_data = []
for match in range(636):
print(match)
match = data[(data["match_id"]==match)]
match["run"] = match.total_runs.cumsum()
run = match['run'].max()
match["total"] = run
return_data.append(match)
data = pd.concat(return_data)
data = data.iloc[:, [22]].values
data = pd.DataFrame(data)
return data
def get_1st_inning_total_ball():
data = pd.read_csv('data/1st_inning.csv')
return_data = []
for match in range(636):
print(match)
match = data[(data["match_id"]==match)]
run = match['ball'].max()
match["total"] = run
return_data.append(match)
data = pd.concat(return_data)
data = data.iloc[:, [22]].values
data = pd.DataFrame(data)
return data
max_run = get_1st_inning_total_run()
def append_final_run():
dataset = pd.read_csv('data/deliveries_1st_inning.csv')
run = pd.read_csv('data/run_1st_inning.csv')
y = run.iloc[:, 4].values
run = pd.DataFrame(y)
data = dataset.join(run)
data = data.drop(data.columns[[0]],axis=1)
return data
def get_2nd_inning_data():
data = get_inning_match(2)
return data
plk = get_2nd_inning_data()
kl = plk.join(max_run)
plk.to_csv("2nd_inng.csv")
max_run.to_csv("maxrun.csv")
y = kl.iloc[:,[0,2,3,21,22,23,24,25,26,27,28,29]].values
klp = pd.DataFrame(y)
klp.to_csv("main_2nd.csv")
k_dataset = pd.read_csv("main_2nd.csv")
k_run = pd.read_csv("maxrun.csv")
k_data = k_dataset.join(k_run)
k_data.to_csv("main_2nd.csv")
def get_2st_inning_total_ball():
data = pd.read_csv('main_2nd.csv')
return_data = []
for match in range(636):
print(match)
match = data[(data["match_id"]==match)]
run = match["balls"].max()
match["total_ball"] = run
return_data.append(match)
data = pd.concat(return_data)
data = pd.DataFrame(data)
return data
total_ball = get_2st_inning_total_ball()
total_final = total_ball[total_ball.columns[[2,4,5,23,24,25,26,27,28,29,30,32,33]]]
total_final.to_csv("total_final.csv")
#winner(0)
def add_city_code():
all_data = match_data
return_data = []
ctr = 0
for match in range(1,636):
print(match)
match = get_match(match)
city = match["city"].astype(str)
city_id = np.nan
for index, row in city_data.iterrows():
#print(row['id'], row['name'])
if row['name'] in str(city):
city_id = row['id']
break
match["city_id"] = city_id
return_data.append(match)
my_data = pd.concat(return_data)
my_data = pd.DataFrame(my_data)
return my_data
new_data = add_city_code()
```
#### File: fourth_umpire/predictions/pred.py
```python
import numpy as np
from sklearn.externals import joblib
import pickle
import os
script_dir = os.path.dirname(__file__)
def pre_match_predict(season,team1,team2,city):
rel_path = "pre_pred/pre_pred.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
regressor = joblib.load(abs_file_path)
rel_path = "pre_pred/pre_hot.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
enc = pickle.load( open( abs_file_path, "rb" ))
X_test = [[season,team1,team2,city]]
X_test = enc.transform(X_test).toarray()
print(len(X_test[0]))
y_pred = regressor.predict(X_test)
print("our_prediction:",y_pred)
return y_pred[0]
def predict_1st_inn(team_batting,team_bowling,run,ball,wicket,city):
rel_path = "1st_inn/1st_inn_pred.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
regressor = joblib.load(abs_file_path)
rel_path = "1st_inn/1st_inn_hot.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
enc = pickle.load( open( abs_file_path, "rb" ))
X_test = [[team_batting,team_bowling,run,ball,wicket,city]]
X_test = enc.transform(X_test).toarray()
print(len(X_test[0]))
y_pred = regressor.predict(X_test)
print("our_prediction:",y_pred)
return y_pred[0]
def predict_if_bat_win(team_batting,team_bowling,run,ball,wicket,target,city):
rel_path = "2nd_inn/bat_win/2nd_inn_bat_win_wicket.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
regressor = joblib.load(abs_file_path)
rel_path = "2nd_inn/bat_win/2nd_inn_bat_win_hot.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
enc = pickle.load( open( abs_file_path, "rb" ))
X_test = [[team_batting,team_bowling,run,ball,wicket,target,city]]
X_test = enc.transform(X_test).toarray()
y_pred = regressor.predict(X_test)
print("our_prediction:bat_win:wicket",y_pred)
return y_pred[0]
def predict_if_bowl_win(team_batting,team_bowling,run,ball,wicket,target,city):
rel_path = "2nd_inn/bowl_win/2nd_inn_bowl_win_run.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
regressor = joblib.load(abs_file_path)
rel_path = "2nd_inn/bowl_win/2nd_inn_bowl_win_hot.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
enc = pickle.load( open( abs_file_path, "rb" ))
X_test = [[team_batting,team_bowling,run,ball,wicket,target,city]]
X_test = enc.transform(X_test).toarray()
y_pred = regressor.predict(X_test)
print("our_prediction:bowl_win:wicket",y_pred)
return y_pred[0]
def predict_2nd_end_ball(team_batting,team_bowling,run,ball,wicket,target,city):
rel_path = "2nd_inn/end/2nd_inn_end.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
regressor = joblib.load(abs_file_path)
rel_path = "2nd_inn/end/2nd_inn_end_hot.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
enc = pickle.load( open( abs_file_path, "rb" ))
X_test = [[team_batting,team_bowling,run,ball,wicket,target,city]]
X_test = enc.transform(X_test).toarray()
y_pred = regressor.predict(X_test)
print("our_prediction:2nd_inn_end",y_pred)
return y_pred[0]
def predict_2nd_inn(team_batting,team_bowling,run,ball,wicket,target,city):
rel_path = "2nd_inn/who_win/2nd_inn_win_pred.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
regressor = joblib.load(abs_file_path)
rel_path = "2nd_inn/who_win/2nd_inn_win_hot.pkl"
abs_file_path = os.path.join(script_dir, rel_path)
enc = pickle.load( open( abs_file_path, "rb" ))
X_test = [[team_batting,team_bowling,run,ball,wicket,target,city]]
X_test = enc.transform(X_test).toarray()
y_pred = regressor.predict(X_test)
end_ball = predict_2nd_end_ball(team_batting,team_bowling,run,ball,wicket,target,city)
if y_pred[0]>=0.5:
info = predict_if_bat_win(team_batting,team_bowling,run,ball,wicket,target,city)
else:
info = predict_if_bowl_win(team_batting,team_bowling,run,ball,wicket,target,city)
return [y_pred[0],info,end_ball]
def get_team(id):
teams = {
"1":'<NAME>',
"2":'Royal Challengers Bangalore',
"3":'Chennai Super Kings',
"4":'Kings XI Punjab',
"5":'Rajasthan Royals',
"6":'Delhi Daredevils',
"7":'Mumbai Indians',
"8":'Kolkata Knight Riders'
}
return teams[id]
#pred = predict_1st_inn(2,5,12,15,2,4)
#print("---------first-----------",pred)
#pred = predict_2nd_inn(1,6,12,15,2,174,11)
#print("-----------2nd---------------",pred)
```
#### File: web/fourth_umpire/views.py
```python
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render,HttpResponse
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from fourth_umpire.predictions.pred import *
from .forms import *
from .models import *
# Create your views here.
def main(request):
return render(request, 'fourth_umpire/main.html', {})
def gallery(request):
return render(request, 'fourth_umpire/gallery.html', {})
def icons(request):
return render(request, 'fourth_umpire/icons.html', {})
def services(request):
return render(request, 'fourth_umpire/services.html', {})
def typography(request):
return render(request, 'fourth_umpire/typography.html', {})
def about(request):
return render(request, 'fourth_umpire/about.html', {})
def contact(request):
return HttpResponseRedirect('https://github.com/aasis21/4th_umpire')
def first_inn(request):
if request.method == 'POST':
title_form = InningsFirst(request.POST)
if title_form.is_valid():
team1 = title_form.cleaned_data['team1']
team2 = title_form.cleaned_data['team2']
venue = title_form.cleaned_data['venue']
overs_played = title_form.cleaned_data['overs_played']
runs = title_form.cleaned_data['runs']
wickets = title_form.cleaned_data['wickets_fallen']
run_predicted = int(predict_1st_inn(team1,team2,runs,6*overs_played,wickets,venue)) + 1
return render(request, 'fourth_umpire/firstinn.html', context={'form1': title_form,"runs":run_predicted})
else:
title_form = InningsFirst()
return render(request, 'fourth_umpire/firstinn.html', context={'form1': title_form})
def second_inn(request):
if request.method == 'POST':
title_form = InningsSecond(request.POST)
if title_form.is_valid():
team1 = title_form.cleaned_data['team1']
team2 = title_form.cleaned_data['team2']
venue = title_form.cleaned_data['venue']
runs = title_form.cleaned_data['runs']
overs_played = title_form.cleaned_data['overs_played']
target_chasing = title_form.cleaned_data['target_set']
wickets = title_form.cleaned_data['wickets_fallen']
result = predict_2nd_inn(team1,team2,runs,6*overs_played,wickets,target_chasing,venue)
by_run = 0
by_wicket = 0
if result[0]>0.5:
winner = get_team(team1)
probab = result[0] * 100
by_wicket = int(result[1])
elif result[0]<0.5:
winner = get_team(team2)
probab = (1-result[0])*100
by_run = int(result[1])
else:
winner = "Can be any one"
probab = 50
if(result[2]>=120):
result[2]=119
end = str(int(result[2]/6)+1)
return render(request, 'fourth_umpire/secondinn.html', context={'form2': title_form,
"winner":winner,"probab":probab,"by_run":by_run,"by_wicket":by_wicket,"end":end})
else:
title_form = InningsSecond()
return render(request, 'fourth_umpire/secondinn.html', context={'form2': title_form})
def prematch(request):
if request.method == 'POST':
title_form = PreMatch(request.POST)
if title_form.is_valid():
team1 = title_form.cleaned_data['team1']
team2 = title_form.cleaned_data['team2']
venue = title_form.cleaned_data['venue']
probab = pre_match_predict("2016",team1,team2,venue)
if probab > 0.5 :
winner = get_team(team1)
probab = probab * 100
else:
winner = get_team(team2)
probab = (1- probab) * 100
return render(request, 'fourth_umpire/pre_pred.html', context={'form3': title_form,"winner":winner,"probab":probab})
else:
title_form = PreMatch()
return render(request, 'fourth_umpire/pre_pred.html', context={'form3': title_form})
``` |
{
"source": "jodiemorton/txt",
"score": 2
} |
#### File: txt/app/views.py
```python
from flask import Flask, render_template, session, redirect, url_for, jsonify
from app import app
import pandas as pd
import os
# FORM
from flask_wtf import FlaskForm
from wtforms import StringField, RadioField, SubmitField, validators
from wtforms.widgets import TextArea
# SCRAP
import requests
from bs4 import BeautifulSoup
# TEXT
import re
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
from transformers import pipeline
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "<KEY>"
SECRET_KEY = os.urandom(32)
app.config["SECRET_KEY"] = SECRET_KEY
# abstract_summarizer = pipeline(
# "summarization", model="t5-base", tokenizer="t5-base"
# )
# TODO:
# [ ] Fix RadioField issue
# [ ] Token indices sequence length
# [ ] Validate output length is shorter than input length
class ReadingForm(FlaskForm):
"""input form"""
input_txt = StringField(
"Paste text or url(s) below",
[validators.required()],
widget=TextArea(),
render_kw={"style": "width: 100%; height: 100px"},
)
output_len = StringField(
"Enter desired output length in number of words or ratio of the original text",
[validators.required()],
render_kw={"style": "width: 100%; height: 30px"},
)
# smry_type = StringField(
# "Select summary type: 1 for extractive or 2 for abstractive (under development)",
# [validators.required()],
# render_kw={"style": "width: 100%; height: 30px"},
# )
# smry_type = RadioField(
# "Select summary methods",
# choices=["Extractive summary", "Abstractive summary"],
# default="Extractive summary",
# )
submit = SubmitField(
"Summarize",
render_kw={"class": "btn btn-light", "style": "width: 100px; height: 36px"},
)
def checkInputFormat(input):
"""check if input is raw text, url or else"""
if bool(re.match("http", input.replace('"', "").strip(), re.I)):
inputFormat = "url"
elif sum([t.isalpha() for t in list(input)]) > len(input) / 2:
inputFormat = "text"
else:
inputFormat = "invalid"
return inputFormat
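# Illustrative behaviour of checkInputFormat (examples added for clarity, not
# from the original source):
#     checkInputFormat("https://example.com/article")     -> "url"
#     checkInputFormat("Plain article text to summarize.") -> "text"
#     checkInputFormat("1234 5678 ---")                    -> "invalid"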
def checkOutputFormat(output_len):
"""check if output length is word count or percentage ratio of the original text"""
if float(output_len) < 1:
outputFormat = "ratio"
else:
outputFormat = "word_count"
return outputFormat
def calcOutputLen(outputFormat, article_len, wrd):
"""calc length of the summary. wrd is the user-specified output length or ratio"""
if outputFormat == "word_count":
return int(wrd)
else:
return article_len * float(wrd)
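# Illustrative examples (added for clarity): with checkOutputFormat deciding the
# mode, calcOutputLen("ratio", 1000, "0.2") -> 200.0 words, while
# calcOutputLen("word_count", 1000, "150") -> 150.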
def generate_smry(smry_type, text, word_count):
"""generate summary: extractive (original words) or abstractive (synthesized) """
if smry_type == "1":
# use gensim
return summarize(text, word_count=word_count)
else:
# use transformer T5
# return abstract_summarizer(text, min_length=5, max_length=word_count)[0][
# "summary_text"
# ]
pass
def splitUrl(urllst):
"""when there are multiple urls, split them into individual ones to parse"""
urls = [
"http" + i.replace("\n", "").replace('"', "").strip()
for i in urllst.split("http")
]
if "http" in urls:
urls.remove("http")
return urls
def getUrlEnding(url):
return url.rsplit("/", 1)[-1][-4:]
def getText(url):
"""parse text on the web page"""
page = requests.get(url)
h = ""
if page.status_code == 200 and getUrlEnding(url) not in [".pdf", ".ppt"]:
if getUrlEnding(url) == ".txt":
txt = page.text
else:
soup = BeautifulSoup(page.text, "html.parser")
p = soup.find_all("p")
h = max(
[i.get_text().replace("\n", "").strip() for i in soup.find_all("h1")],
key=len,
)
txt = " ".join([i.get_text().replace('"', "'") for i in p]).replace(
"\n", " "
)
else:
txt = "Content of the site not supported"
return h, txt
def timeSaved(txt, smry_result):
time_original = len(txt.split(" ")) / 250
time_smry = len(smry_result.split(" ")) / 250
return round(time_original - time_smry, 1)
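# Illustrative example (added for clarity): at the assumed reading speed of 250
# words per minute, a 2500-word article reduced to a 250-word summary gives
# timeSaved(...) == round(10.0 - 1.0, 1) == 9.0 minutes saved.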
@app.route("/", methods=["GET", "POST"])
def index():
form = ReadingForm()
if form.validate_on_submit():
session["input_txt"] = form.input_txt.data
session["output_len"] = form.output_len.data
# session["smry_type"] = form.smry_type.data
return redirect(url_for("summarizeText"))
return render_template("index.html", form=form)
@app.route("/summarizeText", methods=["GET", "POST"])
def summarizeText():
txt = session["input_txt"]
wrd = session["output_len"]
# smry_type = session["smry_type"]
smry_type = "1" # temp
inputFormat = checkInputFormat(txt)
header = []
smry = []
time_saved = []
article_len = []
key_words = []
n = 1
if inputFormat == "text":
l = len(txt.split(" "))
article_len.append(l)
wrd_cnt = calcOutputLen(checkOutputFormat(wrd), l, wrd)
gensim_result = generate_smry(smry_type, txt, wrd_cnt)
# fallback measure if every sentence in the text is long
smry_result = (
gensim_result
if len(gensim_result) > 0
else ". ".join(txt.split(".", 3)[:3])
)
header.append("summary")
smry.append(smry_result)
time_saved.append(timeSaved(txt, smry_result))
kword = keywords(txt, words=3, lemmatize=True, pos_filter=["NN", "NNS"])
key_words.append(kword.split("\n"))
elif inputFormat == "url":
for url in splitUrl(txt.replace('"', "")):
h, t = getText(url)
l = len(t.split(" "))
article_len.append(l)
header.append(h)
# default to 20% in case the entered word count is higher than the original word count
wrd_cnt = calcOutputLen(checkOutputFormat(wrd), l, wrd)
gensim_result = generate_smry(smry_type, t, wrd_cnt)
smry_result = (
gensim_result
if len(gensim_result) > 0
else ". ".join(t.split(".", 3)[:3])
)
smry.append(smry_result)
time_saved.append(timeSaved(t, smry_result))
kword = keywords(t, words=3, lemmatize=True, pos_filter=["NN", "NNS"])
key_words.append(kword.split("\n"))
n = len(smry)
return render_template(
"smry.html",
header=header,
smry=smry,
time_saved=round(sum(time_saved), 2),
article_len=article_len,
n=n,
key_words=key_words,
)
@app.route("/about")
def about():
return render_template("about.html")
``` |
{
"source": "jodieritchie/MLHPortfolio",
"score": 2
} |
#### File: site-packages/werkzeug/useragents.py
```python
import re
import typing as t
import warnings
from .user_agent import UserAgent as _BaseUserAgent
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIEnvironment
class _UserAgentParser:
platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
(" cros ", "chromeos"),
("iphone|ios", "iphone"),
("ipad", "ipad"),
(r"darwin\b|mac\b|os\s*x", "macos"),
("win", "windows"),
(r"android", "android"),
("netbsd", "netbsd"),
("openbsd", "openbsd"),
("freebsd", "freebsd"),
("dragonfly", "dragonflybsd"),
("(sun|i86)os", "solaris"),
(r"x11\b|lin(\b|ux)?", "linux"),
(r"nintendo\s+wii", "wii"),
("irix", "irix"),
("hp-?ux", "hpux"),
("aix", "aix"),
("sco|unix_sv", "sco"),
("bsd", "bsd"),
("amiga", "amiga"),
("blackberry|playbook", "blackberry"),
("symbian", "symbian"),
)
browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
("googlebot", "google"),
("msnbot", "msn"),
("yahoo", "yahoo"),
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
(r"opera|opr", "opera"),
("edge|edg", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
("firefox|firebird|phoenix|iceweasel", "firefox"),
("galeon", "galeon"),
("safari|version", "safari"),
("webkit", "webkit"),
("camino", "camino"),
("konqueror", "konqueror"),
("k-meleon", "kmeleon"),
("netscape", "netscape"),
(r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
("lynx", "lynx"),
("links", "links"),
("Baiduspider", "baidu"),
("bingbot", "bing"),
("mozilla", "mozilla"),
)
_browser_version_re = r"(?:{pattern})[/\sa-z(]*(\d+[.\da-z]+)?"
_language_re = re.compile(
r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
)
def __init__(self) -> None:
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]
self.browsers = [
(b, re.compile(self._browser_version_re.format(pattern=a), re.I))
for a, b in self.browser_rules
]
def __call__(
self, user_agent: str
) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:
platform: t.Optional[str]
browser: t.Optional[str]
version: t.Optional[str]
language: t.Optional[str]
for platform, regex in self.platforms: # noqa: B007
match = regex.search(user_agent)
if match is not None:
break
else:
platform = None
# Except for Trident, all browser key words come after the last ')'
last_closing_paren = 0
if (
not re.compile(r"trident/.+? rv:", re.I).search(user_agent)
and ")" in user_agent
and user_agent[-1] != ")"
):
last_closing_paren = user_agent.rindex(")")
for browser, regex in self.browsers: # noqa: B007
match = regex.search(user_agent[last_closing_paren:])
if match is not None:
version = match.group(1)
break
else:
browser = version = None
match = self._language_re.search(user_agent)
if match is not None:
language = match.group(1) or match.group(2)
else:
language = None
return platform, browser, version, language
# It wasn't public, but users might have imported it anyway, show a
# warning if a user created an instance.
class UserAgentParser(_UserAgentParser):
"""A simple user agent parser. Used by the `UserAgent`.
.. deprecated:: 2.0
Will be removed in Werkzeug 2.1. Use a dedicated parser library
instead.
"""
def __init__(self) -> None:
warnings.warn(
"'UserAgentParser' is deprecated and will be removed in"
" Werkzeug 2.1. Use a dedicated parser library instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__()
class _deprecated_property(property):
def __init__(self, fget: t.Callable[["_UserAgent"], t.Any]) -> None:
super().__init__(fget)
self.message = (
"The built-in user agent parser is deprecated and will be"
f" removed in Werkzeug 2.1. The {fget.__name__!r} property"
" will be 'None'. Subclass 'werkzeug.user_agent.UserAgent'"
" and set 'Request.user_agent_class' to use a different"
" parser."
)
def __get__(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
warnings.warn(self.message, DeprecationWarning, stacklevel=3)
return super().__get__(*args, **kwargs)
# This is what Request.user_agent returns for now, only show warnings on
# attribute access, not creation.
class _UserAgent(_BaseUserAgent):
_parser = _UserAgentParser()
def __init__(self, string: str) -> None:
super().__init__(string)
info = self._parser(string)
self._platform, self._browser, self._version, self._language = info
@_deprecated_property
def platform(self) -> t.Optional[str]: # type: ignore
return self._platform
@_deprecated_property
def browser(self) -> t.Optional[str]: # type: ignore
return self._browser
@_deprecated_property
def version(self) -> t.Optional[str]: # type: ignore
return self._version
@_deprecated_property
def language(self) -> t.Optional[str]: # type: ignore
return self._language
# This is what users might be importing, show warnings on create.
class UserAgent(_UserAgent):
"""Represents a parsed user agent header value.
This uses a basic parser to try to extract some information from the
header.
:param environ_or_string: The header value to parse, or a WSGI
environ containing the header.
.. deprecated:: 2.0
Will be removed in Werkzeug 2.1. Subclass
:class:`werkzeug.user_agent.UserAgent` (note the new module
name) to use a dedicated parser instead.
.. versionchanged:: 2.0
Passing a WSGI environ is deprecated and will be removed in 2.1.
"""
def __init__(self, environ_or_string: "t.Union[str, WSGIEnvironment]") -> None:
if isinstance(environ_or_string, dict):
warnings.warn(
"Passing an environ to 'UserAgent' is deprecated and"
" will be removed in Werkzeug 2.1. Pass the header"
" value string instead.",
DeprecationWarning,
stacklevel=2,
)
string = environ_or_string.get("HTTP_USER_AGENT", "")
else:
string = environ_or_string
warnings.warn(
"The 'werkzeug.useragents' module is deprecated and will be"
" removed in Werkzeug 2.1. The new base API is"
" 'werkzeug.user_agent.UserAgent'.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(string)
``` |
{
"source": "jodietrich/wgan_domain_adaptation",
"score": 2
} |
#### File: jodietrich/wgan_domain_adaptation/batch_augmentors.py
```python
import numpy as np
import adni_data_loader
from batch_generator_list import iterate_minibatches
def flip_augment(X, y_list=None, do_fliplr=True):
N = X.shape[0]
X_list = []
for ii in range(N):
img = np.squeeze(X[ii,...])
# RANDOM FLIP
if do_fliplr:
coin_flip = np.random.randint(2)
if coin_flip == 0:
img = np.flip(img, 0)
X_list.append(img[..., np.newaxis])
X_ = np.asarray(X_list)
if y_list is None:
return X_
else:
return X_, y_list
# translate the fraction generate_fraction of the given image batch with generator (class Generator)
def generator_augment(generator, X, y_list=None, generate_fraction=0.5):
if generate_fraction < 0 or generate_fraction > 1:
raise ValueError('generate_fraction %f is outside the range [0, 1]' % generate_fraction)
N = X.shape[0]
N_generate = round(N*generate_fraction)
X_translated = generator.translate(X[0:N_generate, ...])
X_ = np.concatenate((X_translated, X[N_generate:, ...]), axis=0)
assert X.shape == X_.shape
if y_list is None:
return X_
else:
return X_, y_list
if __name__ == '__main__':
from experiments import jia_xi_net as exp_config
import matplotlib.pyplot as plt
data = adni_data_loader.load_and_maybe_process_data(
input_folder=exp_config.data_root,
preprocessing_folder=exp_config.preproc_folder,
size=exp_config.image_size,
target_resolution=exp_config.target_resolution,
label_list=exp_config.fs_label_list,
force_overwrite=False
)
for batch in iterate_minibatches(data['images_train'],
[data['diagnosis_train'], data['age_train']],
batch_size=exp_config.batch_size,
augmentation_function=None, #flip_augment,
exp_config=exp_config):
X, [y, a] = batch
X_, [y_, a_] = flip_augment(X, [y, a], exp_config.do_fliplr)
fig1 = plt.figure()
fig1.add_subplot(131)
plt.imshow(np.squeeze(X[0,80,:,:]))
fig1.add_subplot(132)
plt.imshow(np.squeeze(X[0,:,80,:]))
fig1.add_subplot(133)
plt.imshow(np.squeeze(X[0,:,:,80]))
fig2 = plt.figure()
fig2.add_subplot(131)
plt.imshow(np.squeeze(X_[0,80,:,:]))
fig2.add_subplot(132)
plt.imshow(np.squeeze(X_[0,:,80,:]))
fig2.add_subplot(133)
plt.imshow(np.squeeze(X_[0,:,:,80]))
plt.show()
```
#### File: wgan_domain_adaptation/chrigi files/preprocess_adni_all.py
```python
import pandas as pd
import os
import glob
import datetime
import time
import csv
import shutil
import utils
from subprocess import Popen
import multiprocessing
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
INCLUDE_MISSING_IMAGES_IN_TABLE = True
DO_ONLY_TABLE = True
bmicdatasets_root = '/usr/bmicnas01/data-biwi-01/'
bmicdatasets_originals = os.path.join(bmicdatasets_root, 'bmicdatasets-originals/Originals/')
bmicdatasets_adni = os.path.join(bmicdatasets_originals, 'ADNI/')
bmicdatasets_adni_tables = os.path.join(bmicdatasets_adni, 'Tables')
bmicdatasets_adni_images = os.path.join(bmicdatasets_adni, 'adni_all_mri/ADNI/')
bmidatasets_mni = os.path.join(bmicdatasets_originals, 'TemplateData/MNI/mni_icbm152_nlin_asym_09a')
mni_template_t1 = os.path.join(bmidatasets_mni, 'mni_icbm152_t1_tal_nlin_asym_09a.nii')
adni_merge_path = os.path.join(bmicdatasets_adni_tables, 'ADNIMERGE.csv')
vitals_path = os.path.join(bmicdatasets_adni_tables, 'VITALS.csv')
diagnosis_path = os.path.join(bmicdatasets_adni_tables, 'DXSUM_PDXCONV_ADNIALL.csv')
mri_3_0_meta_path = os.path.join(bmicdatasets_adni_tables, 'MRI3META.csv')
mri_1_5_meta_path = os.path.join(bmicdatasets_adni_tables, 'MRIMETA.csv')
N4_executable = '/usr/bmicnas01/data-biwi-01/bmicdatasets/Sharing/N4'
robex_executable = '/scratch_net/bmicdl03/software/robex/robex-build/ROBEX'
def date_string_to_seconds(date_str):
date, time = date_str.split(' ')
year, month, day = [int(i.split('.')[0]) for i in date.split('-')]
hours, minutes, secs = [int(i.split('.')[0]) for i in time.split(':')]
acq_time = datetime.datetime(year, month, day, hours, minutes, secs)
start_of_time = datetime.datetime(1970,1,1)
return (acq_time - start_of_time).total_seconds()
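# Illustrative example (added for clarity): for an acquisition timestamp such as
# '2017-06-15 10:30:00.0' this returns the elapsed seconds since 1970-01-01,
# i.e. 1497522600.0 here, so acquisition times can be compared numerically.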
#
# def find_by_conditions(pandas_df, condition_dict):
#
# for ii, (key, value) in enumerate(condition_dict.items()):
# if ii == 0:
# conds = pandas_df[key] == value
# else:
# conds = conds & (pandas_df[key] == value)
#
# return pandas_df.loc[conds]
def find_by_conditions(pandas_df, and_condition_dict=None, or_condition_dict=None):
if and_condition_dict is not None:
conds_and = True
for ii, (key, value_list) in enumerate(and_condition_dict.items()):
if not isinstance(value_list, list):
value_list = [value_list]
for value in list(value_list):
conds_and = conds_and & (pandas_df[key] == value)
else:
conds_and = False
if or_condition_dict is not None:
conds_or = False
for ii, (key, value_list) in enumerate(or_condition_dict.items()):
if not isinstance(value_list, list):
value_list = [value_list]
for value in list(value_list):
conds_or = conds_or | (pandas_df[key] == value)
else:
conds_or = True
conds = conds_and & conds_or
# logging.info('conds:')
# logging.info(sum(conds))
return pandas_df.loc[conds]
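# Illustrative usage (added for clarity), mirroring the calls further below:
#     find_by_conditions(diagnosis_table,
#                        and_condition_dict={'RID': 23, 'VISCODE2': 'bl'})
# keeps rows where RID == 23 AND VISCODE2 == 'bl', while additionally passing
#     or_condition_dict={'VISCODE2': ['sc', 'scmri']}
# requires VISCODE2 to be one of 'sc'/'scmri' on top of the AND conditions.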
def diagnosis_to_3categories_blformat(diag_str):
if diag_str in ['EMCI', 'LMCI', 'MCI']:
return 'MCI'
elif diag_str in ['CN', 'SMC']:
return 'CN'
elif diag_str in ['AD', 'Dementia']:
return 'AD'
else:
raise ValueError('Unknown diagnosis: "%s"'% diag_str)
def diagnosis_to_3categories(diag_str):
if diag_str in [2,4,8]:
return 'MCI'
elif diag_str in [1,7,9]:
return 'CN'
elif diag_str in [3,5,6]:
return 'AD'
elif diag_str in [0]:
return 'unknown'
else:
raise ValueError('Unknown diagnosis: "%s"'% diag_str)
def convert_weight_to_kg(weight, unit):
if unit == 2:
return weight
elif unit == 1:
return 0.453592*weight
else:
return weight # probably weight unknown = -1
def map_all_baseline_to_bl(list):
return_list =[]
for ll in list:
if ll in ['bl', 'sc', 'scmri']:
return_list.append('bl')
else:
return_list.append(ll)
return return_list
def all_same(items):
return all(x == items[0] for x in items)
def do_preprocessing(adnimerge_table_arg,
tmp_index,
processed_images_folder,
summary_csv_file,
do_reorientation=False,
do_registration=False,
do_bias_correction=False,
do_cropping=False,
do_skull_stripping=False,
write_csv=True):
if do_reorientation | do_registration | do_bias_correction | do_cropping | do_skull_stripping == False:
do_postprocessing = False
else:
do_postprocessing = True
vitals_table = pd.read_csv(vitals_path)
mri_3_0_meta_table = pd.read_csv(mri_3_0_meta_path)
mri_1_5_meta_table = pd.read_csv(mri_1_5_meta_path)
diagnosis_table = pd.read_csv(diagnosis_path)
tmp_file_folder = os.path.join(processed_images_folder, 'tmp')
if do_postprocessing:
utils.makefolder(tmp_file_folder)
with open(summary_csv_file, 'w') as csvfile:
if write_csv:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['rid', 'phase', 'image_exists', 'site', 'viscode', 'exam_date', 'field_strength', 'diagnosis', 'diagnosis_3cat',
'age', 'gender', 'weight',
'education', 'ethnicity', 'race', 'apoe4', 'adas13', 'mmse', 'faq', 'counter' ])
for ii, row in adnimerge_table_arg.iterrows():
viscode = row['VISCODE']
# if viscode not in ['bl']: # or 'sc']: There are no 'sc' in adnimerge
# continue
rid = row['RID']
phase = row['COLPROT']
if phase in ['ADNI3']:
continue
site = row['SITE']
age_at_bl = row['AGE'] # Note ADNIMERGE age is always the same, even for the follow up scans years later
gender = row['PTGENDER']
education = row['PTEDUCAT']
ethnicity = row['PTETHCAT']
race = row['PTRACCAT']
apoe4 = row['APOE4']
adas13 = row['ADAS13']
mmse = row['MMSE']
faq = row['FAQ']
exam_date_adnimerge = row['EXAMDATE'] # Not necessarily the same as the exam date in the MRIMETA files
diagnosis_row = find_by_conditions(diagnosis_table, and_condition_dict={'RID': rid, 'VISCODE2': viscode})
if phase == 'ADNI1':
diagnosis = diagnosis_row['DXCURREN'].values
else:
diagnosis = diagnosis_row['DXCHANGE'].values
print('---- rid %s -----' % rid)
print(viscode)
print(diagnosis)
if len(diagnosis) == 0:
diagnosis = 0
if viscode == 'm03':
diagnosis_bl = row['DX_bl']
diagnosis_3cat = diagnosis_to_3categories_blformat(diagnosis_bl)
else:
diagnosis_3cat = 'unknown'
else:
diagnosis = int(diagnosis[0])
diagnosis_3cat = diagnosis_to_3categories(diagnosis)
# field_strength = row['FLDSTRENG'] # This field is incomplete, too many nan values
vitals_row = find_by_conditions(vitals_table, {'RID': rid, 'VISCODE2': 'bl'}) # here also examdates sometimes don't correspond
if len(vitals_row) == 0:
vitals_row = find_by_conditions(vitals_table, {'RID': rid, 'VISCODE2': 'sc'})
assert len(vitals_row) <= 1, 'in vitals table found %d rows for case with rid=%s, and viscode=bl. Expected one.' \
% (len(vitals_row), rid)
# Getting some vitals information
if len(vitals_row) == 1:
weight = vitals_row['VSWEIGHT'].values[0]
weight_units = vitals_row['VSWTUNIT'].values[0]
weight = convert_weight_to_kg(weight, weight_units)
else:
weight = 'unknown'
mri_1_5_meta_row = find_by_conditions(mri_1_5_meta_table, and_condition_dict={'RID': rid, 'VISCODE2': viscode})
if len(mri_1_5_meta_row) == 0 and viscode == 'bl':
mri_1_5_meta_row = find_by_conditions(mri_1_5_meta_table,
and_condition_dict={'RID': rid, 'VISCODE2': 'sc'})
mri_3_0_meta_row = find_by_conditions(mri_3_0_meta_table, and_condition_dict={'RID': rid, 'VISCODE2': viscode})
if len(mri_3_0_meta_row) == 0 and viscode == 'bl':
mri_3_0_meta_row = find_by_conditions(mri_3_0_meta_table,
and_condition_dict={'RID': rid},
or_condition_dict={'VISCODE2': ['sc', 'scmri']})
exam_dates = list(mri_1_5_meta_row['EXAMDATE'].values) + list(mri_3_0_meta_row['EXAMDATE'].values)
field_strengths = [1.5]*len(mri_1_5_meta_row['EXAMDATE']) + [3.0]*len(mri_3_0_meta_row['EXAMDATE'])
viscodes = list(mri_1_5_meta_row['VISCODE2'].values) + list(mri_3_0_meta_row['VISCODE2'].values)
subj_subfolder = '%s_S_%s' % (str(site).zfill(3), str(rid).zfill(4))
# Remove nans from exam dates and corresponding field strengths
exam_dates_tmp = []
field_strengths_tmp = []
viscodes_tmp = []
for ed, fs, vc in zip(exam_dates, field_strengths, viscodes):
if str(ed) != 'nan':
exam_dates_tmp.append(ed)
field_strengths_tmp.append(fs)
viscodes_tmp.append(vc)
exam_dates = exam_dates_tmp
field_strengths = field_strengths_tmp
viscodes = viscodes_tmp
# If all exam dates are the same keep only one
if len(exam_dates) > 1 and all_same(exam_dates):
print('Multiple equal exam dates')
print(field_strengths)
exam_dates = [exam_dates[0]]
field_strengths = [field_strengths[0]]
viscodes = [viscodes[0]]
# If there are duplicate viscodes keep the first and say 1.5T because duplicates are almost always 1.5T
if len(viscodes) > 1 and all_same(map_all_baseline_to_bl(viscodes)):
print('Identical viscodes')
print(field_strengths)
exam_dates = [exam_dates[0]]
if phase in ['ADNI1', 'ADNIGO']:
field_strengths = [field_strengths[0]] # 1.5 is always the first item anyways
else:
print('!! Multiple viscodes. Duplicate that was in ADNI2')
print(field_strengths)
field_strengths = [field_strengths[0]]
if not len(exam_dates) > 0:
continue
# Philips scanners do not have the gradwarp preprocessed images. I am assuming MT1__N3m is fine even
# though B1_Correction is missing.
# This webpage: http://adni.loni.usc.edu/methods/mri-analysis/mri-pre-processing/ says all files with a N3m
# in the end are fine to use. I am assuming that MPR____N3 and MPR__GradWarp__N3 also indicate that the
# whole preprocessing pipeline was applied.
preproc_subfolders = ['MPR__GradWarp__B1_Correction__N3', 'MPR____N3', 'MT1__N3m', 'MT1__GradWarp__N3m', 'MPR__GradWarp__N3']
nii_files = []
for exam_date, field_strength in zip(exam_dates, field_strengths):
# figure out age:
# get baseline examdate from adnimerge
baseline_row = find_by_conditions(adnimerge_table_arg,
and_condition_dict={'RID': rid},
or_condition_dict={'VISCODE': ['sc', 'scmri', 'bl']})
baseline_exam_dates = baseline_row['EXAMDATE'].values
if len(baseline_exam_dates) <= 0:
current_age = 'unknown'
else:
baseline_exam_date = baseline_exam_dates[0]
year_diff = int(exam_date.split('-')[0]) - int(baseline_exam_date.split('-')[0])
month_diff = int(exam_date.split('-')[1]) - int(baseline_exam_date.split('-')[1])
day_diff = int(exam_date.split('-')[2]) - int(baseline_exam_date.split('-')[2])
decimal_year_diff = year_diff + (1.0/12)*month_diff + (1.0/(12*30)*day_diff)
assert decimal_year_diff >= -0.75, 'Year diff cannot be (too) negative! Was %f' % decimal_year_diff
if decimal_year_diff < 0:
decimal_year_diff = 0.0
current_age = age_at_bl + decimal_year_diff
for preproc_subfolder in preproc_subfolders:
nii_search_str = os.path.join(subj_subfolder, preproc_subfolder, exam_date + '_*', '*/*.nii')
nii_files += glob.glob(os.path.join(bmicdatasets_adni_images, nii_search_str))
# If some files have gradwarp prefer those files
contains_GradWarp = any(['GradWarp' in ff for ff in nii_files])
if contains_GradWarp:
nii_files = [ff for ff in nii_files if 'GradWarp' in ff]
# if some files have MT1 and MPR prefer the MT1
contains_MT1 = any(['MT1' in ff for ff in nii_files])
if contains_MT1:
nii_files = [ff for ff in nii_files if 'MT1' in ff]
# if some files have B1 correction prefer those
contains_B1 = any(['B1_Correction' in ff for ff in nii_files])
if contains_B1:
nii_files = [ff for ff in nii_files if 'B1_Correction' in ff]
image_exists = True if len(nii_files) > 0 else False
if image_exists:
start_time = time.time()
if not DO_ONLY_TABLE:
nii_use_file = nii_files[0]
logging.info(nii_use_file)
gz_postfix = '.gz' if do_postprocessing else ''
patient_folder = 'rid_%s' % (str(rid).zfill(4))
out_file_name = '%s_%sT_%s_rid%s_%s.nii%s' % (phase.lower(),
field_strength,
diagnosis_3cat,
str(rid).zfill(4),
viscode,
gz_postfix)
out_folder = os.path.join(processed_images_folder, patient_folder)
utils.makefolder(out_folder)
out_file_path = os.path.join(out_folder, out_file_name)
if os.path.exists(out_file_path):
logging.info('!!! File already exists. Skipping')
continue
else:
logging.info('--- Doing File: %s' % out_file_path)
if not do_postprocessing:
logging.info('Not doing any preprocessing...')
shutil.copyfile(nii_use_file, out_file_path)
else:
tmp_file_path = os.path.join(tmp_file_folder, 'tmp_rid%s_%s.nii.gz' % (str(rid).zfill(4), str(tmp_index)))
shutil.copyfile(nii_use_file, tmp_file_path)
if do_reorientation:
# fsl orientation enforcing:
logging.info('Reorienting to MNI space...')
Popen('fslreorient2std {0} {1}'.format(tmp_file_path, tmp_file_path), shell=True).communicate()
if do_cropping:
# field of view cropping
logging.info('Cropping the field of view...')
Popen('robustfov -i {0} -r {1}'.format(tmp_file_path, tmp_file_path), shell=True).communicate()
if do_bias_correction:
# bias correction with N4:
logging.info('Bias correction...')
Popen('{0} {1} {2}'.format(N4_executable, tmp_file_path, tmp_file_path),
shell=True).communicate()
if do_registration:
# registration with flirt to MNI 152:
logging.info('Registering the structural image...')
Popen(
'flirt -in {0} -ref {1} -out {2} -searchrx -45 45 -searchry -45 45 -searchrz -45 45 -dof 7'.format(
tmp_file_path, mni_template_t1, tmp_file_path), shell=True).communicate()
if do_skull_stripping:
# skull stripping with bet2
logging.info('Skull stripping...')
# Popen('bet {0} {1} -R -f 0.5 -g 0'.format(tmp_file_path, tmp_file_path), shell=True).communicate() # bet was not robust enough
Popen('{0} {1} {2} -R -f 0.5 -g 0'.format(robex_executable, tmp_file_path, tmp_file_path), shell=True).communicate()
logging.info('Finished.')
logging.info('Copying tmp file: %s, to output: %s' % (tmp_file_path, out_file_path))
shutil.copyfile(tmp_file_path, out_file_path)
if write_csv:
csvwriter.writerow([rid, phase, image_exists, site, viscode, exam_date, field_strength, diagnosis, diagnosis_3cat,
current_age, gender, weight,
education, ethnicity, race, apoe4, adas13, mmse, faq, 1])
elapsed_time = time.time() - start_time
logging.info('Elapsed time: %.2f secs' % elapsed_time)
if not image_exists and INCLUDE_MISSING_IMAGES_IN_TABLE and write_csv:
# If the include missing images constant is set to true it will write all the rows to the table
csvwriter.writerow([rid, phase, image_exists, site, viscode, exam_date, field_strength, diagnosis, diagnosis_3cat,
current_age, gender, weight,
education, ethnicity, race, apoe4, adas13, mmse, faq, 1])
if __name__ == '__main__':
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI1_screening_noPP/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI1_screening_reorient_crop_strip/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI1_screening_reorient_crop/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI1_screening_reorient_crop_strip_mni/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_ender_selection_reorient_crop/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_ender_selection_noPP/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_ender_selection_allPP_robex/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_all_no_skullstrip/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_all_no_PP_2/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_all_no_skullstrip/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_all_allPP_robex/')
### ----------
processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_all_no_PP_3')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_allfixed_no_skullstrip/')
# processed_images_folder = os.path.join(bmicdatasets_root, 'bmicdatasets/Processed/ADNI_Christian/ADNI_allfixed_allPP_robex/')
utils.makefolder(processed_images_folder)
summary_csv_file = os.path.join(processed_images_folder, 'summary_alldata.csv')
do_reorientation = True #True
do_registration = True #True
do_bias_correction = True
do_cropping = True #True
do_skull_stripping = False #True
# adnimerge_table = pd.read_csv(adni_merge_path, nrows=2)
# adnimerge_table = pd.read_csv(adni_merge_path, chunksize=100)
pool = multiprocessing.Pool(1)
start_time = time.time()
# func_list = []
# for tmp_index, df in enumerate(adnimerge_table):
#
# f = pool.apply_async(do_preprocessing, args=(df, tmp_index, processed_images_folder, summary_csv_file),
# kwds={'do_reorientation': do_reorientation,
# 'do_registration': do_registration,
# 'do_bias_correction': do_bias_correction,
# 'do_cropping': do_cropping,
# 'do_skull_stripping': do_skull_stripping,
# 'write_csv': True})
#
# func_list.append(f)
#
#
# for f in func_list:
# f.get()
adnimerge_table = pd.read_csv(adni_merge_path)
do_preprocessing(adnimerge_table, 0,
processed_images_folder,
summary_csv_file,
do_reorientation=do_reorientation,
do_registration=do_registration,
do_bias_correction=do_bias_correction,
do_cropping=do_cropping,
do_skull_stripping=do_skull_stripping,
write_csv=True)
logging.info('Elapsed time %f secs' % (time.time()-start_time))
```
#### File: experiments/gan/FCN_disc_res_gen_bn.py
```python
__author__ = 'jdietric'
from experiments.gan.standard_parameters import *
experiment_name = 'FCN_disc_res_gen_n16b3_no_noise_all_small_data_1e4l1_bn_i1'
# Model settings
batch_normalization = True
# model to use
def generator(xs, z_noise, training, scope_reuse=False, scope_name='generator'):
return model_zoo.bousmalis_generator(xs, z_noise=z_noise, training=training, batch_normalization=batch_normalization,
residual_blocks=3, nfilters=16, scope_reuse=scope_reuse, scope_name=scope_name)
def discriminator(x, training, scope_reuse=False, scope_name='discriminator'):
diag_logits = model_zoo.FCN_disc_bn(x, training, nlabels=1, scope_name=scope_name, scope_reuse=scope_reuse)
return diag_logits
```
#### File: jodietrich/wgan_domain_adaptation/histograms.py
```python
import config.system as sys_config
import os
import utils
import numpy as np
import matplotlib.pyplot as plt
from pylab import savefig
# make histograms to compare 1.5 T and 3 T images and generated images
# histograms of intensity and intensity gradient with respect to the spatial dimensions
def make_histogram_vectors(image):
# image must be a numpy array
vectors = {}
vectors['intensity'] = image.flatten()
vectors['gradient_norm'] = pixel_gradient_norm_list(image)
return vectors
def pixel_gradient_norm_list(image):
difference_images = pixel_difference_gradients(image)
pixel_gradient_norms = np.linalg.norm(difference_images, ord=2, axis=-1)
pixel_gradient_norm_vector = pixel_gradient_norms.flatten()
return pixel_gradient_norm_vector
def pixel_difference_gradients(image):
pixel_dif1 = image[1:, :-1, :-1] - image[:-1, :-1, :-1]
pixel_dif2 = image[:-1, 1:, :-1] - image[:-1, :-1, :-1]
pixel_dif3 = image[:-1, :-1, 1:] - image[:-1, :-1, :-1]
return np.stack((pixel_dif1, pixel_dif2, pixel_dif3), axis=-1)
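# Note (added for clarity): for an input volume of shape (X, Y, Z) this returns
# an array of shape (X-1, Y-1, Z-1, 3), holding the forward difference along
# each spatial axis in the last dimension.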
def plot_histograms(hist_vectors, fig_name, saving_folder, n_bins='auto', cutoff_left=0.01, show_figure=True):
# plots the intensity and gradient norm histograms
fig = plt.figure(fig_name)
plt.subplot(121)
plt.hist(hist_vectors['intensity'], bins=n_bins, range=(-1 + cutoff_left, 1))
plt.xlabel('intensity')
plt.ylabel('number of pixels')
plt.subplot(122)
plt.hist(hist_vectors['gradient_norm'], bins=n_bins, range=(cutoff_left, 2))
plt.xlabel('gradient norm')
plt.ylabel('number of pixels')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
save_path = os.path.join(saving_folder, fig_name + '.svg')
print('saving figure as: ' + save_path)
savefig(save_path, bbox_inches='tight')
if show_figure:
plt.show()
if __name__ == '__main__':
# images path
plot_name = 'real_1T5_avg'
img_folder = os.path.join(sys_config.project_root, 'data/generated_images/final/all_experiments')
saving_folder = '/scratch_net/brossa/jdietric/Documents/thesis/figures/histograms'
sub_folder = 'source'
field_strs = ['1.5']
labels = [0, 2]
fs_label_combinations = [('1.5', 0), ('1.5', 2)]
# get all images from the given combinations
image_folder_path = os.path.join(img_folder, sub_folder)
file_list = os.listdir(image_folder_path)
# filter out relevant images
filtered_file_list = []
for fs_label_tuple in fs_label_combinations:
contain_strings = [fs_label_tuple[0] + 'T', 'diag%d' % fs_label_tuple[1]]
filtered_file_list += [file_name for file_name in file_list if all(s in file_name for s in contain_strings)]
hist_vectors = {'intensity': [], 'gradient_norm': []}
for img_name in filtered_file_list:
img_path = os.path.join(img_folder, sub_folder, img_name)
# load image
img_array, _, _ = utils.load_nii(img_path)
hist_vectors['intensity'].append(make_histogram_vectors(img_array)['intensity'])
hist_vectors['gradient_norm'].append(make_histogram_vectors(img_array)['gradient_norm'])
avg_hist_vectors = {'intensity': np.mean(hist_vectors['intensity']), 'gradient_norm': hist_vectors['gradient_norm']}
plot_histograms(hist_vectors, plot_name, saving_folder)
# code for single image histogram
# images path
# img_folder = os.path.join(sys_config.project_root, 'data/generated_images/final/all_experiments')
# sub_folder = 'residual_gen_n8b4_disc_n8_bn_dropout_keep0.9_10_noise_all_small_data_1e4l1_s3_final_i1'
# img_num = 468
# field_str = '1.5'
# label = 2
# img_name = 'generated_img_%sT_diag%d_ind%d.nii.gz' % (field_str, label, img_num)
# saving_folder = '/scratch_net/brossa/jdietric/Documents/thesis/figures/histograms'
# field_str = field_str.replace('.', '')
# plot_name = 'separate_residual_no_noise_target' + field_str + 'T_' + str(img_num)
# img_path = os.path.join(img_folder, sub_folder, img_name)
# # load image
# img_array, _, _ = utils.load_nii(img_path)
# hist_vectors = make_histogram_vectors(img_array)
# plot_histograms(hist_vectors, plot_name, saving_folder)
```
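The gradient-norm histogram above is built from simple forward differences. Below is a minimal, self-contained sketch of that computation on a small synthetic volume; it restates the two helpers with plain numpy because the module above pulls in project-specific imports (`config.system`, `utils`), and the array values are invented purely for illustration.
```python
import numpy as np

# synthetic 3-D "image": a smooth ramp plus a little noise
rng = np.random.default_rng(0)
image = np.linspace(-1, 1, 8 * 8 * 8).reshape(8, 8, 8)
image = image + 0.01 * rng.standard_normal(image.shape)

# forward differences along each spatial axis, cropped to a common shape
# (mirrors pixel_difference_gradients above)
dif1 = image[1:, :-1, :-1] - image[:-1, :-1, :-1]
dif2 = image[:-1, 1:, :-1] - image[:-1, :-1, :-1]
dif3 = image[:-1, :-1, 1:] - image[:-1, :-1, :-1]
gradients = np.stack((dif1, dif2, dif3), axis=-1)

# per-voxel gradient norm, flattened into the histogram vector
# (mirrors pixel_gradient_norm_list above)
gradient_norms = np.linalg.norm(gradients, ord=2, axis=-1).flatten()
print(gradient_norms.shape)          # (343,) for an 8x8x8 input
print(float(gradient_norms.mean()))
```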
#### File: jodietrich/wgan_domain_adaptation/image_utils.py
```python
import numpy as np
from skimage import measure
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
try:
import cv2
except:
logging.warning('Could not import opencv. Augmentation functions will be unavailable.')
else:
def rotate_image(img, angle, interp=cv2.INTER_LINEAR):
rows, cols = img.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
return cv2.warpAffine(img, rotation_matrix, (cols, rows), flags=interp)
def resize_image(im, size, interp=cv2.INTER_LINEAR):
im_resized = cv2.resize(im, (size[1], size[0]), interpolation=interp) # swap sizes to account for weird OCV API
return im_resized
def convert_to_uint8(image):
image = image - image.min()
image = 255.0*np.divide(image.astype(np.float32),image.max())
return image.astype(np.uint8)
def normalise_image(image):
'''
make image zero mean and unit standard deviation
'''
img_o = np.float32(image.copy())
m = np.mean(img_o)
s = np.std(img_o)
return np.divide((img_o - m), s)
def map_image_to_intensity_range(image, min_o, max_o):
if image.dtype in [np.uint8, np.uint16, np.uint32]:
assert min_o >= 0, 'Input image type is uintXX but you selected a negative min_o: %f' % min_o
if image.dtype == np.uint8:
assert max_o <= 255, 'Input image type is uint8 but you selected a max_o > 255: %f' % max_o
min_i = np.min(image)
max_i = np.max(image)
image = (np.divide((image - min_i), max_i - min_i) * (max_o - min_o) + min_o).copy()
return image
def normalise_images(X):
'''
Helper for making the images zero mean and unit standard deviation i.e. `white`
'''
X_white = np.zeros(X.shape, dtype=np.float32)
for ii in range(X.shape[0]):
Xc = X[ii,:,:,:]
mc = Xc.mean()
sc = Xc.std()
Xc_white = np.divide((Xc - mc), sc)
X_white[ii,:,:,:] = Xc_white
return X_white.astype(np.float32)
def reshape_2Dimage_to_tensor(image):
return np.reshape(image, (1,image.shape[0], image.shape[1],1))
def keep_largest_connected_components(mask):
'''
Keeps only the largest connected components of each label for a segmentation mask.
'''
out_img = np.zeros(mask.shape, dtype=np.uint8)
for struc_id in [1, 2, 3]:
binary_img = mask == struc_id
blobs = measure.label(binary_img, connectivity=1)
props = measure.regionprops(blobs)
if not props:
continue
area = [ele.area for ele in props]
largest_blob_ind = np.argmax(area)
largest_blob_label = props[largest_blob_ind].label
out_img[blobs == largest_blob_label] = struc_id
return out_img
```
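The connected-component filter above is the piece most easily checked in isolation. Here is a small sketch of the same idea on a toy 2-D mask, assuming scikit-image is available; the single label and the mask values are invented for illustration (the original operates on segmentation masks with labels 1-3).
```python
import numpy as np
from skimage import measure

# toy mask: label 1 appears as one large blob plus one spurious single pixel
mask = np.zeros((8, 8), dtype=np.uint8)
mask[1:5, 1:5] = 1   # large component
mask[6, 6] = 1       # small spurious component

out = np.zeros_like(mask)
for struc_id in [1]:
    blobs = measure.label(mask == struc_id, connectivity=1)
    props = measure.regionprops(blobs)
    if props:
        largest = props[int(np.argmax([p.area for p in props]))].label
        out[blobs == largest] = struc_id

print(out[6, 6])                 # 0 -> the spurious pixel was removed
print(int(out[1:5, 1:5].min()))  # 1 -> the large blob was kept
```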
#### File: jodietrich/wgan_domain_adaptation/joint_train.py
```python
import logging
import time
import numpy as np
import os.path
import tensorflow as tf
import shutil
from sklearn.metrics import f1_score
import config.system as sys_config
import gan_model
from tfwrapper import utils as tf_utils
import utils
import adni_data_loader_all
import data_utils
from batch_generator_list import iterate_minibatches_endlessly, iterate_minibatches
import clf_model_multitask as clf_model_mt
import joint_model
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Set SGE_GPU environment variable if we are not on the local host
sys_config.setup_GPU_environment()
#######################################################################
from experiments.joint import clf_allconv_gan_bousmalis as exp_config
#######################################################################
log_dir = os.path.join(sys_config.log_root, exp_config.log_folder, exp_config.experiment_name)
try:
import cv2
except:
logging.warning('Could not find cv2. If you want to use augmentation '
'function you need to setup OpenCV.')
def run_training(continue_run, log_dir):
logging.info('===== RUNNING EXPERIMENT ========')
logging.info(exp_config.experiment_name)
logging.info('=================================')
init_step = 0
if continue_run:
logging.info('!!!!!!!!!!!!!!!!!!!!!!!!!!!! Continuing previous run !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
try:
init_checkpoint_path = utils.get_latest_model_checkpoint_path(log_dir, 'model.ckpt')
logging.info('Checkpoint path: %s' % init_checkpoint_path)
init_step = int(init_checkpoint_path.split('/')[-1].split('-')[-1]) + 1 # plus 1 b/c otherwise starts with eval
logging.info('Latest step was: %d' % init_step)
log_dir += '_cont'
except:
logging.warning("!!! Didn't find init checkpoint. Maybe first run failed. Disabling continue mode...")
continue_run = False
init_step = 0
logging.info('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
# import data
data = adni_data_loader_all.load_and_maybe_process_data(
input_folder=exp_config.data_root,
preprocessing_folder=exp_config.preproc_folder,
size=exp_config.image_size,
target_resolution=exp_config.target_resolution,
label_list = exp_config.label_list,
offset=exp_config.offset,
rescale_to_one=exp_config.rescale_to_one,
force_overwrite=False
)
# extract images and indices of source/target images for the training and validation set
images_train, source_images_train_ind, target_images_train_ind,\
images_val, source_images_val_ind, target_images_val_ind = data_utils.get_images_and_fieldstrength_indices(
data, exp_config.source_field_strength, exp_config.target_field_strength)
# get labels
# the following are HDF5 datasets, not numpy arrays
labels_train = data['diagnosis_train']
ages_train = data['age_train']
labels_val = data['diagnosis_val']
ages_val = data['age_val']
if exp_config.age_ordinal_regression:
ages_train = utils.age_to_ordinal_reg_format(ages_train, bins=exp_config.age_bins)
ordinal_reg_weights = utils.get_ordinal_reg_weights(ages_train)
else:
ages_train = utils.age_to_bins(ages_train, bins=exp_config.age_bins)
ordinal_reg_weights = None
if exp_config.age_ordinal_regression:
ages_val = utils.age_to_ordinal_reg_format(ages_val, bins=exp_config.age_bins)
else:
ages_val= utils.age_to_bins(ages_val, bins=exp_config.age_bins)
generator = exp_config.generator
discriminator = exp_config.discriminator
augmentation_function = exp_config.augmentation_function if exp_config.use_augmentation else None
s_sampler_train = iterate_minibatches_endlessly(images_train,
batch_size=2*exp_config.batch_size,
exp_config=exp_config,
labels_list=[labels_train, ages_train],
selection_indices=source_images_train_ind,
augmentation_function=augmentation_function)
t_sampler_train = iterate_minibatches_endlessly(images_train,
batch_size=exp_config.batch_size,
exp_config=exp_config,
labels_list=[labels_train, ages_train],
selection_indices=target_images_train_ind,
augmentation_function=augmentation_function)
with tf.Graph().as_default():
training_time_placeholder = tf.placeholder(tf.bool, shape=[], name='training_time')
# GAN
# input noise for generator
if exp_config.use_generator_input_noise:
noise_in_gen_pl = tf.random_uniform(shape=exp_config.generator_input_noise_shape, minval=-1, maxval=1)
else:
noise_in_gen_pl = None
# target image batch
xt_pl = tf.placeholder(tf.float32, image_tensor_shape(exp_config.batch_size), name='x_target')
# the classifier uses 2 times the batch size of the GAN
clf_batch_size = 2 * exp_config.batch_size
# source image batch
xs_pl, diag_s_pl, ages_s_pl = placeholders_clf(clf_batch_size, 'source')
# split source batch into 1 to be translated to xf and 2 for the classifier
# for the discriminator train op half 2 of the batch is not used
xs1_pl, xs2_pl = tf.split(xs_pl, 2, axis=0)
# generated fake image batch
xf_pl = generator(xs1_pl, noise_in_gen_pl, training_time_placeholder)
# difference between generated and source images
diff_img_pl = xf_pl - xs1_pl
# visualize the images by showing one slice of them in the z direction
tf.summary.image('sample_outputs', tf_utils.put_kernels_on_grid3d(xf_pl, exp_config.cut_axis,
exp_config.cut_index, rescale_mode='manual',
input_range=exp_config.image_range))
tf.summary.image('sample_xt', tf_utils.put_kernels_on_grid3d(xt_pl, exp_config.cut_axis,
exp_config.cut_index, rescale_mode='manual',
input_range=exp_config.image_range))
tf.summary.image('sample_xs', tf_utils.put_kernels_on_grid3d(xs1_pl, exp_config.cut_axis,
exp_config.cut_index, rescale_mode='manual',
input_range=exp_config.image_range))
tf.summary.image('sample_difference_xf-xs', tf_utils.put_kernels_on_grid3d(diff_img_pl, exp_config.cut_axis,
exp_config.cut_index, rescale_mode='centered',
cutoff_abs=exp_config.diff_threshold))
# output of the discriminator for real image
d_pl = discriminator(xt_pl, training_time_placeholder, scope_reuse=False)
# output of the discriminator for fake image
d_pl_ = discriminator(xf_pl, training_time_placeholder, scope_reuse=True)
d_hat = None
x_hat = None
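# 'improved training': x_hat is a random interpolation between real and generated images and d_hat
# is the critic output at that point, presumably used for a WGAN-GP style gradient penalty inside
# joint_model.training_ops (scaled by exp_config.scale)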
if exp_config.improved_training:
epsilon = tf.random_uniform([], 0.0, 1.0)
x_hat = epsilon * xt_pl + (1 - epsilon) * xf_pl
d_hat = discriminator(x_hat, training_time_placeholder, scope_reuse=True)
dist_l1 = tf.reduce_mean(tf.abs(diff_img_pl))
learning_rate_gan_pl = tf.placeholder(tf.float32, shape=[], name='learning_rate')
learning_rate_clf_pl = tf.placeholder(tf.float32, shape=[], name='learning_rate_clf')
if exp_config.momentum is not None:
optimizer_handle = lambda learning_rate: exp_config.optimizer_handle(learning_rate=learning_rate,
momentum=exp_config.momentum)
else:
optimizer_handle = lambda learning_rate: exp_config.optimizer_handle(learning_rate=learning_rate)
# Build the operation for clipping the discriminator weights
d_clip_op = gan_model.clip_op()
# Put L1 distance of generated image and original image on summary
dist_l1_summary_op = tf.summary.scalar('L1_distance_to_source_img', dist_l1)
# Classifier ----------------------------------------------------------------------------------------
# directly_feed_clf_pl is usually False during training, so the generated batch xf and the second half of the source batch xs2 are concatenated as classifier input; otherwise the classifier is fed directly via separate placeholders
directly_feed_clf_pl = tf.placeholder(tf.bool, shape=[], name='direct_classifier_feeding')
# conditionally use either fresh classifier placeholders or a concatenation of the generated images and the source data
# tf.cond avoids having to feed placeholders that are not needed in a given mode
images_clf, diag_clf, ages_clf = tf.cond(
directly_feed_clf_pl,
lambda: placeholders_clf(clf_batch_size, 'direct_clf'),
lambda: concatenate_clf_input([xf_pl, xs2_pl], diag_s_pl, ages_s_pl, scope_name = 'fs_concat')
)
tf.summary.scalar('learning_rate_gan', learning_rate_gan_pl)
tf.summary.scalar('learning_rate_clf', learning_rate_clf_pl)
# Build a Graph that computes predictions from the inference model.
diag_logits_train, ages_logits_train = exp_config.clf_model_handle(images_clf,
nlabels=exp_config.nlabels,
training=training_time_placeholder,
n_age_thresholds=len(exp_config.age_bins),
bn_momentum=exp_config.bn_momentum)
# Add to the Graph the Ops for loss calculation.
[classifier_loss, diag_loss, age_loss, weights_norm_clf] = clf_model_mt.loss(diag_logits_train,
ages_logits_train,
diag_clf,
ages_clf,
nlabels=exp_config.nlabels,
weight_decay=exp_config.weight_decay,
diag_weight=exp_config.diag_weight,
age_weight=exp_config.age_weight,
use_ordinal_reg=exp_config.age_ordinal_regression,
ordinal_reg_weights=ordinal_reg_weights)
# nr means no regularization, meaning the loss without the regularization term
train_ops_dict, losses_gan_dict = joint_model.training_ops(d_pl, d_pl_,
classifier_loss,
optimizer_handle=optimizer_handle,
learning_rate_gan=learning_rate_gan_pl,
learning_rate_clf=learning_rate_clf_pl,
l1_img_dist=dist_l1,
gan_loss_weight=exp_config.gan_loss_weight,
task_loss_weight=exp_config.task_loss_weight,
w_reg_img_dist_l1=exp_config.w_reg_img_dist_l1,
w_reg_gen_l1=exp_config.w_reg_gen_l1,
w_reg_disc_l1=exp_config.w_reg_disc_l1,
w_reg_gen_l2=exp_config.w_reg_gen_l2,
w_reg_disc_l2=exp_config.w_reg_disc_l2,
d_hat=d_hat, x_hat=x_hat, scale=exp_config.scale)
tf.summary.scalar('classifier loss', classifier_loss)
tf.summary.scalar('diag_loss', diag_loss)
tf.summary.scalar('age_loss', age_loss)
tf.summary.scalar('weights_norm_term_classifier', weights_norm_clf)
tf.summary.scalar('generator loss joint', losses_gan_dict['gen']['joint'])
tf.summary.scalar('discriminator loss joint', losses_gan_dict['disc']['joint'])
eval_diag_loss, eval_ages_loss, pred_labels, ages_softmaxs = clf_model_mt.evaluation(diag_logits_train, ages_logits_train,
diag_clf,
ages_clf,
images_clf,
diag_weight=exp_config.diag_weight,
age_weight=exp_config.age_weight,
nlabels=exp_config.nlabels,
use_ordinal_reg=exp_config.age_ordinal_regression)
# Build the summary Tensor based on the TF collection of Summaries.
summary = tf.summary.merge_all()
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a savers for writing training checkpoints.
saver_latest = tf.train.Saver(max_to_keep=2)
saver_best_disc = tf.train.Saver(max_to_keep=2) # disc loss is scaled negative EM distance
saver_best_diag_f1 = tf.train.Saver(max_to_keep=5)
saver_best_ages_f1 = tf.train.Saver(max_to_keep=1)
saver_best_xent = tf.train.Saver(max_to_keep=5)
# validation summaries gan
val_disc_loss_pl = tf.placeholder(tf.float32, shape=[], name='disc_val_loss')
disc_val_summary_op = tf.summary.scalar('validation_discriminator_loss', val_disc_loss_pl)
val_gen_loss_pl = tf.placeholder(tf.float32, shape=[], name='gen_val_loss')
gen_val_summary_op = tf.summary.scalar('validation_generator_loss', val_gen_loss_pl)
val_summary_gan = tf.summary.merge([disc_val_summary_op, gen_val_summary_op])
# Classifier summary
val_error_clf_ = tf.placeholder(tf.float32, shape=[], name='val_error_diag')
val_error_summary = tf.summary.scalar('classifier_validation_loss', val_error_clf_)
val_diag_f1_score_ = tf.placeholder(tf.float32, shape=[], name='val_diag_f1')
val_f1_diag_summary = tf.summary.scalar('validation_diag_f1', val_diag_f1_score_)
val_ages_f1_score_ = tf.placeholder(tf.float32, shape=[], name='val_ages_f1')
val_f1_ages_summary = tf.summary.scalar('validation_ages_f1', val_ages_f1_score_)
val_summary_clf = tf.summary.merge([val_error_summary, val_f1_diag_summary, val_f1_ages_summary])
val_summary = tf.summary.merge([val_summary_clf, val_summary_gan])
train_error_clf_ = tf.placeholder(tf.float32, shape=[], name='train_error_diag')
train_error_clf_summary = tf.summary.scalar('classifier_training_loss', train_error_clf_)
train_diag_f1_score_ = tf.placeholder(tf.float32, shape=[], name='train_diag_f1')
train_diag_f1_summary = tf.summary.scalar('training_diag_f1', train_diag_f1_score_)
train_ages_f1_score_ = tf.placeholder(tf.float32, shape=[], name='train_ages_f1')
train_f1_ages_summary = tf.summary.scalar('training_ages_f1', train_ages_f1_score_)
train_summary = tf.summary.merge([train_error_clf_summary, train_diag_f1_summary, train_f1_ages_summary])
# prevents ResourceExhaustError when a lot of memory is used
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # Do not assign whole gpu memory, just use it on the go
config.allow_soft_placement = True # If a operation is not defined in the default device, let it execute in another.
# Create a session for running Ops on the Graph.
sess = tf.Session(config=config)
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.graph.finalize()
# Run the Op to initialize the variables.
sess.run(init)
if continue_run:
# Restore session
saver_latest.restore(sess, init_checkpoint_path)
curr_lr_gan = exp_config.learning_rate_gan
curr_lr_clf = exp_config.learning_rate_clf
no_improvement_counter = 0
best_val = np.inf
last_train = np.inf
loss_history = []
loss_gradient = np.inf
best_diag_f1_score = 0
best_ages_f1_score = 0
# initialize value of lowest (i. e. best) discriminator loss
best_d_loss = np.inf
for step in range(init_step, exp_config.max_steps):
start_time = time.time()
# discriminator and classifier (task) training iterations
d_iters = 5
t_iters = 1
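# extra critic (discriminator) iterations during the first 25 steps and every 500th step,
# a schedule popularised by the original WGAN reference implementation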
if step % 500 == 0 or step < 25:
d_iters = 100
for iteration in range(max(d_iters, t_iters)):
x_t, [diag_t, age_t] = next(t_sampler_train)
x_s, [diag_s, age_s] = next(s_sampler_train)
feed_dict_dc = {xs_pl: x_s,
xt_pl: x_t,
learning_rate_gan_pl: curr_lr_gan,
learning_rate_clf_pl: curr_lr_clf,
diag_s_pl: diag_s,
ages_s_pl: age_s,
training_time_placeholder: True,
directly_feed_clf_pl: False}
train_ops_list_dc = []
if iteration < t_iters:
# train classifier
train_ops_list_dc.append(train_ops_dict['clf'])
if iteration < d_iters:
# train discriminator
train_ops_list_dc.append(train_ops_dict['disc'])
sess.run(train_ops_list_dc, feed_dict=feed_dict_dc)
if not exp_config.improved_training:
sess.run(d_clip_op)
elapsed_time = time.time() - start_time
# train generator
x_t, [diag_t, age_t] = next(t_sampler_train)
x_s, [diag_s, age_s] = next(s_sampler_train)
sess.run(train_ops_dict['gen'],
feed_dict={xs_pl: x_s,
xt_pl: x_t,
learning_rate_gan_pl: curr_lr_gan,
learning_rate_clf_pl: curr_lr_clf,
diag_s_pl: diag_s,
ages_s_pl: age_s,
training_time_placeholder: True,
directly_feed_clf_pl: False
})
if step % exp_config.update_tensorboard_frequency == 0:
x_t, [diag_t, age_t] = next(t_sampler_train)
x_s, [diag_s, age_s] = next(s_sampler_train)
feed_dict_summary={xs_pl: x_s,
xt_pl: x_t,
learning_rate_gan_pl: curr_lr_gan,
learning_rate_clf_pl: curr_lr_clf,
diag_s_pl: diag_s,
ages_s_pl: age_s,
training_time_placeholder: True,
directly_feed_clf_pl: False
}
c_loss_one_batch, gan_losses_one_batch_dict, summary_str = sess.run(
[classifier_loss, losses_gan_dict, summary], feed_dict=feed_dict_summary)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
logging.info("[Step: %d], classifier_loss: %g, GAN losses: %s" % (step, c_loss_one_batch, str(gan_losses_one_batch_dict)))
logging.info(" - elapsed time for one step: %f secs" % elapsed_time)
if (step + 1) % exp_config.train_eval_frequency == 0:
# Evaluate against the training set
logging.info('Training data eval for classifier (target domain):')
[train_loss, train_diag_f1, train_ages_f1] = do_eval_classifier(sess, eval_diag_loss,
eval_ages_loss,
pred_labels,
ages_softmaxs,
xs_pl,
diag_s_pl,
ages_s_pl,
training_time_placeholder,
directly_feed_clf_pl,
images_train,
[labels_train, ages_train],
clf_batch_size=clf_batch_size,
do_ordinal_reg=exp_config.age_ordinal_regression,
selection_indices=source_images_train_ind)
train_summary_msg = sess.run(train_summary, feed_dict={train_error_clf_: train_loss,
train_diag_f1_score_: train_diag_f1,
train_ages_f1_score_: train_ages_f1}
)
summary_writer.add_summary(train_summary_msg, step)
loss_history.append(train_loss)
if len(loss_history) > 5:
loss_history.pop(0)
loss_gradient = (loss_history[-5] - loss_history[-1]) / 2
logging.info('loss gradient is currently %f' % loss_gradient)
if exp_config.schedule_lr and loss_gradient < exp_config.schedule_gradient_threshold:
logging.warning('Reducing learning rate of the classifier!')
curr_lr_clf /= 10.0
logging.info('Learning rate of the classifier changed to: %f' % curr_lr_clf)
# reset loss history to give the optimisation some time to start decreasing again
loss_gradient = np.inf
loss_history = []
if train_loss <= last_train: # best_train:
logging.info('Decrease in training error!')
else:
logging.info('No improvement in training error for %d steps' % no_improvement_counter)
last_train = train_loss
if (step + 1) % exp_config.validation_frequency == 0:
# evaluate gan losses
g_loss_val_avg, d_loss_val_avg = do_eval_gan(sess=sess,
losses=[losses_gan_dict['gen']['nr'], losses_gan_dict['disc']['nr']],
images_s_pl=xs_pl,
images_t_pl=xt_pl,
training_time_placeholder=training_time_placeholder,
images=images_val,
source_images_ind=source_images_val_ind,
target_images_ind=target_images_val_ind)
# evaluate classifier losses
[val_loss, val_diag_f1, val_ages_f1] = do_eval_classifier(sess,
eval_diag_loss,
eval_ages_loss,
pred_labels,
ages_softmaxs,
xs_pl,
diag_s_pl,
ages_s_pl,
training_time_pl=training_time_placeholder,
directly_feed_clf_pl=directly_feed_clf_pl,
images=images_val,
labels_list=[labels_val, ages_val],
clf_batch_size=clf_batch_size,
do_ordinal_reg=exp_config.age_ordinal_regression,
selection_indices=source_images_val_ind)
feed_dict_val = {
val_error_clf_: val_loss,
val_diag_f1_score_: val_diag_f1,
val_ages_f1_score_: val_ages_f1,
val_disc_loss_pl: d_loss_val_avg,
val_gen_loss_pl: g_loss_val_avg
}
validation_summary_msg = sess.run(val_summary, feed_dict=feed_dict_val)
summary_writer.add_summary(validation_summary_msg, step)
summary_writer.flush()
# save best variables (if discriminator loss is the lowest yet)
if d_loss_val_avg <= best_d_loss:
best_d_loss = d_loss_val_avg
best_file = os.path.join(log_dir, 'model_best_d_loss.ckpt')
saver_best_disc.save(sess, best_file, global_step=step)
logging.info('Found new best discriminator loss on validation set! - %f - Saving model_best_d_loss.ckpt' % best_d_loss)
if val_diag_f1 >= best_diag_f1_score:
best_diag_f1_score = val_diag_f1
best_file = os.path.join(log_dir, 'model_best_diag_f1.ckpt')
saver_best_diag_f1.save(sess, best_file, global_step=step)
logging.info(
'Found new best DIAGNOSIS F1 score on validation set! - %f - Saving model_best_diag_f1.ckpt' % val_diag_f1)
if val_ages_f1 >= best_ages_f1_score:
best_ages_f1_score = val_ages_f1
best_file = os.path.join(log_dir, 'model_best_ages_f1.ckpt')
saver_best_ages_f1.save(sess, best_file, global_step=step)
logging.info(
'Found new best AGES F1 score on validation set! - %f - Saving model_best_ages_f1.ckpt' % val_ages_f1)
if val_loss <= best_val:
best_val = val_loss
best_file = os.path.join(log_dir, 'model_best_xent.ckpt')
saver_best_xent.save(sess, best_file, global_step=step)
logging.info(
'Found new best crossentropy on validation set! - %f - Saving model_best_xent.ckpt' % val_loss)
logging.info("[Validation], generator loss: %g, discriminator_loss: %g" % (g_loss_val_avg, d_loss_val_avg))
# Write the summaries and print an overview fairly often.
if step % exp_config.save_frequency == 0:
saver_latest.save(sess, os.path.join(log_dir, 'model.ckpt'), global_step=step)
sess.close()
def image_tensor_shape(batch_size):
return [batch_size] + list(exp_config.image_size) + [exp_config.n_channels]
def image_placeholder(batch_size, name):
return tf.placeholder(tf.float32, image_tensor_shape(batch_size), name=name)
def placeholders_clf(batch_size, scope_name):
with tf.variable_scope(scope_name):
labels_tensor_shape = [batch_size]
if exp_config.age_ordinal_regression:
ages_tensor_shape = [batch_size, len(exp_config.age_bins)]
else:
ages_tensor_shape = [batch_size]
images_pl = image_placeholder(batch_size, 'images')
diag_pl = tf.placeholder(tf.uint8, shape=labels_tensor_shape, name='labels')
ages_pl = tf.placeholder(tf.uint8, shape=ages_tensor_shape, name='ages')
return images_pl, diag_pl, ages_pl
def concatenate_clf_input(images_list, diag_list, ages_list, scope_name='fs_concat'):
with tf.variable_scope(scope_name):
images = tf.concat(images_list, axis=0, name='images')
diag = tf.concat(diag_list, axis=0, name='diagnose')
ages = tf.concat(ages_list, axis=0, name='ages')
return images, diag, ages
def do_eval_gan(sess, losses, images_s_pl, images_t_pl, training_time_placeholder, images, source_images_ind,
target_images_ind, batch_size=exp_config.batch_size, num_batches=exp_config.num_val_batches):
'''
Function for running the GAN evaluation every X iterations on the validation set.
:param sess: The current tf session
:param losses: list of loss tensors to evaluate
:param images_s_pl: placeholder for the source-domain image batch
:param images_t_pl: placeholder for the target-domain image batch
:param training_time_placeholder: placeholder toggling the training/testing mode
:param images: a numpy array or h5py dataset containing the images
:param source_images_ind: indices of the source-domain images
:param target_images_ind: indices of the target-domain images
:param batch_size: the batch size to use
:param num_batches: number of validation batches to average over
:return: the average of each loss in `losses` over the evaluated batches
'''
s_sampler_val = iterate_minibatches_endlessly(images,
batch_size=2*batch_size,
exp_config=exp_config,
selection_indices=source_images_ind)
t_sampler_val = iterate_minibatches_endlessly(images,
batch_size=batch_size,
exp_config=exp_config,
selection_indices=target_images_ind)
# evaluate the validation batch with batch_size images (from each domain) at a time
loss_val_array = np.empty((num_batches, len(losses)), dtype=np.float32)
for batch_ind in range(exp_config.num_val_batches):
x_t = next(t_sampler_val)
x_s = next(s_sampler_val)
loss_val = sess.run(
losses, feed_dict={images_s_pl: x_s,
images_t_pl: x_t,
training_time_placeholder: False})
loss_val_array[batch_ind, :] = np.array(loss_val)
loss_val_avg = np.mean(loss_val_array, axis=0)
logging.info(losses)
logging.info(num_batches)
logging.info('average val loss: ' + str(loss_val_avg.tolist()))
return loss_val_avg.tolist()
def do_eval_classifier(sess, eval_diag_loss, eval_ages_loss, pred_labels, ages_softmaxs, images_s_pl, diag_labels_pl,
ages_pl, training_time_pl, directly_feed_clf_pl, images, labels_list, clf_batch_size, do_ordinal_reg,
selection_indices=None):
'''
Function for running the classifier evaluation every X iterations on the training and validation sets.
:param sess: The current tf session
:param eval_diag_loss: tensor with the diagnosis evaluation loss
:param eval_ages_loss: tensor with the age evaluation loss
:param pred_labels: tensor with the predicted diagnosis labels
:param ages_softmaxs: tensor with the age softmax outputs
:param images_s_pl: placeholder for the images
:param diag_labels_pl: placeholder for the diagnosis labels
:param ages_pl: placeholder for the age labels
:param training_time_pl: placeholder toggling the training/testing mode
:param images: a numpy array or h5py dataset containing the images
:param labels_list: a numpy array or h5py dataset containing the corresponding labels
:param clf_batch_size: the batch size to use
:return: the average loss (as defined in the experiment), the diagnosis F1 score and the age bin error over all `images`
'''
diag_loss_ii = 0
ages_loss_ii = 0
num_batches = 0
predictions_diag = []
predictions_diag_gt = []
predictions_ages = []
predictions_ages_gt = []
for batch in iterate_minibatches(images,
labels_list,
batch_size=clf_batch_size,
selection_indices=selection_indices,
augmentation_function=None,
exp_config=exp_config): # No aug in evaluation
# As before you can wrap the iterate_minibatches function in the BackgroundGenerator class for speed improvements
# but at the risk of not catching exceptions
x, [y, a] = batch
assert y.shape[0] == clf_batch_size
feed_dict = {images_s_pl: x,
diag_labels_pl: y,
ages_pl: a,
training_time_pl: False,
directly_feed_clf_pl: False}
c_d_loss, c_a_loss, c_d_preds, c_a_softmaxs = sess.run([eval_diag_loss, eval_ages_loss, pred_labels, ages_softmaxs], feed_dict=feed_dict)
# This converts the labels back into the original format. I.e. [0,1,1,0] will become [0,2,2,0] again if
# 1 didn't exist in the dataset.
c_d_preds = [exp_config.label_list[pp] for pp in c_d_preds]
y_gts = [exp_config.label_list[pp] for pp in y]
diag_loss_ii += c_d_loss
ages_loss_ii += c_a_loss
num_batches += 1
predictions_diag += c_d_preds
predictions_diag_gt += y_gts
if do_ordinal_reg:
c_a_preds = np.asarray(c_a_softmaxs)
c_a_preds = np.transpose(c_a_preds, (1, 0, 2))
c_a_preds = c_a_preds[:, :, 1]
c_a_preds = np.uint8(c_a_preds + 0.5)
predictions_ages += list(utils.ordinal_regression_to_bin(c_a_preds))
predictions_ages_gt += list(utils.ordinal_regression_to_bin(a))
else:
c_a_preds = np.argmax(c_a_softmaxs, axis=-1)
predictions_ages += list(c_a_preds)
predictions_ages_gt += list(a)
avg_loss = (diag_loss_ii / num_batches) + (ages_loss_ii / num_batches)
# check whether the labels are in {0, 2} as expected
logging.info('diagnose predictions and ground truth:')
logging.info(predictions_diag)
assert all([label in {0, 2} for label in predictions_diag])
logging.info(predictions_diag_gt)
assert all([label in {0, 2} for label in predictions_diag_gt])
f1_diag_score = f1_score(np.asarray(predictions_diag_gt), np.asarray(predictions_diag), pos_label=2, average='binary') # micro is overall, macro doesn't take class imbalance into account
# f1_ages_score = f1_score(np.asarray(predictions_ages_gt), np.asarray(predictions_ages), average='micro') # micro is overall, macro doesn't take class imbalance into account
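# note: despite its name, the value computed on the next line is the mean absolute error between predicted and true age bins, not an F1 score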
f1_ages_score = np.mean(np.abs(np.asarray(predictions_ages, dtype=np.int32) - np.asarray(predictions_ages_gt, dtype=np.int32)))
logging.info(' Average loss: %0.04f, diag f1_score: %0.04f, age f1_score %0.04f' % (avg_loss, f1_diag_score, f1_ages_score))
return avg_loss, f1_diag_score, f1_ages_score
def main():
continue_run = True
if not tf.gfile.Exists(log_dir):
tf.gfile.MakeDirs(log_dir)
continue_run = False
# Copy experiment config file
if continue_run:
tf.gfile.MakeDirs(log_dir + '_cont')
shutil.copy(exp_config.__file__, log_dir + '_cont')
else:
shutil.copy(exp_config.__file__, log_dir)
run_training(continue_run, log_dir=log_dir)
if __name__ == '__main__':
main()
``` |
{
"source": "jodilodi/Eyeshadow",
"score": 3
} |
#### File: jodilodi/Eyeshadow/color_analyse.py
```python
import math
import operator
class Color_Class:
def rgb_to_hex(rgb):
return '%02x%02x%02x' % rgb
class Color_Analysis:
def Calculate_Mode_RGB(image, middle, borderdistance):
start = x,y = middle[0] - borderdistance, middle[1] - borderdistance
RGBDic = {}
for i in range(int(start[0]), int(start[0] + borderdistance*2)):
for j in range(int(start[1]), int(start[1] + borderdistance*2)):
pixel = i,j
RGB = r,g,b =image.getpixel(pixel)
if RGB in RGBDic:
RGBDic[RGB]+= 1
else:
RGBDic[RGB] = 1
# return len(HSVDic)
return len(RGBDic), max(RGBDic.items(), key=operator.itemgetter(1))[0]
def Min_Max_RGB(image, middle, borderdistance):
start = x,y = middle[0] - borderdistance, middle[1] - borderdistance
R = []
G = []
B = []
for i in range(int(start[0]), int(start[0] + borderdistance*2)):
for j in range(int(start[1]), int(start[1] + borderdistance*2)):
pixel = i,j
RGB = r,g,b =image.getpixel(pixel)
# if RGB in RGBDic:
# RGBDic[RGB]+= 1
# else:
# RGBDic[RGB] = 1
R.append(RGB[0])
G.append(RGB[1])
B.append(RGB[2])
# return len(HSVDic)
MIN = r,g,b = min(R), min(G), min(B)
MAX = r,g,b = max(R), max(G), max(B)
return MIN, MAX
def within_rgb_frame(middle, topleft, topright, bottomleft, bottomright):
#format r,g,b from each passed variable
threshold = 15
if abs(middle[0] - topleft[0]) <= threshold \
and abs(middle[1] - topleft[1]) <= threshold \
and abs(middle[2] - topleft[2]) <= threshold \
and abs(middle[0] - topright[0]) <= threshold \
and abs(middle[1] - topright[1]) <= threshold \
and abs(middle[2] - topright[2]) <= threshold \
and abs(middle[0] - bottomleft[0]) <= threshold \
and abs(middle[1] - bottomleft[1]) <= threshold \
and abs(middle[2] - bottomleft[2]) <= threshold \
and abs(middle[0] - bottomright[0]) <= threshold \
and abs(middle[1] - bottomright[1]) <= threshold \
and abs(middle[2] - bottomright[2]) <= threshold :
return True
else:
return False
def AVG_Image_RGB(image, middle, borderdistance):
start = x,y = middle[0] - borderdistance, middle[1] - borderdistance
tot_r,tot_g,tot_b = 0,0,0
for i in range(int(start[0]), int(start[0] + borderdistance*2)):
for j in range(int(start[1]), int(start[1] + borderdistance*2)):
pixel = i,j
RGB = r,g,b = image.getpixel(pixel)
tot_r += r
tot_g += g
tot_b += b
surfacearea = borderdistance * borderdistance * 4
RGBAvg = R,G,B = math.floor(tot_r/surfacearea), math.floor(tot_g/surfacearea), math.floor(tot_b/surfacearea)
return RGBAvg
def Calculate_Image_Box(image, middle):
borderdistance = 200
middlex, middley = middle[0],middle[1]
width, height = image.size
middlergb = image.getpixel((middlex,middley))
topleftrgb =image.getpixel((max(1, middlex-borderdistance), max(1, middley - borderdistance)))
toprightrgb = image.getpixel((min(width, middlex+borderdistance),max(1, middley-borderdistance)))
bottomleftrgb = image.getpixel((max(1,middlex-borderdistance), min(height, middley+borderdistance)))
bottomrightrgb = image.getpixel((min(width, middlex + borderdistance), min(height, middley + borderdistance)))
if len(middlergb) != 3 or \
len(topleftrgb) != 3 or \
len(toprightrgb) != 3 or \
len(bottomleftrgb) != 3 or \
len(bottomrightrgb) != 3:
print("Error")
return 0
#start from the middle and sample the four corners borderdistance (initially 200) pixels up/down and left/right
#check whether every corner pixel is within the RGB threshold of the middle pixel; if not, shrink the box by 5 pixels per side and retry
while not Color_Analysis.within_rgb_frame(middlergb, topleftrgb, toprightrgb, bottomleftrgb, bottomrightrgb) \
and borderdistance > 0:
borderdistance -= 5
topleftrgb =image.getpixel((max(1, middlex-borderdistance), max(1, middley - borderdistance)))
toprightrgb = image.getpixel((min(width, middlex+borderdistance),max(1, middley-borderdistance)))
bottomleftrgb = image.getpixel((max(1,middlex-borderdistance), min(height, middley+borderdistance)))
bottomrightrgb = image.getpixel((min(width, middlex + borderdistance), min(height, middley + borderdistance)))
return borderdistance
```
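A possible usage sketch for `Calculate_Image_Box` and `AVG_Image_RGB`, assuming the module is importable as `color_analyse` from the repository root. The image size, centre point and colour below are invented for illustration; on a uniform image the box should keep its full starting half-width of 200 pixels.
```python
from PIL import Image
from color_analyse import Color_Analysis  # assumes the file above is on the import path

# solid-colour image: every sampled corner matches the centre pixel
img = Image.new("RGB", (800, 800), (120, 60, 200))
middle = (400, 400)

border = Color_Analysis.Calculate_Image_Box(img, middle)
print(border)  # expected: 200 for a uniform image

avg = Color_Analysis.AVG_Image_RGB(img, middle, border)
print(avg)     # expected: (120, 60, 200)
```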
#### File: jodilodi/Eyeshadow/html_scraping.py
```python
from bs4 import BeautifulSoup
import requests
import re
import sys
#error handling
#image stuff
from PIL import Image
from io import BytesIO
class Brand:
def __init__(self, name, id):
self.name = name
self.id = id
class Eyeshadow:
def __init__(self, name, imgsrc, src, foundin, grade, brand, finish):
self.name = name
self.imgsrc = imgsrc
response = requests.get(imgsrc, timeout=5)
self.byte = BytesIO(response.content).getvalue()
self.src = src
self.foundin = foundin
self.temptaliagrade = grade
self.brand = brand
self.finish = finish
class Temptalia_Scrapping:
def Get_Brands():
url = "https://www.temptalia.com/p/_brands/"
r = requests.get(url, timeout=5)
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
#will print out all of the brands that temptalia has reviewed or mentioned
# for option in soup.find(id='filter_brand'):
# try:
# if "Select" not in option.get_text() and "All Brands" not in option.get_text():
# print(option.get_text())
# except:
# continue
brands = []
for option in soup.find(id = 'filter_brand'):
try:
if "Select" not in option.get_text() and "All Brands" not in option.get_text():
#brands.append(option.get_text())
brands.append(Brand(option.get_text(), option['value']))
except:
continue
return brands
def Brand_Contains_Eyeshadow(id):
url = r"https://www.temptalia.com/product/page/1/?f_formula_search&f_formula=0&t%%5B0%%5D=12674&brand=%s&time=all&sorting=date_desc&archive=rated" % (id)
try:
r = requests.get(url, timeout=10)
except:
return True #dunno, false positive is better than false negative
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
elements = soup.find_all("div", class_="alert alert-danger my-5 f-4 sans-serif text-center")
if len(elements) == 0:
#no "no results" warning banner on the page, so the brand has eyeshadow entries
return True
else:
return False
def Print_Brands():
url = "https://www.temptalia.com/p/_brands/"
r = requests.get(url, timeout=5)
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
for option in soup.find(id = 'filter_brand'):
print(option)
print(option.get_text())
print(option['value'])
def Get_Available_In_Palette(url):
try:
r = requests.get(url, timeout=5)
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
section = soup.find(id="sectionAvailable")
#section
if section is None:
return "Single"
else:
title = section.find("h4", class_="f-2 mb-0 regular text-uppercase").text
return str.rstrip(title)
except:
return "Unknown"
def Get_Eyeshadow_Rank(url):
try:
r = requests.get(url, timeout=5)
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
# grade = soup.find("div", class_="glossover-grade large py-4").text
gradebox = soup.find("div", class_="glossover-grade large py-4")
if gradebox is None:
return ""
else:
return gradebox.text
except:
return ""
def Get_Eyeshadow_Finish(url):
try:
print(url)
r = requests.get(url, timeout = 5)
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
description = "none"
for tag in soup.find_all("meta"):
if tag.get("name", None) == "twitter:description":
description = tag["content"]
desSplit = description.split(" ")
findex = 0
try:
findex = desSplit.index('finish')
except:
findex = desSplit.index('finish.')
print(desSplit[findex-1])
return desSplit[findex - 1]
except:
print(sys.exc_info()[0])
return -1
def Get_Eyeshadow(brand, id, pageindex):
#brand, type is eyeshadow, date rangeis set to all time
#https://www.temptalia.com/product/page/1/?f_formula_search&f_formula=0&t%%5B0%%5D=12674&brand=%s&time=all&sorting=date_desc&archive=rated
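# the doubled %% escapes a literal percent sign so the URL-encoded %5B/%5D brackets survive the string formatting below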
url = r"https://www.temptalia.com/product/page/%s/?f_formula_search&f_formula=0&t%%5B0%%5D=12674&brand=%s&time=all&sorting=date_desc&archive=rated" % (pageindex, id)
try:
r = requests.get(url, timeout=10)
except:
print("Get Eyeshadow Page Error")
return []
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
eyeshadowcolors = []
for element in soup.find_all("div", class_="display-badge"):#, class_="display-badge product product-archive"):
try:
#get the biggest image for the color
allimg = element.find("img", class_="img-fluid").get('data-lazy-srcset')
img_array = allimg.split(",")
img = img_array[len(img_array)-1].strip()
img = img[0:img.find(' ')]
colorName = element.find("h5", class_="f-3 text-base text-ellipsis m-0").text
src = element.find("h5", class_="f-3 text-base text-ellipsis m-0").find("a").get("href")
foundin = Temptalia_Scrapping.Get_Available_In_Palette(src)
grade = Temptalia_Scrapping.Get_Eyeshadow_Rank(src)
finish = Temptalia_Scrapping.Get_Eyeshadow_Finish(src)
eyeshadowcolors.append(Eyeshadow(colorName, img, src, foundin, grade, brand, finish))
print(colorName)
except:
print("Error in adding eyeshadow")
print(sys.exc_info()[0])
continue
return eyeshadowcolors
def Get_Nav_Pages(id):
url = r"https://www.temptalia.com/product/page/1/?f_formula_search&f_formula=0&t%%5B0%%5D=12674&brand=%s&time=all&sorting=date_desc&archive=rated" % id
try:
r = requests.get(url, timeout=5)
html_doc = r.text
soup = BeautifulSoup(html_doc, 'html.parser')
maxpage = 1
for pages in soup.find_all(True, class_="page-link" ):
try:
maxpage = max(maxpage, int(pages.text))
except ValueError:
continue
return maxpage
except:
return 1
```
#### File: jodilodi/Eyeshadow/Scrap_Insert.py
```python
from html_scraping import Temptalia_Scrapping
from demo_mongodb_test import Makeup_MongoDB
from os import system, name
from subprocess import call
import os
def Insert_New_Brands():
print("Get all brands from Temptalia")
AllBrands = Temptalia_Scrapping.Get_Brands()
#print(AllBrands)
#print(Makeup_MongoDB.Contain_Brand(AllBrands[0]))
print("Check if brand is already in db and insert")
for brand in AllBrands:
print(brand.name)
brand_exist = Makeup_MongoDB.Contain_Brand(brand.name, brand.id)
if not brand_exist:
insertid = Makeup_MongoDB.Insert_Brand(brand.name, brand.id)
print(insertid)
else:
print("exists")
def clear():
_ = call('clear' if os.name == 'posix' else 'cls', shell=True)  # cls is a shell builtin on Windows, so go through the shell
if __name__ == "__main__":
clear()
#For Reset
# Makeup_MongoDB.Delete_Makeup_DB()
#Insert new brand names
# Insert_New_Brands()
#insert all eyeshadows
branddata = Makeup_MongoDB.Get_Makeup_DB_After("<NAME>")
for brand in branddata:
print(brand)
totalpages = Temptalia_Scrapping.Get_Nav_Pages(brand["temptalia_id"])
alleyeshadows = []
for pageindex in range(1, totalpages + 1):
#for pageindex in range(1,2):
alleyeshadows = alleyeshadows + Temptalia_Scrapping.Get_Eyeshadow(brand["name"], brand["temptalia_id"], pageindex)
# for eyeshadow in alleyeshadows:
# print(eyeshadow.name)
# print(eyeshadow.src)
# for i in range(0,10):
# eyeshadow = alleyeshadows[i]
for eyeshadow in alleyeshadows:
exist = Makeup_MongoDB.Contain_Eyeshadow(eyeshadow.brand, eyeshadow.name)
if not exist:
insertid = Makeup_MongoDB.Insert_Eyeshadow(eyeshadow)
print(insertid)
else:
print("Added")
``` |
{
"source": "JodisKripe/Python",
"score": 4
} |
#### File: project_euler/problem_092/sol1.py
```python
def next_number(number: int) -> int:
"""
Returns the next number of the chain by adding the square of each digit
to form a new number.
For example, if number = 12, next_number() will return 1^2 + 2^2 = 5.
Therefore 5 is the next number of the chain.
>>> next_number(44)
32
>>> next_number(10)
1
>>> next_number(32)
13
"""
return sum(int(digit) ** 2 for digit in str(number))
def chain(number: int) -> bool:
"""
Generates the chain of numbers until the next number generated is 1 or 89.
For example, if the starting number is 44, the function generates the
following chain of numbers.
chain: 44 → 32 → 13 → 10 → 1 → 1
Once the next number generated is 1 or 89, the chain stops.
Returns True if the chain ends in 1.
Returns False if the chain ends in 89.
>>> chain(10)
True
>>> chain(58)
False
>>> chain(1)
True
"""
while number != 1 and number != 89:
number = next_number(number)
if number == 1:
return True
elif number == 89:
return False
def solution(number: int = 10000000) -> int:
"""
The function returns how many starting numbers below `number` end up in 89
after chain generation.
For every value under `number`, a chain is generated until it reaches 1 or 89.
If chain() returns True, total is incremented, meaning the starting number
ended up in 1; otherwise total2 is incremented, meaning it ended up in 89.
The function returns total2, since the question asks how many numbers
end up in 89.
>>> solution(100)
80
>>> solution(10000000)
8581146
"""
total = 0
total2 = 0
for i in range(1, number):
val = chain(i)
if val is True:
total += 1
elif val is False:
total2 += 1
return total2
if __name__ == "__main__":
print(f"{solution() = }")
``` |
{
"source": "jodli/FritzScraper",
"score": 2
} |
#### File: FritzScraper/src/fritzscrapercargo.py
```python
import time
class FritzScraperCargo(object):
def __init__(self, cargo):
self.timestamp = time.time()
self.cargo = cargo
``` |
{
"source": "j-o-d-o/accident_predictor",
"score": 3
} |
#### File: data/upload/sampler.py
```python
import configparser
from dlpipe.data_reader.mongodb import MongoDBConnect
from dlpipe.utils import DLPipeLogger
from accident_predictor.data.upload.data_encoder import sin_cos_representation
from accident_predictor.data.upload.calc_class_distances import upload_distances
from keras.utils.np_utils import to_categorical
import numpy as np
import copy
def generate_synth_data(col, ids, insert=True):
"""
create synthetic data from distance calculation of entries to other classes and save to database
:param col: collection to save synthetic data to
:param ids: list of ids for the records that should be sampledn
:param insert: bool to actually insert all entry (for debugging)
:return: mongodb ids that where inserted as synthetic data
"""
# TODO: potentially change light condition depending on the time (e.g. it shouldn't be dark at 13:00)
# TODO: same for date, chance of snow and ice in summer is rather low...
cursor = col.find({"_id": {"$in": ids}})
inserted_ids = []
for row in cursor:
# change age
org_age = copy.deepcopy(row["age"])
for age_idx in range(0, 18):
age_min = 5 * age_idx
age_max = 5 * (age_idx + 1)
new_age = int(np.random.uniform(age_min, age_max, 1)[0])
row["age"] = int(new_age)
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["age"] = org_age
# change time
org_time = copy.deepcopy(row["time"])
for time_idx in range(0, 24):
time_min = 60 * time_idx
time_max = 60 * (time_idx + 1)
new_time = int(np.random.uniform(time_min, time_max, 1)[0])
sin_time, cos_time = sin_cos_representation(new_time, 1440)
row["time"]["value"] = new_time
row["time"]["sin"] = sin_time
row["time"]["cos"] = cos_time
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["time"] = org_time
# change date
org_date = copy.deepcopy(row["date"])
for date_idx in range(0, 18):
date_min = 20 * date_idx
date_max = 20 * (date_idx + 1)
new_date = int(np.random.uniform(date_min, date_max, 1)[0])
sin_date, cos_date = sin_cos_representation(new_date, 361)
row["date"]["value"] = new_date
row["date"]["sin"] = sin_date
row["date"]["cos"] = cos_date
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["date"] = org_date
# change class
org_class = copy.deepcopy(row["class"])
for new_index in range(0, len(org_class["encoded"])):
row["class"] = {
"value": "generated",
"encoded": to_categorical(new_index, num_classes=len(org_class["encoded"])).astype(int).tolist()
}
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["class"] = org_class
# change weather
org_class = copy.deepcopy(row["weather"])
for new_index in range(0, len(org_class["encoded"])):
row["weather"] = {
"value": "generated",
"encoded": to_categorical(new_index, num_classes=len(org_class["encoded"])).astype(int).tolist()
}
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["weather"] = org_class
# change gender
org_class = copy.deepcopy(row["gender"])
for new_index in range(0, len(org_class["encoded"])):
row["gender"] = {
"value": "generated",
"encoded": to_categorical(new_index, num_classes=len(org_class["encoded"])).astype(int).tolist()
}
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["gender"] = org_class
# change vehicle type
org_class = copy.deepcopy(row["vehicle_type"])
for new_index in range(0, len(org_class["encoded"]) - 1):
row["vehicle_type"] = {
"value": "generated",
"encoded": to_categorical(new_index, num_classes=len(org_class["encoded"])).astype(int).tolist()
}
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["vehicle_type"] = org_class
# change road_type
org_class = copy.deepcopy(row["road_type"])
for new_index in range(0, len(org_class["encoded"]) - 1):
row["road_type"] = {
"value": "generated",
"encoded": to_categorical(new_index, num_classes=len(org_class["encoded"])).astype(int).tolist()
}
if insert:
del row["_id"]
inserted_ids.append(col.insert_one(row))
row["road_type"] = org_class
return inserted_ids
def up_sample(col, cursor, nr_create):
"""
Sample a set amount of data by copying the existing data and saving it to mongodb
:param col: mongodb collection where the new documents should be saved to
:param cursor: pymongo cursor with the data that is getting sampled
:param nr_create: how many additional documents should be created
"""
if nr_create < 0:
raise ValueError("Can not create negative amount of entries")
counter = 0
while counter < nr_create:
for row in cursor:
del row["_id"]
col.insert_one(row)
counter += 1
if counter >= nr_create:
break
cursor.rewind()
if __name__ == "__main__":
DLPipeLogger.remove_file_logger()
cp = configparser.ConfigParser()
if len(cp.read('./../../connections.ini')) == 0:
raise ValueError("Config File could not be loaded, please check the correct path!")
MongoDBConnect.add_connections_from_config(cp)
col_train = MongoDBConnect.get_collection("localhost_mongo_db", "accident", "train")
col_distance = MongoDBConnect.get_collection("localhost_mongo_db", "accident", "k_distance")
# find class distances
upload_distances()
# get averaged class distances for class 1 and 2
raw_distance_data_avg_class_1 = col_distance.find({"class": 1, "compared_to": {"$all": [0, 2]}})
raw_distance_data_avg_class_2 = col_distance.find({"class": 2, "compared_to": {"$all": [0, 1]}})
if raw_distance_data_avg_class_1.count() == 0 or raw_distance_data_avg_class_2.count() == 0:
raise ValueError("No distance data found, need to execute 'calc_class_distance.py' first")
# generate synthetic data from class distances
inserted_ids_1 = generate_synth_data(col_train, raw_distance_data_avg_class_1[0]["ids"][0:70], True)
inserted_ids_2 = generate_synth_data(col_train, raw_distance_data_avg_class_2[0]["ids"][0:20], True)
raw_data_train_0 = col_train.find({"accident_severity": 0})
raw_data_train_1 = col_train.find({"accident_severity": 1})
raw_data_train_2 = col_train.find({"accident_severity": 2})
print("Class distribution after synthetic data generation:")
print("Class 0: " + str(raw_data_train_0.count()))
print("Class 1: " + str(raw_data_train_1.count()))
print("Class 2: " + str(raw_data_train_2.count()))
# evenly sample data by copying existing data
max_count = raw_data_train_0.count()
up_sample(col_train, raw_data_train_1, (max_count - raw_data_train_1.count()))
up_sample(col_train, raw_data_train_2, (max_count - raw_data_train_2.count()))
```
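`sin_cos_representation` is imported from `data_encoder` above but its implementation is not included in this dump. The sketch below is a plausible minimal version of such a cyclical encoding, assuming it maps a value and its period onto the unit circle; this is an inference from how the (sin, cos) pair is consumed (e.g. `sin_cos_representation(new_time, 1440)` for minutes of the day), not the project's actual code.
```python
import math

def sin_cos_representation(value, period):
    # hypothetical re-implementation: project a cyclic value (minute of day,
    # day of year, ...) onto the unit circle so that e.g. 23:59 and 00:00
    # end up close together in feature space
    angle = 2.0 * math.pi * (value / period)
    return math.sin(angle), math.cos(angle)

print(sin_cos_representation(0, 1440))    # (0.0, 1.0)
print(sin_cos_representation(720, 1440))  # (~0.0, -1.0)
```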
#### File: accident_predictor/accident_predictor/processors.py
```python
from dlpipe.processors.processor_interface import IPreProcessor
import numpy as np
DATA_INFO = {
"age": {"norm": 98},
"nr_person_hurt": {"norm": 3},
"nr_vehicles": {"norm": 4}
}
class PreProcessData(IPreProcessor):
def process(self, raw_data, input_data, ground_truth, piped_params=None):
ground_truth = np.zeros(3)
if "accident_severity" in raw_data:
index = min(int(raw_data["accident_severity"]), 2)
ground_truth[index] = 1.0
list_input = []
# sin and cos components are already normalized
list_input.append(float(raw_data["date"]["sin"]))
list_input.append(float(raw_data["date"]["cos"]))
list_input.append(float(raw_data["time"]["sin"]))
list_input.append(float(raw_data["time"]["cos"]))
# normalize features
list_input.append(int(raw_data["age"]) / DATA_INFO["age"]["norm"])
list_input.append(int(raw_data["nr_person_hurt"]) / DATA_INFO["nr_person_hurt"]["norm"])
list_input.append(int(raw_data["nr_vehicles"]) / DATA_INFO["nr_vehicles"]["norm"])
# some classification features have "unknown" columns at the end which are sliced off
list_input += raw_data["class"]["encoded"]
list_input += raw_data["light"]["encoded"]
list_input += raw_data["weather"]["encoded"][:-1]
list_input += raw_data["ground_condition"]["encoded"][:-1]
list_input += raw_data["gender"]["encoded"]
list_input += raw_data["vehicle_type"]["encoded"][:-1]
list_input += raw_data["road_type"]["encoded"][:-1]
input_data = np.asarray(list_input)
return raw_data, input_data, ground_truth, piped_params
```
#### File: dlpipe/callbacks/save_exp_mongodb.py
```python
from dlpipe.callbacks import Callback
from dlpipe.schemas import ExperimentSchema
from dlpipe.utils import DLPipeLogger
class SaveExpMongoDB(Callback):
"""
Callback class to save keras models during training, usually after each epoch
or if self._epoch_save_condition(result) returns true. Append this Callback to the Trainer e.g.:
>> callback = SaveExpMongoDB(model_db, "my_model_name", model.get_config())
>> trainer = Trainer(model=model, data_reader=data_reader, callbacks=[callback])
"""
def __init__(
self,
mongo_db,
name,
keras_model,
save_initial_weights: bool=True,
epoch_save_condition=None):
self._epoch_save_condition = epoch_save_condition
self._save_initial_weights = save_initial_weights
self._db = mongo_db
self._collection = mongo_db["experiment"]
self._keras_model = keras_model
self._exp = ExperimentSchema(self._collection, name, keras_model)
self._exp.log_file_path = DLPipeLogger.get_log_file_path()
self._exp.save()
def get_exp_id(self):
return self._exp.id
def training_start(self, result):
self._exp.result = result
self._exp.status = 100
self._exp.update(update_result=self._save_initial_weights)
def batch_end(self, result):
self._exp.result = result
self._exp.update(update_result=False)
def epoch_end(self, result):
self._exp.result = result
should_save_weights = self._epoch_save_condition is None or self._epoch_save_condition(result)
self._exp.update(update_result=should_save_weights)
def training_end(self, result):
self._exp.status = 2
self._exp.update(update_result=False)
def test_start(self, result):
self._exp.status = 200
self._exp.update(update_result=False)
def test_end(self, result):
self._exp.status = 1
# no new weights to save after testing, just metrics
self._exp.update(update_result=True, update_weights=False)
```
#### File: data_reader/mongodb/actions.py
```python
from dlpipe.data_reader.mongodb import MongoDBConnect
import configparser
class MongoDBActions:
@staticmethod
def add_config(file_name):
cp = configparser.ConfigParser()
if len(cp.read(file_name)) == 0:
raise ValueError("Config File could not be loaded, please check the correct path!")
MongoDBConnect.add_connections_from_config(cp)
```
#### File: dlpipe/schemas/experiment.py
```python
from dlpipe.result import Result
from bson import ObjectId
import gridfs
import os
class ExperimentSchema:
def __init__(self,
collection,
name: str,
keras_model,
):
# model info
self.keras_model = keras_model
self.name: str = name
self.result: Result = None
# training info
self.status: int = 0
self.log_file_path = ""
self.id = None
# mongodb connection
self._collection = collection
def get_dict(self) -> dict:
"""
:return: dict of serialized experiment data
"""
return_dict = {
"name": self.name,
"keras_model": self.keras_model,
"status": self.status,
"log_file_path": self.log_file_path,
"curr_epoch": None,
"curr_batch": None,
"max_batches_per_epoch": None,
"max_epochs": None
}
if self.result is not None:
return_dict.update({
"curr_epoch": self.result.curr_epoch,
"curr_batch": self.result.curr_batch,
"max_batches_per_epoch": self.result.max_batches_per_epoch,
"max_epochs": self.result.max_epochs
})
return return_dict
def save(self):
data_dict = self.get_dict()
data_dict["metrics"] = None
data_dict["weights"] = []
if self._collection is not None:
self.id = self._collection.insert_one(data_dict).inserted_id
self.update_result()
def update(self, update_result: bool=True, update_weights: bool=True):
data_dict = self.get_dict()
if self._collection is not None:
self._collection.update_one(
{'_id': ObjectId(self.id)},
{
'$set': data_dict
}
)
if update_result:
self.update_result(update_weights=update_weights)
def update_result(self, update_weights: bool=True):
if self.result is not None and self._collection is not None:
if update_weights:
fs = gridfs.GridFS(self._collection.database)
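# GridFS stores the serialized Keras HDF5 weights file as chunked binary documents in the same MongoDB database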
tmp_filename = "tmp_model_weights_save.h5"
model_gridfs = None
if self.result.model is not None:
self.result.model.save(tmp_filename)
with open(tmp_filename, mode='rb') as file:
file_bytes = file.read()
model_gridfs = fs.put(file_bytes)
os.remove(tmp_filename)
weights = {
"model_gridfs": model_gridfs,
"epoch": self.result.curr_epoch,
"batch": self.result.curr_batch
}
query = {
'$set': {
'metrics': self.result.metrics,
},
'$push': {'weights': weights}
}
else:
query = {
'$set': {
'metrics': self.result.metrics,
}
}
self._collection.update_one(
{'_id': ObjectId(self.id)},
query
)
```
#### File: dlpipe/utils/logger.py
```python
import logging
import io
import os
class DLPipeLogger:
_logger_level = logging.DEBUG
_logger_name = 'DLPipe.logger'
_formatter = logging.Formatter('[%(asctime)s] %(levelname)-10s %(message)s')
_log_contents = io.StringIO()
_current_log_file_path = "dlpipe.log"
logger = None
string_handler = None
file_handler = None
console_handler = None
@staticmethod
def setup_logger():
if DLPipeLogger.logger is not None:
print("WARNING: logger was setup already, deleting all previously existing handlers")
for hdlr in DLPipeLogger.logger.handlers[:]: # remove all old handlers
DLPipeLogger.logger.removeHandler(hdlr)
# Create the logger
DLPipeLogger.logger = logging.getLogger(DLPipeLogger._logger_name)
DLPipeLogger.logger.setLevel(DLPipeLogger._logger_level)
# Setup the StringIO handler
DLPipeLogger._log_contents = io.StringIO()
DLPipeLogger.string_handler = logging.StreamHandler(DLPipeLogger._log_contents)
DLPipeLogger.string_handler.setLevel(DLPipeLogger._logger_level)
# Setup the console handler
DLPipeLogger.console_handler = logging.StreamHandler()
DLPipeLogger.console_handler.setLevel(DLPipeLogger._logger_level)
# Setup the file handler
DLPipeLogger.file_handler = logging.FileHandler(DLPipeLogger._current_log_file_path, 'a')
DLPipeLogger.file_handler.setLevel(DLPipeLogger._logger_level)
# Optionally add a formatter
DLPipeLogger.string_handler.setFormatter(DLPipeLogger._formatter)
DLPipeLogger.console_handler.setFormatter(DLPipeLogger._formatter)
DLPipeLogger.file_handler.setFormatter(DLPipeLogger._formatter)
# Add the console handler to the logger
DLPipeLogger.logger.addHandler(DLPipeLogger.string_handler)
DLPipeLogger.logger.addHandler(DLPipeLogger.console_handler)
DLPipeLogger.logger.addHandler(DLPipeLogger.file_handler)
@staticmethod
def set_log_file(path, mode: str='a'):
DLPipeLogger._current_log_file_path = path
DLPipeLogger.logger.removeHandler(DLPipeLogger.file_handler)
DLPipeLogger.file_handler = logging.FileHandler(DLPipeLogger._current_log_file_path, mode)
DLPipeLogger.file_handler.setLevel(DLPipeLogger._logger_level)
DLPipeLogger.logger.addHandler(DLPipeLogger.file_handler)
@staticmethod
def remove_file_logger():
DLPipeLogger.logger.removeHandler(DLPipeLogger.file_handler)
if os.path.exists(DLPipeLogger._current_log_file_path):
os.remove(DLPipeLogger._current_log_file_path)
@staticmethod
def get_contents():
return DLPipeLogger._log_contents.getvalue()
@staticmethod
def get_log_file_path() -> str:
return DLPipeLogger._current_log_file_path
@staticmethod
def set_level(lvl):
DLPipeLogger._logger_level = lvl
DLPipeLogger.setup_logger()
DLPipeLogger.setup_logger()
``` |
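A short usage sketch; the import path is inferred from the file location above and may differ in the packaged module.
```python
import logging
from dlpipe.utils.logger import DLPipeLogger  # import path assumed from the file header above

DLPipeLogger.set_log_file("training_run.log", mode="w")  # swap the file handler target
DLPipeLogger.set_level(logging.INFO)                     # rebuilds all handlers at the new level
DLPipeLogger.logger.info("starting experiment")
print(DLPipeLogger.get_contents())                       # everything captured by the StringIO handler
DLPipeLogger.remove_file_logger()                        # detach the file handler and delete the file
```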
{
"source": "jodoldar/AdventofCode18",
"score": 4
} |
#### File: jodoldar/Day 5/day5.py
```python
def match_case(char1, char2):
if char1.lower() == char2.lower():
if char1.islower() and char2.isupper():
return True
elif char1.isupper() and char2.islower():
return True
else:
return False
return False
def second_match_a(char1, char2, char_f):
if char1.lower() == char2.lower() and char1.lower() == char_f:
return True
else:
return False
def part_1(compare_func):
input_file = open('./input','r')
initial_cadena = input_file.read().splitlines()[0]
cadena = initial_cadena
#print(cadena)
finished = False
iter = 0
while (finished == False):
modif = False
for i in range(0,len(cadena)-1):
if compare_func(cadena[i],cadena[i+1]):
#print('Encuentro {}'.format(cadena[i:i+2]))
newcadena = cadena[:i] + cadena[i+2:]
cadena = newcadena
modif = True
break
iter += 1
if iter%1000 == 0:
print(iter)
        if modif == False:
            finished = True
    # print the length of the fully reacted polymer (the part 1 answer)
    print(len(cadena))
def part_1b():
input_file = open('./input','r')
line = input_file.read()
oldline = None
while oldline != line:
oldline = line
for i in range(0,26):
line = line.replace(chr(ord("a") + i) + chr(ord("A") + i),"")
line = line.replace(chr(ord("A") + i) + chr(ord("a") + i),"")
print(len(line))
def part_2():
input_file = open('./input','r')
line = input_file.read().splitlines()[0]
original = line
best = len(line)
for j in range(0,26):
line = original
line = line.replace(chr(ord("a") + j),"")
line = line.replace(chr(ord("A") + j),"")
oldline = None
while oldline != line:
oldline = line
for i in range(0,26):
line = line.replace(chr(ord("a") + i) + chr(ord("A") + i),"")
line = line.replace(chr(ord("A") + i) + chr(ord("a") + i),"")
best = len(line) if len(line) < best else best
print("Part2:")
print(best)
#part_1(match_case)
part_2()
``` |
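As a sanity check of the pair-elimination approach used in `part_1b` and `part_2`, the example polymer from the puzzle statement reduces to 10 units:
```python
line = "dabAcCaCBAcCcaDA"   # example polymer from the Day 5 puzzle text
old = None
while old != line:
    old = line
    for i in range(0, 26):
        line = line.replace(chr(ord("a") + i) + chr(ord("A") + i), "")
        line = line.replace(chr(ord("A") + i) + chr(ord("a") + i), "")
print(len(line))  # 10 ("dabCBAcaDA")
```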
{
"source": "jodom961/sparkmagic",
"score": 2
} |
#### File: sparkmagic/kernels/kernelmagics.py
```python
from __future__ import print_function
import json
from IPython.core.magic import magics_class
from IPython.core.magic import needs_local_scope, cell_magic, line_magic
from IPython.core.magic_arguments import argument, magic_arguments
from hdijupyterutils.utils import generate_uuid
import importlib
import sparkmagic.utils.configuration as conf
from sparkmagic.utils.configuration import get_livy_kind
from sparkmagic.utils import constants
from sparkmagic.utils.utils import parse_argstring_or_throw, get_coerce_value, initialize_auth, Namespace
from sparkmagic.utils.sparkevents import SparkEvents
from sparkmagic.utils.constants import LANGS_SUPPORTED
from sparkmagic.livyclientlib.command import Command
from sparkmagic.livyclientlib.endpoint import Endpoint
from sparkmagic.magics.sparkmagicsbase import SparkMagicBase
from sparkmagic.livyclientlib.exceptions import handle_expected_exceptions, wrap_unexpected_exceptions, \
BadUserDataException
def _event(f):
def wrapped(self, *args, **kwargs):
guid = self._generate_uuid()
self._spark_events.emit_magic_execution_start_event(f.__name__, get_livy_kind(self.language), guid)
try:
result = f(self, *args, **kwargs)
except Exception as e:
self._spark_events.emit_magic_execution_end_event(f.__name__, get_livy_kind(self.language), guid,
False, e.__class__.__name__, str(e))
raise
else:
self._spark_events.emit_magic_execution_end_event(f.__name__, get_livy_kind(self.language), guid,
True, u"", u"")
return result
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
@magics_class
class KernelMagics(SparkMagicBase):
def __init__(self, shell, data=None, spark_events=None):
# You must call the parent constructor
super(KernelMagics, self).__init__(shell, data)
self.session_name = u"session_name"
self.session_started = False
# In order to set these following 3 properties, call %%_do_not_call_change_language -l language
self.language = u""
self.endpoint = None
self.fatal_error = False
self.fatal_error_message = u""
if spark_events is None:
spark_events = SparkEvents()
self._spark_events = spark_events
@magic_arguments()
@cell_magic
@wrap_unexpected_exceptions
@handle_expected_exceptions
@_event
def help(self, line, cell="", local_ns=None):
parse_argstring_or_throw(self.help, line)
self._assure_cell_body_is_empty(KernelMagics.help.__name__, cell)
help_html = u"""
<table>
<tr>
<th>Magic</th>
<th>Example</th>
<th>Explanation</th>
</tr>
<tr>
<td>info</td>
<td>%%info</td>
<td>Outputs session information for the current Livy endpoint.</td>
</tr>
<tr>
<td>cleanup</td>
<td>%%cleanup -f</td>
<td>Deletes all sessions for the current Livy endpoint, including this notebook's session. The force flag is mandatory.</td>
</tr>
<tr>
<td>delete</td>
<td>%%delete -f -s 0</td>
<td>Deletes a session by number for the current Livy endpoint. Cannot delete this kernel's session.</td>
</tr>
<tr>
<td>logs</td>
<td>%%logs</td>
<td>Outputs the current session's Livy logs.</td>
</tr>
<tr>
<td>configure</td>
<td>%%configure -f<br/>{"executorMemory": "1000M", "executorCores": 4}</td>
<td>Configure the session creation parameters. The force flag is mandatory if a session has already been
created and the session will be dropped and recreated.<br/>Look at <a href="https://github.com/cloudera/livy#request-body">
Livy's POST /sessions Request Body</a> for a list of valid parameters. Parameters must be passed in as a JSON string.</td>
</tr>
<tr>
<td>spark</td>
<td>%%spark -o df<br/>df = spark.read.parquet('...</td>
<td>Executes spark commands.
Parameters:
<ul>
<li>-o VAR_NAME: The Spark dataframe of name VAR_NAME will be available in the %%local Python context as a
<a href="http://pandas.pydata.org/">Pandas</a> dataframe with the same name.</li>
<li>-m METHOD: Sample method, either <tt>take</tt> or <tt>sample</tt>.</li>
<li>-n MAXROWS: The maximum number of rows of a dataframe that will be pulled from Livy to Jupyter.
If this number is negative, then the number of rows will be unlimited.</li>
<li>-r FRACTION: Fraction used for sampling.</li>
</ul>
</td>
</tr>
<tr>
<td>sql</td>
<td>%%sql -o tables -q<br/>SHOW TABLES</td>
<td>Executes a SQL query against the variable sqlContext (Spark v1.x) or spark (Spark v2.x).
Parameters:
<ul>
<li>-o VAR_NAME: The result of the SQL query will be available in the %%local Python context as a
<a href="http://pandas.pydata.org/">Pandas</a> dataframe.</li>
<li>-q: The magic will return None instead of the dataframe (no visualization).</li>
<li>-m, -n, -r are the same as the %%spark parameters above.</li>
</ul>
</td>
</tr>
<tr>
<td>local</td>
<td>%%local<br/>a = 1</td>
<td>All the code in subsequent lines will be executed locally. Code must be valid Python code.</td>
</tr>
<tr>
<td>send_to_spark</td>
<td>%%send_to_spark -i variable -t str -n var</td>
<td>Sends a variable from local output to spark cluster.
<br/>
Parameters:
<ul>
<li>-i VAR_NAME: Local Pandas DataFrame(or String) of name VAR_NAME will be available in the %%spark context as a
Spark dataframe(or String) with the same name.</li>
<li>-t TYPE: Specifies the type of variable passed as -i. Available options are:
`str` for string and `df` for Pandas DataFrame. Optional, defaults to `str`.</li>
<li>-n NAME: Custom name of variable passed as -i. Optional, defaults to -i variable name.</li>
<li>-m MAXROWS: Maximum amount of Pandas rows that will be sent to Spark. Defaults to 2500.</li>
</ul>
</td>
</tr>
</table>
"""
self.ipython_display.html(help_html)
@cell_magic
def local(self, line, cell=u"", local_ns=None):
# This should not be reachable thanks to UserCodeParser. Registering it here so that it auto-completes with tab.
raise NotImplementedError(u"UserCodeParser should have prevented code execution from reaching here.")
@magic_arguments()
@argument("-i", "--input", type=str, default=None, help="If present, indicated variable will be stored in variable"
" in Spark's context.")
@argument("-t", "--vartype", type=str, default='str', help="Optionally specify the type of input variable. "
"Available: 'str' - string(default) or 'df' - Pandas DataFrame")
@argument("-n", "--varname", type=str, default=None, help="Optionally specify the custom name for the input variable.")
@argument("-m", "--maxrows", type=int, default=2500, help="Maximum number of rows that will be pulled back "
"from the local dataframe")
@cell_magic
@needs_local_scope
@wrap_unexpected_exceptions
@handle_expected_exceptions
def send_to_spark(self, line, cell=u"", local_ns=None):
self._assure_cell_body_is_empty(KernelMagics.send_to_spark.__name__, cell)
args = parse_argstring_or_throw(self.send_to_spark, line)
if not args.input:
raise BadUserDataException("-i param not provided.")
if self._do_not_call_start_session(""):
self.do_send_to_spark(cell, args.input, args.vartype, args.varname, args.maxrows, None)
else:
return
@magic_arguments()
@cell_magic
@wrap_unexpected_exceptions
@handle_expected_exceptions
@_event
def info(self, line, cell=u"", local_ns=None):
parse_argstring_or_throw(self.info, line)
self._assure_cell_body_is_empty(KernelMagics.info.__name__, cell)
if self.session_started:
current_session_id = self.spark_controller.get_session_id_for_client(self.session_name)
else:
current_session_id = None
self.ipython_display.html(u"Current session configs: <tt>{}</tt><br>".format(conf.get_session_properties(self.language)))
info_sessions = self.spark_controller.get_all_sessions_endpoint(self.endpoint)
self._print_endpoint_info(info_sessions, current_session_id)
@magic_arguments()
@cell_magic
@wrap_unexpected_exceptions
@handle_expected_exceptions
@_event
def logs(self, line, cell="", local_ns=None):
parse_argstring_or_throw(self.logs, line)
self._assure_cell_body_is_empty(KernelMagics.logs.__name__, cell)
if self.session_started:
out = self.spark_controller.get_logs()
self.ipython_display.write(out)
else:
self.ipython_display.write(u"No logs yet.")
@magic_arguments()
@cell_magic
@argument("-f", "--force", type=bool, default=False, nargs="?", const=True, help="If present, user understands.")
@wrap_unexpected_exceptions
@handle_expected_exceptions
@_event
def configure(self, line, cell="", local_ns=None):
try:
dictionary = json.loads(cell)
except ValueError:
self.ipython_display.send_error(u"Could not parse JSON object from input '{}'".format(cell))
return
args = parse_argstring_or_throw(self.configure, line)
if self.session_started:
if not args.force:
self.ipython_display.send_error(u"A session has already been started. If you intend to recreate the "
u"session with new configurations, please include the -f argument.")
return
else:
self._do_not_call_delete_session(u"")
self._override_session_settings(dictionary)
self._do_not_call_start_session(u"")
else:
self._override_session_settings(dictionary)
self.info(u"")
@magic_arguments()
@cell_magic
@needs_local_scope
@argument("-o", "--output", type=str, default=None, help="If present, indicated variable will be stored in variable"
"of this name in user's local context.")
@argument("-m", "--samplemethod", type=str, default=None, help="Sample method for dataframe: either take or sample")
@argument("-n", "--maxrows", type=int, default=None, help="Maximum number of rows that will be pulled back "
"from the dataframe on the server for storing")
@argument("-r", "--samplefraction", type=float, default=None, help="Sample fraction for sampling from dataframe")
@argument("-c", "--coerce", type=str, default=None, help="Whether to automatically coerce the types (default, pass True if being explicit) "
"of the dataframe or not (pass False)")
@wrap_unexpected_exceptions
@handle_expected_exceptions
def spark(self, line, cell="", local_ns=None):
if self._do_not_call_start_session(u""):
args = parse_argstring_or_throw(self.spark, line)
coerce = get_coerce_value(args.coerce)
self.execute_spark(cell, args.output, args.samplemethod, args.maxrows, args.samplefraction, None, coerce)
else:
return
@magic_arguments()
@cell_magic
@needs_local_scope
@argument("-o", "--output", type=str, default=None, help="If present, query will be stored in variable of this "
"name.")
@argument("-q", "--quiet", type=bool, default=False, const=True, nargs="?", help="Return None instead of the dataframe.")
@argument("-m", "--samplemethod", type=str, default=None, help="Sample method for SQL queries: either take or sample")
@argument("-n", "--maxrows", type=int, default=None, help="Maximum number of rows that will be pulled back "
"from the server for SQL queries")
@argument("-r", "--samplefraction", type=float, default=None, help="Sample fraction for sampling from SQL queries")
@argument("-c", "--coerce", type=str, default=None, help="Whether to automatically coerce the types (default, pass True if being explicit) "
"of the dataframe or not (pass False)")
@wrap_unexpected_exceptions
@handle_expected_exceptions
def sql(self, line, cell="", local_ns=None):
if self._do_not_call_start_session(""):
args = parse_argstring_or_throw(self.sql, line)
coerce = get_coerce_value(args.coerce)
return self.execute_sqlquery(cell, args.samplemethod, args.maxrows, args.samplefraction,
None, args.output, args.quiet, coerce)
else:
return
@magic_arguments()
@cell_magic
@argument("-f", "--force", type=bool, default=False, nargs="?", const=True, help="If present, user understands.")
@wrap_unexpected_exceptions
@handle_expected_exceptions
@_event
def cleanup(self, line, cell="", local_ns=None):
self._assure_cell_body_is_empty(KernelMagics.cleanup.__name__, cell)
args = parse_argstring_or_throw(self.cleanup, line)
if args.force:
self._do_not_call_delete_session(u"")
self.spark_controller.cleanup_endpoint(self.endpoint)
else:
self.ipython_display.send_error(u"When you clean up the endpoint, all sessions will be lost, including the "
u"one used for this notebook. Include the -f parameter if that's your "
u"intention.")
return
@magic_arguments()
@cell_magic
@argument("-f", "--force", type=bool, default=False, nargs="?", const=True, help="If present, user understands.")
@argument("-s", "--session", type=int, help="Session id number to delete.")
@wrap_unexpected_exceptions
@handle_expected_exceptions
@_event
def delete(self, line, cell="", local_ns=None):
self._assure_cell_body_is_empty(KernelMagics.delete.__name__, cell)
args = parse_argstring_or_throw(self.delete, line)
session = args.session
if args.session is None:
self.ipython_display.send_error(u'You must provide a session ID (-s argument).')
return
if args.force:
id = self.spark_controller.get_session_id_for_client(self.session_name)
if session == id:
self.ipython_display.send_error(u"Cannot delete this kernel's session ({}). Specify a different session,"
u" shutdown the kernel to delete this session, or run %cleanup to "
u"delete all sessions for this endpoint.".format(id))
return
self.spark_controller.delete_session_by_id(self.endpoint, session)
else:
self.ipython_display.send_error(u"Include the -f parameter if you understand that all statements executed "
u"in this session will be lost.")
@cell_magic
def _do_not_call_start_session(self, line, cell="", local_ns=None):
# Starts a session unless session is already created or a fatal error occurred. Returns True when session is
# created successfully.
# No need to add the handle_expected_exceptions decorator to this since we manually catch all
# exceptions when starting the session.
if self.fatal_error:
self.ipython_display.send_error(self.fatal_error_message)
return False
if not self.session_started:
skip = False
properties = conf.get_session_properties(self.language)
self.session_started = True
try:
self.spark_controller.add_session(self.session_name, self.endpoint, skip, properties)
except Exception as e:
self.fatal_error = True
self.fatal_error_message = conf.fatal_error_suggestion().format(e)
self.logger.error(u"Error creating session: {}".format(e))
self.ipython_display.send_error(self.fatal_error_message)
if conf.all_errors_are_fatal():
raise e
return False
return self.session_started
@cell_magic
@handle_expected_exceptions
def _do_not_call_delete_session(self, line, cell="", local_ns=None):
try:
if self.session_started:
self.spark_controller.delete_session_by_name(self.session_name)
except:
# The exception will be logged and handled in the frontend.
raise
finally:
self.session_started = False
@magic_arguments()
@cell_magic
@argument("-l", "--language", type=str, help="Language to use.")
def _do_not_call_change_language(self, line, cell="", local_ns=None):
args = parse_argstring_or_throw(self._do_not_call_change_language, line)
language = args.language.lower()
if language not in LANGS_SUPPORTED:
self.ipython_display.send_error(u"'{}' language not supported in kernel magics.".format(language))
return
if self.session_started:
self.ipython_display.send_error(u"Cannot change the language if a session has been started.")
return
self.language = language
self.refresh_configuration()
@magic_arguments()
@line_magic
@argument("-u", "--username", dest='user', type=str, help="Username to use.")
@argument("-p", "--password", type=str, help="Password to use.")
@argument("-s", "--server", dest='url', type=str, help="Url of server to use.")
@argument("-t", "--auth", type=str, help="Auth type for authentication")
@_event
def _do_not_call_change_endpoint(self, line, cell="", local_ns=None):
args = parse_argstring_or_throw(self._do_not_call_change_endpoint, line)
if self.session_started:
error = u"Cannot change the endpoint if a session has been started."
raise BadUserDataException(error)
auth = initialize_auth(args=args)
self.endpoint = Endpoint(args.url, auth)
@line_magic
def matplot(self, line, cell="", local_ns=None):
session = self.spark_controller.get_session_by_name_or_default(self.session_name)
command = Command("%matplot " + line)
(success, out, mimetype) = command.execute(session)
if success:
session.ipython_display.display(out)
else:
session.ipython_display.send_error(out)
def refresh_configuration(self):
credentials = getattr(conf, 'base64_kernel_' + self.language + '_credentials')()
(username, password, auth, url) = (credentials['username'], credentials['password'], credentials['auth'], credentials['url'])
args = Namespace(auth=auth, user=username, password=password, url=url)
auth_instance = initialize_auth(args)
self.endpoint = Endpoint(url, auth_instance)
def get_session_settings(self, line, force):
line = line.strip()
if not force:
return line
else:
if line.startswith("-f "):
return line[3:]
elif line.endswith(" -f"):
return line[:-3]
else:
return None
@staticmethod
def _override_session_settings(settings):
conf.override(conf.session_configs.__name__, settings)
@staticmethod
def _generate_uuid():
return generate_uuid()
@staticmethod
def _assure_cell_body_is_empty(magic_name, cell):
if cell.strip():
raise BadUserDataException(u"Cell body for %%{} magic must be empty; got '{}' instead"
.format(magic_name, cell.strip()))
def load_ipython_extension(ip):
ip.register_magics(KernelMagics)
``` |
{
"source": "j-o-d-o/MLPipe-Trainer",
"score": 3
} |
#### File: examples/cifar10/processor.py
```python
import numpy as np
import cv2
from mlpipe.processors.i_processor import IPreProcessor
class PreProcessData(IPreProcessor):
def process(self, raw_data, input_data, ground_truth, piped_params=None):
ground_truth = np.zeros(10)
ground_truth[raw_data["label"]] = 1.0
png_binary = raw_data["img"]
png_img = np.frombuffer(png_binary, np.uint8)
input_data = cv2.imdecode(png_img, cv2.IMREAD_COLOR)
return raw_data, input_data, ground_truth, piped_params
```
#### File: mlpipe/callbacks/update_manager.py
```python
from mlpipe.utils import Config
from mlpipe.callbacks.fill_training import FillTraining
from mlpipe.utils.api_endpoints import create_training, update_weights, update_training, test_connection
from tensorflow.keras.models import Model
import os
class UpdateManager(FillTraining):
"""
Callback to update the MLPipe - Manager via its API
"""
def __init__(
self,
name: str,
keras_model: Model,
epochs: int,
batches_per_epoch: int,
save_initial_weights: bool=True,
update_frequency: int=1,
epoch_save_condition=None):
"""
:param name: name of the training as string
:param keras_model: keras model that should be saved to the training
:param epochs: integer of how many epochs the model is trained
:param batches_per_epoch: integer on how many batches per epoch are trained
:param save_initial_weights: boolean to determine if weights should be saved initially before training,
default = True
:param update_frequency: defines how often data is sent to the server. In case there are many many batches
it is wise to increase this number. It still sends all info to the server, just
not every single batch.
:param epoch_save_condition: a function to test if the weights of the model should be saved after the epoch,
the function takes an TrainingSchema as argument. It defaults to None which
will save the weights after each epoch
"""
super().__init__(name, keras_model, epochs, batches_per_epoch)
self._epoch_save_condition = epoch_save_condition
self._save_initial_weights = save_initial_weights
self._update_frequency = update_frequency
self._training_mongodb_id = None
def _create_training(self):
if Config.get_job_token() is None or Config.get_job_token() == "":
print("Job Token is not set, therefore no data is transmitted, but the server connection is tested...")
test_res = test_connection()
if test_res.status_code == 200:
print("Connection test: Success")
else:
raise ConnectionError("Connection test: Error, unable to connect!")
else:
json_resp = self._handle_response(create_training(self._training))
if "_id" in json_resp:
self._training_mongodb_id = json_resp["_id"]
def on_train_begin(self, logs=None):
super().on_train_begin(logs)
self._create_training()
if self._training_mongodb_id is not None and self._save_initial_weights:
self._update_weights()
def on_batch_end(self, batch, logs=None):
super().on_batch_end(batch, logs)
should_update = (batch % self._update_frequency) == 0
if self._training_mongodb_id is not None and should_update:
self._handle_response(update_training(self._training_mongodb_id, self._training))
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs)
if self._training_mongodb_id is not None:
self._handle_response(update_training(self._training_mongodb_id, self._training))
if self._epoch_save_condition is None or self._epoch_save_condition(self._training):
self._update_weights()
def on_train_end(self, logs=None):
super().on_train_end(logs)
if self._training_mongodb_id is not None:
self._handle_response(update_training(self._training_mongodb_id, self._training))
def _update_weights(self):
""" save file and update weights, remove tmp h5 file afterwards """
tmp_filename = "tmp_model_weights_save.h5"
if self._training_mongodb_id is not None and self._training.result.model is not None:
self._training.result.model.save(tmp_filename)
self._handle_response(update_weights(
self._training_mongodb_id,
self._training.result.curr_epoch,
self._training.result.curr_batch,
tmp_filename
))
os.remove(tmp_filename)
@staticmethod
def _handle_response(res):
"""
Check for errors and return json response in case of 200 HTTP response code
:param res: response from a HTTP request
:return: dict of json response
"""
if res.status_code != 200:
if res.status_code == 404:
raise ValueError("HTTP Response 404 (Not Found): " + res.text)
if res.status_code == 403:
raise ValueError("HTTP Response 403 (Forbidden): " + res.text)
if res.status_code == 401:
raise PermissionError("HTTP Response 401 (Not Authorized): " + res.text)
if res.status_code == 400:
raise ValueError("HTTP Response 400 (Validation Error): " + res.text)
else:
raise RuntimeError("Unkown HTTP Error: " + res.text)
return res.json()
```
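A minimal sketch of attaching the callback to a Keras training run; it assumes `mlpipe` is installed, the manager endpoint and job token are configured via `Config`, and it uses a throwaway model and random data.
```python
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from mlpipe.callbacks.update_manager import UpdateManager

model = Sequential([Dense(10, activation="softmax", input_shape=(32,))])
model.compile(optimizer="adam", loss="categorical_crossentropy")

x = np.random.rand(256, 32)
y = np.eye(10)[np.random.randint(0, 10, 256)]

update_cb = UpdateManager(
    name="demo-training",
    keras_model=model,
    epochs=3,
    batches_per_epoch=int(np.ceil(len(x) / 32)),
    update_frequency=4,  # push training state to the manager API only every 4th batch
)
model.fit(x, y, batch_size=32, epochs=3, callbacks=[update_cb])
```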
#### File: data_reader/mongodb/data_loader.py
```python
from typing import Tuple
from random import shuffle
import numpy as np
from mlpipe.utils import MLPipeLogger, Config
from mlpipe.data_reader.mongodb import MongoDBConnect
def load_ids(
col_details: Tuple[str, str, str],
data_split: Tuple = (60, 40),
sort_by: dict = None,
limit: int = None,
shuffle_data: bool = False,
shuffle_steps: int = 1):
"""
Load MongoDB Document Ids from a collection and split them in training and validation data set
:param col_details: MongoDB collection details with a tuple of 3 string entries
[client name (from config), database name, collection name]
:param data_split: Tuple of percentage of training and test data e.g. (60, 40) for 60% training and 40% test data
:param sort_by: MongoDB sort expression. e.g. { created_at: -1 }
:param limit: maximum number of ids that should be fetched
:param shuffle_data: determine if dataset should be shuffled before splitting it to train and validation data
    :param shuffle_steps: step size for the shuffling (e.g. for time series you want a shuffle_steps of
                          BATCH_SIZE + (TIME_STEPS - 1))
:return: training and validation data
"""
MLPipeLogger.logger.info("Loading Document IDs from MongoDB")
mongo_con = MongoDBConnect()
mongo_con.add_connections_from_config(Config.get_config_parser())
collection = mongo_con.get_collection(*col_details)
if sort_by is None:
sort_by = {"_id": 1}
    # sort_by is a sort expression, so apply it with sort(); only the _id field is needed
    db_cursor = collection.find({}, {"_id": 1}).sort(list(sort_by.items()))
if limit:
db_cursor.limit(limit)
tmp_docs = []
for doc in db_cursor:
tmp_docs.append(doc["_id"])
if shuffle_data:
if shuffle_steps == 1:
shuffle(tmp_docs)
else:
            # when reshaping, tmp_docs must be a multiple of shuffle_steps, so cut the ids that do not fit
overflow = len(tmp_docs) % shuffle_steps
tmp_docs = tmp_docs[:len(tmp_docs) - overflow]
x = np.reshape(tmp_docs, (-1, shuffle_steps))
np.random.shuffle(x)
tmp_docs = x.flatten().tolist()
train_range = int((data_split[0] / 100) * len(tmp_docs))
train_data = tmp_docs[:train_range]
val_data = tmp_docs[train_range:]
MLPipeLogger.logger.info("Documents loaded (train|validation): {0} | {1}\n\n".format(
len(train_data), len(val_data)))
return train_data, val_data
```
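A usage sketch, assuming a MongoDB connection named `localhost_mongo_db` is defined in the config file read by `Config`; the database and collection names are placeholders.
```python
# 80/20 split, shuffled in blocks sized for a time-series generator (BATCH_SIZE=32, TIME_STEPS=4)
train_ids, val_ids = load_ids(
    col_details=("localhost_mongo_db", "semseg", "train_data"),
    data_split=(80, 20),
    shuffle_data=True,
    shuffle_steps=32 + (4 - 1),
)
```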
#### File: data_reader/mongodb/mongodb_connect.py
```python
from pymongo import MongoClient
from pymongo.database import Database
from pymongo.collection import Collection
import urllib.parse
from typing import NamedTuple, List, NoReturn
from configparser import ConfigParser
import configparser
class MongoDBConnectionConfig(NamedTuple):
name: str
url: str
port: int
user: str = None
pwd: str = None
client: MongoClient = None
class MongoDBConnect:
"""
Class to store and manage mongoDB connections
"""
def __init__(self):
self._connections: List[MongoDBConnectionConfig] = []
def add_connections_from_file(self, file_name) -> NoReturn:
"""
Add connections from config file
:param file_name: path to config file
"""
cp = configparser.ConfigParser()
if len(cp.read(file_name)) == 0:
raise ValueError("Config File could not be loaded, please check the correct path!")
self.add_connections_from_config(cp)
def add_connection(self, config: MongoDBConnectionConfig) -> NoReturn:
"""
Adds a MongoDBConnectionConfig to the connection dict
:param config: config that should be added of type MongoDBConnectionConfig
"""
self._connections.append(config)
def add_connections_from_config(self, config_parser: ConfigParser) -> NoReturn:
"""
Takes a parsed .ini file as argument and adds all connections with type=MongoDB,
Each section (= name) must have url, port and can have pwd and user
:param config_parser: A ConfigParser of a .ini file
"""
for key in config_parser.sections():
if "db_type" in config_parser[key] and config_parser[key]["db_type"] == "MongoDB":
self.add_connection(MongoDBConnectionConfig(
name=key,
url=config_parser[key]["url"],
port=int(config_parser[key]["port"]),
pwd=config_parser[key].get("pwd"),
user=config_parser[key].get("user")
))
def get_client(self, name: str) -> MongoClient:
"""
Get config data by name, connects the client if there is no prior MongoDB connection
:param name: name of the connection config
:return: MongoClient of the connection found for the name
"""
con, i = self.get_connection_by_name(name)
if con.client is None:
con = self.connect_to(name)
return con.client
def get_db(self, name: str, db_name: str) -> Database:
"""
Get mongoDB database by config name and database name,
connects the client if there is no prior MongoDB connection
:param name: name of the connection config
:param db_name: name of the database
:return: MongoDB database for the specified parameters
"""
client = self.get_client(name)
return client[db_name]
def get_collection(self, name: str, db_name: str, collection: str) -> Collection:
"""
Get collection, connects the client if there is no prior MongoDB connection
:param name: name of the connection config
:param db_name: name of the database
:param collection: name of the collection
:return: MongoDB collection for the specified parameters
"""
db = self.get_db(name, db_name)
return db[collection]
def connect_to(self, name: str) -> MongoDBConnectionConfig:
"""
Connect to connection which was previously added by its name
:param name: Key of the connection config as string
        :return: The updated MongoDBConnectionConfig with the connected MongoClient attached
"""
con, i = self.get_connection_by_name(name)
host = con.url + ":" + str(con.port)
if con.user is not None and con.pwd is not None:
user = urllib.parse.quote_plus(con.user)
pwd = urllib.parse.quote_plus(con.pwd)
con_string = 'mongodb://%s:%s@%s' % (user, pwd, host)
else:
con_string = 'mongodb://%s' % host
db_client = MongoClient(con_string)
new_con = con._replace(client=db_client)
self._connections[i] = new_con
return new_con
def close_connection(self, name: str) -> NoReturn:
"""
Close a single connection based on its config name
:param name: name of the connection config
"""
con, i = self.get_connection_by_name(name)
        if con.client is not None:
            con.client.close()
            # MongoDBConnectionConfig is an immutable NamedTuple, so store a replaced copy instead of mutating it
            self._connections[i] = con._replace(client=None)
def remove_connection(self, name: str) -> NoReturn:
"""
Remove a connection config by its name, closes the connection before
:param name: name of the connection config
"""
self.close_connection(name)
con, i = self.get_connection_by_name(name)
del self._connections[i]
def get_connection_by_name(self, name: str) -> (MongoDBConnectionConfig, int):
"""
Get connection config by its name, does not connect the client in the process!
        If the connection name does not exist, a ValueError is raised
        :param name: name of the connection config
        :return: tuple of the matching MongoDBConnectionConfig and its index in the internal connection list
"""
for i, con in enumerate(self._connections):
if con.name == name:
return con, i
raise ValueError(name + ": Connection does not exist!")
def reset_connections(self) -> NoReturn:
""" Close all added connections """
for con in self._connections:
if con.client is not None:
con.client.close()
self._connections = []
```
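A sketch of the expected `.ini` layout and a lookup against a running MongoDB instance; all names and credentials are placeholders.
```python
# connections.ini
#   [localhost_mongo_db]
#   db_type = MongoDB
#   url = localhost
#   port = 27017
#   user = admin
#   pwd = secret

con = MongoDBConnect()
con.add_connections_from_file("connections.ini")
collection = con.get_collection("localhost_mongo_db", "semseg", "train_data")  # client connects lazily here
print(collection.estimated_document_count())
con.reset_connections()
```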
#### File: data_reader/mongodb/mongodb_generator.py
```python
import math
import numpy as np
from typing import List, Tuple
from random import shuffle
from mlpipe.utils import Config, MLPipeLogger
from mlpipe.data_reader.base_data_generator import BaseDataGenerator
from mlpipe.data_reader.mongodb import MongoDBConnect
from mlpipe.data_reader.i_cache import ICache
class MongoDBGenerator(BaseDataGenerator):
def __init__(self,
col_details: Tuple[str, str, str],
doc_ids: List[any] = list(),
batch_size: int = 32,
processors: List[any] = list(),
cache: ICache = None,
shuffle_data: bool = True,
data_group_size: int = 1,
fix_batch_size: bool = False):
"""
:param col_details: MongoDB collection details with a tuple of 3 string entries
[client name (from config), database name, collection name]
:param doc_ids: List of doc ids which are used to get the specific data from the MongoDB
:param batch_size: number of batch size
:param processors: List of MLPipe data processors
:param cache: Passing instance of a cache e.g. RedisCache, if it is None, no caching is used.
Only possible if redis is locally available (not installed with mlpipe)
:param shuffle_data: bool flag to determine if set should be shuffled after epoch is done
:param data_group_size: number of steps that should be grouped e.g for time series. The data will still only
move forward one time step. E.g. for data_group_size=3:
                                [t-5, t-4, t-3], [t-4, t-3, t-2], [t-3, t-2, t-1], etc.
data will not be shuffled in case data_group_size > 1
:param fix_batch_size: if true batch size will always be the same, e.g. if batch_size=64 and there are only 63
datasamples left for the final batch, these 63 data points will be ignored. In case the
batch size of your model is fixed, set this to True.
"""
assert (len(col_details) == 3)
super().__init__(batch_size, processors)
self.doc_ids = doc_ids
self.cache = cache
self.shuffle_data = shuffle_data
self.data_group_size = max(data_group_size, 1)
self.docs_per_batch = self.batch_size + (self.data_group_size - 1)
self.col_details = col_details
self.fix_batch_size = fix_batch_size
self.collection = None
self.mongo_con = MongoDBConnect()
self.mongo_con.add_connections_from_config(Config.get_config_parser())
# in case a step_size is chosen > 1, make sure that len(doc_ids) is a multiple of that
        # otherwise the reshape will not work and will throw errors
if self.data_group_size > 1:
overflow = len(self.doc_ids) % self.docs_per_batch
self.doc_ids = self.doc_ids[:len(self.doc_ids) - overflow]
def _fetch_data(self, query_docs: list) -> List[any]:
"""
Get a set of _ids from the database (in order)
:param query_docs: A list of _ids
:return: A pymongo cursor
"""
# to ensure the order of query_docs, use this method. For more details look at this stackoverflow question:
# https://stackoverflow.com/questions/22797768/does-mongodbs-in-clause-guarantee-order/22800784#22800784
query = [
{"$match": {"_id": {"$in": query_docs}}},
{"$addFields": {"__order": {"$indexOfArray": [query_docs, "$_id"]}}},
{"$sort": {"__order": 1}}
]
docs = self.collection.aggregate(query)
return docs
def __len__(self) -> int:
"""
:return: Number of batches per epoch
"""
if self.fix_batch_size:
num_batches = int(math.floor(len(self.doc_ids) / self.docs_per_batch))
else:
num_batches = int(math.ceil(len(self.doc_ids) / self.docs_per_batch))
return num_batches
def __getitem__(self, idx):
"""
Get batch data
:param idx: current idx in the doc_ids list
:return: arrays for traning_data (x) and labels (y)
"""
        # Connection should always be established on the first __getitem__ call to support multiprocessing:
# every fork needs to have its own database connection
if self.collection is None:
self.collection = self.mongo_con.get_collection(*self.col_details)
batch_ids = self.doc_ids[idx * self.docs_per_batch:(idx + 1) * self.docs_per_batch]
if self.cache is not None and self.cache.exist(batch_ids):
docs = self.cache.get(batch_ids)
else:
docs = self._fetch_data(batch_ids)
if self.cache is not None:
# save fetched data to cache
for doc in docs:
success = self.cache.set(str(doc["_id"]), doc)
if not success:
MLPipeLogger.logger.warning("Redis cache is full")
break
# Create new command cursor since the original one is finished after looping once
docs = self._fetch_data(batch_ids)
if self.data_group_size > 1:
# reshape to fit step_size and copy data
# since docs is a cursor, save in a temporary list
tmp_data = list(docs)
docs = []
start_idx = 0
end_idx = self.data_group_size
while end_idx <= len(tmp_data):
docs.append(tmp_data[start_idx:end_idx])
start_idx += 1
end_idx += 1
batch_x, batch_y = self._process_batch(docs)
input_data = []
if len(batch_x) > 0:
if isinstance(batch_x[0], dict):
# multiple inputs, split them up by name
input_data = {}
for key in batch_x[0]:
input_data[key] = []
# fill dict with data for each key
for batch in batch_x:
for key in batch:
input_data[key].append(np.asarray(batch[key]))
else:
input_data = np.asarray(batch_x)
ground_truth = []
if len(batch_y) > 0:
if isinstance(batch_y[0], dict):
                # multiple outputs, split them up by name
ground_truth = {}
for key in batch_y[0]:
ground_truth[key] = []
# fill dict with data for each key
for batch in batch_y:
for key in batch:
ground_truth[key].append(np.asarray(batch[key]))
else:
ground_truth = np.asarray(batch_y)
return input_data, ground_truth
def on_epoch_end(self):
"""
Called after each epoch
"""
if self.shuffle_data:
if self.data_group_size == 1:
shuffle(self.doc_ids)
else:
# we made sure that len(self.doc_ids) is a multiple of self.docs_per_batch in the constructor
x = np.reshape(self.doc_ids, (-1, self.docs_per_batch))
np.random.shuffle(x)
self.doc_ids = x.flatten().tolist()
```
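A sketch wiring the id loader and the generator together for a Keras `fit` call; the connection/database/collection names, `PreProcessData`, and `model` are assumed to exist elsewhere.
```python
col = ("localhost_mongo_db", "semseg", "train_data")
train_ids, val_ids = load_ids(col, data_split=(80, 20), shuffle_data=True)

train_gen = MongoDBGenerator(col, train_ids, batch_size=32, processors=[PreProcessData()])
val_gen = MongoDBGenerator(col, val_ids, batch_size=32, processors=[PreProcessData()])

model.fit(train_gen, validation_data=val_gen, epochs=10)  # model: a compiled tf.keras Model
```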
#### File: mlpipe/processors/i_processor.py
```python
from abc import ABCMeta, abstractmethod
class IPreProcessor(metaclass=ABCMeta):
@abstractmethod
def process(self, raw_data, input_data, ground_truth, piped_params=None):
"""
One at a time data processor interface
:param raw_data: the raw_data fetched from a data source
:param input_data: the input_data which will be used by the model
:param ground_truth: the ground_truth / labels which will be used by the model
:param piped_params: any additional parameters that need to be piped from processor to processor
        :return: same as the input params, as these will be the input of the next processor in the pipeline
"""
...
return raw_data, input_data, ground_truth, piped_params
```
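A sketch of two chained processors, assuming the generator threads `piped_params` from one processor to the next as the interface suggests; field names such as `raw_data["img"]` are illustrative.
```python
import numpy as np

class DecodeImage(IPreProcessor):
    def process(self, raw_data, input_data, ground_truth, piped_params=None):
        piped_params = piped_params or {}
        piped_params["flip"] = raw_data.get("flip", False)                   # forward a flag to later processors
        input_data = np.asarray(raw_data["img"], dtype=np.float32) / 255.0   # assumes "img" holds a pixel array
        return raw_data, input_data, ground_truth, piped_params

class MaybeFlip(IPreProcessor):
    def process(self, raw_data, input_data, ground_truth, piped_params=None):
        if piped_params and piped_params.get("flip"):
            input_data = input_data[:, ::-1]                                 # horizontal flip of an HxWxC array
        return raw_data, input_data, ground_truth, piped_params
```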
#### File: mlpipe/schemas/result.py
```python
from tensorflow.keras.models import Model
class ResultSchema:
"""
Container class for the results of the model
"""
def __init__(self):
self.metrics = {
"training": {},
"validation": {}
}
self.model = None
self.max_batches_per_epoch: int = None
self.max_epochs: int = None
self.curr_epoch: int = -1 # -1 represents initialization
self.curr_batch: int = 0
def append_to_metric(self, metric_name: str, value: any, phase: str="training", epoch: int=None, batch: int=None):
"""
Append a value to a specific metric
:param metric_name: name of the metric
:param value: value of the metric
:param phase: can be "training" or "validation"
:param epoch: integer to specify the epoch
:param batch: integer to specify the batch
"""
if phase not in self.metrics:
raise ValueError("phase must be any of " + str(self.metrics.keys()))
if epoch is None:
epoch = self.curr_epoch
if batch is None:
batch = self.curr_batch
if metric_name not in self.metrics[phase]:
self.metrics[phase][metric_name] = []
self.metrics[phase][metric_name].append({"value": float(value), "epoch": epoch, "batch": batch})
def update_weights(self, model: Model, curr_epoch: int=None, curr_batch: int=None):
"""
Update weights by updating the model member variable
:param model: keras model
:param curr_epoch: integer to specify the current epoch
:param curr_batch: integer to specify the current batch
:return:
"""
if curr_epoch is not None:
self.curr_epoch = curr_epoch
if curr_batch is not None:
self.curr_batch = curr_batch
self.model = model
def get_dict(self) -> dict:
"""
Return the serialized class data as dict
:return: class data as dict
"""
return {
"curr_epoch": self.curr_epoch,
"curr_batch": self.curr_batch,
"max_batches_per_epoch": self.max_batches_per_epoch,
"max_epochs": self.max_epochs,
"metrics": self.metrics
}
``` |
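A small, self-contained sketch of how a callback fills the container during training:
```python
result = ResultSchema()
result.max_epochs = 10
result.max_batches_per_epoch = 500
result.curr_epoch, result.curr_batch = 0, 100

result.append_to_metric("loss", 0.42)                          # phase defaults to "training"
result.append_to_metric("accuracy", 0.91, phase="validation")

print(result.get_dict()["metrics"]["training"]["loss"])
# [{'value': 0.42, 'epoch': 0, 'batch': 100}]
```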
{
"source": "jodonnell/django-countries-plus",
"score": 2
} |
#### File: django-countries-plus/tests/test_middleware.py
```python
from django.core.handlers.base import BaseHandler
from django.test import RequestFactory, TestCase
from countries_plus.middleware import AddRequestCountryMiddleware
from countries_plus.models import Country
class RequestMock(RequestFactory):
def request(self, **request):
"""Construct a generic request object."""
request = RequestFactory.request(self, **request)
handler = BaseHandler()
handler.load_middleware()
for middleware_method in handler._request_middleware:
if middleware_method(request):
raise Exception("Couldn't create request mock object - "
"request middleware returned a response")
return request
class TestCountryByRequest(TestCase):
def setUp(self):
self.request_without_geoip = RequestMock()
self.request_without_geoip.META = {}
self.request_with_geoip = RequestMock()
self.request_with_geoip.META = {
'GEOIP_HEADER': 'TC',
}
self.middleware = AddRequestCountryMiddleware()
self.default_country = Country.objects.create(
name='DefaultCountry',
iso='US',
iso3='USA',
iso_numeric='1',
)
self.test_country = Country.objects.create(
name='TestCountry',
iso='TC',
iso3='TCO',
iso_numeric='2',
)
def tearDown(self):
Country.objects.all().delete()
def test_country_by_request(self):
# Test with missing/badly formed settings
with self.settings(COUNTRIES_PLUS_COUNTRY_HEADER='', COUNTRIES_PLUS_DEFAULT_ISO=''):
with self.assertRaises(AttributeError):
Country.get_by_request(self.request_without_geoip)
# Test without a default
with self.settings(COUNTRIES_PLUS_COUNTRY_HEADER='GEOIP_HEADER',
COUNTRIES_PLUS_DEFAULT_ISO=''):
self.assertIsNone(Country.get_by_request(self.request_without_geoip))
# Test with a default
with self.settings(COUNTRIES_PLUS_COUNTRY_HEADER='GEOIP_HEADER',
COUNTRIES_PLUS_DEFAULT_ISO='US'):
self.assertEqual(Country.get_by_request(self.request_without_geoip),
self.default_country)
self.assertEqual(Country.get_by_request(self.request_with_geoip), self.test_country)
def test_middleware(self):
with self.settings(COUNTRIES_PLUS_COUNTRY_HEADER='GEOIP_HEADER',
COUNTRIES_PLUS_DEFAULT_ISO='US'):
# This should always return none, and it adds country to the request
self.assertEqual(self.middleware.process_request(self.request_with_geoip), None)
self.assertIsInstance(self.request_with_geoip.country, Country)
``` |
{
"source": "jodonnell/Minesweeper-",
"score": 3
} |
#### File: Minesweeper-/minesweeper/tests.py
```python
from django.test import TestCase
from minesweeper.classes.create_board import CreateBoard, CreateTestBoard
from minesweeper.classes.board import Board
BOARD_COLUMNS = 8
BOARD_ROWS = 8
TOTAL_MINES = 10
class CreateBoardTest(TestCase):
def setUp(self):
self.create_board = CreateBoard(BOARD_ROWS, BOARD_COLUMNS, TOTAL_MINES)
self.board = self.create_board.get_board()
def test_create_board(self):
self.failUnlessEqual(len(self.board), BOARD_ROWS)
self.failUnlessEqual(len(self.board[0]), BOARD_COLUMNS)
def test_created_10_mines(self):
mines = 0
for row in range(BOARD_ROWS):
for column in range(BOARD_COLUMNS):
mines += self.board[row][column]
self.failUnlessEqual(mines, TOTAL_MINES)
class CreateTestBoardTest(TestCase):
def test_testing_board(self):
create_test_board = CreateTestBoard(BOARD_ROWS, BOARD_COLUMNS, TOTAL_MINES)
self.failUnlessEqual(create_test_board.get_board(), self.get_test_board())
def get_test_board(self):
return [[1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
class BoardTest(TestCase):
def setUp(self):
create_test_board = CreateTestBoard(BOARD_ROWS, BOARD_COLUMNS, TOTAL_MINES)
self.board = Board(create_test_board)
def test_click_mine(self):
self.failUnlessEqual(self.board.is_mined(0,0), True, 'There was no mine at 0,0')
        self.failUnlessEqual(self.board.is_mined(1,0), False, 'There was a mine at 1,0')
def test_place_flag(self):
self.board.place_flag(0,0)
self.failUnlessEqual(self.board.flags_left(), TOTAL_MINES - 1, 'A flag was not placed')
self.board.place_flag(0,0)
self.failUnlessEqual(self.board.flags_left(), TOTAL_MINES, 'Flag was not unclicked')
def test_win(self):
for column in range(BOARD_COLUMNS - 1):
self.board.place_flag(0, column)
self.failUnlessEqual(self.board.has_won(), False, 'The player should not have won')
for column in range(5, BOARD_COLUMNS):
self.board.place_flag(1, column)
self.failUnlessEqual(self.board.has_won(), True, 'The player should have won but did not')
def test_get_num_surronding_mines(self):
mines_surronding = self.board.get_num_surronding_mines(2, 4)
self.failUnlessEqual(mines_surronding, 1, 'There should be one bordering mine at 2,4 found: ' + str(mines_surronding))
mines_surronding = self.board.get_num_surronding_mines(1, 0)
self.failUnlessEqual(mines_surronding, 2, 'There should be two bordering mines at 1,0 found: ' + str(mines_surronding))
mines_surronding = self.board.get_num_surronding_mines(7, 7)
self.failUnlessEqual(mines_surronding, 0, 'There should be zero bordering mines at 7,7 found: ' + str(mines_surronding))
def test_get_clear_area(self):
clear_area = self.board.get_clear_area(7,7, [])
NUM_CLEAR_RETURNED = 53
self.failUnlessEqual(len(clear_area), NUM_CLEAR_RETURNED)
``` |
{
"source": "jodsche/recsys2018-1",
"score": 3
} |
#### File: jodsche/recsys2018-1/json_to_dataframe.py
```python
import os
import json
import pandas as pd
os.makedirs('df_data', exist_ok=True)
def create_df_data():
path = 'data/data'
playlist_col = ['collaborative', 'duration_ms', 'modified_at',
'name', 'num_albums', 'num_artists', 'num_edits',
'num_followers', 'num_tracks', 'pid']
tracks_col = ['album_name', 'album_uri', 'artist_name', 'artist_uri',
'duration_ms', 'track_name', 'track_uri']
playlist_test_col = ['name', 'num_holdouts', 'num_samples', 'num_tracks', 'pid']
filenames = os.listdir(path)
data_playlists = []
data_tracks = []
playlists = []
tracks = set()
for filename in filenames: # delete mpd.slice.833000-833999.json
fullpath = os.sep.join((path, filename))
f = open(fullpath)
js = f.read()
f.close()
mpd_slice = json.loads(js)
for playlist in mpd_slice['playlists']:
data_playlists.append([playlist[col] for col in playlist_col])
for track in playlist['tracks']:
playlists.append([playlist['pid'], track['track_uri'], track['pos']])
if track['track_uri'] not in tracks:
data_tracks.append([track[col] for col in tracks_col])
tracks.add(track['track_uri'])
df_playlists_info = pd.DataFrame(data_playlists, columns=playlist_col)
df_playlists_info['collaborative'] = df_playlists_info['collaborative'].map({'false': False, 'true': True})
df_tracks = pd.DataFrame(data_tracks, columns=tracks_col)
df_tracks['tid'] = df_tracks.index
track_uri2tid = df_tracks.set_index('track_uri').tid
df_playlists = pd.DataFrame(playlists, columns=['pid', 'tid', 'pos'])
df_playlists.tid = df_playlists.tid.map(track_uri2tid)
df_tracks.to_hdf('df_data/df_tracks.hdf', key='abc')
df_playlists.to_hdf('df_data/df_playlists.hdf', key='abc')
df_playlists_info.to_hdf('df_data/df_playlists_info.hdf', key='abc')
f = open('data/challenge_set.json')
js = f.read()
f.close()
mpd_slice = json.loads(js)
data_playlists_test = []
playlists_test = []
for playlist in mpd_slice['playlists']:
data_playlists_test.append([playlist.get(col, '') for col in playlist_test_col])
for track in playlist['tracks']:
playlists_test.append([playlist['pid'], track['track_uri'], track['pos']])
if track['track_uri'] not in tracks:
data_tracks.append([track[col] for col in tracks_col])
tracks.add(track['track_uri'])
df_playlists_test_info = pd.DataFrame(data_playlists_test, columns=playlist_test_col)
df_playlists_test = pd.DataFrame(playlists_test, columns=['pid', 'tid', 'pos'])
df_playlists_test.tid = df_playlists_test.tid.map(track_uri2tid)
df_playlists_test.to_hdf('df_data/df_playlists_test.hdf', key='abc')
df_playlists_test_info.to_hdf('df_data/df_playlists_test_info.hdf', key='abc')
create_df_data()
``` |
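A sketch of reloading the generated frames and joining track metadata onto the playlist rows (each file was written with a single key, so `read_hdf` needs no key argument):
```python
import pandas as pd

df_tracks = pd.read_hdf('df_data/df_tracks.hdf')
df_playlists = pd.read_hdf('df_data/df_playlists.hdf')

df = df_playlists.merge(df_tracks[['tid', 'track_name', 'artist_name']], on='tid')
print(df.head())
```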
{
"source": "jodumagpi/tts_interface",
"score": 2
} |
#### File: tts_interface/dictionary/main.py
```python
from dataset import Dataset
from utils import create_dir, get_path
import configs as cfg
def main():
    savedir = create_dir(cfg.savedir)
savepath = create_dir(savedir, "wavs_with_lab")
savepath_wavs = create_dir(savedir, "wavs")
metadata_savepath = get_path(savedir)
grapheme_dict_savepath = get_path(savedir, cfg.grapheme_dictionary_name)
phoneme_dict_savepath = get_path(savedir, cfg.phoneme_dictionary_name)
instance = Dataset(
source_dataset_path = cfg.source_dataset_path,
savepath = savepath,
savepath_wavs = savepath_wavs,
metadata_savepath = metadata_savepath,
grapheme_dictionary_savepath = grapheme_dict_savepath,
phoneme_dictionary_savepath = phoneme_dict_savepath,
num_threads=cfg.NUM_THREADS)
instance.prepare_mfa_training()
if __name__ == "__main__":
main()
``` |
{
"source": "jodur/HASS-Deepstack-object",
"score": 2
} |
#### File: custom_components/deepstack_object/image_processing.py
```python
from collections import namedtuple
import datetime
import io
import logging
import os
import re
from datetime import timedelta
from typing import Tuple, Dict, List
from pathlib import Path
from PIL import Image, ImageDraw
import deepstack.core as ds
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.util.pil import draw_box
from homeassistant.components.image_processing import (
ATTR_CONFIDENCE,
CONF_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
DEFAULT_CONFIDENCE,
DOMAIN,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_IP_ADDRESS,
CONF_PORT,
HTTP_BAD_REQUEST,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import split_entity_id
_LOGGER = logging.getLogger(__name__)
ANIMAL = "animal"
ANIMALS = [
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
]
OTHER = "other"
PERSON = "person"
VEHICLE = "vehicle"
VEHICLES = ["bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck"]
OBJECT_TYPES = [ANIMAL, OTHER, PERSON, VEHICLE]
CONF_API_KEY = "api_key"
CONF_TARGET = "target"
CONF_TARGETS = "targets"
CONF_TIMEOUT = "timeout"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
CONF_SHOW_BOXES = "show_boxes"
CONF_ROI_Y_MIN = "roi_y_min"
CONF_ROI_X_MIN = "roi_x_min"
CONF_ROI_Y_MAX = "roi_y_max"
CONF_ROI_X_MAX = "roi_x_max"
CONF_SCALE = "scale"
CONF_CUSTOM_MODEL = "custom_model"
DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TARGETS = [{CONF_TARGET: PERSON}]
DEFAULT_TIMEOUT = 10
DEFAULT_ROI_Y_MIN = 0.0
DEFAULT_ROI_Y_MAX = 1.0
DEFAULT_ROI_X_MIN = 0.0
DEFAULT_ROI_X_MAX = 1.0
DEAULT_SCALE = 1.0
DEFAULT_ROI = (
DEFAULT_ROI_Y_MIN,
DEFAULT_ROI_X_MIN,
DEFAULT_ROI_Y_MAX,
DEFAULT_ROI_X_MAX,
)
EVENT_OBJECT_DETECTED = "deepstack.object_detected"
BOX = "box"
FILE = "file"
OBJECT = "object"
SAVED_FILE = "saved_file"
MIN_CONFIDENCE = 0.1
# rgb(red, green, blue)
RED = (255, 0, 0) # For objects within the ROI
GREEN = (0, 255, 0) # For ROI box
YELLOW = (255, 255, 0) # Unused
TARGETS_SCHEMA = {
vol.Required(CONF_TARGET): cv.string,
vol.Optional(CONF_CONFIDENCE): vol.All(
vol.Coerce(float), vol.Range(min=10, max=100)
),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_CUSTOM_MODEL, default=""): cv.string,
vol.Optional(CONF_TARGETS, default=DEFAULT_TARGETS): vol.All(
cv.ensure_list, [vol.Schema(TARGETS_SCHEMA)]
),
vol.Optional(CONF_ROI_Y_MIN, default=DEFAULT_ROI_Y_MIN): cv.small_float,
vol.Optional(CONF_ROI_X_MIN, default=DEFAULT_ROI_X_MIN): cv.small_float,
vol.Optional(CONF_ROI_Y_MAX, default=DEFAULT_ROI_Y_MAX): cv.small_float,
vol.Optional(CONF_ROI_X_MAX, default=DEFAULT_ROI_X_MAX): cv.small_float,
vol.Optional(CONF_SCALE, default=DEAULT_SCALE): vol.All(
vol.Coerce(float, vol.Range(min=0.1, max=1))
),
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
}
)
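# Example configuration.yaml entry matching the schema above (values are illustrative only):
#
#   image_processing:
#     - platform: deepstack_object
#       ip_address: 192.168.1.10
#       port: 80
#       save_file_folder: /config/snapshots/
#       save_timestamped_file: true
#       targets:
#         - target: person
#           confidence: 60
#         - target: vehicle
#       source:
#         - entity_id: camera.front_door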
Box = namedtuple("Box", "y_min x_min y_max x_max")
Point = namedtuple("Point", "y x")
def point_in_box(box: Box, point: Point) -> bool:
"""Return true if point lies in box"""
if (box.x_min <= point.x <= box.x_max) and (box.y_min <= point.y <= box.y_max):
return True
return False
def object_in_roi(roi: dict, centroid: dict) -> bool:
"""Convenience to convert dicts to the Point and Box."""
target_center_point = Point(centroid["y"], centroid["x"])
roi_box = Box(roi["y_min"], roi["x_min"], roi["y_max"], roi["x_max"])
return point_in_box(roi_box, target_center_point)
def get_valid_filename(name: str) -> str:
return re.sub(r"(?u)[^-\w.]", "", str(name).strip().replace(" ", "_"))
def get_object_type(object_name: str) -> str:
if object_name == PERSON:
return PERSON
elif object_name in ANIMALS:
return ANIMAL
elif object_name in VEHICLES:
return VEHICLE
else:
return OTHER
def get_objects(predictions: list, img_width: int, img_height: int) -> List[Dict]:
"""Return objects with formatting and extra info."""
objects = []
decimal_places = 3
for pred in predictions:
box_width = pred["x_max"] - pred["x_min"]
box_height = pred["y_max"] - pred["y_min"]
box = {
"height": round(box_height / img_height, decimal_places),
"width": round(box_width / img_width, decimal_places),
"y_min": round(pred["y_min"] / img_height, decimal_places),
"x_min": round(pred["x_min"] / img_width, decimal_places),
"y_max": round(pred["y_max"] / img_height, decimal_places),
"x_max": round(pred["x_max"] / img_width, decimal_places),
}
box_area = round(box["height"] * box["width"], decimal_places)
centroid = {
"x": round(box["x_min"] + (box["width"] / 2), decimal_places),
"y": round(box["y_min"] + (box["height"] / 2), decimal_places),
}
name = pred["label"]
object_type = get_object_type(name)
confidence = round(pred["confidence"] * 100, decimal_places)
objects.append(
{
"bounding_box": box,
"box_area": box_area,
"centroid": centroid,
"name": name,
"object_type": object_type,
"confidence": confidence,
}
)
return objects
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the classifier."""
save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
if save_file_folder:
save_file_folder = Path(save_file_folder)
entities = []
for camera in config[CONF_SOURCE]:
object_entity = ObjectClassifyEntity(
ip_address=config.get(CONF_IP_ADDRESS),
port=config.get(CONF_PORT),
api_key=config.get(CONF_API_KEY),
timeout=config.get(CONF_TIMEOUT),
custom_model=config.get(CONF_CUSTOM_MODEL),
targets=config.get(CONF_TARGETS),
confidence=config.get(CONF_CONFIDENCE),
roi_y_min=config[CONF_ROI_Y_MIN],
roi_x_min=config[CONF_ROI_X_MIN],
roi_y_max=config[CONF_ROI_Y_MAX],
roi_x_max=config[CONF_ROI_X_MAX],
scale=config[CONF_SCALE],
show_boxes=config[CONF_SHOW_BOXES],
save_file_folder=save_file_folder,
save_timestamped_file=config.get(CONF_SAVE_TIMESTAMPTED_FILE),
camera_entity=camera.get(CONF_ENTITY_ID),
name=camera.get(CONF_NAME),
)
entities.append(object_entity)
add_devices(entities)
class ObjectClassifyEntity(ImageProcessingEntity):
"""Perform a object classification."""
def __init__(
self,
ip_address,
port,
api_key,
timeout,
custom_model,
targets,
confidence,
roi_y_min,
roi_x_min,
roi_y_max,
roi_x_max,
scale,
show_boxes,
save_file_folder,
save_timestamped_file,
camera_entity,
name=None,
):
"""Init with the API key and model id."""
super().__init__()
self._dsobject = ds.DeepstackObject(
ip=ip_address,
port=port,
api_key=api_key,
timeout=timeout,
min_confidence=MIN_CONFIDENCE,
custom_model=custom_model,
)
self._custom_model = custom_model
self._confidence = confidence
self._targets = targets
for target in self._targets:
if CONF_CONFIDENCE not in target.keys():
target.update({CONF_CONFIDENCE: self._confidence})
self._targets_names = [
target[CONF_TARGET] for target in targets
] # can be a name or a type
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = "deepstack_object_{}".format(camera_name)
self._state = None
self._objects = [] # The parsed raw data
self._targets_found = []
self._roi_dict = {
"y_min": roi_y_min,
"x_min": roi_x_min,
"y_max": roi_y_max,
"x_max": roi_x_max,
}
self._scale = scale
self._show_boxes = show_boxes
self._last_detection = None
self._image_width = None
self._image_height = None
self._save_file_folder = save_file_folder
self._save_timestamped_file = save_timestamped_file
self._image = None
def process_image(self, image):
"""Process an image."""
self._image = Image.open(io.BytesIO(bytearray(image)))
self._image_width, self._image_height = self._image.size
        # resize the image if the scale differs from the default
        if self._scale != DEAULT_SCALE:
            newsize = (self._image_width * self._scale, self._image_height * self._scale)
self._image.thumbnail(newsize, Image.ANTIALIAS)
self._image_width, self._image_height = self._image.size
with io.BytesIO() as output:
self._image.save(output, format="JPEG")
image = output.getvalue()
_LOGGER.debug(
(
f"Image scaled with : {self._scale} W={self._image_width} H={self._image_height}"
)
)
self._state = None
self._objects = [] # The parsed raw data
self._targets_found = []
saved_image_path = None
try:
predictions = self._dsobject.detect(image)
except ds.DeepstackException as exc:
_LOGGER.error("Deepstack error : %s", exc)
return
self._objects = get_objects(predictions, self._image_width, self._image_height)
self._targets_found = []
for obj in self._objects:
if not (
(obj["name"] in self._targets_names)
or (obj["object_type"] in self._targets_names)
):
continue
            ## First check whether the object's type has a configured confidence and, if so, use it.
            ## A confidence configured for the named object takes precedence over the type confidence.
confidence = None
for target in self._targets:
if obj["object_type"] == target[CONF_TARGET]:
confidence = target[CONF_CONFIDENCE]
for target in self._targets:
if obj["name"] == target[CONF_TARGET]:
confidence = target[CONF_CONFIDENCE]
if obj["confidence"] > confidence:
if not object_in_roi(self._roi_dict, obj["centroid"]):
continue
self._targets_found.append(obj)
self._state = len(self._targets_found)
if self._state > 0:
self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
if self._save_file_folder and self._state > 0:
saved_image_path = self.save_image(
self._targets_found,
self._save_file_folder,
)
# Fire events
for target in self._targets_found:
target_event_data = target.copy()
target_event_data[ATTR_ENTITY_ID] = self.entity_id
if saved_image_path:
target_event_data[SAVED_FILE] = saved_image_path
self.hass.bus.fire(EVENT_OBJECT_DETECTED, target_event_data)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def device_state_attributes(self) -> Dict:
"""Return device specific state attributes."""
attr = {}
attr["targets"] = self._targets
attr["targets_found"] = [
{obj["name"]: obj["confidence"]} for obj in self._targets_found
]
if self._last_detection:
attr["last_target_detection"] = self._last_detection
if self._custom_model:
attr["custom_model"] = self._custom_model
attr["all_objects"] = [
{obj["name"]: obj["confidence"]} for obj in self._objects
]
if self._save_file_folder:
attr[CONF_SAVE_FILE_FOLDER] = str(self._save_file_folder)
if self._save_timestamped_file:
attr[CONF_SAVE_TIMESTAMPTED_FILE] = self._save_timestamped_file
return attr
def save_image(self, targets, directory) -> str:
"""Draws the actual bounding box of the detected objects.
Returns: saved_image_path, which is the path to the saved timestamped file if configured, else the default saved image.
"""
try:
img = self._image.convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Deepstack unable to process image, bad data")
return
draw = ImageDraw.Draw(img)
roi_tuple = tuple(self._roi_dict.values())
if roi_tuple != DEFAULT_ROI and self._show_boxes:
draw_box(
draw,
roi_tuple,
img.width,
img.height,
text="ROI",
color=GREEN,
)
for obj in targets:
if not self._show_boxes:
break
name = obj["name"]
confidence = obj["confidence"]
box = obj["bounding_box"]
centroid = obj["centroid"]
box_label = f"{name}: {confidence:.1f}%"
draw_box(
draw,
(box["y_min"], box["x_min"], box["y_max"], box["x_max"]),
img.width,
img.height,
text=box_label,
color=RED,
)
# draw bullseye
draw.text(
(centroid["x"] * img.width, centroid["y"] * img.height),
text="X",
fill=RED,
)
# Save images, returning the path of saved image as str
latest_save_path = (
directory / f"{get_valid_filename(self._name).lower()}_latest.jpg"
)
_LOGGER.info("Deepstack saved file %s", latest_save_path)
img.save(latest_save_path)
saved_image_path = latest_save_path
if self._save_timestamped_file:
timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
img.save(timestamp_save_path)
_LOGGER.info("Deepstack saved file %s", timestamp_save_path)
saved_image_path = timestamp_save_path
return str(saved_image_path)
``` |
{
"source": "joduss/alfred-workFlows-iossimulator",
"score": 3
} |
#### File: alfred-workFlows-iossimulator/core/device.py
```python
import json
import subprocess
from core.application import number_of_applications
class DeviceType:
IPhone, IPad, Other = ("iPhone", "iPad", "other")
def __device_type_with_name(name):
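    # Uses the `cond and A or B` idiom: IPhone if the name starts with "iphone", IPad if it starts
    # with "ipad", otherwise Other.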
deviceType = name.lower().find(
"iphone") == 0 and DeviceType.IPhone or DeviceType.Other
return name.lower().find("ipad") == 0 and DeviceType.IPad or deviceType
class DeviceState:
Unknown, Shutdown, Booted, Creating = (
"unknown", "shutdown", "booted", "creating")
def __device_is_available(device):
return (
('availability' in device and device['availability'] == "(available)")
# XCode 11^
or ('isAvailable' in device and device['isAvailable'] == True)
)
def __prepare_runtime(runtime):
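    # e.g. "com.apple.CoreSimulator.SimRuntime.iOS-14-5" -> "iOS 14.5"
    # (identifier format assumed from `xcrun simctl list -j devices` output)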
return runtime.split(".")[-1].replace("-", " ", 1).replace("-", ".")
def __device_state_with_name(state):
if state == "Shutdown":
return DeviceState.Shutdown
elif state == "Booted":
return DeviceState.Booted
elif state == "Creating":
return DeviceState.Creating
else:
return DeviceState.Unknown
class Device:
def __init__(self, name, udid, state, runtime, deviceType):
self.name = name
self.udid = udid
self.state = state
self.runtime = runtime
self.type = deviceType
self.numberOfApplications = number_of_applications(udid)
def description(self):
return "name: {0} id: {1} state: {2} runtime: {3} type: {4} number of applications: {5}".format(
self.name,
self.udid,
self.state,
self.runtime,
self.type,
self.numberOfApplications,
)
def applications_description(self):
if self.numberOfApplications == 0:
return "No applications installed"
elif self.numberOfApplications == 1:
return "1 application installed"
else:
return "{} applications installed".format(self.numberOfApplications)
def devices():
devicesJson = subprocess.check_output(
["/usr/bin/xcrun", "simctl", "list", "-j", "devices"])
allDevices = json.loads(devicesJson)["devices"]
iosDevices = [device for device in allDevices.items() if (
device[0].find("iOS") >= 0)]
devices = []
for runtime, rawDevices in iosDevices:
devicesAvailables = (d for d in rawDevices if __device_is_available(d))
for rawDevice in devicesAvailables:
device = Device(
rawDevice["name"],
rawDevice["udid"],
__device_state_with_name(rawDevice["state"]),
__prepare_runtime(runtime),
__device_type_with_name(rawDevice["name"])
)
devices.append(device)
return devices
def device_with_id(udid):
filteredDevices = [d for d in devices() if d.udid == udid]
return filteredDevices[0] if filteredDevices else None
if __name__ == '__main__':
allDevices = devices()
print("\n".join((d.description() for d in allDevices)))
if allDevices:
individualDevice = device_with_id(allDevices[0].udid)
if individualDevice:
print("\n{0}".format(individualDevice.description()))
``` |
{
"source": "joduss/ArticleClassifier",
"score": 2
} |
#### File: classifier/Data/GlobalDataset.py
```python
import math
import tensorflow as tf
class GlobalDataset:
def __init__(self, tf_dataset: tf.data.Dataset, train_ratio: float, validation_ratio: float, batch_size: int = 300):
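        # Note: article_length, theme_count and count below are computed by materialising the
        # tf.data.Dataset with as_numpy_iterator(), which iterates it fully and can be slow for large datasets.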
self.article_length = len(list(tf_dataset.as_numpy_iterator())[0][0])
self.theme_count = len(list(tf_dataset.as_numpy_iterator())[0][1])
self.count = len(list(tf_dataset.as_numpy_iterator()))
self.dataset = tf_dataset.batch(batch_size).repeat().shuffle(batch_size)
self.trainSize = int(train_ratio * self.count)
self.validationSize = int(validation_ratio * self.count)
self.testSize = self.count - self.trainSize - self.validationSize
self.trainData = self.dataset.take(self.trainSize).repeat()
self.validationData = self.dataset.skip(self.trainSize).take(self.validationSize).repeat()
self.testData = self.dataset.skip(self.testSize)
self.train_batch_count = int(math.ceil(self.trainSize / batch_size))
self.test_batch_count = int(math.ceil(self.testSize / batch_size))
self.validation_batch_count = int(math.ceil(self.validationSize / batch_size))
# def __init__(self, X, Y, train_ratio: float, validation_ratio: float, batch_size):
# """
# Creates and wrap a tensorflow dataset.
# :param X: Input
# :param Y: Outputs
# :param train_ratio:
# :param validation_ratio:
# :param batch_size:
# """
# self.X_column_count = len(X[0])
# self.Y_column_count = len(Y[0])
# self.row_count = len(X)
#
# XY = list(zip(X, Y))
# shuffle(XY)
# X, Y = zip(*XY)
#
# self.train_size = math.ceil(train_ratio * self.row_count)
# self.validation_size = math.ceil(validation_ratio * self.row_count)
# self.test_size = self.row_count - self.train_size - self.validation_size
#
# self.train_batch_count = int(math.ceil(self.train_size / batch_size))
# self.test_batch_count = int(math.ceil(self.test_size / batch_size))
# self.validation_batch_count = int(math.ceil(self.validation_size / batch_size))
#
# self.X_test = X[0:self.test_size]
# self.Y_test = Y[0:self.test_size]
#
# self.X_train = X[self.test_size:self.test_size + self.train_size]
# self.Y_train = Y[self.test_size:self.test_size + self.train_size]
# self.X_val = X[self.test_size + self.train_size:]
# self.Y_val = Y[self.test_size + self.train_size:]
#
# # tf.Datasets creation
#
# # Only train shuffle. Not needed to evaluate/test
# self.trainData = tf.data.Dataset.from_tensor_slices((self.X_train, self.Y_train)).batch(batch_size).repeat()
# self.validationData = tf.data.Dataset.from_tensor_slices((self.X_val, self.Y_val)).batch(batch_size).repeat()
# self.testData = tf.data.Dataset.from_tensor_slices((self.X_test, self.Y_test)).batch(batch_size).repeat()
```
#### File: classifier/training/TrainedModel.py
```python
from classifier.models.IClassifierModel import IClassifierModel
from classifier.preprocessing.article_text_tokenizer import ArticleTextTokenizer
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
class TrainedModel:
theme_tokenizer: ArticleThemeTokenizer
article_tokenizer: ArticleTextTokenizer
model: IClassifierModel
def __init__(self, model: IClassifierModel,
article_tokenizer : ArticleTextTokenizer,
theme_tokenizer: ArticleThemeTokenizer):
self.theme_tokenizer = theme_tokenizer
self.article_tokenizer = article_tokenizer
self.model = model
def save(self, directory: str):
self.model.save_model(directory)
self.article_tokenizer.save(f"{directory}{self.model.get_model_name()}-article-tokenizer.json")
self.theme_tokenizer.save(f"{directory}{self.model.get_model_name()}-theme_tokenizer.json")
def load(self, directory: str):
self.model.load_model(directory)
```
#### File: src/data_models/articles.py
```python
from __future__ import annotations
import random
import math
import time
from typing import Callable, Dict, List, TextIO
import json as jsonModule
from data_models.article import Article
from data_models.transformation.article_transformer import ArticleTransformer
from utilities.utility import intersection
class MetaArticles(type):
@property
def items(cls) -> List[Article]:
return cls.items
class Articles(object, metaclass=MetaArticles):
jsonObject: Dict
items: List[Article]
def __init__(self, articles: List[Article] = None, article: Article = None):
if articles is not None and isinstance(articles, list):
self.items = articles
elif article is not None and isinstance(article, Article):
self.items = [article]
elif article is None and articles is None:
pass
else:
raise Exception("article or articles must be provided. NOT BOTH either!")
def __iter__(self):
return self.items.__iter__()
def add(self, article: Article):
self.items.append(article)
@staticmethod
def from_file(path: str, limit: int = None) -> Articles:
with open(path, "r", encoding="utf-8") as file:
try:
return Articles.load_articles(file, limit)
except Exception as e:
time.sleep(5)
return Articles.load_articles(file, limit)
@staticmethod
def load_articles(file: TextIO, limit: int = None) -> Articles:
"""
        Creates an Articles object from a JSON file containing articles.
:param file:
:param limit:
"""
json = jsonModule.loads(file.read())
if limit is not None:
json = json[0:limit]
articles: List[Article] = []
for jsonArticle in json:
articles.append(ArticleTransformer.transform_to_article(jsonArticle))
return Articles(articles)
def save(self, filepath: str):
with open(filepath, 'w', encoding="utf-8") as outfile:
jsonModule.dump([ArticleTransformer.transform_to_json(article) for article in self.items], outfile, indent=4, ensure_ascii=False)
# def inherit_predictions(self, articles: Articles):
# original_dic = { i.id : i for i in self.items }
#
# for predicted_article in articles:
# original_article: Article = original_dic[predicted_article.id]
# original_article.predicted_themes = predicted_article.predicted_themes
#
# print("done")
def subset(self, size: int or None) -> Articles:
"""
Creates a subset of the articles.
:param size: An integer or None.
        :return: The subset of size 'size', or all articles if size is None.
"""
if size is None:
return self
return Articles(self.items[0:size])
def subset_ratio(self, ratio: float) -> Articles:
return self.subset(size=math.ceil(self.count() * ratio))
def articles_with_theme(self, theme: str) -> Articles:
"""
Returns articles which have the given theme in the list of themes (property 'themes')
:param theme: theme that must be present
:return: articles having the given theme.
"""
return Articles(
list(
filter(lambda article: theme in article.themes, self.items)
)
)
def get_by_id(self, article_id: str):
for article in self.items:
            if article.id == article_id:
return article
raise Exception("Not found.")
# def filter(self, filter_function: Callable[[Article], bool]) -> Articles:
# return Articles(
# list(
# filter(
# lambda article: filter_function(article),
# self.items,
# )
# )
# )
def articles_with_all_verified_themes(self, themes: List[str]) -> Articles:
"""
        Returns a new Articles instance containing the articles whose verified themes contain all of the given themes.
        :param themes: All themes that must have been verified in the article.
:return:
"""
return Articles(
list(
filter(
lambda article: (len(intersection(themes, article.verified_themes)) == len(themes)),
self.items
)
)
)
def articles_with_any_verified_themes(self, themes: List[str]) -> Articles:
"""
        Returns a new Articles instance containing the articles whose verified themes contain at least one
        of the given themes.
        :param themes: Themes of which at least one must be present in the article's verified themes.
:return:
"""
return Articles(
list(
filter(
lambda article: (len(intersection(themes, article.verified_themes)) > 0),
self.items
)
)
)
def themes(self) -> List[str]:
"""
        Returns the list of themes for each article, in the same order as the articles.
"""
return list(
map(lambda article: article.themes, self.items)
)
def title_and_summary(self) -> List[str]:
return list(
map(lambda article: article.title_and_summary(), self.items)
)
def count(self) -> int:
return len(self.items)
def copyEachArticle(self) -> List[Article]:
return list(
map(lambda article: article.copy(), self.items)
)
def deep_copy(self) -> Articles:
return Articles(self.copyEachArticle())
def shuffle(self):
random.shuffle(self.items)
def contains(self, article_id: str) -> bool:
for item in self.items:
if item.id == article_id:
return True
return False
def __sub__(self, other):
"""
        Returns a new Articles instance with the articles of this instance that are not present in 'other'.
:rtype: Articles
"""
if not isinstance(other, Articles):
raise Exception("Must be type Articles")
# just for typing
other_articles: Articles = other
ids: set = set([article.id for article in other_articles])
filtered = [article for article in self.items if article.id not in ids]
return Articles(filtered)
def __getitem__(self, idx):
return self.items[idx]
```
#### File: data_models/weights/theme_weights.py
```python
from typing import Dict, List
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
from data_models.ThemeStat import ThemeStat
class ThemeWeights:
theme_stats: List[ThemeStat]
theme_tokenizer: ArticleThemeTokenizer
def __init__(self, theme_stats: List[ThemeStat], theme_tokenizer: ArticleThemeTokenizer):
self.theme_stats = theme_stats
self.theme_tokenizer = theme_tokenizer
def weight_list(self) -> List[float]:
"""
        Returns a list of weights, one per theme, ordered by theme index.
"""
theme_weight: List[float] = list([])
#raise Exception("To review")
for theme in self.theme_tokenizer.orderedThemes:
stat = [stat for stat in self.theme_stats if stat.theme == theme][0]
theme_weight.append(stat.binary_weight_pos())
return theme_weight
def weights_of_theme(self, theme_idx: int) -> Dict[int, float]:
"""
        Returns the weights for a theme in the form {0: negative_weight, 1: positive_weight}.
:param theme_idx: index of the theme
"""
theme = self.theme_tokenizer.theme_at_index(theme_idx)
theme_stat = list(filter(lambda stat: stat.theme == theme, self.theme_stats))
if len(theme_stat) == 0:
raise Exception("Theme {} not found.".format(theme))
if len(theme_stat) > 1:
raise Exception("Theme {} found multiple times.".format(theme))
return {0 : theme_stat[0].binary_weight_neg(),
1 : theme_stat[0].binary_weight_pos()}
def weight_array(self) -> List[List[float]]:
theme_weight_array: List[List[float]] = []
# raise Exception("To review")
for theme in self.theme_tokenizer.orderedThemes:
stat = [stat for stat in self.theme_stats if stat.theme == theme][0]
theme_weight = [0,0]
theme_weight[0] = stat.binary_weight_neg()
theme_weight[1] = stat.binary_weight_pos()
theme_weight_array.append(theme_weight)
return theme_weight_array
```
#### File: src/tests/ArticlesPredictionTests.py
```python
import unittest
from classifier.prediction.articles_prediction import ArticlesPrediction
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
from data_models.article import Article
from data_models.articles import Articles
class ArticlesPredictionTests(unittest.TestCase):
def testApplyOnArticlesDefaultThreshold(self):
article1 = Article("1", "title", "summary", ["theme1"], ["theme1", "old_prediction"], ["theme1"])
article2 = Article("2", "title", "summary", ["theme1", "theme2"], ["other__old_predicted_theme"], ["theme1", "theme2", "theme3"])
# article 3 is not used for test, but is necessary for the tokenizer to know the theme3.
article3 = Article("3", "title", "summary", ["theme3"], [], [])
articles = Articles([article1, article2])
theme_tokenizer = ArticleThemeTokenizer(Articles([article1, article2, article3]))
predictions = ArticlesPrediction(theme_tokenizer, articles)
predictions.addPredictionsForArticle([0.1, 0.7, 0], article1.id)
predictions.addPredictionsForArticle([0.4, 0.89, 0.99], article2.id)
# Apply prediction with standard threshold
predicted_articles = predictions.get_articles_with_predictions()
predicted_articles_one = predicted_articles[0]
predicted_articles_two = predicted_articles[1]
self.assertEqual(1, len(predicted_articles_one.predicted_themes))
self.assertFalse("theme1" in predicted_articles_one.predicted_themes)
self.assertTrue("theme2" in predicted_articles_one.predicted_themes)
self.assertFalse("theme3" in predicted_articles_one.predicted_themes)
self.assertEqual(2, len(predicted_articles_two.predicted_themes))
self.assertFalse("theme1" in predicted_articles_two.predicted_themes)
self.assertTrue("theme2" in predicted_articles_two.predicted_themes)
self.assertTrue("theme3" in predicted_articles_two.predicted_themes)
# Check that the 'verified themes' and 'themes' are not touched!
self.assertEqual(1, len(predicted_articles_one.themes))
self.assertTrue("theme1" in predicted_articles_one.themes)
self.assertFalse("theme2" in predicted_articles_one.themes)
self.assertFalse("theme3" in predicted_articles_one.themes)
self.assertEqual(2, len(predicted_articles_two.themes))
self.assertTrue("theme1" in predicted_articles_two.themes)
self.assertTrue("theme2" in predicted_articles_two.themes)
self.assertFalse("theme3" in predicted_articles_two.themes)
self.assertEqual(1, len(predicted_articles_one.verified_themes))
self.assertTrue("theme1" in predicted_articles_one.verified_themes)
self.assertFalse("theme2" in predicted_articles_one.verified_themes)
self.assertFalse("theme3" in predicted_articles_one.verified_themes)
self.assertEqual(3, len(predicted_articles_two.verified_themes))
self.assertTrue("theme1" in predicted_articles_two.verified_themes)
self.assertTrue("theme2" in predicted_articles_two.verified_themes)
self.assertTrue("theme3" in predicted_articles_two.verified_themes)
# Apply prediction with custom threshold
predicted_articles = predictions.get_articles_with_predictions(0.09)
predicted_articles_one = predicted_articles[0]
predicted_articles_two = predicted_articles[1]
self.assertEqual(2, len(predicted_articles_one.predicted_themes))
self.assertTrue("theme1" in predicted_articles_one.predicted_themes)
self.assertTrue("theme2" in predicted_articles_one.predicted_themes)
self.assertFalse("theme3" in predicted_articles_one.predicted_themes)
self.assertEqual(3, len(predicted_articles_two.predicted_themes))
self.assertTrue("theme1" in predicted_articles_two.predicted_themes)
self.assertTrue("theme2" in predicted_articles_two.predicted_themes)
self.assertTrue("theme3" in predicted_articles_two.predicted_themes)
```
#### File: src/tests/ArticlesTests.py
```python
import unittest
from data_models.article import Article
from data_models.articles import Articles
class ArticlesTests(unittest.TestCase):
@staticmethod
def create_articles() -> Articles:
article1 = Article(title="Title", summary="summary", themes=[], verified_themes=[], predicted_themes=[], id="1")
article2 = Article(title="Title", summary="summary", themes=["T"], verified_themes=["T"], predicted_themes=[], id="2")
article3 = Article(title="Title", summary="summary", themes=["T", "T2"], verified_themes=[], predicted_themes=[], id="3")
article4 = Article(title="Title", summary="summary", themes=[], verified_themes=["T"], predicted_themes=[], id="4")
article5 = Article(title="Title", summary="summary", themes=["T2"], verified_themes=["T"], predicted_themes=[], id="5")
article6 = Article(title="Title", summary="summary", themes=["T", "T2", "T3"], verified_themes=["T", "T2", "T3"], predicted_themes=["T3"], id="6")
return Articles([article1, article2, article3, article4, article5, article6])
def test_articles_with_theme(self):
articles = ArticlesTests.create_articles()
filtered = articles.articles_with_theme("T2")
self.assertEqual(3, filtered.count())
self.assertTrue(articles.items[2] in filtered)
self.assertTrue(articles.items[4] in filtered)
self.assertTrue(articles.items[5] in filtered)
self.assertFalse(articles.items[3] in filtered)
def test_articles_with_all_verified_themes(self):
articles = ArticlesTests.create_articles()
filtered = articles.articles_with_all_verified_themes(["T", "T2"])
self.assertEqual(1, filtered.count())
self.assertTrue(articles.items[5] in filtered)
self.assertFalse(articles.items[0] in filtered)
def test_articles_with_any_verified_themes(self):
articles = ArticlesTests.create_articles()
filtered = articles.articles_with_any_verified_themes(["T", "T2", "T3"])
self.assertEqual(4, filtered.count())
self.assertFalse(articles[0] in filtered)
self.assertTrue(articles[1] in filtered)
self.assertFalse(articles[2] in filtered)
self.assertTrue(articles[3] in filtered)
self.assertTrue(articles[4] in filtered)
self.assertTrue(articles[5] in filtered)
def test_themes(self):
themes = ArticlesTests.create_articles().themes()
self.assertEqual(0, len(themes[0]))
self.assertEqual(1, len(themes[1]))
self.assertEqual(2, len(themes[2]))
self.assertEqual(0, len(themes[3]))
self.assertEqual(1, len(themes[4]))
self.assertEqual(3, len(themes[5]))
self.assertEqual("T", themes[5][0])
self.assertEqual("T2", themes[5][1])
self.assertEqual("T3", themes[5][2])
def test_title_and_summary(self):
articles = self.create_articles()
self.assertEqual("Title. summary", articles.title_and_summary()[0])
def test_deep_copy(self):
articles = self.create_articles()
articles_copy = articles.deep_copy()
for i in range(0,articles.count()):
article = articles.items[i]
article_copy = articles_copy.items[i]
self.assertEqual(article.id, article_copy.id)
self.assertEqual(article.summary, article_copy.summary)
self.assertEqual(article.title, article_copy.title)
self.assertEqual(article.themes, article_copy.themes)
self.assertEqual(article.verified_themes, article_copy.verified_themes)
self.assertEqual(article.predicted_themes, article_copy.predicted_themes)
article_copy.predicted_themes.append("T4")
self.assertNotEqual(article.predicted_themes, article_copy.predicted_themes)
article.predicted_themes.append("T4")
self.assertEqual(article.predicted_themes, article_copy.predicted_themes)
def test_substraction(self):
articles = self.create_articles()
articles_to_remove = Articles(self.create_articles()[0:2])
filtered_articles = articles - articles_to_remove
self.assertEqual(filtered_articles.count() + 2, articles.count())
self.assertFalse(filtered_articles.contains(articles_to_remove[0].id))
self.assertFalse(filtered_articles.contains(articles_to_remove[1].id))
self.assertTrue(filtered_articles.contains(articles[2].id))
self.assertTrue(filtered_articles.contains(articles[3].id))
self.assertTrue(filtered_articles.contains(articles[4].id))
self.assertTrue(filtered_articles.contains(articles[5].id))
if __name__ == '__main__':
unittest.main()
```
#### File: src/tests/ArticleThemeTokenizerTests.py
```python
import unittest
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
from data_models.article import Article
from data_models.articles import Articles
class ArticleThemeTokenizerTests(unittest.TestCase):
def test_boolean_vector_to_themes(self):
article1 = Article("1", "title", "summary", ["theme1", "theme2", "theme3"], [], [])
article2 = Article("2", "title", "summary", ["theme1", "theme4"], [], [])
articles = Articles([article1, article2])
tokenizer = ArticleThemeTokenizer(articles)
self.assertEqual(4, tokenizer.themes_count)
self.assertEqual(["theme1", "theme2", "theme3", "theme4"], tokenizer.orderedThemes)
self.assertEqual(["theme1", "theme4"], tokenizer.boolean_vector_to_themes([True, False, False, True]))
self.assertEqual([], tokenizer.boolean_vector_to_themes([False, False, False, False]))
self.assertEqual(["theme3"], tokenizer.boolean_vector_to_themes([False, False, True, False]))
def test_save(self):
article1 = Article("1", "title", "summary", ["theme1", "theme2", "theme3"], [], [])
article2 = Article("2", "title", "summary", ["theme1", "theme4"], [], [])
articles = Articles([article1, article2])
tokenizer = ArticleThemeTokenizer(articles)
tokenizer.save("test.json")
with open("test.json", "r") as file:
content = file.readlines()
print("f")
```
#### File: src/tests/mock_model.py
```python
from tensorflow import keras as k
from tensorflow.keras import layers, models
import numpy as np
from tensorflow.python.keras.models import Model
class MockModel:
@classmethod
def get_model(cls) -> Model:
# Create a fake model. Basically, we simulate a text classifier where we have 3 words which are represented with 3
# digits: 1, 2 and 3. 0 is reserved for padding.
# There is an embedding matrix that encode each word into a vector containing exactly one 1 representing the word itself.
# So word 2 is represented as [0, 1, 0]
# The classifier tells if there is the occurence of a given word. The output consists of a binary vector, where
# the position p_i of a 1 indicates that the word i was present in the input vector.
model = models.Sequential([
layers.Embedding(input_dim=4, output_dim=3, input_length=3),
layers.GlobalMaxPool1D(),
])
model.compile(loss=k.losses.BinaryCrossentropy())
model.layers[0].set_weights([np.array([[0,0,0],[1,0,0], [0, 1, 0], [0,0,1]])])
return model
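    # Expected behaviour of the mock (a sketch; values follow from the weights set above):
    #   model = MockModel.get_model()
    #   model.predict(np.array([[1, 0, 3]]))  # ~ [[1., 0., 1.]] i.e. words 1 and 3 were present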
```
#### File: src/utilities/utility.py
```python
from typing import List
# Removes the enclosed lists that are empty.
def remove_empty_lists(list_of_lists: List[List[str]]):
return list(filter(lambda list: len(list) > 0, list_of_lists))
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
``` |
{
"source": "jodygarnett/bridge-style",
"score": 2
} |
#### File: bridgestyle/mapserver/togeostyler.py
```python
def convert(style, options=None):
raise NotImplementedError("togeostyler.convert() has not been implemented") # TODO
``` |
{
"source": "Jodyheryanto/business-intelligence",
"score": 2
} |
#### File: app/model/word_cloud.py
```python
from app import db
from app.model.post import Posts
from app.model.user import Users
from app.model.source import Sources
class Word_Clouds(db.Model):
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
user_id = db.Column(db.BigInteger, db.ForeignKey(Users.id))
source_id = db.Column(db.BigInteger, db.ForeignKey(Sources.id))
word = db.Column(db.String(50), nullable=False)
value = db.Column(db.BigInteger, nullable=False)
sentiment = db.Column(db.BigInteger, nullable=False)
sample_id = db.Column(db.BigInteger, db.ForeignKey(Posts.id))
posts = db.relationship("Posts", backref="sample_id")
def __repr__(self):
return '<Word_Clouds {}>'.format(self.word)
```
#### File: server/app/routes.py
```python
from app import app
from app.controller import APIController
from app.controller import UserController
from app.controller import PostController
from app.controller import GooglePlayController
from app.controller import AppStoreController
from app.controller import TwitterController
from app.controller import InstagramController
from app.controller import WordCloudController
from app.controller import NotifController
from app.controller import HelpController
from flask import request
from app import response
@app.route('/getgoogleplay')
def view_googleplay():
args = request.args
if(len(args) == 4):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
user_id = int(args["user_id"])
sentiment = args["sentiment"]
sentiments = comma_separated_params_to_list(sentiment)
if(user_id!='' and sentiment!='' and tanggal_akhir!='' and tanggal_awal!='' and user_id!=1):
return GooglePlayController.printData(tanggal_awal, tanggal_akhir, sentiments, user_id)
elif(user_id==1):
return GooglePlayController.printAllData(tanggal_awal, tanggal_akhir, sentiments)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/rungoogleplay')
def run_googleplay():
args = request.args
if(len(args) == 2):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
if(tanggal_akhir!='' and tanggal_awal!=''):
return GooglePlayController.index(tanggal_awal, tanggal_akhir)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/getinstagram')
def view_instagram():
args = request.args
if(len(args) == 4):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
user_id = int(args["user_id"])
sentiment = args["sentiment"]
sentiments = comma_separated_params_to_list(sentiment)
if(user_id!='' and sentiment!='' and tanggal_akhir!='' and tanggal_awal!='' and user_id!=1):
return InstagramController.printData(tanggal_awal, tanggal_akhir, sentiments, user_id)
elif(user_id==1):
return InstagramController.printAllData(tanggal_awal, tanggal_akhir, sentiments)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/runinstagram')
def run_instagram():
args = request.args
if(len(args) == 2):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
if(tanggal_akhir!='' and tanggal_awal!=''):
return InstagramController.index(tanggal_awal, tanggal_akhir)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/getappstore')
def view_appstore():
args = request.args
if(len(args) == 4):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
user_id = int(args["user_id"])
sentiment = args["sentiment"]
sentiments = comma_separated_params_to_list(sentiment)
if(user_id!='' and sentiment!='' and tanggal_akhir!='' and tanggal_awal!='' and user_id!=1):
return AppStoreController.printData(tanggal_awal, tanggal_akhir, sentiments, user_id)
elif(user_id==1):
return AppStoreController.printAllData(tanggal_awal, tanggal_akhir, sentiments)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/runappstore')
def run_appstore():
args = request.args
if(len(args) == 2):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
if(tanggal_akhir!='' and tanggal_awal!=''):
return AppStoreController.index(tanggal_awal, tanggal_akhir)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/gettwitter')
def view_twitter():
args = request.args
if(len(args) == 4):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
user_id = int(args["user_id"])
sentiment = args["sentiment"]
sentiments = comma_separated_params_to_list(sentiment)
if(user_id!='' and sentiment!='' and tanggal_akhir!='' and tanggal_awal!='' and user_id!=1):
return TwitterController.printData(tanggal_awal, tanggal_akhir, sentiments, user_id)
elif(user_id==1):
return TwitterController.printAllData(tanggal_awal, tanggal_akhir, sentiments)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
@app.route('/runtwitter')
def run_twitter():
args = request.args
if(len(args) == 2):
tanggal_awal = args["tanggal_awal"]
tanggal_akhir = args["tanggal_akhir"]
if(tanggal_akhir!='' and tanggal_awal!=''):
return TwitterController.index(tanggal_awal, tanggal_akhir)
else:
return response.ok([], "")
return {"msg":"Please fill the correct url!"}, 400
def comma_separated_params_to_list(param):
result = []
for val in param.split(','):
if val:
result.append(val)
return result
@app.route('/wordcloud')
def wordcloud():
args = request.args
if(len(args) == 1):
user_id = args["user_id"]
return WordCloudController.printData(user_id)
return {"msg":"Please fill the correct url!"}, 400
@app.route('/login', methods=['POST'])
def login():
return UserController.login()
@app.route('/users', methods=['POST', 'GET'])
def users():
if request.method == 'GET':
return UserController.index()
else:
return UserController.store()
@app.route('/users/<id>', methods=['PUT', 'GET', 'DELETE'])
def usersDetail(id):
if request.method == 'GET':
return UserController.show(id)
elif request.method == 'PUT':
return UserController.update(id)
elif request.method == 'DELETE':
return UserController.delete(id)
@app.route('/users/password/<id>', methods=['PUT'])
def usersPass(id):
if request.method == 'PUT':
return UserController.updatePass(id)
return {"msg":"Please fill the correct url!"}, 400
@app.route('/users/ubahstatus/<id>', methods=['PUT'])
def usersStatus(id):
if request.method == 'PUT':
return UserController.updateStatus(id)
@app.route('/getpost/<id>')
def postDetail(id):
return PostController.show(id)
@app.route('/')
def home():
return {"msg":"Please fill the correct url!"}, 400
@app.route('/refresh', methods=['POST'])
def refresh():
return UserController.refresh()
@app.route('/apiconfig', methods=['PUT', 'GET'])
def apiconfig():
if request.method == 'GET':
return APIController.index()
@app.route('/apiconfig/<id>')
def updateApi(id):
return APIController.update(id)
@app.route('/notifications', methods=['POST', 'GET'])
def notifications():
if request.method == 'GET':
args = request.args
if(len(args) == 1):
user_id = args["user_id"]
return NotifController.showbyuser(user_id)
else:
return NotifController.index()
else:
return NotifController.store()
@app.route('/notifications/<id>', methods=['PUT', 'GET', 'DELETE'])
def sudahBaca(id):
if request.method == 'GET':
return NotifController.sudahBaca(id)
elif request.method == 'DELETE':
return NotifController.delete(id)
else:
return NotifController.update(id)
@app.route('/helps', methods=['POST', 'GET'])
def helps():
if request.method == 'GET':
return HelpController.index()
else:
return HelpController.store()
@app.route('/helps/<id>', methods=['PUT', 'DELETE'])
def ubahHelp(id):
if request.method == 'DELETE':
return HelpController.delete(id)
else:
return HelpController.update(id)
``` |
{
"source": "jodyphelan/pathogenseq",
"score": 2
} |
#### File: pathogenseq/pathogenseq/abi.py
```python
from collections import defaultdict
from Bio import SeqIO
import matplotlib.pyplot as plt
from .files import *
from .nucmer import *
from .mvcf import *
from .fasta import *
#import matplotlib as mpl
#mpl.use('TkAgg')
class abi:
def __init__(self,in_obj,prefix):
if isinstance(in_obj,list):
self.filenames = in_obj
self.records = [SeqIO.read(x,'abi') for x in self.filenames]
self.prefix = prefix
elif isinstance(in_obj,SeqIO.SeqRecord):
self.records = in_obj
self.prefix = in_obj.prefix
self.quals = {}
for rec in self.records:
self.quals[rec.name] = [ord(x) for x in rec.annotations["abif_raw"]["PCON1"]]
def trim_seq(self,rec):
return abi([SeqIO.AbiIO._abi_trim(x) for x in self.records])
def plot_chromatogram(self,start,end,refsequence):
self.signals = {}
self.channels = {'DATA9':"A", 'DATA10':"C", 'DATA11':"G", 'DATA12':"T"}
for c in self.channels:
self.signals[self.channels[c]] = self.record.annotations['abif_raw'][c]
self.ploc1 = self.record.annotations["abif_raw"]["PLOC1"]
cols = {"A":"gold","C":"red","G":"green","T":"blue"}
signal_start = self.ploc1[start-1]-3
signal_end = self.ploc1[end]+5
xvals = list(range(signal_start,signal_end))
for c in ["A","C","G","T"]:
plt.plot(xvals,self.signals[c][signal_start:signal_end], color=cols[c])
plt.xticks(self.ploc1[start-1:end],str(self.record.seq)[start-1:end])
for i,x in enumerate(refsequence):
col = "red" if i==4 else "black"
plt.text(self.ploc1[start-1:end][i],(plt.axis()[3] - plt.axis()[2])*-0.15 ,x,horizontalalignment='center',color=col)
# plt.fill_between([self.ploc1[start-1],self.ploc1[end]],plt.axis()[2],plt.axis()[3])
plt.show()
def write_seq(self):
self.fasta = "%s.fasta" % self.prefix
with open(self.fasta,"w") as F:
for rec in self.records:
F.write(">%s\n%s\n" % (rec.name,rec.seq))
def nucmer_align(self,refseq):
add_arguments_to_self(self,locals())
self.write_seq()
run_cmd("nucmer %(refseq)s %(fasta)s -p %(prefix)s" % vars(self))
return delta("%s.delta" % self.prefix)
def get_variants_vcf(self,refseq,gff=None):
add_arguments_to_self(self,locals())
self.write_seq()
fa = fasta(self.prefix+".fasta")
return bcf(fa.get_ref_variants(refseq,self.prefix,gff))
def get_variants(self,refseq):
add_arguments_to_self(self,locals())
variants = []
for l in cmd_out("minimap2 %(refseq)s %(prefix)s.fasta --cs | sort -k6,6 -k8,8n | paftools.js call -l 100 -L 100 -" % vars(self)):
row = l.strip().split()
if row[0]!="V": continue
variants.append({"refseq":row[1],"refpos":int(row[2]),"refnuc":row[6],"queryseq":row[8],"querypos":int(row[9]),"querynuc":row[7]})
return variants
def load_maf(self,refseq):
self.maf = {}
for l in cmd_out("minimap2 %(refseq)s %(prefix)s.fasta --cs=long | sort -k6,6 -k8,8n | paftools.js view -f maf -" % vars(self)):
row = l.strip().split()
if l=="": continue
if row[0]!="s": continue
self.maf[row[1]] = {"start":int(row[2]),"seq":row[6].upper()}
def get_maf_refseq(self,start,end,refseq = None):
if "maf" not in vars(self): self.load_maf(refseq)
start = start - self.maf[self.prefix]["start"]-1
end = end - self.maf[self.prefix]["start"]
print((start,end))
return self.maf[list(self.maf.keys())[0]]["seq"][start:end]
def plot_variants(self,refseq):
variants = self.get_variants(refseq)
for var in variants:
print(var)
refsequence = self.get_maf_refseq(var["querypos"]-3,var["querypos"]+5,refseq)
self.plot_chromatogram(var["querypos"]-3,var["querypos"]+5,refsequence)
```
#### File: pathogenseq/pathogenseq/bam.py
```python
from __future__ import division
from .files import *
from .utils import *
from .mvcf import *
from .qc import *
from .delly import *
from collections import defaultdict
import re
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
import vcf
import pysam
from tqdm import tqdm
def get_overlapping_reads(infile,chrom,start,end,outfile,flank=30,threads=4):
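	# Writes to `outfile` only the reads that fully span [start - flank, end + flank] and returns how many
	# were written (note: the `threads` argument is currently unused).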
IN = pysam.AlignmentFile(infile,"rb")
OUT = pysam.AlignmentFile(outfile,"wb",template=IN)
if start-flank<0:
OUT.close()
return 0
else:
i = 0
for read in IN.fetch(chrom,start,end):
if read.reference_start<=start-flank and read.reference_end>=end+flank:
i+=1
OUT.write(read)
OUT.close()
return i
class bam:
"""
A class to perform operations on BAM files such as SNP calling
Args:
bam_file(str): The BAM file [required]
prefix(str): A prefix for output files [required]
ref_file(ref_file): A reference (needed by some methods)
platform(str): Can be either ``Illumina`` or ``minION``
Returns:
bam: A bam class object
"""
def __init__(self,bam_file,prefix,ref_file,platform="Illumina",threads=4):
self.params = {}
self.bam_file = bam_file
self.ref_file = ref_file
index_bam(bam_file,threads=threads)
if filecheck(bam_file):
self.params["bam_file"] = bam_file
self.bam = bam_file
self.params["prefix"] = prefix
self.prefix = prefix
if filecheck(ref_file):
self.params["ref_file"] = ref_file
self.ref_fa = fasta(self.params["ref_file"])
self.ref_fa_dict = self.ref_fa.fa_dict
self.params["platform"] = platform
self.platform = platform
self.params["threads"] = threads
def run_delly(self):
run_cmd("delly call -g %(ref_file)s %(bam_file)s -o %(prefix)s.bcf" % vars(self))
return delly_bcf("%(prefix)s.bcf" % vars(self))
def generate_primer_bcf(self,threads=4,flank=30):
self.params["failed_primers"] = "%(prefix)s.failed_primers.bed" % self.params
primer_ids = []
FAILED = open(self.params["failed_primers"],"w")
for l in tqdm(open(self.params["primer_bed_file"])):
chrom,start,end,pid = l.rstrip().split()[:4]
primer_ids.append(pid)
start = int(start)
end = int(end)
tmp_bcf = "%s.%s.bcf" % (self.prefix,pid)
tmp_bam = "%s.%s.bam" % (self.prefix,pid)
self.params["tmp"] = "%s:%s-%s" % (chrom,start,end)
self.params["pid"] = pid
log("Extracting reads for %s" % pid)#
read_num = get_overlapping_reads(self.bam,chrom,start,end,tmp_bam,flank=30,threads=threads)
if read_num==0:
#cmd = "bcftools mpileup -f %(ref_file)s %(bam_file)s %(mpileup_options)s -r %(tmp)s | bcftools call %(vtype)s -m | bcftools +setGT -Ob -o %(prefix)s.%(pid)s.bcf -- -t a -n ." % self.params
log("No reads for %s" % pid)
FAILED.write(l)
else:
pass
#cmd = "samtools index %(prefix)s.%(pid)s.bam && bcftools mpileup -f %(ref_file)s %(prefix)s.%(pid)s.bam %(mpileup_options)s -r %(tmp)s | bcftools call -t %(tmp)s %(vtype)s -mg %(min_dp)s | bcftools norm -f %(ref_file)s | bcftools +setGT -Ob -o %(prefix)s.%(pid)s.bcf -- -t q -i 'FMT/DP<%(min_dp)s' -n ." % self.params
FAILED.close()
cmd = "cat %(primer_bed_file)s | parallel --progress --col-sep '\\t' -j %(threads)s \"samtools index %(prefix)s.{4}.bam && bcftools mpileup -f %(ref_file)s %(prefix)s.{4}.bam %(mpileup_options)s -B -r {1}:{2}-{3} | bcftools call -t {1}:{2}-{3} %(vtype)s -mg %(min_dp)s | bcftools norm -f %(ref_file)s | bcftools +setGT -Ob -o %(prefix)s.{4}.bcf -- -t q -i 'FMT/DP<%(min_dp)s' -n . && bcftools index %(prefix)s.{4}.bcf\"" % self.params
run_cmd(cmd)
cmd = "cat %(failed_primers)s | parallel --progress --col-sep '\\t' -j %(threads)s \"bcftools mpileup -f %(ref_file)s %(bam_file)s %(mpileup_options)s -r {1}:{2}-{3} | bcftools call %(vtype)s -m | bcftools +setGT -Ob -o %(prefix)s.{4}.bcf -- -t a -n .\"" % self.params
run_cmd(cmd)
cmd = "bcftools concat `cut -f4 %(primer_bed_file)s | awk '{print \"%(prefix)s.\"$1\".bcf\"}'` -a -d all | bcftools sort -Ob -o %(primer_bcf)s" % self.params
run_cmd(cmd)
rm_files(["%s.%s.bcf" % (self.prefix,x) for x in primer_ids])
rm_files(["%s.%s.bam" % (self.prefix,x) for x in primer_ids])
rm_files(["%s.%s.bam.bai" % (self.prefix,x) for x in primer_ids])
def get_calling_params(self):
dp = []
cmd = "samtools depth %(bam_file)s" % self.params
log("Optimising call method")
for l in subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout:
arr = l.rstrip().split()
dp.append(int(arr[2]))
med_dp = np.median(dp)
log("Median depth: %s" % med_dp)
if med_dp<30:
log("Using low depth approach")
return "low"
else:
log("Using high depth approach")
return "high"
def gbcf(self,prefix=None,call_method="low",max_dp=None,min_dp=10,threads=4,vtype="snps",bed_file=None,primers=None,overlap_search=True,chunk_size=50000,mpileup_options=None,low_dp_as_missing=False,platform=None):
"""
Create a gVCF file (for a description see:https://sites.google.com/site/gvcftools/home/about-gvcf)
Args:
ref_file(str): reference file (not required if passed to the bam initiator).
call_method(str): optimise variant calling based on high or low depth. Options: high|low|optimise
min_dp(int): Minimum depth required to group site into reference-block
"""
self.params["min_dp"] = min_dp
self.params["max_dp"] = max_dp
self.params["bcf_file"] = "%s.gbcf" % (prefix if prefix else self.prefix)
self.params["bed_file"] = bed_file
self.params["chunk_size"] = chunk_size
self.params["cmd_split_chr"] = "splitchr.py %(ref_file)s %(chunk_size)s --bed %(bed_file)s --reformat" % self.params if bed_file else "splitchr.py %(ref_file)s %(chunk_size)s --reformat" % self.params
self.params["threads"] = threads
if platform: self.platform = platform
if primers:
self.params["primer_bed_file"] = "%(prefix)s.primers.bed" % self.params
TMP = open(self.params["primer_bed_file"],"w")
positions = self.ref_fa.find_primer_positions(primers)
for x in sorted(positions,key=lambda d:positions[d]["start"]):
p = positions[x]
if p["start"] > p["end"]:
p["start"],p["end"] = p["end"],p["start"]
TMP.write("%s\t%s\t%s\t%s\n" % (p["chrom"],p["start"],p["end"],x))
TMP.close()
if vtype=="snps": self.params["vtype"] = "-V indels"
elif vtype=="indels": self.params["vtype"] = "-V snps"
elif vtype=="both": self.params["vtype"] = ""
else: sys.stderr.write("Please provide valid vtype: [snps|indels|both]...Exiting!"); quit(1)
self.params["primer_cmd"] = " -T ^%(primer_bed_file)s" % self.params if primers else ""
self.params["extra_cmd"] = ""
if call_method=="optimise" and self.platform=="Illumina": call_method = self.get_calling_params()
log("Variant calling optimised for %s" % self.platform)
self.params["mpileup_options"] = ""
if self.platform=="Illumina" and call_method=="high":
self.params["mpileup_options"] = "-B -a DP,AD"
elif self.platform=="Illumina" and call_method=="low":
self.params["mpileup_options"] = "-ABq0 -Q0 -a DP,AD"
elif self.platform=="minION":
self.params["extra_cmd"] = "| bcftools filter -e 'IMF < 0.7' -S 0 -Ou"
if vtype=="snps":
self.params["mpileup_options"] = "-BIq8 -a DP,AD"
else:
self.params["mpileup_options"] = "-Bq8 -a DP,AD"
else:
log("Please choose a valid platform...Exiting!",ext=True)
if mpileup_options:
self.params["mpileup_options"] = mpileup_options
self.params["min_dp_cmd"] = "| bcftools filter -e 'FMT/DP<%(min_dp)s' -Ou -S ." % self.params if low_dp_as_missing else ""
self.params["max_dp_cmd"] = "| bcftools filter -e 'FMT/DP>%(max_dp)s' -Ou -S ." % self.params if max_dp else ""
cmd = "%(cmd_split_chr)s | parallel --progress --col-sep '\\t' -j %(threads)s \"bcftools mpileup -f %(ref_file)s %(bam_file)s %(mpileup_options)s -r {1} | bcftools call %(primer_cmd)s %(vtype)s -mg %(min_dp)s | bcftools norm -f %(ref_file)s %(min_dp_cmd)s %(max_dp_cmd)s %(extra_cmd)s | bcftools view -Ob -o %(prefix)s_{2}.bcf \"" % self.params
run_cmd(cmd)
cmd = "%(cmd_split_chr)s | awk '{print \"%(prefix)s_\"$2\".bcf\"}' | parallel -j %(threads)s \"bcftools index {}\"" % self.params
run_cmd(cmd)
if primers:
self.params["non_primer_bcf"] = "%(prefix)s.non_primer.bcf" % self.params
self.params["primer_bcf"] = "%(prefix)s.primer.bcf" % self.params
if overlap_search:
#self.params["primer_bam"] = "%(prefix)s.primers.bam" % self.params
self.generate_primer_bcf()
#index_bam(self.params["primer_bam"])
#cmd = "bcftools mpileup -f %(ref_file)s %(primer_bam)s %(mpileup_options)s -R %(primer_bed_file)s | bcftools call -T %(primer_bed_file)s %(vtype)s -mg %(min_dp)s | bcftools norm -f %(ref_file)s | bcftools +setGT -Ob -o %(primer_bcf)s -- -t q -i 'FMT/DP<%(min_dp)s' -n ." % self.params
else:
cmd = "bcftools mpileup -f %(ref_file)s %(bam_file)s %(mpileup_options)s -B -R %(primer_bed_file)s | bcftools call %(vtype)s -m | bcftools +setGT -Ob -o %(primer_bcf)s -- -t a -n ." % self.params
run_cmd(cmd)
cmd = "bcftools concat -aD -Ob -o %(non_primer_bcf)s `%(cmd_split_chr)s | awk '{print \"%(prefix)s_\"$2\".bcf\"}'`" % self.params
run_cmd(cmd)
cmd = "bcftools concat %(primer_bcf)s %(non_primer_bcf)s | bcftools sort -Ob -o %(bcf_file)s " % self.params
run_cmd(cmd)
else:
cmd = "bcftools concat -aD -Ob -o %(bcf_file)s `%(cmd_split_chr)s | awk '{print \"%(prefix)s_\"$2\".bcf\"}'`" % self.params
run_cmd(cmd)
cmd = "rm `%(cmd_split_chr)s | awk '{print \"%(prefix)s_\"$2\".bcf*\"}'`" % self.params
run_cmd(cmd)
if primers:
rm_files([self.params["non_primer_bcf"],self.params["primer_bcf"]])
return bcf(self.params["bcf_file"],prefix=self.prefix)
def call_variants(self,prefix=None,gff_file=None,bed_file=None,call_method="optimise",min_dp=10,threads=4,mixed_as_missing=False):
self.params["min_dp"] = min_dp
self.params["bed_file"] = bed_file
self.params["gbcf_file"] = "%s.gbcf" % self.prefix if not prefix else prefix+".gbcf"
self.params["missing_bcf_file"] = "%s.missing.bcf" % self.prefix
# self.params["mixed_cmd"] = " bcftools +setGT -- -t q -i 'GT=\"het\"' -n . | bcftools view -e 'F_MISSING==1' |" % self.params if mixed_as_missing else ""
self.gbcf(prefix=prefix,call_method=call_method,min_dp=min_dp,threads=threads,vtype="both",bed_file=bed_file,low_dp_as_missing=True)
self.params["bcf_file"] = "%s.bcf" % self.prefix
self.params["del_bed"] = bcf(self.params["gbcf_file"]).del_pos2bed()
view_cmd = "bcftools view %(gbcf_file)s |" % self.params
mix_cmd = " bcftools +setGT -- -t q -i 'GT=\"het\" & AD[:1]/(AD[:0]+AD[:1])<0.7' -n . |" % self.params if mixed_as_missing else ""
out_cmd = "bcftools view -T ^%(del_bed)s -g miss -O b -o %(missing_bcf_file)s" % self.params
cmd = "%s %s %s" % (view_cmd,mix_cmd,out_cmd)
run_cmd(cmd)
out_cmd = "bcftools view -g ^miss -c 1 -O b -o %(bcf_file)s" % self.params
cmd = "%s %s %s" % (view_cmd,mix_cmd,out_cmd)
run_cmd(cmd)
final_bcf = self.params["bcf_file"]
if gff_file and filecheck(gff_file):
self.params["gff_file"] = gff_file
self.params["ann_bcf_file"] = "%(prefix)s.csq.bcf" % self.params
cmd = "bcftools csq -p m -f %(ref_file)s -g %(gff_file)s %(bcf_file)s -Ob -o %(ann_bcf_file)s" % self.params
run_cmd(cmd)
final_bcf = self.params["ann_bcf_file"]
return bcf(final_bcf,prefix=self.prefix)
def create_dummy_low_dp_bcf(self,gff_file,min_dp=10,bed_file=None):
self.params["gff_file"] = gff_file
contig_line = "\n".join(["##contig=<ID=%s,length=%s>" % (s,len(self.ref_fa_dict[s])) for s in self.ref_fa_dict])
header = """##fileformat=VCFv4.1
##source=htsbox-pileup-r340
##reference=%s
##contig=<ID=Chromosome,length=4411532>
%s
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE
""" % (self.ref_file,contig_line)
self.params["dp_vcf_file"] = "%(prefix)s.low_cov.vcf" % self.params
OUT = open(self.params["dp_vcf_file"],"w")
OUT.write(header)
self.do_pileup(bed_file)
for l in open(self.params["temp_pileup"]):
row = l.rstrip().split()
ref = row[2]
alleles = row[3].split(",")
depth = [int(x) for x in row[4].split(":")[1].split(",")]
tot_dp = sum(depth)
if tot_dp>min_dp: continue
tmp = ["A","C","G","T"]
fake_allele = tmp.pop()
if fake_allele==ref: fake_allele = tmp.pop()
OUT.write("Chromosome\t%s\t.\t%s\t%s\t255\t.\t.\tGT\t1\n" % (row[1],ref,fake_allele))
OUT.close()
self.params["ann_bcf_file"] = "%(prefix)s.low_cov.bcf" % self.params
cmd = "bcftools csq -p m %(dp_vcf_file)s -f %(ref_file)s -g %(gff_file)s -Ob -o %(ann_bcf_file)s" % self.params
run_cmd(cmd)
return bcf(self.params["ann_bcf_file"])
def get_bam_qc(self,cov_thresholds=[1,5,10,20]):
"""
Get a qc_bam object
Args:
cov_thresholds(list): List of integers to use in the percentage genome covered calculation
Returns:
qc_bam: A qc_bam object
"""
return qc_bam(self.params["bam_file"],self.params["ref_file"],cov_thresholds)
def do_pileup(self,bed_file=None):
self.params["temp"] = bed_file
self.params["temp_pileup"] = "%(prefix)s.temp.pileup" % self.params
self.params["temp_bam"] = "%(prefix)s.temp.bam" % self.params
if bed_file:
cmd = "htsbox pileup -b %(temp)s -f %(ref_file)s -Q 8 %(bam_file)s > %(temp_pileup)s" % self.params
else:
cmd = "htsbox pileup -f %(ref_file)s -Q 8 %(bam_file)s > %(temp_pileup)s" % self.params
run_cmd(cmd)
	# NOTE: min_dp/min_frac were used below but never defined; the defaults added here are assumptions.
	def htsbox_calls(self,bed_file=None,min_dp=10,min_frac=0.7):
self.do_pileup(bed_file=bed_file)
if bed_file:
bed_pos = set()
for l in open(bed_file):
arr = l.rstrip().split()
for i in range(int(arr[1]),int(arr[2])+1):
bed_pos.add((arr[0],str(i)))
final_calls = defaultdict(lambda :defaultdict(list))
if self.params["platform"] == "Illumina":
for l in open(self.params["temp_pileup"]):
arr = l.rstrip().split()
if bed_file and (arr[0],arr[1]) not in bed_pos: continue
calls = arr[3].split(",")
cov = [int(x) for x in arr[4].split(":")[1].split(",")]
tot = sum(cov)
for i in range(len(calls)):
final_calls[arr[0]][arr[1]].append((calls[i],cov[i]/tot,cov[i]))
elif self.params["platform"] == "minION":
for l in open(self.params["temp_pileup"]):
#Chromosome 23 G G,G-1C,G+3AAA 0/1:49,1,1
arr = l.rstrip().split()
if bed_file and (arr[0],arr[1]) not in bed_pos: continue
alleles = arr[3].split(",")
				depth = [int(x) for x in arr[4].split(":")[1].split(",")]
				tot_dp = sum(depth)
max_allele_dp = max(depth)
max_allele = alleles[depth.index(max_allele_dp)]
max_allele_frac = max_allele_dp/tot_dp
if len(max_allele)>1:
max_allele = recode_indels([arr[1],max_allele])[1][0]
if tot_dp<min_dp:
call = "N"
if max_allele_frac<min_frac:
call = "N"
else:
call = max_allele
final_calls[arr[0]][arr[1]].append((call,1,max_allele_dp))
return final_calls
def pileup2vcf(self,min_het_frac=0.3,min_hom_frac=0.6,min_dp=10,bed_file=None,indels=True):
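		# Builds a simple gVCF-style VCF from an htsbox pileup: runs of well-covered reference-matching positions
		# are collapsed into END/MinDP blocks, while positions passing the depth/fraction thresholds are written as
		# variant records (het calls when the adjusted allele fraction falls between min_het_frac and min_hom_frac).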
self.params["contig_line"] = "\n".join(["##contig=<ID=%s,length=%s>" % (s,len(self.ref_fa_dict[s])) for s in self.ref_fa_dict])
header = """##fileformat=VCFv4.1
##source=htsbox-pileup-r340
##reference=%(ref_file)s
%(contig_line)s
##INFO=<ID=DP4,Number=4,Type=Integer,Description="Number of high-quality ref-forward , ref-reverse, alt-forward and alt-reverse bases">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">
##INFO=<ID=MinDP,Number=1,Type=Integer,Description="Minimum per-sample depth in this gVCF block">
##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%(prefix)s
""" % self.params
self.params["temp_pileup"] = "%s.temp.pileup" % self.prefix
if bed_file:
self.do_pileup(bed_file)
bed_pos = set()
for l in open(bed_file):
arr = l.rstrip().split()
for i in range(int(arr[1]),int(arr[2])):
bed_pos.add(str(i))
else:
self.do_pileup()
pass
variants = []
self.params["vcf_file"] = "%s.vcf" % self.prefix
OUT = open("%(vcf_file)s" % self.params,"w")
OUT.write(header)
ref_run_start_pos = -1
ref_run_start_ref = "X"
ref_run_min_dp = 0
for l in open(self.params["temp_pileup"]):
#Chromosome 23 G G,G-1C,G+3AAA 0/1:49,1,1
arr = l.rstrip().split()
if bed_file:
if arr[1] not in bed_pos:
continue
alleles = arr[3].split(",")
depth = [int(x) for x in arr[4].split(":")[1].split(",")]
tot_dp = sum(depth)
ref = arr[2]
if ref_run_start_pos==-1:
ref_run_start_pos = arr[1]
ref_run_start_ref = ref
ref_run_min_dp = tot_dp
max_allele_dp = max(depth)
max_allele = alleles[depth.index(max_allele_dp)]
max_allele_frac = max_allele_dp/tot_dp
adjusted_allele_frac = max_allele_dp/(max_allele_dp+sorted(depth)[-2]) if len(depth)>1 else max_allele_frac
ref_depth = depth[alleles.index(ref)] if ref in alleles else 0
if len(max_allele)>1:
indel = recode_indels([max_allele])
max_allele = indel[1][0]
ref = indel[0]
DP4 = "0,%s,0,%s" % (ref_depth,tot_dp-ref_depth)
call = max_allele
# if arr[1]=="13228": import pdb; pdb.set_trace()
if not indels and (len(max_allele)>1 or len(ref)>1): #INDELS!!!!
ref_run_end_pos = arr[1]
if tot_dp<ref_run_min_dp: ref_run_min_dp = tot_dp
elif tot_dp<min_dp:
OUT.write("%s\t%s\t.\t%s\t%s\t255\t.\tDP4=%s\tGT:DP\t%s:%s\n" % (arr[0],arr[1],ref,call,DP4,"./.",tot_dp))
ref_run_start_pos = -1
elif tot_dp>=min_dp and adjusted_allele_frac>min_hom_frac and call==ref: #REF base call
ref_run_end_pos = arr[1]
if tot_dp<ref_run_min_dp: ref_run_min_dp = tot_dp
elif tot_dp>=min_dp and adjusted_allele_frac<=min_hom_frac and adjusted_allele_frac>min_het_frac: # mixed call
if call==ref:
call=alleles[depth.index(sorted(depth)[-2])]
if (len(call)>1 or len(ref)>1) and not indels:
ref_run_end_pos = arr[1]
if tot_dp<ref_run_min_dp: ref_run_min_dp = tot_dp
else:
gt="0/1"
if tot_dp<ref_run_min_dp: ref_run_min_dp = tot_dp
OUT.write("%s\t%s\t.\t%s\t.\t.\t.\tEND=%s;MinDP=%s\tGT:DP\t0/0:%s\n" % (arr[0],ref_run_start_pos,ref_run_start_ref,int(arr[1])-1,ref_run_min_dp,ref_run_min_dp))
OUT.write("%s\t%s\t.\t%s\t%s\t255\t.\tDP4=%s\tGT:DP\t%s:%s\n" % (arr[0],arr[1],ref,call,DP4,gt,tot_dp))
ref_run_start_pos = -1
variants.append((arr[0],arr[1],ref,call,tot_dp,gt))
else:
if call==ref:
call="."
gt="0/0"
else:
gt="1/1"
OUT.write("%s\t%s\t.\t%s\t.\t.\t.\tEND=%s;MinDP=%s\tGT:DP\t0/0:%s\n" % (arr[0],ref_run_start_pos,ref_run_start_ref,int(arr[1])-1,ref_run_min_dp,ref_run_min_dp))
OUT.write("%s\t%s\t.\t%s\t%s\t255\t.\tDP4=%s\tGT:DP\t%s:%s\n" % (arr[0],arr[1],ref,call,DP4,gt,tot_dp))
ref_run_start_pos = -1
variants.append((arr[0],arr[1],ref,call,tot_dp,gt))
OUT.close()
return variants
def sambamba_depth(self,outfile,zero_start=False):
index_bam(self.params["bam_file"])
fdict = fasta(self.params["ref_file"]).fa_dict
cov = {}
self.params["tmp"] = "%s.tmp" % self.prefix
for s in fdict:
cov[s] = ["%s\t%s\t0\t0\t0\t0\t0\t0\t0\t%s" % (s,i+1,self.prefix) for i in range(len(fdict[s]))]
cmd = "sambamba depth base -q 20 -z -t %(threads)s %(bam_file)s > %(tmp)s" % self.params
run_cmd(cmd)
for l in open(self.params["tmp"]):
row = l.rstrip().split()
if row[0]=="REF": continue
if zero_start:
cov[row[0]][int(row[1])] = "\t".join(row)
else:
row[1] = str(int(row[1])+1)
cov[row[0]][int(row[1])-1] = "\t".join(row)
O = open(outfile,"w")
for s in fdict:
for i in range(len(fdict[s])):
O.write("%s\n" % cov[s][i])
O.close()
def get_bed_gt(self,bed_file):
add_arguments_to_self(self,locals())
cmd = "bcftools mpileup -f %(ref_file)s -R %(bed_file)s %(bam_file)s -BI -a AD | bcftools call -m | bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%GT\\t%%AD]\\n'" % vars(self)
results = defaultdict(lambda : defaultdict(dict))
for l in cmd_out(cmd):
#Chromosome 4348079 0/0 51
chrom,pos,ref,alt,gt,ad = l.rstrip().split()
pos =int(pos)
d = {}
alts = alt.split(",")
ad = [int(x) for x in ad.split(",")]
if gt=="0/0":
d[ref] = ad[0]
elif gt=="./.":
d[ref] = 0
else:
for i,a in enumerate([ref]+alts):
d[a] = ad[i]
results[chrom][pos] = d
return results
def bed_cov_plot(self,bed_file):
add_arguments_to_self(self,locals())
bed = load_bed(self.bed_file,[1,2,3,4],4)
for gene in bed:
start = int(bed[gene][1])
end = int(bed[gene][2])
region_size = end-start
offset = int(region_size*0.15)
new_start = start-offset
new_end = end+offset
if region_size<100000:
n,d = "K",1000
elif region_size>100000 and region_size<1000000000:
n,d = "M",1000000
else:
n,d = "G",1000000000
if region_size<10000:
window,step=2,1
elif region_size<100000:
window,step=100,50
            elif region_size>=100000 and region_size<1000000:
                window,step=1000,500
            else:
                window,step=10000,5000 # fallback for very large regions
log("Outputting coverage plot for region (%sbp) with window=%s and step=%s" % (region_size,window,step))
self.loc = "%s:%s-%s" % (bed[gene][0],new_start,new_end)
cmd = "samtools depth %(bam_file)s -r %(loc)s" % vars(self)
ref_dp = []
ref_pos = []
for l in cmd_out(cmd):
row = l.rstrip().split()
ref_dp.append(int(row[2]))
ref_pos.append(int(row[1]))
x = []
y = []
hw = int(window/2)
for i in range(hw,len(ref_dp)-hw):
x.append((i+new_start)/d)
y.append(int(np.median(ref_dp[i-hw:i+hw+1])))
fig = plt.figure()
plot = fig.add_subplot(111)
plot.plot(x,y)
plot.set_ylim(bottom=0)
if len(y)>0 and max(y)>200:
plot.set_yscale('symlog')
plot.set_xlabel("Genome Position (%sb)" % n)
plot.set_ylabel("Median Coverage (Window size:%s)" % window)
region_med_dp = np.median(ref_dp)
if len(y)>0 and max(y)>region_med_dp:
ymax = max(y)
else:
ymax = region_med_dp
plot.set_ylim(top=ymax+ymax*0.05 if ymax>0 else 10)
plot.axhline(xmin=0,xmax=1,y=region_med_dp,color="orange",linestyle="dashed")
plot.axvline(ymin=0,ymax=0.05,x=start/d,color="orange")
plot.axvline(ymin=0,ymax=0.05,x=end/d,color="orange")
imgfile = "%s_%s_cov.png" %(self.prefix,gene)
fig.savefig(imgfile)
```
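The `pileup2vcf` method above emits gVCF-style reference blocks: consecutive confident reference calls are collapsed into one record carrying `END` and `MinDP`, and the open block is flushed whenever a variant, indel, or low-coverage site interrupts the run. A minimal standalone sketch of that collapsing logic, assuming simplified per-site input (the `Site` tuple and `emit_blocks` helper are illustrative names, not part of pathogenseq):
```python
from collections import namedtuple

# Illustrative type; not part of pathogenseq.
Site = namedtuple("Site", ["chrom", "pos", "is_ref", "dp"])

def emit_blocks(sites, min_dp=10):
    """Collapse consecutive high-coverage reference calls into (chrom, start, end, min_dp) blocks."""
    block = None  # (chrom, start_pos, running_min_dp)
    for s in sites:
        if s.is_ref and s.dp >= min_dp:
            if block is None:
                block = (s.chrom, s.pos, s.dp)
            else:
                block = (block[0], block[1], min(block[2], s.dp))
        else:
            # A variant or low-coverage site interrupts the run: flush the open block.
            if block is not None:
                yield (block[0], block[1], s.pos - 1, block[2])
                block = None
    if block is not None:
        yield (block[0], block[1], sites[-1].pos, block[2])

sites = [Site("Chromosome", p, p != 13, 20) for p in range(10, 16)]
print(list(emit_blocks(sites)))  # one block before position 13 and one after it
```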
#### File: pathogenseq/pathogenseq/delly.py
```python
from .files import *
from .mvcf import bcf
class delly_bcf(bcf):
def __init__(self,filename):
bcf.__init__(self,filename)
def get_robust_calls(self):
results = []
for l in cmd_out(" bcftools query -f '%%CHROM\\t%%POS\\t[%%END\\t%%GT\\t%%DR\\t%%DV\\t%%RR\\t%%RV]\\n' %(filename)s" % vars(self)):
row = l.split()
if row[3]!="1/1":continue
results.append(row)
return results
def overlap_bed(self,bed_file):
results = []
bed = load_bed(bed_file,[1,2,3,4,5],4)
calls = self.get_robust_calls()
for call in calls:
set_call_pos = set(range(int(call[1]),int(call[2])))
for region in bed:
if bed[region][0]!=call[0]: continue
set_region_pos = set(range(int(bed[region][1]),int(bed[region][2])))
intersect = set_call_pos.intersection(set_region_pos)
if len(intersect)>1:
results.append({"region":region,"start":min(intersect),"end":max(intersect)})
print(results)
```
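`overlap_bed` above materialises every position of a call and of a BED region into Python sets before intersecting them, which is simple but memory-hungry for large structural variants. The same start and end coordinates can be obtained with plain interval arithmetic; a hedged sketch (the function name `interval_overlap` is illustrative, not part of pathogenseq):
```python
def interval_overlap(call_start, call_end, region_start, region_end):
    """Return (start, end) of the overlap between two closed intervals, or None if they are disjoint."""
    start = max(call_start, region_start)
    end = min(call_end, region_end)
    return (start, end) if start <= end else None

print(interval_overlap(100, 500, 300, 900))  # (300, 500)
print(interval_overlap(100, 200, 300, 900))  # None
```
Note that `overlap_bed` additionally requires the overlap to span more than one position before reporting it.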
#### File: pathogenseq/pathogenseq/gemma.py
```python
from __future__ import division
import subprocess
from .files import *
import os
from collections import defaultdict
from tqdm import tqdm
import random
r = random.SystemRandom()
class ann:
annfile = ""
tabix = ""
def __init__(self,filename,tabix):
self.annfile = filename
self.tabix = tabix
def pos2ann(self,pos_tuple_list):
#pos_tuple_list = [("Chromosome",1),("Chromosome",2)]
if len(pos_tuple_list)<5000:
p1offset = -1
p2offset = 0
stype = "R"
else:
p1offset = 0
p2offset = 1
stype = "T"
num = r.randint(1,1000000)+r.randint(1,1000000)
temp_bed_file = "temp.%s.bed" % (num)
OUT = open(temp_bed_file,"w")
for chrom,pos in pos_tuple_list:
OUT.write("%s\t%s\t%s\n" % (chrom,int(pos)+p1offset,int(pos)+p2offset))
OUT.close()
results = defaultdict(dict)
for l in subprocess.Popen("%s %s -%s %s " % (self.tabix,self.annfile,stype,temp_bed_file),stdout=subprocess.PIPE,shell=True).stdout:
# added .decode() to convert binary output of subprocess.Popen to string
l = l.decode()
arr = l.rstrip().split()
results[arr[0]][int(arr[1])] = {"alt_aa":{arr[3]:arr[12],arr[4]:arr[13],arr[5]:arr[14]},"change_pos":arr[7],"ref_nt":arr[2],"ref_codon":arr[6],"ref_aa":arr[11],"chr":arr[0],"pos":int(arr[1]),"rv":arr[15],"gene":arr[16],"gene_syn":arr[17],"ncr":arr[18],"start":arr[19],"end":arr[20],"strand":arr[21],"codon_num":arr[24],"gene_nt":arr[25],"operon":arr[26]}
os.remove(temp_bed_file)
return results
class gemma_results:
def __init__(self,filename=None):
self.data = []
if filename:
for l in tqdm(open(filename)):
row = l.rstrip().split()
if row[1]=="rs":continue
chrom,pos,alt = row[1].split("_")
self.data.append({"chrom":chrom,"pos":int(pos),"ref":row[4],"alt":row[5],"pval":float(row[11])})
self.data = sorted(self.data,key=lambda x:x["pval"])
def add_results(self,data):
for d in data:
self.data.append(d)
self.data = sorted(self.data,key=lambda x:x["pval"])
def top_n_hits(self,n=10):
tmp = gemma_results()
tmp.add_results(self.data[:n])
return tmp
def cutoff_hits(self,cutoff=1e-5):
tmp = gemma_results()
tmp.add_results([x for x in self.data if x["pval"]<cutoff])
return tmp
    def __str__(self):
        return str(self.data)
def tb_annotate_hits(self,ann_file):
ann_obj = ann(ann_file,"tabix")
annotation = ann_obj.pos2ann(sorted([(row["chrom"],row["pos"]) for row in self.data],key=lambda x:x[1]))
for d in self.data:
d["gene"] = annotation[d["chrom"]][d["pos"]]["rv"]
d["annotation"] = annotation[d["chrom"]][d["pos"]]
def restrict_hits_to_gene(self,genes):
if "gene" not in self.data[0]:
print("Please annotate hits with tb_annotate_hits()")
return
tmp = gemma_results()
tmp.add_results([x for x in self.data if x["gene"] in genes])
return tmp
def create_tb_panel(self,drug,ann_file,cutoff=1e-5):
amino_acids = ['Cys', 'Ile', 'Ser', 'Val', 'Gly', 'Gln', 'Pro', 'Lys', 'Stop', 'Thr', 'Phe', 'Ala', 'Met', 'Asp', 'His', 'Leu', 'Arg', 'Trp', 'Glu', 'Asn', 'Tyr']
aa_long2short = {"Ala":"A","Arg":"R","Asn":"N","Asp":"D","Cys":"C","Gln":"Q","Glu":"E","Gly":"G","His":"H","Ile":"I","Leu":"L","Lys":"K","Met":"M","Phe":"F","Pro":"P","Ser":"S","Thr":"T","Trp":"W","Tyr":"Y","Val":"V","Stop":"*", "-":"-"}
aa_short2long = {'A': 'Ala', 'R': 'Arg', 'N': 'Asn', 'D': 'Asp', 'C': 'Cys', 'Q': 'Gln', 'E': 'Glu', 'G': 'Gly', 'H': 'His', 'I': 'Ile', 'L': 'Leu', 'K': 'Lys', 'M': 'Met', 'F': 'Phe', 'P': 'Pro', 'S': 'Ser', 'T': 'Thr', 'W': 'Trp', 'Y': 'Tyr', 'V': 'Val', '*': 'Stop', '-': '-'}
target_genes = {
"amikacin":["rrs"],
"bedaquiline":["Rv0678"],
"capreomycin":["rrs","Rv1694"],
"clofazimine":["Rv0678"],
"cycloserine":["Rv2780","Rv3423c"],
"ethambutol":["Rv3794","Rv3795","Rv3793","Rv1267c","Rv3793-Rv3794"],
"ethionamide":["Rv1482c-Rv1483","Rv1484","Rv3854c","Rv3854c-Rv3855","Rv3855"],
"fluoroquinolones":["Rv0005","Rv0006"],
"isoniazid":["Rv2428","Rv2427A-Rv2428","Rv1482c-Rv1483","Rv1484","Rv2245","Rv1908c","Rv1908c-Rv1909c"],
"kanamycin":["Rv2416c-Rv2417c","rrs"],
"linezolid":["Rv0701","rrl"],
"para-aminosalicylic_acid":["Rv2447c","Rv2671","Rv2764c","Rv2754c-Rv2755c"],
"pyrazinamide":["Rv3601c","Rv2043c","Rv2043c-Rv2044c","Rv1630","Rv2042c"],
"rifampicin":["Rv0667","Rv0668"],
"streptomycin":["Rv3919c","Rv0682","rrs"]}
self = self.cutoff_hits(cutoff)
self.tb_annotate_hits(ann_file)
self = self.restrict_hits_to_gene(target_genes[drug.lower()])
for d in self.data:
mut = ""
if "-" in d["gene"] or d["gene"]=="rrs" or d["gene"]=="rrl":
mut = "%s%s%s" % (d["ref"],d["annotation"]["gene_nt"],d["alt"])
else:
mut = "%s%s%s" % (aa_short2long[d["annotation"]["ref_aa"]],d["annotation"]["codon_num"],aa_short2long[d["annotation"]["alt_aa"][d["alt"]]])
print("%s\t%s\t%s\t%s\t%s\t%s" % (drug.upper(),d["pos"],d["ref"],d["alt"],d["gene"],mut))
class gemma_genesum_results:
def __init__(self,filename=None):
self.data = []
if filename:
for l in tqdm(open(filename)):
row = l.rstrip().split()
if row[1]=="rs":continue
self.data.append({"gene":row[1],"pval":float(row[11])})
self.data = sorted(self.data,key=lambda x:x["pval"])
def add_results(self,data):
for d in data:
self.data.append(d)
self.data = sorted(self.data,key=lambda x:x["pval"])
def top_n_hits(self,n=10):
tmp = gemma_results()
tmp.add_results(self.data[:n])
return tmp
def cutoff_hits(self,cutoff=1e-5):
tmp = gemma_results()
tmp.add_results([x for x in self.data if x["pval"]<cutoff])
return tmp
    def __str__(self):
        return str(self.data)
```
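A typical use of `gemma_results` is to load a GEMMA association file, keep only hits below a p-value cutoff, and annotate them before inspecting the top signals. A hedged usage sketch, assuming the package is importable as `pathogenseq`, that `gwas.assoc.txt` and `genome.ann.gz` are placeholder paths, and that `tabix` is on the PATH (it is invoked by the `ann` class):
```python
from pathogenseq.gemma import gemma_results

res = gemma_results("gwas.assoc.txt")    # rows are sorted by p-value on load
hits = res.cutoff_hits(cutoff=1e-5)      # keep hits passing the threshold
hits.tb_annotate_hits("genome.ann.gz")   # adds "gene" and "annotation" keys in place
for d in hits.top_n_hits(10).data:
    print(d["chrom"], d["pos"], d["gene"], d["pval"])
```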
#### File: pathogenseq/pathogenseq/logger.py
```python
import json
def log(key,log_file):
x = json.load(open(log_file))
x[key] = True
    json.dump(x,open(log_file,"w"))
def checkpoint(key,log_file):
x = json.load(open(log_file))
return False if key in x else True
```
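These two helpers implement a very small JSON checkpoint file: `log` records that a named step has finished, and `checkpoint` returns True while the key is still absent, i.e. while the step still needs to run. A hedged usage sketch (the `pipeline.log` path and `map_reads` step name are placeholders, and `run_mapping_step` stands in for the real work):
```python
import json
from pathogenseq.logger import log, checkpoint

def run_mapping_step():
    pass  # placeholder for the real pipeline step

log_file = "pipeline.log"
json.dump({}, open(log_file, "w"))      # create an empty checkpoint file on the first run

if checkpoint("map_reads", log_file):   # True -> the step has not been recorded yet
    run_mapping_step()
    log("map_reads", log_file)          # mark the step as done for future runs
```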
#### File: pathogenseq/pathogenseq/mvcf.py
```python
from __future__ import division
import sys
import subprocess
from .files import *
from .fasta import *
from .mutation_db import *
from collections import defaultdict
import itertools
import json
import os
import re
import vcf
from tqdm import tqdm
# from bokeh.plotting import figure, output_file, show
# from bokeh.layouts import column
from ete3 import Tree
from colour import Color
import multiprocessing as mp
re_seq = re.compile("([0-9\-]*)([A-Z\*]+)")
re_I = re.compile("([A-Z\*]+)")
number_re = re.compile("[0-9\-]+")
def parse_mutation(x):
tmp = x.split(">")
aa_changed = True if len(tmp)>1 else False
re_obj = re_seq.search(tmp[0])
change_num = re_obj.group(1)
ref_aa = re_obj.group(2)
alt_aa = re_seq.search(tmp[1]).group(2) if aa_changed else None
return change_num,ref_aa,alt_aa
# def load_variants(filename):
# variants = defaultdict(lambda:defaultdict(dict))
# vcf_reader = vcf.Reader(open(filename))
# for rec in tqdm(vcf_reader):
# for s in rec.samples:
# variants[rec.CHROM][rec.POS][s.sample] = s.gt_bases.split("/")[0] if s["GT"]!="./." else "N"
# return variants
def get_missing_positions(bcf_file):
cmd = "bcftools query -f '%%CHROM\\t%%POS\\n' %s" % bcf_file
results = []
for l in subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout:
row = l.decode().rstrip().split()
results.append((row[0],int(row[1])))
return results
v = True
class bcf:
def __init__(self,filename,prefix=None,threads=4):
self.samples = []
self.filename = filename
self.threads = threads
if prefix==None:
if filename[-4:]==".bcf":
self.prefix = filename[:-4]
elif filename[-5:]==".gbcf":
self.prefix = filename[:-5]
elif filename[-7:]==".vcf.gz":
self.prefix = filename[:-7]
elif filename[-4:]==".vcf":
self.prefix = filename[:-4]
else:
self.prefix = filename
else:
self.prefix = prefix
self.prefix = self.prefix
self.temp_file = get_random_file()
index_bcf(filename,self.threads)
cmd = "bcftools query -l %(filename)s > %(temp_file)s" % vars(self)
run_cmd(cmd)
for l in open(self.temp_file):
self.samples.append(l.rstrip())
os.remove(self.temp_file)
self.vcf = "%s.vcf" % self.prefix
def per_sample_bcf2fa(self,s,ref,nochrom=False):
self.tmp_sample = s
self.ref = ref
cmd = "bcftools view --threads %(threads)s -s %(tmp_sample)s %(filename)s -Ou | bcftools filter -e 'GT=\"het\"' -S . -Ou | bcftools view --threads %(threads)s -i 'GT==\"./.\"' -Ou | bcftools query -f '%%CHROM\\t%%POS\\n'" % vars(self)
self.tmp_file = "%(prefix)s.%(tmp_sample)s.missing.bed" % vars(self)
TMP = open(self.tmp_file,"w")
for l in cmd_out(cmd):
row = l.rstrip().split()
TMP.write("%s\t%s\t%s\n" % (row[0],int(row[1])-1,row[1]))
TMP.close()
self.tmp_fa = "%(prefix)s.%(tmp_sample)s.tmp.fasta" % vars(self)
cmd = "bcftools consensus -f %(ref)s %(filename)s -o %(tmp_fa)s -m %(tmp_file)s -s %(tmp_sample)s" % vars(self)
run_cmd(cmd)
fa_dict = fasta(self.tmp_fa).fa_dict
self.final_fa = "%(prefix)s.%(tmp_sample)s.fasta" % vars(self)
FA = open(self.final_fa,"w")
for seq in fa_dict:
log("Writing consensus for %s" % seq)
if nochrom:
FA.write(">%s\n%s\n" % (self.tmp_sample,fa_dict[seq].replace("*","N")))
else:
FA.write(">%s_%s\n%s\n" % (self.tmp_sample,seq,fa_dict[seq].replace("*","N")))
FA.close()
rm_files([self.tmp_file,self.tmp_fa])
def del_pos2bed(self):
self.del_bed = "%s.del_pos.bed" % self.prefix
OUT = open(self.del_bed,"w")
cmd = "bcftools view --threads %(threads)s -Ou -v indels %(filename)s | bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT\\n' | awk 'length($3)>1'" % vars(self)
sys.stderr.write(cmd)
j = 0
for l in subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout:
j+=1
row = l.decode().rstrip().split()
start_pos = int(row[1])+1
for i in range(start_pos,start_pos+len(row[2])-1):
OUT.write("%s\t%s\t%s\n" % (row[0],i-1,i))
if j==0:
OUT.write("dummy\t1\t1\n")
OUT.close()
return self.del_bed
def load_variants(self,chrom=None,pos=None):
variants = defaultdict(lambda:defaultdict(lambda:defaultdict(dict)))
raw_variants = defaultdict(lambda:defaultdict(lambda:defaultdict(dict)))
if chrom and pos:
cmd = "bcftools view --threads %(threads)s %s %s:%s | bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%TGT:%%AD]\\n' | sed 's/\.\/\./N\/N/g' | sed 's/\*[\/|]\*/\.\/\./g'" % (self.filename,chrom,pos)
else:
cmd = "bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%TGT:%%AD]\\n' %s | sed 's/\.\/\./N\/N/g' | sed 's/\*[\/|]\*/\.\/\./g'" % self.filename
log(cmd)
for l in cmd_out(cmd):
            row = l.rstrip().split()
alts = row[3].split(",")
alleles = [row[2]]+alts
for i in range(len(self.samples)):
calls,ad = row[i+4].replace("|","/").split(":")
                call1,call2 = calls.split("/")
if calls=="N/N":
raw_variants[row[0]][row[1]][self.samples[i]]["N"] = 1.0
continue
elif calls=="%s/%s" % (row[2],row[2]) and ad==".":
raw_variants[row[0]][row[1]][self.samples[i]][row[2]] = 1.0
continue
ad = [int(x) if x!="." else 0 for x in ad.split(",")]
sum_ad = sum(ad)
for j in range(1,len(alleles)):
if ad[j]==0: continue
raw_variants[row[0]][row[1]][self.samples[i]][alleles[j]] = ad[j]/sum_ad
for tchrom in raw_variants:
for tpos in raw_variants[tchrom]:
variants[tchrom][int(tpos)] = raw_variants[tchrom][tpos]
if chrom and pos and len(variants)==0:
log("Variant not found",True)
if chrom and pos:
return variants[chrom][int(pos)]
else:
return variants
def load_variants_alt(self,tchrom=None,tpos=None):
variants = defaultdict(lambda:defaultdict(dict))
raw_variants = defaultdict(lambda:defaultdict(dict))
if tchrom and tpos:
cmd = "bcftools view --threads %s %s %s:%s | bcftools query -f '%%CHROM\\t%%POS[\\t%%IUPACGT]\\n' | sed 's/\.\/\./N/g'" % (self.threads,self.filename,tchrom,tpos)
else:
cmd = "bcftools query -f '%%CHROM\\t%%POS[\\t%%IUPACGT]\\n' %s | sed 's/\.\/\./N/g'" % self.filename
log(cmd)
for l in tqdm(subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout):
row = l.decode().rstrip().split()
for i in range(len(self.samples)):
raw_variants[row[0]][row[1]][self.samples[i]] = row[i+2]
for chrom in raw_variants:
for pos in raw_variants[chrom]:
variants[chrom][int(pos)] = raw_variants[chrom][pos]
        if tchrom and tpos and len(variants)==0:
log("Variant not found",True)
if tchrom and tpos:
return variants[tchrom][int(tpos)]
else:
return variants
def load_stats(self,convert=False,ref=None):
add_arguments_to_self(self,locals())
self.smallest_bin = 1/len(self.samples)
self.stats_file = "%s.stats.txt" % self.filename
if convert:
cmd = "bcftools convert --gvcf2vcf --fasta-ref %(ref)s -Ou %(filename)s | bcftools stats -v -s - > %(stats_file)s" % vars(self)
else:
cmd = "bcftools stats -v -s - %(filename)s > %(stats_file)s" % vars(self)
run_cmd(cmd)
results = defaultdict(lambda:defaultdict(dict))
for l in open(self.stats_file):
row = l.rstrip().split("\t")
if l[0]=="#": continue
if row[0]=="SN":
results["SN"][row[2][:-1]] = int(row[3])
elif row[0]=="AF":
results["AF"]["SNP"][float(row[2])] = int(row[3])
results["AF"]["INDEL"][float(row[2])] = int(row[6])
elif row[0]=="QUAL":
results["QUAL"]["SNP"][int(row[2])] = int(row[3])
results["QUAL"]["INDEL"][int(row[2])] = int(row[6])
elif row[0]=="IDD":
results["IDD"][int(row[2])] = int(row[3])
elif row[0]=="ST":
results["ST"][row[2]] = int(row[3])
elif row[0]=="DP":
if row[2][0]==">": continue
results["DP"][int(row[2])] = int(row[3])
elif row[0]=="PSC":
results["PSC"][row[2]]["nRefHom"] = int(row[3])
results["PSC"][row[2]]["nNonRefHom"] = int(row[4])
results["PSC"][row[2]]["nHets"] = int(row[5])
return results
def plot_stats(self,outfile):
stats = self.load_stats()
output_file(outfile)
sn = figure(title="Summary stats", x_range=stats["SN"].keys(),toolbar_location=None, tools="")
sn.vbar(x=stats["SN"].keys(),top=stats["SN"].values(),width=0.9)
# show the results
show(sn)
def split_on_metadata(self,meta_file,remove_monomorphic = False, threads = 4):
self.threads = threads
meta = defaultdict(list)
for l in open(meta_file):
#sample data
row = l.rstrip().split()
meta[row[1]].append(row[0])
for m in meta:
self.tmp_file = "%s.tmp.txt" % self.prefix
open(self.tmp_file,"w").write("\n".join(meta[m]))
self.tmp_bcf = "%s.%s.bcf" % (self.prefix,m)
self.remove_monomorphic = "| bcftools +fill-AN-AC | bcftools view --threads %(threads)s -c1 " %vars(self) if remove_monomorphic else ""
cmd = "bcftools view --threads %(threads)s -S %(tmp_file)s %(filename)s %(remove_monomorphic)s -Ob -o %(tmp_bcf)s" % vars(self)
run_cmd(cmd)
def annotate(self,ref_file,gff_file):
self.ref_file = ref_file
self.gff_file = gff_file
self.ann_file = "%s.ann.bcf" % self.prefix
cmd = "bcftools csq -p m -f %(ref_file)s -g %(gff_file)s %(bcf)s -o %(ann_file)s" % vars(self)
run_cmd(cmd,verbose=v)
def extract_matrix(self,matrix_file=None,fmt="old",annotation=False):
        self.matrix_file = matrix_file if matrix_file else self.prefix+".mat"
if fmt=="new":
O = open(self.matrix_file,"w").write("chr\tpos\tref\tinfo\ttype\t%s\n" % ("\t".join(self.samples)))
if annotation:
cmd = "bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%BCSQ\\t.[\\t%%IUPACGT]\\n' %(filename)s | sed 's/\.\/\./N/g' >> %(matrix_file)s" % vars(self)
else:
cmd = "bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t.\\t.[\\t%%IUPACGT]\\n' %(filename)s | sed 's/\.\/\./N/g' >> %(matrix_file)s" % vars(self)
elif fmt=="old":
O = open(self.matrix_file,"w").write("chr\tpos\tref\t%s\n" % ("\t".join(self.samples)))
cmd = "bcftools query -f '%%CHROM\\t%%POS\\t%%REF[\\t%%IUPACGT]\\n' %(filename)s | sed 's/\.\/\./N/g' >> %(matrix_file)s" % vars(self)
else:
log("Choose valid format [old,new]...Exiting!",ext=True)
run_cmd(cmd,verbose=v)
def vcf_to_fasta(self,outfile,ref_file,threads=4,chunk_size = 50000, bed_file=None):
        self.ref_file = ref_file
        self.chunk_size = chunk_size
        self.bed_file = bed_file
        self.cmd_split_chr = "splitchr.py %(ref_file)s %(chunk_size)s --bed %(bed_file)s --reformat" % vars(self) if bed_file else "splitchr.py %(ref_file)s %(chunk_size)s --reformat" % vars(self)
self.tmp_file = "%s.tmp.txt" % self.prefix
self.threads = threads
cmd = "%(cmd_split_chr)s | parallel --col-sep '\\t' -j %(threads)s \"bcftools view %(filename)s -r {1} -Ou | bcftools query -f '%%POS[\\t%%IUPACGT]\\n' | sed 's/\*[\/|]\*/\.\/\./g' | datamash transpose > %(prefix)s.{2}.tmp.txt\"" % vars(self)
run_cmd(cmd)
cmd = "paste `%(cmd_split_chr)s | awk '{print \"%(prefix)s.\"$2\".tmp.txt\"}'` > %(tmp_file)s" % vars(self)
run_cmd(cmd)
cmd = "rm `%(cmd_split_chr)s | awk '{print \"%(prefix)s.\"$2\".tmp.txt\"}'`" % vars(self)
run_cmd(cmd)
O = open(outfile,"w")
for i,l in enumerate(open(self.tmp_file)):
row = l.rstrip().split()
if i==0: continue
s = self.samples[i-1]
seq = "".join(row).replace("./.","N").replace("*","N")
O.write(">%s\n%s\n" % ( s,seq))
O.close()
def bcf2vcf(self):
if nofile(self.vcf):
cmd = "bcftools view --threads %(threads)s %(filename)s -Ov -o %(vcf)s" % vars(self)
run_cmd(cmd)
def get_venn_diagram_data(self,samples,outfile):
samples = samples.split(",")
if len(samples)>4:
log(samples)
log("Can't handle more than 4 samples...Exiting!",True)
if nofile(self.vcf): self.bcf2vcf()
vcf_reader = vcf.Reader(open(self.vcf,"r"))
results = defaultdict(int)
tot_snps = defaultdict(int)
data = defaultdict(int)
for record in vcf_reader:
tmp = []
for s in record.samples:
if s.sample not in samples: continue
if s.gt_nums=="1/1":
tmp.append(s.sample)
tot_snps[s.sample]+=1
for x in itertools.combinations(tmp,2):
tmp_str = "_".join(sorted([str(samples.index(d)) for d in x]))
data["overlap_"+tmp_str] +=1
for x in itertools.combinations(tmp,3):
tmp_str = "_".join(sorted([str(samples.index(d)) for d in x]))
data["overlap_"+tmp_str] +=1
for x in itertools.combinations(tmp,4):
tmp_str = "_".join(sorted([str(samples.index(d)) for d in x]))
data["overlap_"+tmp_str] += 1
for i,si in enumerate(samples):
if si not in self.samples:
log("Can't find %s in samples...Exiting" % si,True)
data["id_%s"%i] = si
data["tot_snps_%s"%i] = tot_snps[si]
data["outfile"] = outfile
if len(samples)==2:
rscript = """
library(VennDiagram)
pdf("%(outfile)s")
draw.pairwise.venn(area1=%(tot_snps_0)s, area2=%(tot_snps_1)s, cross.area=%(overlap_0_1)s, category = c("%(id_0)s","%(id_1)s"),fill=rainbow(2))
dev.off()
""" % data
elif len(samples)==3:
rscript = """
library(VennDiagram)
pdf("%(outfile)s")
draw.triple.venn(area1=%(tot_snps_0)s, area2=%(tot_snps_1)s, area3=%(tot_snps_2)s, n12=%(overlap_0_1)s, n23=%(overlap_1_2)s, n13=%(overlap_0_2)s, n123=%(overlap_0_1_2)s, category = c("%(id_0)s","%(id_1)s","%(id_2)s"),fill=rainbow(3))
dev.off()
""" % data
elif len(samples)==4:
rscript="""
library(VennDiagram)
pdf("%(outfile)s")
draw.quad.venn(area1=%(tot_snps_0)s, area2=%(tot_snps_1)s, area3=%(tot_snps_2)s, area4=%(tot_snps_3)s,
n12=%(overlap_0_1)s, n13=%(overlap_0_2)s, n14=%(overlap_0_3)s, n23=%(overlap_1_2)s, n24=%(overlap_1_3)s, n34=%(overlap_2_3)s,
n123=%(overlap_0_1_2)s, n124=%(overlap_0_1_3)s, n134=%(overlap_0_2_3)s, n234=%(overlap_1_2_3)s,
n1234=%(overlap_0_1_2_3)s,
category = c("%(id_0)s","%(id_1)s","%(id_2)s","%(id_3)s"),fill=rainbow(4))
dev.off()
""" % data
temp_r_script = "%s.temp.R" % self.prefix
open(temp_r_script,"w").write(rscript)
cmd = "Rscript %s" % temp_r_script
run_cmd(cmd)
rm_files([temp_r_script])
def merge_in_snps(self,bcf,outfile):
self.new_bcf = bcf
self.targets_file = "%(prefix)s.targets" % vars(self)
self.tmp_file = "%(prefix)s.temp.bcf" % vars(self)
self.tmp2_file = "%(prefix)s.temp2.bcf" % vars(self)
self.outfile = outfile
cmd = "bcftools view --threads %(threads)s -Ou -v snps %(bcf)s | bcftools query -f '%%CHROM\\t%%POS\\n' | awk '{print $1\"\t\"$2-1\"\t\"$2}' > %(targets_file)s" % vars(self)
run_cmd(cmd)
cmd = "bcftools view --threads %(threads)s -T %(targets_file)s %(new_bcf)s -Ob -o %(tmp_file)s" % vars(self)
run_cmd(cmd)
index_bcf(self.tmp_file,self.threads)
cmd = "bcftools view --threads %(threads)s -T %(targets_file)s %(bcf)s -Ob -o %(tmp2_file)s" % vars(self)
run_cmd(cmd)
index_bcf(self.tmp2_file,self.threads)
cmd = "bcftools merge --threads %(threads)s -Ou %(tmp2_file)s %(tmp_file)s | bcftools view --threads %(threads)s -i 'F_MISSING<0.5' -Ob -o %(outfile)s" % vars(self)
run_cmd(cmd)
def annotate_from_bed(self,bed_file,outfile=None,nested=False):
temp_vcf = "%s.temp.vcf" % self.prefix
self.vcf_from_bed(bed_file,temp_vcf)
bed_dict = defaultdict(dict)
for l in open(bed_file):
#chrom pos pos allele data
row = l.rstrip().split()
bed_dict[row[0]][int(row[1])] = (row[3],row[4])
vcf_reader = vcf.Reader(open(temp_vcf))
results = defaultdict(list)
for record in tqdm(vcf_reader):
for s in record.samples:
if s.gt_bases==None: continue
nuc = s.gt_bases.split("/")[0]
if nuc==bed_dict[record.CHROM][record.POS][0]:
results[s.sample].append(bed_dict[record.CHROM][record.POS][1])
if outfile:
O = open(outfile,"w")
for s in self.samples:
if nested:
switch = True
tmp = sorted(list(set(results[s])))
for i in range(len(tmp)-1):
if tmp[i] not in tmp[i+1]: switch = False
else:
switch = False
meta = tmp[-1] if switch else ";".join(sorted(list(set(results[s]))))
if outfile:
O.write("%s\t%s\n" % (s,meta))
if outfile:
O.close()
return results
def extract_compressed_json(self,outfile):
self.bcf2vcf()
vcf_reader = vcf.Reader(open(self.vcf))
results = defaultdict(lambda: defaultdict(dict))
for record in tqdm(vcf_reader):
tmp = defaultdict(list)
for s in record.samples:
if s.gt_bases==None:
tmp["N"].append(self.samples.index(s.sample))
elif s.gt_nums=="1/1":
tmp[s.gt_bases.split("/")[0]].append(self.samples.index(s.sample))
results[record.CHROM][record.POS] = tmp
json.dump({"variants":results,"samples":self.samples},open(outfile,"w"))
def bed_subset(self,bed_file,out_file,vcf=False):
temp_bed = "%s.temp.bed" % self.prefix
cmd = "awk '{print $1\"\\t\"$2-1\"\\t\"$3}' %s > %s" % (bed_file,temp_bed)
run_cmd(cmd)
if vcf:
cmd = "bcftools view -R %s %s -o %s " % (temp_bed,self.filename,out_file)
else:
cmd = "bcftools view -R %s %s -Ob -o %s " % (temp_bed,self.filename,out_file)
run_cmd(cmd)
if not vcf:
return bcf(out_file)
def odds_ratio(self,bed_file,meta_file,ann_file):
drugs,meta = load_tsv(meta_file)
log(drugs)
bed_dict = load_bed(bed_file,columns=[5,6],key1=4,key2=5)
subset_bcf_name = "%s.subset.bcf" % self.prefix
subset_bcf = self.bed_subset(bed_file,subset_bcf_name)
variants = subset_bcf.load_csq(ann_file)
for gene in bed_dict:
for drug_combo in bed_dict[gene]:
for var in bed_dict[gene][drug_combo][0].split(","):
for drug in bed_dict[gene][drug_combo][1].split(","):
if drug not in drugs: continue
print(drugs)
print(drug)
tbl = [[0.5,0.5],[0.5,0.5]]
change_num,ref_aa,alt_aa = parse_mutation(var)
print(gene)
print(var)
if gene not in variants: continue
if change_num not in variants[gene]: continue
try:
tbl[0][0] += len([s for s in meta.keys() if variants[gene][change_num][s]==alt_aa and meta[s][drug]=="1"])
tbl[1][0] += len([s for s in meta.keys() if variants[gene][change_num][s]==alt_aa and meta[s][drug]=="0"])
tbl[0][1] += len([s for s in meta.keys() if variants[gene][change_num][s]==ref_aa and meta[s][drug]=="1"])
tbl[1][1] += len([s for s in meta.keys() if variants[gene][change_num][s]==ref_aa and meta[s][drug]=="0"])
except:
pass
if tbl[0][0]+tbl[1][0]==1: continue
OR = (tbl[0][0]/tbl[0][1])/(tbl[1][0]/tbl[1][1])
log("%s\t%s\t%s\t%s\t%s" % (var,gene,drug,OR,tbl))
def load_csq_alt(self,ann_file=None,changes=False,use_genomic=True,use_gene=True):
ann = defaultdict(dict)
if ann_file:
for l in tqdm(open(ann_file)):
#chrom pos gene gene/codon_pos
row = l.rstrip().split()
ann[row[0]][int(row[1])] = (row[2],row[3])
nuc_variants = self.load_variants()
prot_dict = defaultdict(lambda:defaultdict(dict))
prot_variants = defaultdict(lambda:defaultdict(dict))
change_num2pos = defaultdict(lambda:defaultdict(set))
ref_codons = defaultdict(lambda:defaultdict(dict))
variants = {s:[] for s in self.samples}
cmd = "bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%SAMPLE\\t%%TBCSQ\\t%%TGT\\t%%AD]\\n' %s" % self.filename
sys.stderr.write("%s\n"%cmd)
for line in tqdm(subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout):
row = line.decode().rstrip().split()
chrom = row[0]
pos = int(row[1])
ref = row[2]
alts = row[3].split(",")
alleles = [ref]+alts
if chrom in ann and pos in ann[chrom]:
ann_pos = int(ann[chrom][pos][1])
ann_gene = ann[chrom][pos][0]
else:
ann_pos = None
if len(row)==4:
for alt in alts:
if chrom in ann and pos in ann[chrom]:
cng = "%s%s>%s" % (ann_pos,ref,alt)
for sample in self.samples:
if sample in nuc_variants[chrom][pos] and alt in nuc_variants[chrom][pos][sample]:
variants[sample].append({"sample":sample,"gene_id":ann_gene,"chr":chrom,"genome_pos":pos,"type":"non_coding","change":cng,"freq":nuc_variants[chrom][pos][sample][alt]})
continue
for i in range(4,len(row)-4,5):
sample = row[i]
info = row[i+1].split("|") if row[i+1]!="." else row[i+2].split("|")
call1,call2 = row[i+3].split("/")
ad = [int(x) if x!="." else 0 for x in row[i+4].split(",")]
adr = {alleles[i]:d/sum(ad) for i,d in enumerate(ad)}
if row[i+1][0]=="@": continue
if info[-1]=="pseudogene": continue
gene = info[1]
if info[0]=="intron":continue
if info[0]=="coding_sequence":
cng = "%s%s>%s" % (ann_pos,call1,call2)
variants[sample].append({"sample":sample,"gene_id":ann_gene,"chr":chrom,"genome_pos":pos,"type":"non_coding","change":cng,"freq":adr[call2]})
elif info[0]=="missense&inframe_altering" or info[0]=="missense" or info[0]=="*missense" or info[0]=="start_lost" or info[0]=="*start_lost" or info[0]=="stop_gained" or info[0]=="*stop_gained":
variants[sample].append({"sample":sample,"gene_id":gene,"chr":chrom,"genome_pos":pos,"type":info[0],"change":info[5],"freq":adr[call2]})
elif info[0]=="synonymous&stop_retained" or info[0]=="inframe_insertion" or info[0]=="*inframe_insertion" or info[0]=="inframe_deletion" or info[0]=="*inframe_deletion" or info[0]=="synonymous" or info[0]=="*synonymous" or info[0]=="stop_retained":
change_num,ref_nuc,alt_nuc = parse_mutation(info[6])
change = "%s%s>%s" % (ann_pos,ref_nuc,alt_nuc) if ann_pos else "%s%s>%s" % (pos,ref_nuc,alt_nuc)
variants[sample].append({"sample":sample,"gene_id":gene,"chr":chrom,"genome_pos":pos,"type":info[0],"change":change,"freq":adr[call2]})
elif info[0]=="*stop_lost&frameshift" or info[0]=="*stop_lost" or info[0]=="stop_lost" or info[0]=="frameshift" or info[0]=="*frameshift" or info[0]=="stop_lost&frameshift" or info[0]=="non_coding" or info[0]=="*stop_lost&frameshift" or info[0]=="*stop_lost&inframe_deletion" or info[0]=="frameshift&start_lost":
if chrom in ann and pos in ann[chrom]:
gene = ann[chrom][pos][0]
gene_pos = ann[chrom][pos][1]
change = "%s%s>%s" % (gene_pos,ref,call2)
variants[sample].append({"sample":sample,"gene_id":gene,"chr":chrom,"genome_pos":pos,"type":info[0],"change":change,"freq":adr[call2]})
else:
sys.stderr.write(line)
sys.stderr.write(info[0]+"\n")
sys.stderr.write("Unknown variant type...Exiting!\n")
quit(1)
return variants
def load_csq(self,ann_file=None,changes=False,use_genomic=True,use_gene=True):
ann = defaultdict(dict)
if ann_file:
for l in tqdm(open(ann_file)):
#chrom pos gene gene/codon_pos
row = l.rstrip().split()
ann[row[0]][int(row[1])] = (row[2],row[3])
nuc_variants = self.load_variants_alt()
prot_dict = defaultdict(lambda:defaultdict(dict))
prot_variants = defaultdict(lambda:defaultdict(dict))
change_num2pos = defaultdict(lambda:defaultdict(set))
ref_codons = defaultdict(lambda:defaultdict(dict))
cmd = "bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%SAMPLE\\t%%TBCSQ]\\n' %s" % self.filename
sys.stderr.write("%s\n"%cmd)
for line in tqdm(subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout):
row = line.decode().rstrip().split()
chrom = row[0]
pos = int(row[1])
ref = row[2]
alt = row[3]
if chrom in ann and pos in ann[chrom]:
ann_pos = int(ann[chrom][pos][1])
ann_gene = ann[chrom][pos][0]
else:
ann_pos = None
if len(row)==4:
if chrom in ann and pos in ann[chrom]:
for sample in self.samples:
prot_variants[ann_gene][ann_pos][sample] = "%s%s>%s" % (ann_pos,ref,alt)
prot_dict[ann_gene][ann_pos][sample] = nuc_variants[chrom][pos][sample]
ref_codons[ann_gene][ann_pos] = ref
continue
for i in range(4,len(row)-2,3):
sample = row[i]
info = row[i+1].split("|") if row[i+1]!="." else row[i+2].split("|")
if row[i+1][0]=="@": continue
if info[-1]=="pseudogene": continue
gene = info[1]
if info[0]=="intron":continue
if info[0]=="frameshift&start_lost" or info[0]=="missense&inframe_altering" or info[0]=="missense" or info[0]=="*missense" or info[0]=="start_lost" or info[0]=="*start_lost" or info[0]=="*stop_lost" or info[0]=="stop_lost" or info[0]=="stop_gained" or info[0]=="*stop_gained":
change_num,ref_aa,alt_aa = parse_mutation(info[5])
change_num2pos[gene][change_num].add((chrom,pos))
ref_codons[gene][change_num] = ref_aa
prot_variants[gene][change_num][row[i]] = info[5]
prot_dict[gene][change_num][sample] = alt_aa
elif info[0]=="stop_lost&frameshift" or info[0]=="inframe_insertion" or info[0]=="*inframe_insertion" or info[0]=="inframe_deletion" or info[0]=="*inframe_deletion" or info[0]=="frameshift" or info[0]=="*frameshift" or info[0]=="synonymous" or info[0]=="*synonymous" or info[0]=="stop_retained":
change_num,ref_nuc,alt_nuc = parse_mutation(info[6])
change_num2pos[gene][change_num].add((chrom,pos))
ref_codons[gene][change_num] = ref_nuc
change = "%s%s>%s" % (ann_pos,ref_nuc,alt_nuc) if ann_pos else None
if use_genomic and use_gene and change:
prot_variants[gene][change_num][row[i]] = change
elif use_genomic:
prot_variants[gene][change_num][row[i]] = info[6]
elif use_gene and change:
prot_variants[gene][change_num][row[i]] = change
else:
prot_variants[gene][change_num][row[i]] = info[5]
prot_dict[gene][change_num][sample] = alt_nuc
elif info[0]=="non_coding":
if chrom in ann and pos in ann[chrom]:
gene = ann[chrom][pos][0]
gene_pos = ann[chrom][pos][1]
prot_variants[gene][gene_pos][sample] = "%s%s>%s" % (gene_pos,ref,alt)
prot_dict[gene][gene_pos][sample] = alt
ref_codons[gene][gene_pos] = ref
else:
sys.stderr.write(line)
sys.stderr.write("Unknown variant type...Exiting!\n")
quit(1)
for gene in prot_variants:
for change_num in prot_variants[gene]:
for s in set(self.samples)-set(prot_variants[gene][change_num].keys()):
if "N" in [nuc_variants[chrom][pos][s] for chrom,pos in change_num2pos[gene][change_num]]:
prot_variants[gene][change_num][s] = "?"
prot_dict[gene][change_num][s] = "?"
else:
pass
prot_dict[gene][change_num][s] = ref_codons[gene][change_num]
# if nuc_variants[row[0]][int(row[1])][s]=="N":
# prot_dict[gene][change_num] = "?"
for locus in prot_variants:
prot_variants[locus] = prot_variants[locus].values()
return prot_variants if changes else prot_dict
def ancestral_reconstruct(self,ref_file,tree_file):
self.ref_file = ref_file
self.tree_file = tree_file
cmd = "bcftools query -f '%%CHROM\\t%%POS\n' %(filename)s" % vars(self)
variants = {}
for i,l in enumerate(subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout):
row = l.decode().rstrip().split()
variants[i] = (row[0],row[1])
self.reduced_bcf = "%(prefix)s.reduced.bcf" % vars(self)
cmd = "bcftools view --threads %(threads)s -c 3 %(filename)s -Ob -o %(reduced_bcf)s" % vars(self)
run_cmd(cmd)
reduced = {}
cmd = "bcftools query -f '%%CHROM\\t%%POS\n' %(reduced_bcf)s" % vars(self)
for i,l in enumerate(subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout):
row = l.decode().rstrip().split()
reduced[i] = (row[0],row[1])
new_bcf = bcf(self.reduced_bcf)
self.fasta_file = "%(prefix)s.reduced.snps.fa" % vars(self)
new_bcf.vcf_to_fasta(self.fasta_file,self.ref_file)
self.new_tree_file = "%s.newick.txt" % self.prefix
self.reconstructed_fasta = "%s.reconstructed.fasta" % self.prefix
cmd = "fastml -s %(fasta_file)s -t %(tree_file)s -x %(new_tree_file)s -j %(reconstructed_fasta)s -qf -mn" % vars(self)
run_cmd(cmd,verbose=2)
def itol_from_bcf(self,mutation_file,amino_acid=False,supress_ref=False,supress_missing=False):
if amino_acid:
all_csq = self.load_csq()
ref = ""
for l in open(mutation_file):
mutation = l.rstrip()
if amino_acid:
gene,variant = mutation.split("__")
change_num,ref_aa,alt_aa = parse_mutation(variant)
ref = ref_aa
if gene in all_csq and change_num in all_csq[gene]:
variant_dict = all_csq[gene][change_num]
else:
continue
else:
chrom,pos = mutation.split("__")
variant_dict = self.load_variants_alt(chrom,pos)
for l in cmd_out("bcftools view %s %s:%s | bcftools query -f '%%REF'" % (self.filename,chrom,pos)):
ref = l.rstrip()
num_var = len(set(variant_dict.values())) if not supress_ref else len(set([d for d in variant_dict.values() if d!=ref]))
tmp_col = {"A":"#c15959","C":"#77ad78","G":"#3b3561","T":"#76bed0","N":"#c5c5c5"}
cols = [x.get_hex() for x in list(Color("red").range_to(Color("blue"),num_var))]
col_dict = {d:cols[i] for i,d in enumerate(set(variant_dict.values()))} if amino_acid else {d:tmp_col[d] for d in list(set(variant_dict.values())) }
shape_line = "\t".join(["1" for x in range(num_var)])
col_line = "\t".join(col_dict.values())
lab_line = "\t".join(col_dict.keys())
print(col_dict)
outfile = "%s.itol.txt" % mutation
OUT = open(outfile,"w")
OUT.write("""DATASET_COLORSTRIP
SEPARATOR TAB
DATASET_LABEL %s
COLOR #ff0000
LEGEND_TITLE Amino acid
LEGEND_SHAPES %s
LEGEND_COLORS %s
LEGEND_LABELS %s
DATA
""" % (mutation,shape_line,col_line,lab_line))
for s in self.samples:
if amino_acid:
if variant_dict[s]==alt_aa:
OUT.write("%s\t%s\n" % (s,col_dict[variant_dict[s]]))
else:
if supress_ref and variant_dict[s]==ref: continue
if supress_missing and variant_dict[s]=="N": continue
OUT.write("%s\t%s\n" % (s,col_dict[variant_dict[s]]))
OUT.close()
def compress_variants(self):
cmd = "bcftools query -f '%%CHROM\\t%%POS[\\t%%GT]\\n' %(filename)s" % vars(self)
results = defaultdict(list)
for l in subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout:
row = l.decode().rstrip().split()
results[tuple(row[2:])].append([row[0],row[1]])
final_results = {}
for i,key in enumerate(results):
var_name = "variant_%s" % i
O = open(var_name+".pheno","w")
for j,s in enumerate(self.samples):
tmp = "-9"
if key[j]=="0/0": tmp = "1"
elif key[j]=="1/1": tmp = "2"
O.write("0\t%s\t%s\n" % (s,tmp))
final_results[var_name] = results[key]
O.close()
json.dump(final_results,open("%s.compressed_variants.json" % self.prefix,"w"))
return final_results
def reheader(self,index_file):
idx = {}
for l in open(index_file):
row = l.rstrip().split()
if row[0] in idx: sys.stderr.write("Duplicate values in index file (%s)...Exiting!\n"%row[0]); quit(1)
idx[row[0]] = row[1]
new_bcf_file = "%(prefix)s.reheader.bcf" % vars(self)
tmp_header = "%(prefix)s.tmp.header" % vars(self)
OUT = open(tmp_header,"w")
for l in subprocess.Popen("bcftools view --threads %(threads)s -h %(filename)s" % vars(self),shell=True,stdout=subprocess.PIPE).stdout:
if l.decode()[:2]=="##": OUT.write(l.decode()); continue
row = l.decode().rstrip().split();
for i in range(9,len(row)):
if row[i] not in idx: log("%s not found in index file...Exiting!" % row[i],True)
row[i] = idx[row[i]]
OUT.write("%s\n" % "\t".join(row))
OUT.close()
cmd = "bcftools reheader -h %s %s > %s" % (tmp_header,self.filename,new_bcf_file)
run_cmd(cmd)
rm_files([tmp_header])
def filt_variants(self,outfile,bed_include=None,bed_exclude=None,threads=4,fmiss=0.1,remove_monomorphic=True):
add_arguments_to_self(self,locals())
self.bed_include = "bcftools view --threads %(threads)s -T %s -Ou |" % bed_include if bed_include!=None else ""
self.bed_exclude = "bcftools view --threads %(threads)s -T ^%s -Ou |" % bed_exclude if bed_exclude!=None else ""
self.remove_monomorphic = "| bcftools +fill-AN-AC | bcftools view --threads %(threads)s -c1 " %vars(self) if remove_monomorphic else ""
"""Extract all variant positions"""
cmd = "bcftools view --threads %(threads)s %(filename)s -Ou | %(bed_include)s %(bed_exclude)s bcftools view --threads %(threads)s -i 'AC>=0 && F_MISSING<%(fmiss)s' %(remove_monomorphic)s -o %(outfile)s -O b" % vars(self)
run_cmd(cmd)
return bcf(self.outfile,threads=self.threads)
def extract_variants(self,outfile,min_dp=10,bed_include=None,bed_exclude=None,threads=4):
add_arguments_to_self(self,locals())
self.bed_include = "bcftools view --threads %(threads)s -T %(bed_include)s -Ou |" % vars(self) if bed_include!=None else ""
self.bed_exclude = "bcftools view --threads %(threads)s -T ^%(bed_exclude)s -Ou |" % vars(self) if bed_exclude!=None else ""
cmd = "bcftools +setGT %(filename)s -Ou -- -t q -i 'FMT/DP<%(min_dp)s' -n . | %(bed_include)s %(bed_exclude)s bcftools view --threads %(threads)s -i 'AC>=1' -o %(outfile)s -O b" % vars(self)
run_cmd(cmd)
return bcf(self.outfile,threads=self.threads)
def filt_non_uniq(self,mappability_file,outfile):
"""Filter out non unique positions"""
add_arguments_to_self(self,locals())
non_uniq = []
self.non_uniq_bed = "%s.genome.non_uniq.bed" % self.prefix
O = open(self.non_uniq_bed,"w")
for l in open(self.mappability_file):
arr = l.rstrip().split()
if float(arr[3])<1:
O.write(l)
O.close()
cmd = "bcftools view --threads %(threads)s -T ^%(non_uniq_bed)s %(filename)s -O b -o %(outfile)s" % vars(self)
run_cmd(cmd)
return bcf(self.outfile,threads=self.threads)
def remove_monomorphic(self,outfile):
add_arguments_to_self(self,locals())
cmd = " bcftools +fill-AN-AC %(filename)s | bcftools view -c 1 -Ob -o %(outfile)s" % vars(self)
run_cmd(cmd)
return bcf(self.outfile,threads=self.threads)
def sample_filt(self,outfile,miss_cut=0.15,mix_cut=0.15,keep_samples=None):
"""Filter out low quality samples"""
add_arguments_to_self(self,locals())
self.hq_sample_file = "%s.HQ.samples.txt" % self.prefix
self.lq_sample_file = "%s.LQ.samples.txt" % self.prefix
self.qual_file = "%s.sample_quals.txt" % self.prefix
if keep_samples and filecheck(keep_samples):
self.keep_samples = [x.rstrip() for x in open(keep_samples).readlines()]
else:
self.keep_samples = []
num_calls = int(subprocess.Popen("bcftools view --threads %(threads)s %(filename)s -H | wc -l" % vars(self),shell=True,stdout=subprocess.PIPE).communicate()[0].rstrip())
miss = {}
mix = {}
self.lq_samples = []
self.hq_samples = []
HQ = open(self.hq_sample_file,"w")
LQ = open(self.lq_sample_file,"w")
QF = open(self.qual_file,"w")
QF.write("sample\tmix\tmiss\n")
self.bcftools_stats_file = "%s.bcftools_stats.txt" % self.prefix
cmd = "bcftools stats %(filename)s -s - | grep ^PSC > %(bcftools_stats_file)s" % vars(self)
run_cmd(cmd)
for l in open(self.bcftools_stats_file):
row = l.rstrip().split()
s = row[2]
miss[s] = (num_calls-sum([int(row[i]) for i in [3,4,5]]))/num_calls
mix[s] = int(row[5])/num_calls
QF.write("%s\t%s\t%s\n" % (s,mix[s],miss[s]))
if s in self.keep_samples:
self.hq_samples.append(s)
HQ.write("%s\n" % s)
elif miss[s]>self.miss_cut or mix[s]>self.mix_cut:
self.lq_samples.append(s)
LQ.write("%s\n" % s)
else:
self.hq_samples.append(s)
HQ.write("%s\n" % s)
HQ.close()
LQ.close()
QF.close()
cmd = "bcftools view --threads %(threads)s -S %(hq_sample_file)s -a -c 1 -o %(outfile)s -O b %(filename)s" % vars(self)
run_cmd(cmd)
return bcf(self.outfile,threads=self.threads)
def mask_mixed(self,outfile,remove_monomorphic=True):
"""Create a BCF file with mixed called masked as missing"""
add_arguments_to_self(self,locals())
self.remove_monomorphic = "| bcftools +fill-AN-AC | bcftools view --threads %(threads)s -c1 " %vars(self) if remove_monomorphic else ""
cmd = "bcftools +setGT %(filename)s -Ou -- -t q -i 'GT=\"het\"' -n . | bcftools view --threads %(threads)s %(remove_monomorphic)s -Ob -o %(outfile)s" % vars(self)
run_cmd(cmd)
return bcf(self.outfile,threads=self.threads)
def generate_consensus(self,ref,threads=4,no_chrom=False):
add_arguments_to_self(self,locals())
cmd_file = get_random_file()
O = open(cmd_file,"w")
nochrom = "--no-chrom" if no_chrom else ""
for s in self.samples:
O.write("bcf2sample_consensus.py %s %s %s %s\n" % (self.filename,s,ref,nochrom))
O.close()
run_cmd("cat %s | parallel -j %s" % (cmd_file,threads))
rm_files([cmd_file])
def distance(self,outfile):
add_arguments_to_self(self,locals())
matrix = [[0 for x in self.samples] for s in self.samples]
miss_matrix = [[0 for x in self.samples] for s in self.samples]
sample_idx = {s:self.samples.index(s) for s in self.samples}
cmd = "bcftools query -i'GT!=\"ref\"' -f '[\\t%%SAMPLE:%%GT]\\n' %(filename)s" % vars(self)
num_snps = 0
for l in tqdm(cmd_out(cmd)):
num_snps+=1
alt_samples = defaultdict(set)
miss_samples = set()
row = l.strip().split()
for x in row:
s,c = x.split(":")
if c=="./.":
miss_samples.add(s)
else:
alt_samples[c].add(s)
for c in alt_samples:
others = (set(self.samples)-alt_samples[c]) - miss_samples
for s in alt_samples[c]:
idx = sample_idx[s]
for x in others:
matrix[idx][sample_idx[x]]+=1
matrix[sample_idx[x]][idx]+=1
for si in miss_samples:
for sj in set(self.samples)-miss_samples:
miss_matrix[sample_idx[si]][sample_idx[sj]]+=1
miss_matrix[sample_idx[sj]][sample_idx[si]]+=1
for sj in miss_samples:
if si==sj: continue
if sample_idx[si]>sample_idx[sj]:
miss_matrix[sample_idx[si]][sample_idx[sj]]+=1
for i in range(len(self.samples)):
for j in range(len(self.samples)):
if j>=i: continue
scaler = num_snps / (num_snps-miss_matrix[i][j])
#log("Num SNPs: %s, %s-%s, missing: %s, non missing: %s, abs_dist: %s scale factor: %s, scaled_dist: %s" % (num_snps,self.samples[i],self.samples[j],miss_matrix[i][j],num_snps-miss_matrix[i][j],matrix[i][j],scaler,matrix[i][j]*scaler))
matrix[i][j] = matrix[i][j]*scaler
matrix[j][i] = matrix[i][j]
OUT = open(outfile,"w")
OUT.write("\t".join(self.samples)+"\n")
OUT.write("\n".join(["\t".join([str(d) for d in matrix[j]]) for j in range(len(self.samples))]))
OUT.write("\n")
OUT.close()
return {"sample":self.samples,"matrix":matrix}
def extract_dosage(self,outfile):
add_arguments_to_self(self,locals())
cmd = "bcftools query %(filename)s -f '%%CHROM\\t%%POS\\t%%REF\\t.\\t.[\\t%%AD]\\n'" % vars(self)
        def process_ad(ad):
            if ad==".":
                return "%.3f" % 0
            dp = [int(x) if x!="." else 0 for x in ad.split(",")]
            if sum(dp)==0:
                return "NA"
            return "%.3f" % (dp[0]/sum(dp))
idx = range(5,5+len(self.samples))
O = open(self.outfile,"w")
O.write("chr\tpos\tref\tinfo\ttype\t%s\n" % ("\t".join(self.samples)))
for l in tqdm(cmd_out(cmd)):
            row = l.rstrip().split()
row[5:] = [process_ad(x) for x in row[5:]]
O.write("%s\n" % "\t".join(row))
O.close()
def bed_consensus(self,bed_file,ref_file):
add_arguments_to_self(self,locals())
bed = load_bed(self.bed_file,[1,2,3,4],4)
for gene in bed:
self.loc = "%s:%s-%s" % (bed[gene][0],bed[gene][1],bed[gene][2])
self.gene = gene
for s in self.samples:
self.samp = s.replace("/","\/")
cmd = "samtools faidx %(ref_file)s %(loc)s | bcftools consensus -s %(samp)s -H 2 %(filename)s | sed 's/%(loc)s/%(samp)s_%(gene)s %(loc)s/' > %(samp)s_%(gene)s.fasta" % vars(self)
run_cmd(cmd)
def get_mean_genotype(self,outfile=None):
add_arguments_to_self(self,locals())
if self.outfile==None:
self.outfile = self.prefix+".geno"
O = open(self.outfile,"w")
for l in tqdm(cmd_out("bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%TGT]\\n' %(filename)s" % vars(self))):
row = l.rstrip().split()
alts = row[3].split(",")
for alt in alts:
ref = "%s/%s" % (row[2],row[2])
tmp = "%s/%s" % (alt,alt)
genos = []
for x in row[4:]:
if x==ref:
genos.append("0")
elif x==tmp:
genos.append("1")
else:
genos.append("NA")
O.write("%s, %s, %s, %s\n" % (row[0]+"_"+row[1]+"_"+alt,row[2],alt,", ".join(genos)))
O.close()
def get_variant_matrix(self,outfile=None):
add_arguments_to_self(self,locals())
if self.outfile==None:
self.outfile = self.prefix+".variants.matrix"
csq = self.load_csq_alt()
var = set()
for s in csq:
for v in csq[s]:
var.add((v["gene_id"],v["change"]))
O = open(self.outfile,"w")
O.write("gene\tmutation\t%s\n" % ("\t".join(self.samples)))
for gene,change in sorted(var,key=lambda x:x[0]):
tmp = []
for s in self.samples:
if len([x for x in csq[s] if x["change"]==change and x["gene_id"]==gene])>0:
tmp.append("1")
else:
tmp.append("0")
O.write("%s\t%s\t%s\n" % (gene,change,"\t".join(tmp)))
O.close()
def get_snp_ann(self,outfile=None):
add_arguments_to_self(self,locals())
print("chrom\tpos\tref\talt\tgene\tchange\tconsequence")
for l in cmd_out("bcftools query -f '%%CHROM\\t%%POS\\t%%REF\t%%ALT[\\t%%IUPACGT:%%TBCSQ{1}]\\n' %(filename)s" % vars(self)):
# print l.rstrip()
row = l.rstrip().split()
chrom,pos,ref,alt = row[:4]
gene = "-"
change = "-"
changetype = "-"
if len(row)!=4:
changes = {}
changetypes = {}
for x in row[4:]:
allele,info = x.split(":")
if info[0]=="@": continue
gene = info.split("|")[1]
changetypes[allele] = info.split("|")[0]
if info.split("|")[0]!="non_coding":
changes[allele] = info.split("|")[5]
change = ",".join(changes[a] if a in changes else "-" for a in alt.split(","))
changetype = ",".join(changetypes[a] if a in changetypes else "-" for a in alt.split(","))
print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (chrom,pos,ref,alt,gene,change,changetype))
def get_bed_gt(self,bed_file,ref_file):
add_arguments_to_self(self,locals())
cmd = "bcftools convert --gvcf2vcf -f %(ref_file)s %(filename)s | bcftools view -T %(bed_file)s | bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT[\\t%%GT\\t%%AD]\\n'" % vars(self)
results = defaultdict(lambda : defaultdict(dict))
for l in cmd_out(cmd):
#Chromosome 4348079 0/0 51
chrom,pos,ref,alt,gt,ad = l.rstrip().split()
pos =int(pos)
d = {}
alts = alt.split(",")
ad = [int(x) for x in ad.split(",")] if ad!="." else [100]
if gt=="0/0":
d[ref] = ad[0]
elif gt=="./.":
d[ref] = 0
else:
for i,a in enumerate([ref]+alts):
d[a] = ad[i]
results[chrom][pos] = d
return results
def get_plink_dist(self):
tmpfile = get_random_file()
cmd = "bcftools view %s > %s" % (self.filename,tmpfile)
run_cmd(cmd)
cmd = "plink --vcf %s --distance square --allow-extra-chr --out %s --double-id" % (tmpfile,tmpfile)
run_cmd(cmd)
O = open("%s.dist" % (self.prefix),"w")
dists = []
for l in open("%s.dist"%tmpfile):
row = [float(d)/2 for d in l.rstrip().split()]
O.write("%s\n" % "\t".join([str(x) for x in row]))
dists.append(row)
O.close()
run_cmd("rm %s*" % tmpfile)
return dists
def get_clusters(self,cutoff=10,meta_file=None,col_scheme=None,shape_scheme=None,remove_singletons=False):
if meta_file:
meta = {}
colour_vals = set()
shape_vals = set()
for l in open(meta_file):
row = l.rstrip().split()
meta[row[0]] = row[1:]
colour_vals.add(row[1])
shape_vals.add(row[2])
meta_cols = {}
if col_scheme:
for l in open(col_scheme):
row = l.rstrip().split()
meta_cols[row[0]] = row[1]
else:
cols = [x.get_hex() for x in list(Color("red").range_to(Color("blue"),len(colour_vals)))]
for i,x in enumerate(colour_vals):
meta_cols[x] = cols[i]
meta_shapes = {}
if shape_scheme:
for l in open(shape_scheme):
row = l.rstrip().split()
meta_shapes[row[0]] = row[1]
else:
shapes = ["circle","square","triangle","cross","diamond","star","wye"]
for i,x in enumerate(shape_vals):
meta_shapes[x] = shapes[i]
print(meta_shapes)
print(meta_cols)
dists = self.get_plink_dist()
edges = []
tmp_node_set = set()
for i in range(len(dists)):
for j in range(len(dists)):
if j>=i:continue
if dists[i][j]<cutoff:
edge = {"source":self.samples[i], "target":self.samples[j], "snps":dists[i][j]}
tmp_node_set.add(self.samples[i])
tmp_node_set.add(self.samples[j])
edges.append(edge)
nodes = [{"id":s} for s in tmp_node_set] if remove_singletons else [{"id":s} for s in self.samples]
if meta_file:
for n in nodes:
n["meta1"] = meta[n["id"]][0]
print(meta[n["id"]])
n["col"]=meta_cols[meta[n["id"]][0]]
if len(meta[n["id"]])>1:
n["meta2"] = meta[n["id"]][1]
print(meta_shapes)
n["shape"]=meta_shapes[meta[n["id"]][1]]
graph = {"nodes":nodes,"edges":edges}
json.dump(graph,open("%s.distance_clusters.json" % self.prefix,"w"))
return graph
def num_one_sample_snps(self,cutoff=10):
cmd = "bcftools query -i 'AC=2' %(filename)s -f '%%CHROM\\t%%POS\\n'" % vars(self)
positions = []
for pos in cmd_out(cmd):
positions.append(pos.split())
return positions
def get_genesum(self,outfile=None):
add_arguments_to_self(self,locals())
if self.outfile==None:
self.outfile = self.prefix+".gensum"
genesum = defaultdict(lambda:defaultdict(int))
O = open(self.outfile,"w")
for l in tqdm(cmd_out("bcftools query -f '[%%SAMPLE\\t%%GT\\t%%TBCSQ\\n]' %(filename)s" % vars(self))):
row = l.split()
#por4A 1/1 synonymous|Rv0002|gene1|protein_coding|+|109L|2378G>A synonymous|Rv0002|gene1|protein_coding|+|109L|2378G>A
info = row[2].split("|")
if info[0]=="synonymous": continue
if info[0][0]=="@": continue
genesum[info[1]][row[0]]+=1
for gene in genesum:
O.write("%s\tNA\tNA\t%s\n" % (gene,"\t".join(str(genesum[gene][s]) for s in self.samples)))
O.close()
def get_snp_pos(self):
positions = []
for l in cmd_out("bcftools query -f '%%CHROM\\t%%POS\\n' %(filename)s" % vars(self)):
row = l.rstrip().split()
positions.append((row[0],int(row[1])))
return positions
def get_snp_info(self):
info = []
for l in cmd_out("bcftools query -f '%%CHROM\\t%%POS\\t%%REF\\t%%ALT\\t%%BCSQ\\n' %(filename)s" % vars(self)):
row = l.rstrip().split()
info.append(row)
return info
```
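Throughout the `bcf` class the bcftools format strings double every percent sign (`%%CHROM`, `%%POS`, ...); Python's `%`-interpolation of `vars(self)` then collapses them to the single `%` that `bcftools query` expects, while named fields such as `%(filename)s` are substituted. A minimal sketch of that convention (placeholder values, no bcftools call is executed):
```python
params = {"filename": "calls.bcf"}  # placeholder, standing in for vars(self)
cmd = "bcftools query -f '%%CHROM\\t%%POS[\\t%%GT]\\n' %(filename)s" % params
print(cmd)
# bcftools query -f '%CHROM\t%POS[\t%GT]\n' calls.bcf
```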
#### File: pathogenseq/pathogenseq/qc.py
```python
from __future__ import division
import sys
import subprocess
from .files import *
from .fasta import *
from .fastq import *
import numpy as np
import gzip
#import matplotlib as mpl
#mpl.use('Agg')
#import matplotlib.pyplot as plt
#plt.ioff()
import json
import re
from collections import defaultdict
################################
########## Functions ###########
################################
def gsize_convert(x):
d = {"G":1e9,"M":1e6,"K":1e3}
num = float(x[:-1])
char = x[-1]
    if char not in d: log("%s not a valid value" % x);quit()
return num*d[char]
def get_genome_cov(bam_file,ref_file,min_dp,bed_file=None):
fdict = fasta(ref_file).fa_dict
ref_cov = {}
for s in fdict:
ref_cov[s] = [0 for x in range(len(fdict[s]))]
if bed_file:
samtools_cmd = "samtools view -bL %s %s | samtools depth -aa --reference %s -" % (bed_file,bam_file,ref_file)
else:
samtools_cmd = "samtools depth -aa --reference %s %s" % (ref_file,bam_file)
log("\nRunning command:\n%s" % samtools_cmd)
for line in subprocess.Popen(samtools_cmd,shell=True,stdout=subprocess.PIPE).stdout:
arr = line.decode().rstrip().split()
if arr[0] not in ref_cov: log("Can't find %s in FASTA...Have you used the correct reference sequence?" % arr[0]);quit()
ref_cov[arr[0]][int(arr[1])-1] = int(arr[2])
all_genome = []
for s in fdict:
all_genome+=ref_cov[s]
genome_cov = {}
for dp in min_dp:
genome_cov[dp] = len([1 for d in all_genome if d>=dp])/len(all_genome)
med = int(np.median(all_genome))
return genome_cov,med,ref_cov
def flagstat(bam_file):
lines = []
samtools_cmd = "samtools flagstat %s" % (bam_file)
for l in subprocess.Popen(samtools_cmd,shell=True,stdout=subprocess.PIPE).stdout:
arr = l.rstrip().split()
lines.append(arr)
num = int(lines[4][0])
pct = 0.0 if num==0 else float(lines[4][4][1:-1])
return num,pct
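# A hedged usage sketch of the two helpers above (paths are placeholders, not
# files shipped with pathogenseq):
#   genome_cov,med,ref_cov = get_genome_cov("sample.bam","ref.fa",[1,5,10])
#   mapped_reads,mapped_pct = flagstat("sample.bam")
#   print("median depth: %s, fraction >=10x: %.2f, mapped: %s (%s%%)" % (med,genome_cov[10],mapped_reads,mapped_pct))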
################################
########### Classes ############
################################
class qc_fastq:
"""
A class to extract basic QC stats and run QC programs on fastQ files
Args:
prefix(str): Prefix for output files
fq1(str): First read file [required]
        fq2(str): Second read file. Pass None if there is no second read
        optimise(Bool): If True, estimate read-length metrics from the first 10 percent of reads rather than the whole file
        threads(int): Number of threads to use for multithreaded methods
        kraken_db(str): Location of the kraken database (used by run_kraken, not the constructor)
Returns:
qc_fastq: A qc_fastq class object
"""
def __init__(self,prefix,fq1,fq2=None,optimise=True,threads=4):
self.params = {"fq1":"","fq2":""}
self.read_len = []
self.read_num = 0
self.paired = False
self.kraken_run = False
if filecheck(fq1):
self.params["fq1"] = fq1
if fq2 and filecheck(fq2):
self.params["fq2"] = fq2
self.paired = True
self.params["prefix"] = prefix
self.params["threads"] = threads
self.read_num = int(gz_file_len(fq1)/4)*2 if self.paired else int(gz_file_len(fq1)/4)
self.read_pairs = self.read_num/2 if self.paired else self.read_num
FQ = gzip.open(fq1)
        i = int(self.read_pairs*0.10) if optimise else int(self.read_pairs)
for j in range(i):
FQ.readline()
            self.read_len.append(len(FQ.readline().rstrip()))
FQ.readline()
FQ.readline()
self.median_read_len = np.median(self.read_len)
self.mean_read_len = np.mean(self.read_len)
def approx_depth(self,genome_size):
"""Return approx depth for a given genome size"""
return self.read_num*self.mean_read_len/gsize_convert(genome_size)
def run_centrifuge(self,centrifuge_db,filter_fastq=None,threads=4):
self.params["centrifuge_db"] = centrifuge_db
self.params["centrifuge_report"] = "%(prefix)s.centrifuge.report.txt" % self.params
self.params["centrifuge_log"] = "%(prefix)s.centrifuge.log" % self.params
self.params["threads"] = threads
if self.paired:
cmd = "centrifuge -x %(centrifuge_db)s -1 %(fq1)s -2 %(fq2)s -S %(centrifuge_log)s --report-file %(centrifuge_report)s -p %(threads)s" % self.params
else:
cmd = "centrifuge -x %(centrifuge_db)s -U %(fq1)s -S %(centrifuge_log)s --report-file %(centrifuge_report)s -p %(threads)s" % self.params
run_cmd(cmd)
if filter_fastq:
num_mtb = 0
taxa = filter_fastq.split(",")
self.params["cf_filt_fq_1"] = "%(prefix)s_1.centrifuge_filt.fastq.gz" % self.params
self.params["cf_filt_fq_2"] = "%(prefix)s_2.centrifuge_filt.fastq.gz" % self.params
self.params["tmp_file"] = get_random_file()
read_names = set()
for l in open(self.params["centrifuge_log"]):
#K00250:202:HNN53BBXX:8:1101:6066:998 NC_016947.1 1138382 21377 21377 235 302 4
row = l.rstrip().split()
if row[2] in taxa:
num_mtb+=1
read_names.add(row[0])
O = open(self.params["tmp_file"],"w")
O.write("\n".join(list(read_names)))
O.close()
cmd = "seqtk subseq %(fq1)s %(tmp_file)s | pigz -p %(threads)s -c > %(cf_filt_fq_1)s" % self.params
run_cmd(cmd)
cmd = "seqtk subseq %(fq2)s %(tmp_file)s | pigz -p %(threads)s -c > %(cf_filt_fq_2)s" % self.params
run_cmd(cmd)
rm_files([self.params["tmp_file"]])
top_hit = ""
top_num_reads = 0
for l in open(self.params["centrifuge_report"]):
#Mycobacterium avium 1764 species 6256976 13835 352 2.10431e-06
row = l.rstrip().split("\t")
if row[0]=="name": continue
if int(row[5])>top_num_reads:
top_hit = row[0].replace(" ","_")
top_num_reads = int(row[4])
if filter_fastq:
tmp = [top_hit,num_mtb/self.read_num]
return self.params["cf_filt_fq_1"],self.params["cf_filt_fq_2"],tmp
else:
return top_hit,top_num_reads
def run_kraken(self,kraken_db,filter_fastq = None):
"""
Run kraken with an option to create filtered fastq files
Args:
			filter_fastq(str): NCBI Taxonomy code to use when extracting reads
"""
self.params["kraken_db"] = kraken_db
self.params["kraken_file"] = "%(prefix)s.kraken" % self.params
cmd = "kraken --db %(kraken_db)s --threads %(threads)s --fastq-input --gzip-compressed --output %(kraken_file)s --paired --check-names %(fq1)s %(fq2)s" % self.params
run_cmd(cmd)
if filter_fastq:
taxa = filter_fastq.split(",")
o1 = "%(prefix)s_1.kraken_filt.fastq.gz" % self.params
o2 = "%(prefix)s_2.kraken_filt.fastq.gz" % self.params
self.params["kr_filt_fq_1"] = o1
self.params["kr_filt_fq_2"] = o2
readnames = set()
for l in open(self.params["kraken_file"]):
arr = l.rstrip().split()
if arr[2] in taxa:
readnames.add(arr[1])
R1 = gzip.open(self.params["fq1"])
R2 = gzip.open(self.params["fq2"])
O1 = gzip.open(o1,"wb")
O2 = gzip.open(o2,"wb")
for seqname1 in R1:
seqname1 = seqname1.rstrip()
seq1 = next(R1).rstrip()
next(R1)
qual1 = next(R1).rstrip()
seqname2 = next(R2).rstrip()
seq2 = next(R2).rstrip()
next(R2)
qual2 = next(R2).rstrip()
if seqname1.split()[0][1:] in readnames:
O1.write("%s\n%s\n+\n%s\n" % (seqname1,seq1,qual1))
O2.write("%s\n%s\n+\n%s\n" % (seqname2,seq2,qual2))
O1.close()
O2.close()
		self.kraken_run = True
class qc_bam:
"""
	A class to extract basic QC stats and run QC programs on BAM files
Args:
bam(str): Bam file
		ref(str): Reference fasta
cov_thresholds(list): List of integers to use in the percentage genome covered calculation
Returns:
qc_bam: A qc_bam class object
"""
def __init__(self,bam,ref,cov_thresholds=[1,5,10],threads=4,bed_file=None):
self.bam = None
self.ref = None
self.threads = threads
if filecheck(bam): self.bam = bam
if filecheck(ref): self.ref = ref
self.genome_cov,self.med_dp,self.ref_dp = get_genome_cov(bam,ref,cov_thresholds,bed_file=bed_file)
self.num_reads_mapped,self.pct_reads_mapped = flagstat(bam)
def plot_cov(self,chrom,imgfile,start=None,end=None,window=10000,step=5000,optimise=True,plot_median=True,primers=None):
"""
Plot coverage across chromosomes
Args:
chrom(str): Chromosome name
imgfile(str): Name of the output png
window(int): Window size for the sliding window coverage calculation
step(int): Step size for the sliding window coverage calculation
optimise(bool): Optimise window and step size for chromosome len
"""
if plot_median:
chrom_med_dp = np.median(self.ref_dp[chrom])
if start and end:
region_size = end-start
offset = int(region_size*0.05)
new_start = start-offset
new_end = end+offset
else:
offset=False
region_size = len(self.ref_dp[chrom])
start = 0
end = region_size
new_start = start
new_end = end
if region_size<100000:
n,d = "K",1000
elif region_size>100000 and region_size<1000000000:
n,d = "M",1000000
else:
n,d = "G",1000000000
if optimise:
if region_size<10000:
window,step=2,1
elif region_size<100000:
window,step=100,50
elif region_size>100000 and region_size<1000000:
window,step=1000,500
else:
if region_size<10000:
window,step=2,1
log("Outputting coverage plot for region (%sbp) with window=%s and step=%s" % (region_size,window,step))
x = []
y = []
hw = int(window/2)
for i in range(new_start+hw,new_end-hw,step):
x.append(i/d)
y.append(int(np.median(self.ref_dp[chrom][i-hw:i+hw+1])))
fig = plt.figure()
plot = fig.add_subplot(111)
plot.plot(x,y)
plot.set_ylim(bottom=0)
if max(y)>200:
plot.set_yscale('symlog')
plot.set_xlabel("Genome Position (%sb)" % n)
plot.set_ylabel("Median Coverage (Window size:%s)" % window)
if plot_median:
ymax = max(y) if max(y)>chrom_med_dp else chrom_med_dp
plot.set_ylim(top=ymax+ymax*0.05)
plot.axhline(xmin=0,xmax=1,y=chrom_med_dp,color="orange",linestyle="dashed")
if offset:
plot.axvline(ymin=0,ymax=0.05,x=start/d,color="orange")
plot.axvline(ymin=0,ymax=0.05,x=end/d,color="orange")
if primers:
locations = fasta(self.ref).find_primer_positions(primers)
for primer in sorted(locations,key=lambda x:locations[x]["start"]):
p = locations[primer]
plot.plot((p["start"]/d,p["end"]/d),(0,0),'r-',lw=3)
fig.savefig(imgfile)
def save_cov(self,filename,bed=None):
"""Save coverage to a json file"""
if bed:
bed_regions = load_bed(bed,[1,2,3],4)
bed_cov = {d:[] for d in bed_regions.keys()}
for locus in bed_regions:
tmp = bed_regions[locus]
for i in range(int(tmp[1]),int(tmp[2])):
bed_cov[locus].append(self.ref_dp[tmp[0]][i])
json.dump(bed_cov,open(filename,"w"))
else:
json.dump(self.ref_dp,open(filename,"w"))
def region_cov(self,regions):
"""
Return a dictionary with mean depth across selected regions
Args:
regions(list): A list with each element consisting of a ``tuple`` with 4 strings: 1) chromosome, 2) start, 3) end and 4) ID
Returns:
dict: A dictionary with mean depth across selected regions
"""
results = {}
for chrom,start,end,name in regions:
results[name] = np.mean(self.ref_dp[chrom][int(start)-1:int(end)])
return results
def bed_cov(self,bed_file):
"""
Return a dictionary with mean depth across selected regions in BED file
Args:
bed_file(str): A bed file with the 4th column containing the region ID
Returns:
dict: A dictionary with mean depth across selected regions
"""
regions = []
for l in open(bed_file):
#Chromosome start end name
arr = l.rstrip().split()
regions.append(tuple(arr[:4]))
return self.region_cov(regions)
def gff_cov(self,gff_file,key="ID"):
"""
Return a dictionary with mean depth across selected regions in GFF file
Args:
gff_file(str): A gff file region coordinates
key(str): A key to use as the region ID (e.g. for ID=katG the key is 'ID')
Returns:
dict: A dictionary with mean depth across selected regions
"""
regions = []
		key_re = re.compile(r"%s=([\w\.\-\_]+)" % key)
for l in open(gff_file):
if l[0]=="#": continue
arr = l.rstrip().split()
if "%s="%key not in l:
log("Warining: %s not found in %s" % (key,l))
continue
name = key_re.search(l).group(1)
regions.append((arr[0],arr[3],arr[4],name))
return self.region_cov(regions)
def extract_gc_skew(self,filename,window=1000,step=500):
fa_dict = fasta(self.ref).fa_dict
hw = int(window/2)
results = defaultdict(list)
for s in fa_dict:
for i in range(hw,len(fa_dict[s])-hw,step):
seq = fa_dict[s][i-hw:i+hw]
tmp = dict((c, seq.count(c)) for c in ["C","G"])
results[int((tmp["G"]+tmp["C"])/(window)*100)].append(int(np.median(self.ref_dp[s][i-hw:i+hw])))
json.dump(results,open(filename,"w"))
```
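
The two classes above are normally driven from a pipeline script rather than run directly. A minimal usage sketch, assuming the package is importable as `pathogenseq`, that samtools is on the PATH (the `qc_bam` constructor shells out to it), and that the file names and genome size shown here are placeholders:

```python
# Hypothetical usage of the qc module above; file names and genome size are assumptions.
from pathogenseq.qc import qc_fastq, qc_bam

fq_qc = qc_fastq("sample1", "sample1_1.fastq.gz", "sample1_2.fastq.gz", threads=4)
print("Approximate depth: %.1fx" % fq_qc.approx_depth("4.4M"))

bam_qc = qc_bam("sample1.bam", "ref.fasta", cov_thresholds=[1, 5, 10])
print("Median depth: %s" % bam_qc.med_dp)
bam_qc.save_cov("sample1.cov.json")
```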
#### File: pathogenseq/pathogenseq/varmatrix.py
```python
from __future__ import division
import sys
from .files import *
_chrom_key="chr"
class varmat:
def __init__(self,filename):
self.filename = filename
self.positions = []
self.header = []
self.genos = []
self.samples = []
self.ref = []
self.num_samples = None
self.num_cols = None
for l in open(self.filename):
row = l.rstrip().split()
if row[0]==_chrom_key:
self.samples = row[3:]
self.header = row
self.num_samples = len(row)-3
self.num_cols = len(row)
continue
self.positions.append((row[0],row[1]))
self.genos.append(row[3:])
self.ref.append(row[2])
self.set_positions = set(self.positions)
def compare_varmat(mat1,mat2,report=None):
if mat1.samples!=mat2.samples:
log("Samples not identical",ext=True)
mat1_uniq_pos = []
mat2_uniq_pos = []
intersect_pos = []
geno_discrepancies = {}
genotype_call_discrepancies = 0
genotype_position_discrepancies = 0
for chrompos in mat1.set_positions.union(mat2.set_positions):
if chrompos in mat1.set_positions and chrompos not in mat2.set_positions:
mat1_uniq_pos.append(chrompos)
elif chrompos not in mat1.set_positions and chrompos in mat2.set_positions:
mat2_uniq_pos.append(chrompos)
else:
intersect_pos.append(chrompos)
mat1_genos = mat1.genos[mat1.positions.index(chrompos)]
mat2_genos = mat2.genos[mat2.positions.index(chrompos)]
if mat1_genos!=mat2_genos:
genotype_position_discrepancies+=1
tmp = []
for i in range(mat1.num_samples):
if mat1_genos[i]!=mat2_genos[i]:
tmp.append((mat1.samples[i],mat1_genos[i],mat2_genos[i]))
genotype_call_discrepancies+=1
geno_discrepancies[chrompos] = tmp
open(report+".general.txt","w").write("Intersecrion\t%s\nmat1_uniq\t%s\nmat2_uniq\t%s\ngenotype_call_discrepancies\t%s\ngenotype_position_discrepancies\t%s\n" % (len(intersect_pos),len(mat1_uniq_pos),len(mat2_uniq_pos),genotype_call_discrepancies,genotype_position_discrepancies) )
open(report+".mat1_uniq.txt" ,"w").write("\n".join(["%s\t%s" % (x,y) for x,y in mat1_uniq_pos]))
open(report+".mat2_uniq.txt" ,"w").write("\n".join(["%s\t%s" % (x,y) for x,y in mat1_uniq_pos]))
O = open(report+".genotype_discrepancies.txt" ,"w")
for chrompos in geno_discrepancies:
for s in geno_discrepancies[chrompos]:
O.write("%s\t%s\t%s\t%s\t%s\n" % (chrompos[0],chrompos[1],s[0],s[1],s[2]))
O.close()
return geno_discrepancies
```
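
A short sketch of comparing two variant matrices with the class above; the matrix file names and report prefix are placeholders, not part of the module:

```python
# Hypothetical comparison of two variant matrices; file names are placeholders.
from pathogenseq.varmatrix import varmat, compare_varmat

mat1 = varmat("pipeline_a.mat")
mat2 = varmat("pipeline_b.mat")
discrepancies = compare_varmat(mat1, mat2, report="a_vs_b")
print("%s positions with discordant genotype calls" % len(discrepancies))
```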
#### File: pathogenseq/scripts/generate_run_file.py
```python
import sys
import pathogenseq.files as ps
import csv
import argparse
def main(args):
out_script = "%s.run.sh" % args.prefix
O = open(out_script,"w")
samples = []
for row in csv.DictReader(open(args.sample_file)):
params = {}
params["ref_file"] = "%s/%s" % (args.ref_dir,row["Reference"])
ps.filecheck(params["ref_file"])
params["r1"] = "%s/%s" % (args.fastq_dir,row["ReadF"])
ps.filecheck(params["r1"])
params["prefix"] = row["ID"]
samples.append(row["ID"])
params["threads"] = args.threads
params["mapper"] = args.mapper
params["centrifuge"] = "--centrifuge %s" % args.centrifuge if args.centrifuge else ""
if "Primers" in row and row["Primers"]!="NA":
params["primers"] = "--primers %s/%s" % (args.primer_dir,row["Primers"])
else:
params["primers"] = ""
if args.platform=="illumina":
params["r2"] = "%s/%s" % (args.fastq_dir,row["ReadR"])
ps.filecheck(params["r2"])
O.write("illumina_pipeline.py %(ref_file)s %(r1)s %(r2)s %(prefix)s -t %(threads)s -m %(mapper)s %(primers)s %(centrifuge)s\n" % params)
else:
params["window"] = "--window %s" % args.window if args.window else ""
O.write("minION_pipeline.py %(ref_file)s %(r1)s %(prefix)s -t %(threads)s %(primers)s %(centrifuge)s %(window)s\n" % params)
O.close()
open("%s.samples.txt" % args.prefix,"w").write("\n".join(samples))
parser = argparse.ArgumentParser(description='TBProfiler pipeline',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('sample_file', help='CSV sample sheet describing the runs')
parser.add_argument('prefix', help='Prefix for the output run script and sample list')
parser.add_argument('--platform','-m',choices=["illumina","minION"],default="illumina", help='Sequencing platform')
parser.add_argument('--ref_dir','-r',default=".",type=str, help='Directory containing the reference files')
parser.add_argument('--fastq_dir','-f',default=".",type=str, help='Directory containing the fastq files')
parser.add_argument('--primer_dir',"-p",default=".",type=str, help='Directory containing the primer files')
parser.add_argument('--threads',"-t",type=int,default=1, help='Number of threads')
parser.add_argument('--mapper',type=str,choices=["bwa","minimap2","bowtie2"],default="bwa", help='Mapping tool to use')
parser.add_argument('--centrifuge','-c',type=str,default=None)
parser.add_argument('--window',default=None,type=int, help='Window size passed to the minION pipeline')
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
```
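
The script above expects a sample sheet with at least `ID`, `Reference` and `ReadF` columns, plus `ReadR` for Illumina runs and an optional `Primers` column (where `NA` means no primers). A hedged sketch of building such a file; the sample values are invented:

```python
# Hypothetical construction of the sample sheet expected by generate_run_file.py.
import csv

rows = [{"ID": "sample1", "Reference": "ref.fasta", "ReadF": "sample1_1.fastq.gz",
         "ReadR": "sample1_2.fastq.gz", "Primers": "NA"}]
with open("samples.csv", "w") as f:
    writer = csv.DictWriter(f, fieldnames=["ID", "Reference", "ReadF", "ReadR", "Primers"])
    writer.writeheader()
    writer.writerows(rows)
```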
#### File: pathogenseq/scripts/mapping.py
```python
import pathogenseq as ps
import argparse
def main(args):
if not args.prefix:
ps.log("Please specify prefix with -p")
quit(1)
if not args.ref:
ps.log("Please use --ref to provide a reference... Exiting",ext=T)
x= ps.fastq(args.prefix,args.ref,args.r1,args.r2,threads=args.threads)
x.illumina(mapper=args.mapper)
parser = argparse.ArgumentParser(description='TBProfiler pipeline',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--r1','-1', help='First read file')
parser.add_argument('--r2','-2', help='Second read file')
parser.add_argument('--ref','-r', help='Reference Sequence')
parser.add_argument('--threads','-t', type=int, default=1, help='Number of threads')
parser.add_argument('--prefix','-p', help='Prefix for files')
parser.add_argument('--mapper','-m', type=str,default="bwa",choices=["bwa","minimap2","bowtie2"],help='Mapping tool to use')
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
```
#### File: pathogenseq/scripts/rename_tree_leaves.py
```python
import pathogenseq as ps
import sys
import argparse
def main(args):
tree = ps.tree(args.tree)
tree.rename_nodes(args.index_file,args.outfile,args.strict,args.append)
parser = argparse.ArgumentParser(description='TBProfiler pipeline',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('tree',help='Input tree file')
parser.add_argument('index_file',help='File mapping current leaf names to new names')
parser.add_argument('outfile',help='Output tree file')
parser.add_argument('--strict',action="store_true",help='Use strict matching of leaf names')
parser.add_argument('--append',default="_",type=str,help='Separator string used when renaming')
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
``` |
{
"source": "jodyphou/liveFast",
"score": 3
} |
#### File: jodyphou/liveFast/theVault.py
```python
import sqlite3
import sys
import os
THE_PATH = "PARTS.com.swag"
ROOT_SUBSTRING = 'xxxxx'
CHILD_REV_SUBSTRING = 'xx'
ARCHIVE_SUBSTRING = 'archive'
PDF_SUBSTRING = 'pdf'
DB_FILE = 'vault.db'
PART_NUBMER_SKIP = ['240','540']
DIRECTORY_LIST =[]
def traverse():
cParentFolder = ''
parentFolder = 'Test'
tableName = 'Bob'
for dirItems in DIRECTORY_LIST:
currentPath = THE_PATH + '\\' + dirItems
print currentPath
for subdir, dirs, files in os.walk(currentPath):
noVaultPath = subdir[len(THE_PATH) + 1: ]
firstSlashIndex = noVaultPath.find('\\')
if firstSlashIndex != -1:
parentFolder = noVaultPath[:firstSlashIndex]
if cParentFolder.find(parentFolder) == -1:
cParentFolder = parentFolder
folderSplit = cParentFolder.replace('_','-').replace(' ','').replace('&','').split('-')
try:
folderSplit.remove('xxxxx')
except ValueError:
print 'Opps'
folderSplit.append(folderSplit[0])
folderSplit.pop(0)
tableName = ''.join(folderSplit)
print tableName
else:
pass
else:
pass
subdirNoArch = subdir.lower()
if subdirNoArch.find(ARCHIVE_SUBSTRING) == -1:
if len(files) > 0:
partNumberIndex = subdir.rfind('\\')
partNumber = subdir[partNumberIndex + 1:]
if len(partNumber) == 9:
foundRev = revMatch( partNumber, files )
insertIntoDB(tableName, partNumber, files, foundRev )
else:
#underscore
underScoreIndex = partNumber.find('_')
spaceIndex = partNumber.find(' ')
if underScoreIndex > 0:
foundRev = revMatch( partNumber[:underScoreIndex], files)
insertIntoDB(tableName, partNumber[:underScoreIndex], files, foundRev )
elif spaceIndex > 0:
foundRev = revMatch( partNumber[:9], files )
insertIntoDB(tableName, partNumber[:9], files, foundRev )
else:
foundRev = revMatch( partNumber[:12], files )
insertIntoDB(tableName, partNumber[:12], files, foundRev )
else:
pass
else:
pass
def insertIntoDB(parentPart, partNumber, fileName, rev):
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
fileNameStr = ','.join(fileName)
#print parentPart + "," + partNumber + "," + fileNameStr + "," + rev
try:
c.execute ( "INSERT into %s VALUES(NULL, \'%s\', \'%s\', \'%s\')" % (parentPart, partNumber, fileNameStr , rev ) )
except sqlite3.OperationalError:
print 'Failed! ' + ( "INSERT into %s VALUES(NULL, \'%s\', \'%s\', \'%s\')" % (parentPart, partNumber, fileNameStr , rev ) )
conn.commit()
conn.close()
pass
def revMatch(partNumber, fileName):
lHyphenIndex = partNumber.find('-')
childPN = partNumber[lHyphenIndex + 1:lHyphenIndex + 6]
for items in fileName:
# the file name at least contains the child partnumber
if items.find(childPN) > 0:
#pdf...
if items.find(PDF_SUBSTRING) > 0:
childPNIndex = items.find(childPN)
if len(partNumber) == 9:
#XYZ-XXXXX
if len(items[childPNIndex + 5:-4]) > 0:
parsePNSub = items[childPNIndex + 5:-4]
################################################################
#first use case the word rev...
parsePNlower = parsePNSub.lower()
parsePNRevIndex = parsePNlower.find('rev')
if parsePNRevIndex > 0:
parsePNSplitSpace = parsePNSub[parsePNRevIndex:].split(' ')
if len(parsePNSplitSpace) > 1:
# Rev X
if len(parsePNSplitSpace[0]) == 3:
#print partNumber + " " + parsePNSplitSpace[1] + " " + items
return parsePNSplitSpace[1]
else:
#print partNumber + " " + parsePNSplitSpace[0][3:] + " " + items
return parsePNSplitSpace[0][3:]
else:
#print partNumber + " " + parsePNSub[parsePNRevIndex:][3] + " " + items
return parsePNSub[parsePNRevIndex:][3]
else:
#print partNumber + " " + items[childPNIndex + 5:-4] + " " + items
return items[childPNIndex + 5:-4]
else:
#print partNumber + " None " + items
return "None"
else:
#XYZ-XXXXX-XX
parsePNSub = items[childPNIndex + 5:-4]
################################################################
#first use case the word rev...
parsePNlower = parsePNSub.lower()
parsePNRevIndex = parsePNlower.find('rev')
if parsePNRevIndex > 0:
parsePNSplitSpace = parsePNSub[parsePNRevIndex:].split(' ')
if len(parsePNSplitSpace) > 1:
# Rev X
if len(parsePNSplitSpace[0]) == 3:
#print partNumber + " " + parsePNSplitSpace[1] + " " + items
return parsePNSplitSpace[1]
else:
#print partNumber + " " + parsePNSplitSpace[0][3:] + " " + items
return parsePNSplitSpace[0][3:]
else:
#print partNumber + " " + parsePNSub[parsePNRevIndex:][3] + " " + items
return parsePNSub[parsePNRevIndex:][3]
else:
################################################################
#dash number
#-XX
dashString = items[childPNIndex + 5:-4]
dashIndex = dashString.find(partNumber[-2:])
underscoreIndex = dashString.find('_')
if dashIndex > 0 and underscoreIndex == -1:
#print partNumber + " " + dashString[dashIndex + 2:] + " " + items
return dashString[dashIndex + 2:]
elif dashIndex == 0 and underscoreIndex == -1:
#print partNumber + " " + dashString[dashIndex + 2:] + " " + items
return dashString[dashIndex + 2:]
elif underscoreIndex > 0:
#print partNumber + " " + dashString[underscoreIndex + 1:] + " " + items
return dashString[underscoreIndex + 1:]
else:
if len(items[childPNIndex + 5:-4]) == 1:
#print partNumber + items[childPNIndex + 5:-4] + " " + items
return items[childPNIndex + 5:-4]
else:
#print partNumber + " None " + items
return "None"
else:
pass
#print "skip! " + items
return 'None'
def initDB():
cParentFolder = ''
parentFolder = 'Test'
if( os.path.isfile(DB_FILE)):
os.remove(DB_FILE)
#create table
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
skipFlag = 0
for subdir, dirs, files in os.walk(THE_PATH):
noVaultPath = subdir[len(THE_PATH) + 1: ]
firstSlashIndex = noVaultPath.find('\\')
for pnSkip in PART_NUBMER_SKIP:
if subdir.find(pnSkip) >= 0:
skipFlag = 1
#print 'Skip! ' + tableName
break
if firstSlashIndex != -1 and skipFlag == 0:
parentFolder = noVaultPath[:firstSlashIndex]
if cParentFolder.find(parentFolder) == -1:
cParentFolder = parentFolder
folderSplit = cParentFolder.replace('_','-').replace(' ','').replace('&','').split('-')
try:
folderSplit.remove('xxxxx')
except ValueError:
print 'Opps'
folderSplit.reverse()
tableName = ''.join(folderSplit)
sqlStatement = "CREATE TABLE %s (id integer primary key autoincrement, partNumber varchar, files varchar, revision varchar)" % tableName
print tableName
#print sqlStatement
c.execute(sqlStatement)
else:
pass
else:
print subdir
pass
conn.commit()
conn.close()
def initDirectoryFile():
cParentFolder = ''
parentFolder = 'Test'
if( os.path.isfile(DB_FILE)):
os.remove(DB_FILE)
#create table
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
skipFlag = 0
fileDir = open('dir.txt', 'r')
for dir in fileDir:
#print dir
cParentFolder = dir[:-1]
DIRECTORY_LIST.append(cParentFolder )
folderSplit = cParentFolder.replace('_','-').replace(' ','').replace('&','').split('-')
try:
folderSplit.remove('xxxxx')
except ValueError:
print 'Opps'
#folderSplit.reverse()
folderSplit.append(folderSplit[0])
folderSplit.pop(0)
tableName = ''.join(folderSplit)
sqlStatement = "CREATE TABLE %s (id integer primary key autoincrement, partNumber varchar, files varchar, revision varchar)" % tableName
#print "CREATE TABLE %s (id integer primary key autoincrement, partNumber varchar, files varchar, revision varchar)" % tableName
#print tableName
c.execute(sqlStatement)
conn.commit()
conn.close()
fileDir.close()
#print DIRECTORY_LIST
def createDirectoryFile():
fileDir = open('dir.txt', 'w+')
os.chdir(THE_PATH)
for name in os.listdir("."):
if os.path.isdir(name):
DIRECTORY_LIST.append(name)
fileDir.write(name + '\n')
os.chdir(os.getcwd())
fileDir.close()
print DIRECTORY_LIST
#createDirectoryFile()
initDirectoryFile()
print "DB Table Init!"
#initDB()
print "Traverse!"
traverse()
``` |
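
Once `vault.db` has been built, the per-folder tables can be queried directly with sqlite3. A minimal sketch in the same Python 2 style as the script above; the table name is an assumption, since the real table names are derived from the folder names on disk:

```python
# Hypothetical query against the generated vault.db; the table name is an assumption.
import sqlite3

conn = sqlite3.connect('vault.db')
c = conn.cursor()
c.execute("SELECT partNumber, revision, files FROM SomeFolderTable WHERE revision != 'None'")
for partNumber, revision, files in c.fetchall():
    print partNumber + ' rev ' + revision
conn.close()
```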
{
"source": "jodysankey/pythonpath",
"score": 2
} |
#### File: pythonpath/src/git_validation.py
```python
import os
import subprocess
from subprocess import DEVNULL
def _local_head(path):
"""Returns a tuple containing the hash of a local repo's head and an error string,
exactly one of which will be None."""
try:
output = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=path)
except subprocess.CalledProcessError as ex:
return (None, 'Failed to get hash of local repo, return code: {}'.format(ex.returncode))
return (output.decode().strip(), None)
def _remote_head(url):
"""Returns a tuple containing the hash of a remote repo's head and an error string,
exactly one of which will be None."""
try:
output = subprocess.check_output(['git', 'ls-remote', url, 'HEAD'], cwd='/')
except subprocess.CalledProcessError as ex:
return (None, 'Failed to get hash of remote url, return code: {}'.format(ex.returncode))
return (output.decode().split()[0], None)
def _has_untracked_files(path):
"""Returns True iff the git repository at path contains untracked files."""
try:
output = subprocess.check_output(['git', 'ls-files', '--exclude-standard',
'--others'], cwd=path)
except subprocess.CalledProcessError:
return True
return len(output.decode().strip()) > 0
def _has_dirty_files(path):
"""Returns True iff the git repository at path contains dirty files."""
return subprocess.call(['git', 'diff-files', '--quiet'],
cwd=path, stdout=DEVNULL, stderr=DEVNULL) != 0
def _has_staged_files(path):
"""Returns True iff the git repository at path contains staged files."""
return subprocess.call(['git', 'diff-index', '--quiet', '--cached', 'HEAD'],
cwd=path, stdout=DEVNULL, stderr=DEVNULL) != 0
def check_repo(local_path, remote_url):
"""Determines if local_path is a valid git repo in sync with remote_url.
Returns a dict containing the following values:
* is_valid - True iff local_path is a git repository
* is_synchronized - True iff local_path has no modified files or staged
changes and the HEAD hash matches that of remote_url.
* problem - If is_valid or is_synchronized is false, a string explaining why.
* local_hash - The HEAD of local_path, if known.
* remote_hash - The HEAD of remote_url, if known."""
# Start with an easy to return failure state.
ret = {
'is_valid': False,
'is_synchronized': False,
'problem': None,
'local_hash': None,
'remote_hash': None,
}
# Check the local_path is a valid git repo and store its HEAD.
if not os.path.isdir(local_path):
ret['problem'] = '{} is not a valid directory'.format(local_path)
return ret
(ret['local_hash'], ret['problem']) = _local_head(local_path)
if ret['local_hash'] is None:
return ret
ret['is_valid'] = True
# Get the HEAD of the remote.
(ret['remote_hash'], ret['problem']) = _remote_head(remote_url)
if ret['remote_hash'] is None:
return ret
# Search for various forms of dirtyness on the local repo.
if _has_untracked_files(local_path):
ret['problem'] = 'Repository contains untracked files.'
elif _has_dirty_files(local_path):
ret['problem'] = 'Repository contains modified files.'
elif _has_staged_files(local_path):
ret['problem'] = 'Repository contains staged files.'
elif ret['local_hash'] != ret['remote_hash']:
ret['problem'] = 'Repository HEAD does not match remote url.'
else:
ret['is_synchronized'] = True
return ret
```
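
A minimal sketch of calling `check_repo`; the local path and remote URL are placeholders:

```python
# Hypothetical call to check_repo; the path and URL are placeholders.
from git_validation import check_repo

result = check_repo('/home/user/projects/site', 'https://example.com/site.git')
if result['is_synchronized']:
    print('In sync at {}'.format(result['local_hash']))
else:
    print('Not synchronized: {}'.format(result['problem']))
```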
#### File: src/sitemgt/actors.py
```python
import socket
import subprocess
import datetime
import tagwriter
import xml.etree.ElementTree
import os
from xml.sax.saxutils import escape
from .paths import getDeploymentFile, getStatusReportFile
from .functionality import ActorRequirement
from .general import SiteObject, Health, FAIL, DEGD, FAULT, UNKNOWN, OFF, GOOD
from .deployment import Deployment
from .statusreport import HostStatusReport
_MAX_STATUS_REPORTS = 100
def _aptitudeSearchList(search):
"""Returns a list of package name,description tuples for the specified aptitude search"""
args = ['aptitude', '--disable-columns', '--display-format', '%p %d', 'search', search]
# Allow a none zero return code if nothing matches the search
raw = subprocess.run(args, stdout=subprocess.PIPE).stdout.decode('utf-8')[:-1]
# Define a table to strip characters that potentially cause a problem in XML strings
drop_table = dict.fromkeys(map(ord, '"<>\\'), None)
if len(raw) > 0:
sanitized_lines = [line.translate(drop_table) for line in raw.split('\n')]
return [line.split(maxsplit=1) for line in sanitized_lines]
else:
return []
class Actor(SiteObject):
"""A high level site capability"""
_expand_dicts = [['members', 'groups', 'responsibility_dict', 'requirement_dict', 'expected_deployments']]
_expand_objects = []
def __init__(self, x_definition, x_functionality, is_group, typename):
"""Initialize the object"""
# Set basic attributes
SiteObject.__init__(self, x_definition, typename)
# Mark dictionaries with blanks until link attaches them
self.responsibilities = {}
if is_group:
self.members = {}
for x_m in x_definition.findall('Member'):
self.members[x_m.get('name')] = None
else:
self.groups = {}
self.requirements = {}
if x_functionality is not None:
for x_req in x_functionality.findall('*'):
req = ActorRequirement(x_req, self)
self.requirements[req.uid] = req
def _classLink(self, siteDescription):
"""Initialize references to other actor objects"""
# Define actor group membership
if hasattr(self, 'members'):
for member_name in sorted(self.members.keys()):
member = siteDescription.actors[member_name]
self.members[member_name] = member
member.groups[self.name] = self
def _crossLink(self, site_description):
"""Initialize references to other non-actor objects"""
# Ask requirements to link their components, and deploy these requirements
for req in self.requirements.values():
req._crossLink(site_description)
self._deployRequirement(req)
def isHostSet(self):
"""Returns true if this actor is a host or host group"""
return isinstance(self, Host) or isinstance(self, HostGroup)
def isGroup(self):
"""Returns true if this actor is a group or users or hosts"""
return hasattr(self,'members')
def _deployRequirement(self, requirement):
"""Document the deployment of all components needed by a requirement"""
# More specific types of actor can override this with their own values
pass
def _setHealthAndStatus(self):
"""Unless overridden the actor health is unmonitored"""
self._health = OFF
class Host(Actor):
"""A computer within the site"""
def __init__(self, x_definition, x_functionality):
"""Initialize the object"""
Actor.__init__(self, x_definition, x_functionality, False, 'host')
self.expected_deployments = {}
def _deployRequiredComponent(self, component, requirement, primary):
"""Document the need for a component due to a requirement"""
if component.name in self.expected_deployments.keys():
# If this deployment is already linked to the host, just add the requirement
self.expected_deployments[component.name]._addRequirement(requirement, primary)
else:
# Must create a new deployment
depl = Deployment(self, component)
depl._addRequirement(requirement, primary)
# Now mark this same requirement as secondary for any components necessary to support this component
for dep_component in component.dependencies.values():
self._deployRequiredComponent(dep_component, requirement, False)
def _deployRequirement(self, requirement):
"""Document the deployment of all components needed by a requirement"""
for component in requirement.primary_components.values():
# Any component directly related to a requirement is primary
self._deployRequiredComponent(component, requirement, True)
def _deployComponent(self, component, location):
"""Document the deployment of a component to a location"""
if component.name in self.expected_deployments.keys():
# If this deployment is already linked to the host, just set the location
self.expected_deployments[component.name].location = location
else:
# Must create a new deployment (it will link itself to us)
Deployment(self, component, location)
def gatherDeploymentStatus(self, cm_working_root):
"""Determine the current state of all deployments on this host"""
# This function only works if we *ARE* the host
if self.name.lower() != socket.gethostname().lower():
raise Exception("Can only gather deployments for current host ({}), not {}".format(socket.gethostname(), self.name))
        self.resetDeploymentStatus()
        # Build a list of unexpected packages (i.e. installed, orphan, but not expected)
expected_packages = []
for package_set in [depl.component.package for depl in self.expected_deployments.values() if hasattr(depl.component,'package')]:
expected_packages.extend(package_set)
raw_orphaned = subprocess.check_output(['debfoster','-ns']).decode('utf-8')
orphaned_packages = raw_orphaned[raw_orphaned.find('\n')+1:].split()
orphaned_packages = [p for p in orphaned_packages if not p.startswith('linux-headers-') and not p.startswith('linux-image-')]
installed_packages = _aptitudeSearchList('~i')
self.unexpected_packages = []
for tupl in installed_packages:
if tupl[0] in orphaned_packages and tupl[0] not in expected_packages:
self.unexpected_packages.append(tupl)
self.upgradable_packages = _aptitudeSearchList('~U')
#Ask each expected deployment to deal with itself, providing the installed_set for speed
for depl in self.expected_deployments.values():
depl.gatherStatus(installed_packages, cm_working_root)
self.status_date = datetime.datetime.today()
def getStatusReportList(self):
"""Returns a list of the top X status reports from the standard file, if this exists"""
status_filename = getStatusReportFile(self.name)
if os.path.exists(status_filename):
return HostStatusReport.createListFromXmlFile(status_filename, _MAX_STATUS_REPORTS)
else:
return []
def resetDeploymentStatus(self):
"""Clears all existing component deployment information"""
self.resetHealth()
self._status = None
if hasattr(self,'status_date'):
delattr(self,'status_date')
if hasattr(self,'upgradable_packages'):
delattr(self,'upgradable_packages')
if hasattr(self,'unexpected_packages'):
delattr(self,'unexpected_packages')
for depl in self.expected_deployments.values():
depl.resetStatus()
def deploymentFileExists(self):
"""Returns true if the standard XML deployment file for the host exists"""
return os.path.exists(getDeploymentFile(self.name))
def saveDeploymentStatusToXmlFile(self):
"""Dumps the current component deployment status using an XML tag writer object on the standard file"""
tag_writer = tagwriter.TagWriter(getDeploymentFile(self.name))
tag_writer.open('DeploymentStatus')
tag_writer.open('Host','name="{}" date="{}"'.format(self.name, self.status_date.strftime("%Y-%m-%d %H:%M")))
for depl in self.expected_deployments.values():
depl.saveStatus(tag_writer)
for pkg in self.upgradable_packages:
tag_writer.write('Upgradable','name="{}" description="{}"'.format(pkg[0], escape(pkg[1])))
for pkg in self.unexpected_packages:
tag_writer.write('Unexpected','name="{}" description="{}"'.format(pkg[0], escape(pkg[1])))
tag_writer.close(2)
def loadDeploymentStatusFromXmlFile(self):
"""Load the component deployment status from the standard file, using an XML ElementTree"""
self.resetDeploymentStatus()
# Find root element and check it is for the correct host. Throw but don't crash if the XML
# is malformed because we've had errors from some of the many hosts in the past.
status_file = getDeploymentFile(self.name)
try:
book = xml.etree.ElementTree.parse(status_file).getroot()
x_host = book.find('Host')
if x_host.get('name') == self.name:
self.status_date = datetime.datetime.strptime(x_host.get('date'), "%Y-%m-%d %H:%M")
for x_d in x_host.findall('Deployment'):
if x_d.get('name') in self.expected_deployments.keys():
self.expected_deployments[x_d.get('name')].loadStatus(x_d)
self.upgradable_packages = [(p.get('name'),p.get('description')) for p in x_host.findall('Upgradable')]
self.unexpected_packages = [(p.get('name'),p.get('description')) for p in x_host.findall('Unexpected')]
except xml.etree.ElementTree.ParseError as e:
print("Error parsing status file {}: {}".format(status_file, e))
def _setHealthAndStatus(self):
"""Determines health of the host, based on state of its software deployments.
Note this function sets status"""
# Typically our state is based only on the deployments
if len(self.expected_deployments) == 0:
self._health = GOOD
self._status = "No deployments"
else:
self._health = Health.amortized([d.health for d in self.expected_deployments.values()])
if self._health is UNKNOWN:
self._status = "Unknown deployment state"
elif self._health is OFF: #Dont see how this could happen
self._status = "Unmonitored deployments"
elif self._health in [FAIL, DEGD, FAULT]:
self._status = "Deployment problem"
else:
self._status = "Good"
# But if they look ok, check our off nominal packages
if self._health is GOOD:
if hasattr(self,'upgradable_packages') and len(self.upgradable_packages)>0:
self._health = FAULT
self._status = "PackagesNeedUpgrade"
elif hasattr(self,'unexpected_packages') and len(self.unexpected_packages)>0:
self._health = GOOD
self._status = "UnexpectedPackages"
class HostGroup(Actor):
"""A collections of computers within the site"""
def __init__(self, x_definition, x_functionality):
"""Initialize the object"""
Actor.__init__(self, x_definition, x_functionality, True, 'hostgroup')
def _deployRequirement(self, requirement):
"""Document the deployment of all components needed by a requirement to our members"""
for member in self.members.values():
member._deployRequirement(requirement)
def _deployComponent(self, component, location):
"""Document the deployment of a component to a location to our members"""
for member in self.members.values():
member._deployComponent(component, location)
class User(Actor):
"""A user of the site"""
def __init__(self, x_definition, x_functionality):
"""Initialize the object"""
Actor.__init__(self, x_definition, x_functionality, False, 'user')
class UserGroup(Actor):
"""A collection of users of the site"""
def __init__(self, x_definition, x_functionality):
"""Initialize the object"""
Actor.__init__(self, x_definition, x_functionality, True, 'usergroup')
```
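
A hedged sketch of the gather/save cycle a host runs on itself. `SiteDescription` and its `actors` dictionary come from elsewhere in the sitemgt package (the constructor signature is taken from the test case later in this repository), `SITE_XML_FILE` and `CM_WORKING_DIR` are defined in `sitemgt.paths` below, and keying the lookup on the lowercased local hostname is an assumption:

```python
# Hypothetical gather-and-save cycle for the current host; see the hedges above.
import socket
from sitemgt import SiteDescription
from sitemgt.paths import SITE_XML_FILE, CM_WORKING_DIR

site = SiteDescription(SITE_XML_FILE)
host = site.actors[socket.gethostname().lower()]
host.gatherDeploymentStatus(CM_WORKING_DIR)
host.saveDeploymentStatusToXmlFile()
```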
#### File: src/sitemgt/paths.py
```python
import os
import subprocess
from subprocess import DEVNULL
SITE_BASE_DIR = os.environ["SITEPATH"]
SITE_XML_FILE = os.path.join(SITE_BASE_DIR, "repo/site/xml/SiteDescription.xml")
CHECK_RESULTS_DIR = os.path.join(SITE_BASE_DIR, "checks")
CHECK_SRC_DIR = os.path.join(SITE_BASE_DIR, "repo/site/checks")
CM_WORKING_DIR = os.path.join(SITE_BASE_DIR, "repo")
CM_UPSTREAM_DIR = os.path.join(SITE_BASE_DIR, "repo.git")
def getDeploymentFile(host_name):
"""Return the qualified path name of the deployment record for the specified host"""
return os.path.join(SITE_BASE_DIR, "status/{}_deployment.xml".format(host_name))
def getStatusReportFile(host_name):
"""Return the qualified path name of the deployment record for the specified host"""
return os.path.join(SITE_BASE_DIR, "status/{}_status_report.xml".format(host_name))
# Note these two functions are pretty fragile. Cannot afford to have multiple scripts that
# using them in parallel, errors will be thrown up to the caller, and we rely on the environment
# variable being mounted directly. All that said the effect of failure is pretty benign.
class MountedSiteDirectories(object):
"""Simple context manager class to mount any unmounted filesystem mounts referencing
the site path, then unmount the same set at completion."""
@staticmethod
def _defined_mounts():
"""Returns all the site mountpoints defined in fstab."""
lines = subprocess.check_output(['findmnt', '--fstab', '--noheadings', '--list',
'--output', 'TARGET']).decode('utf-8').split()
return set([line for line in lines if line.startswith(SITE_BASE_DIR)])
@staticmethod
def _mounted_mounts():
"""Returns all the site mountpoints currently mounted."""
lines = subprocess.check_output(['findmnt', '--kernel', '--noheadings', '--list',
'--output', 'TARGET']).decode('utf-8').split()
return set([line for line in lines if line.startswith(SITE_BASE_DIR)])
def __init__(self):
self.mounted = set()
def __enter__(self):
for mount in set.difference(self._defined_mounts(), self._mounted_mounts()):
if subprocess.call(['mount', mount], stdout=DEVNULL, stderr=DEVNULL) == 0:
self.mounted.add(mount)
else:
raise Exception('Failed to mount site at {}'.format(mount))
def __exit__(self, type, value, traceback):
to_unmount = list(self.mounted)
for mount in to_unmount:
if subprocess.call(['umount', mount], stdout=DEVNULL, stderr=DEVNULL) == 0:
self.mounted.remove(mount)
```
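
The context manager above is intended to wrap any work that reads or writes the site tree, mounting missing filesystems on entry and unmounting them again on exit. A minimal usage sketch; the host name is illustrative:

```python
# Hypothetical usage of MountedSiteDirectories; the host name is illustrative.
from sitemgt.paths import MountedSiteDirectories, getDeploymentFile

with MountedSiteDirectories():
    with open(getDeploymentFile('server1')) as f:
        print(f.read())
```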
#### File: pythonpath/tests/test_classifydir.py
```python
import os
import tempfile
import unittest
import classifydir
class DateBatchTestCase(unittest.TestCase):
def _rel_path(self, subdir_string):
"""Returns a path inside the test directory where each character in subdir_string
defines a single letter subdirectory, e.g. _self_path(abc) = /tmp/UNIQUE_NAME/a/b/c."""
path = self.test_dir.name
for char in subdir_string:
path = os.path.join(path, char)
return path
def _create_directories(self, subdir_strings):
"""Create all the directories supplied in one character per path segment form."""
for path in [self._rel_path(s) for s in subdir_strings]:
os.mkdir(path)
def _create_classify(self, subdir_string, volume, protection,
recurse='true', compress='false', name=None):
"""Create a standard .classify file in the requested location."""
lines = ['{}={}'.format(*tup) for tup in zip(
('volume', 'protection', 'recurse', 'compress'),
(volume, protection, recurse, compress))]
if name:
lines.append('name={}'.format(name))
self._create_raw_classify(subdir_string, lines)
def _create_raw_classify(self, subdir_string, lines):
"""Create a .classify file containing the suppled lines in the requested location."""
path = self._rel_path(subdir_string)
with open(os.path.join(path, classifydir.MAGIC_FILE), 'w') as f:
f.writelines(['{}\n'.format(l) for l in lines])
def _create_files(self, subdir_string, sizes):
"""Create a set of text files of the requested sizes in the requested location. Files will
be named with a sequential number starting at 1."""
path = self._rel_path(subdir_string)
for number, size in enumerate(sizes, start=1):
with open(os.path.join(path, str(number)), 'w') as f:
f.write("x" * size)
def setUp(self):
self.test_dir = tempfile.TemporaryDirectory(prefix='classifydir_test_')
self.test_subdir = os.path.split(self.test_dir.name)[1]
def tearDown(self):
self.test_dir.cleanup()
def test_single_recursive_tree(self):
self._create_directories(('a', 'ab', 'abc', 'd'))
self._create_classify('', 'huge', 'secret', recurse='true', name='root')
self._create_files('ab', (100, 200, 300))
self._create_files('abc', (1000,))
self._create_files('d', (20000,))
cd = classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
# Test directory is the only root.
self.assertTrue(cd.is_archive_root())
self.assertEqual([d.name for d in cd.descendants()], ['root', 'a', 'b', 'c', 'd'])
self.assertEqual([d.name for d in cd.descendant_members()], ['root', 'a', 'b', 'c', 'd'])
self.assertEqual([d.name for d in cd.descendant_roots()], ['root'])
self.assertEqual([d.name for d in cd.descendant_attenuations()], [])
# With lots of properties.
self.assertEqual(cd.total_size(), 21668) # includes the classify file.
self.assertEqual(cd.total_file_count(), 6) # includes the classify file.
self.assertEqual(cd.volume, 'huge')
self.assertEqual(cd.protection, 'secret')
self.assertEqual(cd.archive_size(), 21668)
self.assertEqual(cd.archive_file_count(), 6)
self.assertEqual(list(cd.archive_filenames()), [
os.path.join(self.test_dir.name, '.classify'),
os.path.join(self.test_dir.name, 'a', 'b', '1'),
os.path.join(self.test_dir.name, 'a', 'b', '2'),
os.path.join(self.test_dir.name, 'a', 'b', '3'),
os.path.join(self.test_dir.name, 'a', 'b', 'c', '1'),
os.path.join(self.test_dir.name, 'd', '1')])
# Adding a file should change the hash and date
with open(os.path.join(self._rel_path('d'), 'new'), 'w') as f:
f.write("y" * 80)
updated_cd = classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
self.assertEqual(updated_cd.archive_file_count(), 7)
self.assertNotEqual(cd.archive_hash(), updated_cd.archive_hash())
def test_single_tree_without_fetching(self):
self._create_directories(('a', 'd'))
self._create_classify('', 'large', 'secret', recurse='true', name='root')
self._create_files('a', (100, 200, 300))
self._create_files('d', (20000,))
cd = classifydir.ClassifiedDir(self.test_dir.name, fetch_info=False)
# If we don't fetch into many properties should be None, but the filenames still work.
self.assertEqual([d.name for d in cd.descendants()], ['root', 'a', 'd'])
self.assertEqual([d.name for d in cd.descendant_members()], ['root', 'a', 'd'])
self.assertEqual([d.name for d in cd.descendant_roots()], ['root'])
self.assertIsNone(cd.total_size())
self.assertIsNone(cd.total_file_count())
self.assertEqual(cd.volume, 'large')
self.assertEqual(cd.protection, 'secret')
self.assertIsNone(cd.archive_size())
self.assertIsNone(cd.archive_file_count())
self.assertIsNone(cd.archive_hash())
self.assertEqual(list(cd.archive_filenames()), [
os.path.join(self.test_dir.name, '.classify'),
os.path.join(self.test_dir.name, 'a', '1'),
os.path.join(self.test_dir.name, 'a', '2'),
os.path.join(self.test_dir.name, 'a', '3'),
os.path.join(self.test_dir.name, 'd', '1')])
def test_sibling_archives(self):
self._create_directories(('a', 'b', 'c', 'cd'))
self._create_classify('a', 'small', 'restricted', recurse='true')
self._create_classify('b', 'medium', 'confidential', recurse='true')
self._create_classify('c', 'large', 'none', recurse='true')
self._create_files('a', (100, 200))
self._create_files('b', (2000,))
self._create_files('c', (30000,))
cd = classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
# Test directory is not itself a root.
self.assertFalse(cd.is_archive_root())
self.assertEqual([d.name for d in cd.descendants()], [self.test_subdir, 'a', 'b', 'c', 'd'])
self.assertEqual(cd.total_size(), 32486) # includes the classify files.
self.assertEqual(cd.total_file_count(), 7) # includes the classify files.
# But should contain three roots.
roots = list(cd.descendant_roots())
self.assertEqual([d.name for d in roots], ['a', 'b', 'c'])
for root in roots:
self.assertTrue(root.is_archive_root())
# With lots of properties.
self.assertEqual(roots[0].archive_size(), 363)
self.assertEqual(roots[0].total_file_count(), 3)
self.assertEqual(roots[0].volume, 'small')
self.assertEqual(roots[0].protection, 'restricted')
self.assertEqual([d.name for d in roots[0].descendants()], ['a'])
self.assertEqual(roots[1].archive_size(), 2066)
self.assertEqual(roots[1].total_file_count(), 2)
self.assertEqual(roots[1].volume, 'medium')
self.assertEqual(roots[1].protection, 'confidential')
self.assertEqual([d.name for d in roots[1].descendants()], ['b'])
self.assertEqual(roots[2].archive_size(), 30057)
self.assertEqual(roots[2].total_file_count(), 2)
self.assertEqual(roots[2].volume, 'large')
self.assertEqual(roots[2].protection, 'none')
self.assertEqual([d.name for d in roots[2].descendants()], ['c', 'd'])
def test_archives_inside_non_recursive(self):
self._create_directories(('a', 'ab', 'c'))
self._create_classify('', 'small', 'restricted', recurse='false', name='root')
self._create_classify('a', 'small', 'confidential', recurse='true')
self._create_classify('c', 'none', 'none', recurse='true')
self._create_files('', (100, 200))
self._create_files('ab', (1000,))
self._create_files('c', (20000,))
cd = classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
# Test directory should have one child root (volume=none gets ignored) but since its not
# recursive these should not be considered members.
self.assertTrue(cd.is_archive_root())
self.assertEqual([d.name for d in cd.descendants()], ['root', 'a', 'b', 'c'])
self.assertEqual([d.name for d in cd.descendant_members()], ['root'])
self.assertEqual([d.name for d in cd.descendant_roots()], ['root', 'a'])
# Root totals include all files but the archive shouldn't include files in subdirectories.
self.assertEqual(cd.total_size(), 21495)
self.assertEqual(cd.total_file_count(), 7)
self.assertEqual(cd.archive_size(), 374)
self.assertEqual(cd.archive_file_count(), 3)
self.assertEqual(list(cd.archive_filenames()), [
os.path.join(self.test_dir.name, '.classify'),
os.path.join(self.test_dir.name, '1'),
os.path.join(self.test_dir.name, '2')])
# The lower level archive should include its own files.
child = list(cd.descendant_roots())[1]
self.assertTrue(child.is_archive_root())
self.assertEqual(child.total_size(), 1065)
self.assertEqual(child.total_file_count(), 2)
self.assertEqual(child.archive_size(), 1065)
self.assertEqual(child.archive_file_count(), 2)
self.assertEqual(list(child.archive_filenames()), [
os.path.join(self.test_dir.name, 'a', '.classify'),
os.path.join(self.test_dir.name, 'a', 'b', '1')])
def test_missing_classify_in_non_recursive(self):
self._create_directories(('a', 'b'))
self._create_classify('', 'small', 'restricted', recurse='false', name='root')
self._create_classify('a', 'small', 'confidential', recurse='true')
# Note: no classify in b, this is an error!
with self.assertRaises(Exception):
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
def test_override_archives_inside_recursive_archive(self):
self._create_directories(('a', 'ab', 'c', 'cd', 'e'))
self._create_classify('', 'small', 'restricted', recurse='true', name='root')
# Note this overrides a directory inside the recursive parent.
self._create_classify('a', 'small', 'confidential', recurse='true')
# Note this ceases archiving inside the recursive parent.
self._create_classify('c', 'none', 'none', recurse='true')
# Note this tries to restart archiving in a subdirectory with a different name.
self._create_classify('cd', 'medium', 'restricted', recurse='true')
self._create_files('', (100, 200))
self._create_files('a', (10000,))
self._create_files('ab', (20000,))
self._create_files('c', (1000,))
self._create_files('cd', (2000,))
self._create_files('e', (5000,))
cd = classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
self.assertTrue(cd.is_archive_root())
self.assertEqual([d.name for d in cd.descendants()], ['root', 'a', 'b', 'c', 'd', 'e'])
# Members should not include the overridden directory or the attenuated directory.
self.assertEqual([d.name for d in cd.descendant_members()], ['root', 'e'])
self.assertEqual([d.name for d in cd.descendant_roots()], ['root', 'a', 'd'])
self.assertEqual([d.name for d in cd.descendant_attenuations()], ['c'])
# Root totals include all files but the archive shouldn't include files in the overridden
# directory.
self.assertEqual(cd.total_size(), 38558)
self.assertEqual(cd.total_file_count(), 11)
self.assertEqual(cd.archive_size(), 5373)
self.assertEqual(cd.archive_file_count(), 4)
self.assertEqual(list(cd.archive_filenames()), [
os.path.join(self.test_dir.name, '.classify'),
os.path.join(self.test_dir.name, '1'),
os.path.join(self.test_dir.name, '2'),
os.path.join(self.test_dir.name, 'e', '1')])
        # The lower level archives should include their own files.
child = list(cd.descendant_roots())[1]
self.assertTrue(child.is_archive_root())
self.assertEqual(child.total_size(), 30065)
self.assertEqual(child.total_file_count(), 3)
self.assertEqual(child.archive_size(), 30065)
self.assertEqual(child.archive_file_count(), 3)
self.assertEqual(list(child.archive_filenames()), [
os.path.join(self.test_dir.name, 'a', '.classify'),
os.path.join(self.test_dir.name, 'a', '1'),
os.path.join(self.test_dir.name, 'a', 'b', '1')])
        # The lower level archives should include their own files.
child = list(cd.descendant_roots())[2]
self.assertTrue(child.is_archive_root())
self.assertEqual(child.archive_size(), 2064)
self.assertEqual(child.archive_file_count(), 2)
self.assertEqual(list(child.archive_filenames()), [
os.path.join(self.test_dir.name, 'c', 'd', '.classify'),
os.path.join(self.test_dir.name, 'c', 'd', '1')])
def test_parse_with_comments(self):
self._create_raw_classify('', [
'# Test file with some comments',
' # Some not at the start',
'volume=small',
'protection=none # Comments allowed after text',
'recurse=true',
'compress=true'])
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
def test_parse_fails_missing_parameter(self):
self._create_raw_classify('', [
'volume=small',
'recurse=true',
'compress=true'])
with self.assertRaises(Exception):
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
def test_parse_fails_duplicate_parameter(self):
self._create_raw_classify('', [
'volume=small',
'protection=restricted',
'protection=restricted',
'recurse=true',
'compress=true'])
with self.assertRaises(Exception):
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
def test_parse_fails_unknown_parameter(self):
self._create_raw_classify('', [
'volume=small',
'protection=restricted',
'mystery=what_am_i',
'recurse=true',
'compress=true'])
with self.assertRaises(Exception):
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
def test_parse_fails_invalid_value(self):
self._create_raw_classify('', [
'volume=small',
'protection=super_dooper_spicy_secret',
'recurse=true',
'compress=true'])
with self.assertRaises(Exception):
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=True)
def test_parse_fails_malformed_line(self):
self._create_raw_classify('', [
'volume='
'protection=none'
'recurse=true'
'compress=true'])
with self.assertRaises(Exception):
classifydir.ClassifiedDir(self.test_dir.name, fetch_info=False)
if __name__ == "__main__":
unittest.main()
```
#### File: pythonpath/tests/test_sitedescription.py
```python
import sitemgt
import os
import shutil
import unittest
import platform
# NOT WORKING YET !!!!!
# NOT WORKING YET !!!!!
# Need to actually define the expected output and check input thoroughly checks each condition
# also possible that dictionary ordering won't be stable
if platform.system()=='Windows':
#test_path = os.getenv("TMP") + "/SiteDescriptionTest" # TMP set to some hideous directory
test_path = r"C:\Temp\SiteDescriptionTest"
else:
test_path = "/tmp/SiteDescriptionTest"
XML_NAME = "TestCase.xml"
sd_filename = os.path.join(test_path,XML_NAME)
SITE_DESCRIPTION_XML = """<?xml version="1.0" encoding="UTF-8"?>
<SiteDescription xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<!--Start of Hosts and Users section-->
<!--Start of Hosts and Users section-->
<!--Start of Hosts and Users section-->
<Actors>
<Host name="server1" purpose="Server" os="Debian"/>
<Host name="client1" purpose="First client" os="Ubuntu"/>
<Host name="client2" purpose="Second client" os="Ubuntu"/>
<Host name="client3" purpose="Third client" os="WindowsXP"/>
<HostGroup name="ubuntu" description="All Ubuntu clients">
<Member name="client1"/>
<Member name="client2"/>
</HostGroup>
<User name="user1" type="Human" email="<EMAIL>"/>
<User name="user2" type="Human" email="<EMAIL>"/>
<User name="guest" type="Human"/>
<User name="root" type="Role" email="<EMAIL>"/>
<User name="sysUser1" type="System"/>
<UserGroup name="humans" description="All humans">
<Member name="user1"/>
<Member name="user2"/>
<Member name="guest"/>
</UserGroup>
</Actors>
<!--Start of Capability and Requirements section-->
<!--Start of Capability and Requirements section-->
<!--Start of Capability and Requirements section-->
<Functionality>
<Capability name="Webcam" description="Capture, post, and store pictures from the webcam">
<SystemRequirement uid="S001" text="% shall post a webcam image every 2 minutes" importance="3">
<Requirement uid="H001"/>
<Requirement uid="H002"/>
</SystemRequirement>
<SystemRequirement uid="S002"
text="% shall store webcam image every 2 minutes for 3 days, and then every hour thereafter" importance="3">
<Requirement uid="H001"/>
<Requirement uid="H003"/>
</SystemRequirement>
<HostResponsibility host_set="server1" description="server1 is responsible for all webcam duties"/>
</Capability>
<Capability name="Time Sync" description="Synchronize all hosts to current time">
<SystemRequirement uid="S003" text="% shall provide a local time sync service, synchronized to an atomic clock" importance="4">
<Requirement uid="H004"/>
</SystemRequirement>
<SystemRequirement uid="S004" text="All hosts should synchronize time using the local service" importance="3">
<Requirement uid="H005"/>
<Requirement uid="H006"/>
<Requirement uid="U001"/>
</SystemRequirement>
<HostResponsibility host_set="server1" description="server1 provides the time sync server"/>
<HostResponsibility host_set="ubuntu" description="hosts in ubuntu must synchronize to the local server"/>
<HostResponsibility host_set="client3" description="client3 must synchronize to the local server"/>
<UserResponsibility user_set="root" description="root must verfify time sync is working"/>
</Capability>
<HostSet name="server1">
<HostRequirement uid="H001" text="% shall provide a webcam image capture service">
<Component name="webcam-app"/>
<Component name="webcam-init"/>
<Component name="reset-webcam"/>
</HostRequirement>
<HostRequirement uid="H002"
text="% shall post a webcam image to www.jsankey.com every 2 minutes">
<Component name="webcam-netrc"/>
<Component name="webcam-rc"/>
</HostRequirement>
<HostRequirement uid="H003"
text="% shall store webcam image every 2 minutes for 3 days, and then every hour thereafter"
notes="The webcam package is configured to store images all images in /home/open/webcam/live, and then archive-webcam
is used to preserve the first image each hour after a certain age in /home/open/webcam/archive, and delete all others">
<Component name="webcam-rc"/>
<Component name="archive-webcam"/>
</HostRequirement>
<HostRequirement uid="H004"
text="% shall provide a local time sync service, synchronized to an atomic clock">
<Component name="ntp"/>
<Component name="server-ntp.conf"/>
</HostRequirement>
</HostSet>
<HostSet name="ubuntu">
<HostRequirement uid="H005" text="All hosts in % shall synchronize time to the local sync service">
<Component name="client-ntpdate"/>
</HostRequirement>
</HostSet>
<HostSet name="client3">
<HostRequirement uid="H006" text="% shall synchronize time to the local sync service">
<Component name="windows-time-sync"/>
</HostRequirement>
</HostSet>
<UserSet name="root">
<UserRequirement uid="U001" text="% shall verify correct time sync operation"/>
</UserSet>
</Functionality>
<!--Start of Software components section-->
<!--Start of Software components section-->
<!--Start of Software components section-->
<Software>
<ScriptingLanguages>
<Language name="python3">
<Application name="python3"/>
</Language>
<Language name="bash">
<Application name="bash"/>
</Language>
</ScriptingLanguages>
<Components default_cm_repository="oberon/site">
<RepoApplication name="nfs-kernel-server"/>
<RepoApplication name="python3"/>
<RepoApplication name="bash"/>
<!--Webcam-->
<RepoApplication name="webcam-app" package="webcam"/>
<ConfigurationFile name="fs-nfs-exports" cm_location="oberon/etc" cm_filename="exports" notes="NFS shares configuration">
<Deployment host_set="server1" directory="/etc"/>
</ConfigurationFile>
<ConfigurationFile name="webcam-rc" cm_location="oberon/webcam" cm_filename="webcamrc" notes="Configures intervals and storage location for webcam">
<Deployment host_set="server1" directory="/etc/webcam" filename=".webcamrc"/>
</ConfigurationFile>
<ConfigurationFile name="webcam-netrc" cm_location="oberon/webcam" cm_filename="netrc" notes="Configures FTP storage location for webcam">
<Deployment host_set="server1" directory="/etc/webcam" filename=".netrc"/>
</ConfigurationFile>
<Script name="webcam-init" cm_location="oberon/init" cm_filename="webcamd" language="bash" status="working">
<Deployment host_set="server1" directory="/etc/init.d"/>
</Script>
<Script name="reset-webcam" cm_location="oberon/cron" language="bash" status="working" notes="Restarts webcam service to avoid crash after too many image captures">
<Deployment host_set="server1" directory="/etc/cron.daily" filename="srv-01-reset-webcam"/>
</Script>
<Script name="archive-webcam" cm_location="oberon/cron" language="python3" status="working" notes="For images older than an age, moves the first for each hour and deletes the rest">
<Deployment host_set="server1" directory="/etc/cron.daily" filename="srv-01-archive-webcam"/>
</Script>
<!--Time Sync-->
<RepoApplication name="ntp"/>
<ConfigurationFile name="server-ntp.conf" cm_location="oberon/etc" notes="NTP time server configuration" cm_filename="ntp.conf">
<Deployment host_set="server1" directory="/etc"/>
</ConfigurationFile>
<ConfigurationFile name="client-ntpdate" cm_location="kubuntu/etc" cm_filename="ntpdate" notes="Ubuntu NTP time client FTP configuration">
<Deployment host_set="ubuntu" directory="/etc/default"/>
</ConfigurationFile>
<NonRepoApplication name="windows-time-sync" installationType="exe" installLocation="C:\Program Files\TimeSync" vendor="fictional"/>
</Components>
</Software>
</SiteDescription>
"""
class SiteDescriptionTestCase(unittest.TestCase):
def setUp(self):
#Make sure test directory is fresh
if os.path.isdir(test_path):
shutil.rmtree(test_path)
os.mkdir(test_path)
#Create our standard input XML in the test directory
f = open(sd_filename,'w')
f.write(SITE_DESCRIPTION_XML)
f.close()
def testConstruction (self):
"""Just verify the string representation for the entire site matches the expectation"""
sd = sitemgt.SiteDescription(sd_filename)
result_string = str(sd)
f_actual = open(os.path.join(test_path,"actual.txt"),'w')
f_actual.write(result_string)
f_actual.close()
#self.failUnlessEqual(result,target,"Structure 1 - Did not return correct list")
if __name__ == "__main__":
# This way of running allows quick focus on a particular test by entering the number, or all
# tests by just entering "test"
ldr = unittest.TestLoader()
#ldr.testMethodPrefix = "testStructureOne"
ldr.testMethodPrefix = "test"
suite = ldr.loadTestsFromTestCase(SiteDescriptionTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
``` |
{
"source": "jodysankey/scripts",
"score": 2
} |
#### File: jodysankey/scripts/build_archives.py
```python
from os import path
import argparse
import datetime
import grp
import os
import pwd
import subprocess
import sys
import tarfile
import classifydir
# Set VERBOSE to see added freshened archives and skipped files in stdout
VERBOSE = False
def freshen_archive_sets(search_paths, output_path, sizes, key_file):
"""Updates all tar files for archives of the specified sizes located on any of the supplied
search paths. tars are created or freshened in size based directories in the output_path, and
any unrecognized files in output_path will be deleted on completion. Where encryption is
required, the contents of key_file will be used as the key."""
if not path.exists(output_path):
_write_error('Archive output directory does not exist: ' + output_path)
return
if not path.exists(key_file):
_write_error('Key file does not exist: ' + key_file)
return
cds = [classifydir.ClassifiedDir(s, True) for s in search_paths if path.exists(s)]
for size in sizes:
size_output_path = path.join(output_path, size)
if not path.exists(size_output_path):
_write_status('Creating archive size output directory: ' + size_output_path)
os.makedirs(size_output_path)
freshen_archives([a for cd in cds for a in cd.descendantRoots() if a.volume == size],
size_output_path,
key_file)
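# Illustrative usage of freshen_archive_sets(); the search paths, output path, sizes and
# key file below are hypothetical placeholders, not values taken from a real deployment:
#   freshen_archive_sets(['/home/open/docs', '/home/open/photos'],
#                        '/var/local/archives', ['small', 'medium', 'large'],
#                        '/etc/archive.key')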
def freshen_archives(archives, output_path, key_file):
"""Updates all tar files for a collection of archives in the same output directory.
tars are created or freshened in output_path, and any unrecognized files in output_path will
be deleted on completion. Where encryption is required, the contents of key_file will be used
as the key."""
created_archive_names = set()
existing_tars = set(os.listdir(output_path))
for archive in archives:
# Handle and report the case where two archives were given the same name.
if archive.name in created_archive_names:
_write_error('Additional archive with same name ({}) in dir {}, skipping'.format(
archive.name, output_path))
continue
created_archive_names.add(archive.name)
# Create or update the archive timestamp
tar_name = _archive_tar_filename(archive)
if tar_name in existing_tars:
freshen_timestamp(path.join(output_path, tar_name), 'existing tar')
existing_tars.remove(tar_name)
create_tar(archive, output_path)
if _should_encrypt_archive(archive):
encrypt_tar(archive, output_path, key_file)
# Anything else remaining in the archive directory is no longer needed
for tar_name in existing_tars:
remove_file(path.join(output_path, tar_name), 'unwanted existing tar')
def create_tar(archive, output_path):
"""Creates an optionally compressed tarfile in the specified directory containing the contents
of an classified directory object at an archive root."""
unencrypted_path = path.join(output_path, _archive_tar_filname(archive, before_encryption=True))
_write_status('Creating new tar: ' + unencrypted_path)
tar_file = tarfile.open(unencrypted_path, 'w:gz' if archive.compress else 'w')
for f in archive.archiveFilenames():
try:
tar_file.add(f, path.relpath(f, archive.full_path), recursive=False)
except IOError:
if VERBOSE: _write_status('Skipping unreadable file {}'.format(f))
tar_file.close()
def encrypt_tar(archive, output_path, key_file):
"""Creates an encrypted tarfile in the specified directory using an existing unencrypted
version and a supplied key file. The unencrypted archive is removed after encryption."""
unencrypted_path = path.join(output_path, _archive_tar_filename(archive, before_encryption=True))
encrypted_path = path.join(output_path, _archive_tar_filename(archive, before_encryption=False))
_write_status('Encrypting archive to {}'.format(encrypted_path))
result = subprocess.run(['gpg',
'--no-options',
'--batch',
'--cipher-algo', 'AES256',
'--passphrase-file', key_file,
'--output', encrypted_path,
'--symmetric', unencrypted_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if result.returncode != 0:
_write_error('Non zero return code {} from PGP encrypting to {}: {}'.format(
result.returncode, encrypted_path, result.stdout.decode('utf-8')))
remove_file(unencrypted_path, 'pre-encryption tar')
def freshen_timestamp(filename, role):
"""Touches the specified file to freshen its timestamp."""
if VERBOSE:
_write_status('Freshen timestamp on {}: {}'.format(role, filename))
try:
os.utime(filename, None)
except IOError:
_write_error('Could not update timestamp on {}: {}'.format(role, filename))
def remove_file(filename, role):
"""Deletes the specified file, catching and logging any errors."""
_write_status('Removing {}: {}'.format(role, filename))
try:
os.remove(filename)
except (IOError, OSError):
_write_error('Could not delete {}: {}'.format(role, filename))
def _write_status(text):
"""Record an informational note, complete with timestamp."""
print(datetime.datetime.now().isoformat() + ' ' + text, file=sys.stdout)
def _write_error(text):
"""Record an problem, complete with timestamp."""
global errors_found
errors_found = True
print(datetime.datetime.now().isoformat() + ' ERROR ' + text, file=sys.stdout)
print(datetime.datetime.now().isoformat() + ' ' + text, file=sys.stderr)
def _should_encrypt_archive(archive):
"""Returns true iff the output of the supplied classified directory should be encrypted"""
return archive.protection in ('secret', 'confidential', 'restricted')
def _archive_tar_filename(archive, before_encryption=False):
"""Returns the expected filename for a classified directory, including hash."""
return '{}_{}{}.tar{}{}'.format(
archive.name,
archive.archiveHash(),
'.secret' if archive.protection == 'secret' else '',
'.gz' if archive.compress else '',
'.aes' if _should_encrypt_archive(archive) and not before_encryption else '')
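# Illustrative (hypothetical) results of the filename scheme above, assuming archiveHash()
# returns '3fa9c2d1': a compressed, confidential archive named 'photos' would become
# 'photos_3fa9c2d1.tar.gz.aes', while an uncompressed, unprotected archive named 'notes'
# would become 'notes_3fa9c2d1.tar'.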
def main():
parser = argparse.ArgumentParser(description='Builds tar archives with appropriate encryption '
'and compression based on .classify files found in a set of '
'search directories.')
parser.add_argument('-k', '--keyfile', required=True, help='Path of encryption keyfile')
parser.add_argument('-s', '--search_dirs', nargs='+', required=True,
help='Paths to be searched for .classify files')
parser.add_argument('-z', '--sizes', default='small,medium,large',
help='Comma separated list of archive sizes')
parser.add_argument('-a', '--archive_dir', required=True, help='Path to output archive files')
parser.add_argument('-u', '--user', help='Username to run operations as')
parser.add_argument('-g', '--group', help='Group name to run operations as')
args = parser.parse_args()
if args.group:
os.setgid(grp.getgrnam(args.group).gr_gid)
if args.user:
os.setuid(pwd.getpwnam(args.user).pw_uid)
os.environ['HOME'] = os.path.expanduser('~' + args.user)
freshen_archive_sets(args.search_dirs, args.archive_dir, args.sizes.split(','), args.keyfile)
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: jodysankey/scripts/hash_files.py
```python
import argparse
import base64
import hashlib
import os
import re
import sys
BLOCK_SIZE = 1024*1024
LOG_INTERVAL = 100
def _create_arg_parser():
"""Returns a configured ArgumentParser."""
parser = argparse.ArgumentParser(description='Builds a list of files and their hashes '
'sorted by hash')
parser.add_argument('-i', '--in', required=True, action='append', dest='input',
help='path to search for files, more than one may be used')
parser.add_argument('-e', '--exclude', action='append', metavar='EX',
help='regex to exclude files or directories from the search')
parser.add_argument('-o', '--out', required=True, dest='output',
help='output filename')
parser.add_argument('-s', '--silent', action='store_true',
help='don\'t output progress information')
return parser
def _gather_paths(in_dirs, exclude_regex, log_interval):
"""Returns a list of fully qualified paths for every not-excluded file in in_dirs."""
output = []
count_since_log, file_count, dir_count, exclude_file_count, exclude_dir_count = (0, 0, 0, 0, 0)
excludes = [re.compile(r) for r in exclude_regex]
for in_dir in in_dirs:
for root, dirs, files in os.walk(in_dir):
# Remove any directories that match the regex exclusion patterns so we don't walk them
filtered_dirs = [d for d in dirs if not any((ex.search(d) for ex in excludes))]
exclude_dir_count += (len(dirs) - len(filtered_dirs))
dirs[:] = filtered_dirs
# Remove any files that match the regex exclusion patterns
filtered_files = [f for f in files if not any((ex.search(f) for ex in excludes))]
exclude_file_count += (len(files) - len(filtered_files))
dir_count += 1
for f in filtered_files:
output.append(os.path.join(root, f))
file_count += 1
count_since_log += 1
if count_since_log >= log_interval:
print("Gathered {} files from {} dirs, excluded {} files and {} dirs".format(
file_count, dir_count, exclude_file_count, exclude_dir_count))
count_since_log = 0
return output
def hash_file(path):
"""Returns a base64 encoded hash of the input file."""
hasher = hashlib.sha256()
with open(path, 'rb') as f:
buffer = f.read(BLOCK_SIZE)
while len(buffer) > 0:
hasher.update(buffer)
buffer = f.read(BLOCK_SIZE)
return base64.urlsafe_b64encode(hasher.digest()[:12]).decode('utf-8')
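# Note: the digest is truncated to its first 12 bytes, so the url-safe base64 encoding is
# always a 16-character string with no padding. Illustrative (hypothetical) call:
#   hash_file('/tmp/example.txt')  ->  'qL2c-Ys1Rk8aB3dE'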
def hash_paths(paths, log_interval):
"""Returns a map of the base64 hash to the filename for all paths in path."""
output = {}
count_since_log = 0
for path in paths:
output[hash_file(path)] = os.path.basename(path)
count_since_log += 1
if count_since_log >= log_interval:
print("Hashed {} of {} files: {}".format(len(output), len(paths),
os.path.basename(path)))
count_since_log = 0
return output
def _output_hashes(hashes, output_path):
"""Writes the supplied dictionary of hashes to an ordered file."""
keys = sorted(hashes.keys())
with open(output_path, 'w') as f:
for k in keys:
f.write("{}\t{}\n".format(k, hashes[k]))
def main():
"""Hashes a set of files based on command line arguments."""
args = _create_arg_parser().parse_args()
log_interval = sys.maxsize if args.silent else LOG_INTERVAL
exclude_paths = ["^[.]"] + (args.exclude if args.exclude else [])
if not args.silent:
print("Gathering matching files.")
paths = _gather_paths(args.input, exclude_paths, log_interval * 10)
if not args.silent:
print("Calculating hashes.")
hashes = hash_paths(paths, log_interval)
if not args.silent:
print("Writing output file.")
_output_hashes(hashes, args.output)
if not args.silent:
print("Done.")
if __name__ == '__main__':
main()
```
#### File: jodysankey/scripts/movable_host.py
```python
import os
import subprocess
import sys
CONNECT_OPTION = 'x-jms.connect'
SITE_MOUNTPOINT = os.environ['SITEPATH']
BACKUP_SCRIPT = '/etc/backup-system'
SITE_SYNC_SCRIPT = ['host_site_sync', '-x']
class MountableDirectory(object):
"""A class to manage a mount in fstab in terms of the local mount point."""
def __init__(self, mount_path):
"""Initialize given a mount point."""
if not os.path.exists(mount_path):
raise IOError('MountableDirectory error: {} does not exist'.format(mount_path))
self.path = mount_path
self.performed_mount = False
def is_mounted(self):
"""Returns true if point is currently mounted."""
return subprocess.call(['findmnt', '-mln', self.path]) == 0
def mount(self):
"""Mounts the point, returning true on success and remembering if we needed to act"""
self.performed_mount = False
if self.is_mounted():
return True
pprint("Mounting {}".format(self.path))
success = run_and_report(['mount', self.path])
if success:
self.performed_mount = True
return success
def unmount(self, force):
"""(Optionally force) unmounts the point if currently mounted, returning true on success"""
if not self.is_mounted():
return True
pprint("Unmounting {}".format(self.path))
return run_and_report(['umount', '-f', self.path] if force else ['umount', self.path])
def restore(self, force):
"""Unmounts the point iff the most recent mount did mounting, returning true on success."""
if self.performed_mount:
return self.unmount(force)
return True
def run_and_report(command):
"""Runs a command, returning True on success and printing retcode on error."""
ret_code = subprocess.call(command)
if ret_code != 0:
eprint("{} failed with code {}".format(command[0], ret_code))
return ret_code == 0
def pprint(string):
"""Prints a string with a standard prefix."""
print(' ' + string)
def eprint(string):
"""Prints a string with a standard error prefix."""
print("ERROR: " + string)
def print_usage():
"""Print standard help string then quit"""
print("\n Usage: {} connect|disconnect [force]|backup\n".format(sys.argv[0]))
print(" connect Mount connectable network mounts")
print(" disconnect [Forcibly] unmount connectable network mounts")
print(" backup (Root only) Backup machine configuration, user homes, and logs to")
print(" network and sync site status, temporarily mounting if necessary")
print("Script (c)2011-2020 <NAME>")
print("Currently running in Python v{}.{}.{}\n".format(*sys.version_info))
sys.exit()
def do_backup():
"""Perform a standard backup operation, mounting as necessary."""
if os.geteuid() != 0:
eprint('Only root can perform a backup')
return
pprint("Executing backup script {}...".format(BACKUP_SCRIPT))
run_and_report(BACKUP_SCRIPT)
site_mount = MountableDirectory(SITE_MOUNTPOINT)
if not site_mount.mount():
eprint('Could not mount ' + SITE_MOUNTPOINT)
else:
pprint("Executing site status script {}...".format(SITE_SYNC_SCRIPT))
run_and_report(SITE_SYNC_SCRIPT)
site_mount.restore(False)
def connectable_mounts():
"""Returns a list of MountableDirectory s for all mounts marked as connectable."""
# Findmnt can output dirs of everything in fstab with the magic option, allow non-zero
# return codes since there might not be any matching mountpoints.
try:
mount_dirs = subprocess.check_output(
['findmnt', '-sn', '-O', CONNECT_OPTION, '-o', 'TARGET']).decode('utf-8').split('\n')
return [MountableDirectory(mnt) for mnt in mount_dirs if mnt]
except subprocess.CalledProcessError:
return []
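# Illustrative (hypothetical) /etc/fstab entry that this script would treat as connectable,
# because it carries the x-jms.connect option that the findmnt call above filters on:
#   fileserver:/export/site  /mnt/site  nfs  noauto,x-jms.connect  0  0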
if __name__ == '__main__':
# If run as a script take parameters to feed the function from the command line
if not (len(sys.argv) == 2 or (len(sys.argv) == 3 and sys.argv[2] == "force")):
print_usage()
sys.exit(1)
if sys.argv[1] == "connect":
for mnt in connectable_mounts():
mnt.mount()
elif sys.argv[1] == "disconnect":
for mnt in connectable_mounts():
mnt.unmount(len(sys.argv) == 3 and sys.argv[2] == "force")
elif sys.argv[1] == "backup":
do_backup()
else:
print_usage()
sys.exit(1)
```
#### File: jodysankey/scripts/pass_once_daily.py
```python
import os
import re
import sys
import subprocess
from datetime import date, datetime
def printUsage():
print("Usage:")
print(" {} trigger_file [essid]".format(os.path.basename(__file__)))
print(" trigger_file = a file to use to record previous executions")
print(" essid = an optional ESSID srting that must be in the current")
print(" iwconfig otherwise the script will fail")
print("Return values:")
print(" 0 = if supplied, essid is present and first time this")
print(" condition has been met today.")
print(" 1 = run has already succeeded for today")
print(" 2 = mount_point was not mounted")
print(" 3 = invalid arguments")
print(" 4 = some other problem")
# try:
# mounts = subprocess.check_output(["mount"]).decode("utf-8").split("\n")
# filtered = [m for m in mounts if (" " + mount_point + " ") in m]
# except Exception as e:
# print(sys.exc_info())
# sys.exit(4)
# if len(filtered) == 0:
# sys.exit(2)
if __name__ == '__main__':
#Just print usage if wrong number of arguments supplied
if len(sys.argv) < 2 or len(sys.argv) > 3:
printUsage()
sys.exit(3)
trigger_file = sys.argv[1]
essid = None if len(sys.argv) == 2 else sys.argv[2]
#If trigger file is present and modified today we're good
if os.path.exists(trigger_file):
if date.fromtimestamp(os.path.getmtime(trigger_file)) == date.today():
sys.exit(1)
#If we had an ESSID check it is currently connected
if essid is not None:
finder = re.compile("ESSID.+" + essid)
try:
iwconfig = subprocess.check_output(["iwconfig"]).decode("utf-8").split("\n")
filtered = [l for l in iwconfig if finder.search(l)]
except Exception as e:
print(sys.exc_info())
sys.exit(4)
if len(filtered) == 0:
sys.exit(2)
#Write the current datetime to the file
try:
with open(trigger_file, "a") as fh:
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), file=fh)
except Exception as e:
print(sys.exc_info())
sys.exit(4)
#Return success
sys.exit(0)
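# Since the script communicates only through its exit status, a typical (hypothetical) use
# is to gate another command in cron or a shell script, for example:
#   pass_once_daily.py /var/tmp/offsite.trigger HomeWifi && run-offsite-backup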
```
#### File: jodysankey/scripts/scanning.py
```python
import os
import re
import sys
import scan_core
SCAN_PATH = os.path.expanduser('~/tmp/scan')
def print_option_set(option_set, leader):
"""Print a line for each option in the set, prefixed with leader"""
for option in option_set:
labels = ",".join(option['labels'])
line = leader + labels + " "*(20-len(labels)) + "- " + option['description']
print(line)
def print_usage():
"""Print standard help string then quit"""
leader = " "
print("\n Usage: scanning [-v|-c|-k=N] SOURCE PAPER SCALE COLOR [basename]\n")
print(" SOURCE Paper source:")
print_option_set(scan_core.SOURCES, leader)
print(" PAPER Paper size:")
print_option_set(scan_core.PAPERS, leader)
print(" SCALE Scaling factor:")
print_option_set(scan_core.SCALES, leader)
print(" COLOR Colour mode:")
print_option_set(scan_core.COLORS, leader)
print(" basename Desired base filename, optionally including path")
print(" -v View each scan when conversion is complete")
print(" -c Confirm each scan before saving in final location")
print(" -d Print the scanning a conversion commands used for debugging")
print(" -k=N Do not convert page N of scan\n")
print("SCANNING Script (c)2010 <NAME>")
version = sys.version_info
print("Currently running in Python v{}.{}.{}\n".format(*version))
sys.exit()
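# Illustrative invocation; the SOURCE/PAPER/SCALE/COLOR labels below are hypothetical,
# since the accepted values come from scan_core.SOURCES, PAPERS, SCALES and COLORS:
#   scanning -c -k=2 adf a4 100 colour receipts/march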
def die(print_string):
"""Prints the specified string then exits with code 1"""
print(print_string)
sys.exit(1)
def main():
"""Run the scanning function using parameters from the command line."""
if len(sys.argv) < 2:
print_usage()
args = sys.argv[1:]
#Declare and initialize the variables controlled by switch
check = False
view = False
debug = False
kills = []
#Eat any switches from the front
while args and args[0].startswith('-'):
arg = args.pop(0).lower()
print("eating " + arg)
mko = re.search(r"-k=([1-9]+)$", arg)
if mko is not None:
kills.append(int(mko.groups()[0]))
elif arg == '-c':
check = True
elif arg == '-v':
view = True
elif arg == '-d':
debug = True
elif arg == '--help':
print_usage()
else:
die("ERROR: Switch '{}' not recognized".format(arg))
# Do we have enough parameters left?
if len(args) not in range(4, 6):
print(args)
die("ERROR: Wrong number of parameters supplied")
dest = os.path.join(SCAN_PATH, args[4]) if len(args) == 5 else None
scan_core.perform_scan(dest, args[0], args[1], args[2], args[3],
view=view, check=check, kills=kills, debug=debug)
if __name__ == '__main__':
main()
``` |
{
"source": "jodythai/scoopy_kitty_recognition",
"score": 2
} |
#### File: scoopy_kitty_recognition/test/test_snoopy_kitty_recognition.py
```python
from snoopy_kitty_recognition import snoopy_kitty_recognition
def test_fib() -> None:
assert snoopy_kitty_recognition.fib(0) == 0
assert snoopy_kitty_recognition.fib(1) == 1
assert snoopy_kitty_recognition.fib(2) == 1
assert snoopy_kitty_recognition.fib(3) == 2
assert snoopy_kitty_recognition.fib(4) == 3
assert snoopy_kitty_recognition.fib(5) == 5
assert snoopy_kitty_recognition.fib(10) == 55
``` |
{
"source": "JodyZ0203/Virtual_Parent",
"score": 3
} |
#### File: JodyZ0203/Virtual_Parent/phase1.py
```python
from random import randint
from random import choice
import json
from portal import Checker
#portal()
#passwordVault = {'James': '<PASSWORD>', 'Kobe': '<PASSWORD>'} #todo: store this file
#with open('intents.json') as file:
# data = json.load(file)
preMadeInformation = '''
{
"users": [
{
"userId": "JL7",
"name": "<NAME>",
"age": 12,
"password": "<PASSWORD>&",
"birthday": [
"2005",
"02",
"02"
],
"country": "Finland"
},
{
"userId": "BB08",
"name": "<NAME>",
"age": 6,
"password": "<PASSWORD>",
"birthday": [
"2003",
"03",
"24"
],
"country": "Canada"
},
{
"userId": "Ll213",
"name": "<NAME>",
"age": 25,
"password": "<PASSWORD>",
"birthday": [
"2005",
"05",
"06"
],
"country": "Brazil"
},
{
"userId": "Tester1",
"name": "Tester1",
"age": 22,
"birthday": [
"<PASSWORD>",
"00",
"00"
],
"password": "<PASSWORD>",
"country": "Null"
},
{
"userId": "king257",
"name": "<NAME>",
"age": 24,
"birthday": [
"2002",
"03",
"12"
],
"password": "<PASSWORD>",
"country": "United States"
},
{
"userId": "Jinl7",
"name": "<NAME>",
"age": 19,
"password": "D~b2374dsfdsf3427di&",
"birthday": [
"2005",
"02",
"02"
],
"country": "Finland"
},
{
"userId": "BBB0308",
"name": "<NAME>",
"age": 23,
"password": "<PASSWORD>",
"birthday": [
"2003",
"03",
"24"
],
"country": "Canada"
},
{
"userId": "Ll223213",
"name": "<NAME>",
"age": 25,
"password": "<PASSWORD>",
"birthday": [
"2005",
"05",
"06"
],
"country": "Brazil"
},
{
"userId": "JL7",
"name": "<NAME>",
"age": 18,
"password": "<PASSWORD>&",
"birthday": [
"2005",
"02",
"02"
],
"country": "Finland"
},
{
"userId": "BB08",
"name": "<NAME>",
"age": 16,
"password": "<PASSWORD>",
"birthday": [
"2003",
"03",
"24"
],
"country": "Canada"
},
{
"userId": "Ll213",
"name": "<NAME>",
"age": 15,
"password": "<PASSWORD>",
"birthday": [
"2005",
"05",
"06"
],
"country": "Brazil"
},
{
"userId": "Tester1",
"name": "Tester1",
"age": 20,
"birthday": [
"0000",
"00",
"00"
],
"password": "<PASSWORD>",
"country": "Null"
},
{
"userId": "king257",
"name": "<NAME>",
"age": 24,
"birthday": [
"2002",
"03",
"12"
],
"password": "<PASSWORD>",
"country": "United States"
},
{
"userId": "Jinl7",
"name": "<NAME>",
"age": 13,
"password": "<PASSWORD>&",
"birthday": [
"2005",
"02",
"02"
],
"country": "Finland"
},
{
"userId": "BBB0308",
"name": "<NAME>",
"age": 13,
"password": "<PASSWORD>",
"birthday": [
"2003",
"03",
"24"
],
"country": "Canada"
},
{
"userId": "Ll223213",
"name": "<NAME>",
"age": 35,
"password": "<PASSWORD>",
"birthday": [
"2005",
"05",
"06"
],
"country": "Brazil"
},
{
"userId": "JL7",
"name": "<NAME>",
"age": 47,
"password": "<PASSWORD>&",
"birthday": [
"2005",
"02",
"02"
],
"country": "Finland"
},
{
"userId": "BB08",
"name": "<NAME>",
"age": 6,
"password": "<PASSWORD>",
"birthday": [
"2003",
"03",
"24"
],
"country": "Canada"
},
{
"userId": "Ll213",
"name": "<NAME>",
"age": 11,
"password": "<PASSWORD>232",
"birthday": [
"2005",
"05",
"06"
],
"country": "Brazil"
},
{
"userId": "Tester1",
"name": "Tester1",
"age": 10,
"birthday": [
"0000",
"00",
"00"
],
"password": "<PASSWORD>",
"country": "Null"
},
{
"userId": "king257",
"name": "<NAME>",
"age": 21,
"birthday": [
"2002",
"03",
"12"
],
"password": "<PASSWORD>",
"country": "United States"
},
{
"userId": "Jinl7",
"name": "<NAME>",
"age": 19,
"password": "<PASSWORD>&",
"birthday": [
"2005",
"02",
"02"
],
"country": "Finland"
},
{
"userId": "BBB0308",
"name": "<NAME>",
"age": 20,
"password": "<PASSWORD>",
"birthday": [
"2003",
"03",
"24"
],
"country": "Canada"
}
]
}
'''
data = json.loads(preMadeInformation)
with open('basicInformation.json', 'w') as file:
json.dump(data, file, indent=3)
class User:
'''
the class that holds information about the user
Attributes
----------
name : str
The name of the user
password : str
This is the user password
age : int
The age of the user
Methods
-------
verifyLogin()
Attempts to verify the password
changePassword()
Attempts to change a password
'''
def __init__(self, name, password, age):
'''
Constructor that initialize a user
Parameters
----------
name : str
The name of the user
password : str
The password of the user
age: int
The age of the user
'''
self.id = self
self.name = name
self.password = password
self.age = age
def verifyLogin(self):
'''
This function checks whether the password the user enters is correct.
It asks the user for a name and password and checks whether they match what we have in the database
Parameters
----------
none
Returns
-------
bool
True if the verification is successful
False if the attempted failed
Warnings
--------
If an incorrect name is entered, it will raise a KeyError.
Raises
------
KeyError
If the input of the username is incorrect, and it can not be found in the dictionary
NameError
If the input is a name and not a string, so it is not defined
TypeError
There should not be any input, otherwise it will raise a type error.
'''
userName = input("name: ")
passWord = input("password: ")
checkName = passwordVault[userName]
if checkName == passWord:
print("Login Success")
print('Loading...')
return True
else:
print("False!!")
return False
def changePassword(self):
'''
This function allows you to change your password
Parameters
----------
Returns
-------
void
Warnings
--------
If an incorrect option is chosen, an UnboundLocalError will surface.
Raises
------
TypeError
There should not be any input, otherwise it will raise a type error.
'''
id = self.name
option = input("Do you want me to generate a password for you? (yes/no): ")
if option == 'yes':
newPassword = generatePassword(True)
elif option == 'no':
newPassword = input("Enter your new password: ")
passwordVault[id] = newPassword
print('Your name is: '+ self.name)
print('Your new password is: ' + passwordVault[id])
print(passwordVault)
def generatePassword(x):
'''
This function generates a random password
Parameters
------------
x: bool
Returns
-------
str
The password generated using this function should be a string because it consists of numbers and characters.
Warnings
--------
It will break if the input is not True.
'''
alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
length = len(alpha)
if bool(x):
for i in range(length):
wordOne = choice(alpha) + choice(alpha)
wordTwo = choice(alpha) + choice(alpha)
password = rand<PASSWORD>(1<PASSWORD>, 9<PASSWORD>)
newPassword = (wordOne+str(password)+wordTwo)
return newPassword
else:
pass
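# Illustrative call: generatePassword(True) returns a string built from two random letter
# pairs wrapped around a random number, e.g. something shaped like 'kq1234tz' (the exact
# digit range is anonymized in this copy of the source).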
a = input(str('NAME: '))
b = input(str('Password: '))
Jody111 = User(a, b, 18)
#Jody111.verifyLogin()
#print(generatePassword('str'))
#Jody111.changePassword()
'''
try:
Jody111.changePassword(<PASSWORD>)
except TypeError:
print("Raised a TypeError as expected")
try:
Jody111.verifyLogin('james')
Jody111.verifyLogin(1)
except TypeError:
print("Raised a TypeError as expected")
except NameError:
print("Raised a NameError as expected")
except KeyError:
print("Raised a KeyError as expected")
'''
``` |
{
"source": "jody-zeitler/pdfrelay",
"score": 3
} |
#### File: pdfrelay/pdfrelay/exception.py
```python
class PdfRelayException(Exception):
def __init__(self, *args, **kwargs):
super(PdfRelayException, self).__init__(args, kwargs)
class JobError(PdfRelayException):
"""Issue with the parameters of the conversion job"""
class EngineError(PdfRelayException):
"""Engine process spawning/execution error"""
class MetadataError(PdfRelayException):
"""An error occurred in the retrieval or saving of PDF metadata"""
```
#### File: pdfrelay/pdfrelay/model.py
```python
import os
import random
from .exception import *
class ConversionJob(object):
"""Container class for passing around job data"""
def __init__(self, options):
self.html = options.pop('html', None)
self.url = options.pop('url', None)
if not (self.html or self.url):
raise JobError("'html' or 'url' parameter not supplied")
self.arguments = options.pop('arguments', [])
self.metadata = options.pop('metadata', {})
self.pdf = None
self.error = None
self.header_file = None
self.footer_file = None
for k,v in options.items():
if k.startswith('metadata'):
self.metadata[k[8:]] = v
elif k == '--header-html':
self.arguments.append(k)
self.arguments.append( self.make_header(v) )
elif k == '--footer-html':
self.arguments.append(k)
self.arguments.append( self.make_footer(v) )
else:
self.arguments.append(k)
self.arguments.append(v)
def make_header(self, html):
self.header_file = '/dev/shm/pdfrelay_header_{}.html'.format(random.randint(1000000, 9999999))
with open(self.header_file, 'w') as outfile:
outfile.write(html)
return self.header_file
def make_footer(self, html):
self.footer_file = '/dev/shm/pdfrelay_footer_{}.html'.format(random.randint(1000000, 9999999))
with open(self.footer_file, 'w') as outfile:
outfile.write(html)
return self.footer_file
def cleanup_files(self):
if self.header_file and os.path.isfile(self.header_file):
os.remove(self.header_file)
if self.footer_file and os.path.isfile(self.footer_file):
os.remove(self.footer_file)
``` |
{
"source": "Joe0313/CS-12th-Grader",
"score": 2
} |
#### File: CS-12th-Grader/CS-12th-Joe/crawler.py
```python
import re
import os
import time
import random
import pickle
import datetime
import argparse
import prettytable
from tqdm import tqdm
from lxml import etree
from DecryptLogin import login
'''Parse command-line arguments'''
def parseArgs():
parser = argparse.ArgumentParser(description='下载指定微博用户的所有微博数据')
parser.add_argument('--username', dest='username', help='用户名', type=str, required=True)
parser.add_argument('--password', dest='password', help='密码', type=str, required=True)
args = parser.parse_args()
return args
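# Example invocation (the credentials are placeholders):
#   python crawler.py --username your_weibo_account --password your_password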
'''Crawler for all weibo data of a target user'''
class WeiboSpider():
def __init__(self, username, password, **kwargs):
self.session = WeiboSpider.login(username, password)
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'
}
self.savedir = os.path.join(os.getcwd(), 'datas')
'''External entry point'''
def start(self):
while True:
# ID of the target user to crawl (hard-coded here)
user_id = 3537635312
# Fetch the target user's basic profile so the operator can confirm the input is correct
url = f'https://weibo.cn/{user_id}'
res = self.session.get(url, headers=self.headers)
selector = etree.HTML(res.content)
base_infos = selector.xpath("//div[@class='tip2']/*/text()")
num_wbs, num_followings, num_followers = int(base_infos[0][3: -1]), int(base_infos[1][3: -1]), int(base_infos[2][3: -1])
num_wb_pages = selector.xpath("//input[@name='mp']")
num_wb_pages = int(num_wb_pages[0].attrib['value']) if len(num_wb_pages) > 0 else 1
url = f'https://weibo.cn/{user_id}/info'
res = self.session.get(url, headers=self.headers)
selector = etree.HTML(res.content)
nickname = selector.xpath('//title/text()')[0][:-3]
# Ask the operator to confirm downloading all of this user's weibos
tb = prettytable.PrettyTable()
tb.field_names = ['用户名', '关注数量', '被关注数量', '微博数量', '微博页数']
tb.add_row([nickname, num_followings, num_followers, num_wbs, num_wb_pages])
print('获取的用户信息如下:')
print(tb)
is_download = input('是否爬取该用户的所有微博?(y/n, 默认: y) ——> ')
if is_download == 'y' or is_download == 'yes' or not is_download:
userinfos = {'user_id': user_id, 'num_wbs': num_wbs, 'num_wb_pages': num_wb_pages}
self.__downloadWeibos(userinfos)
# Ask whether to continue with another user
is_continue = input('是否还需下载其他用户的微博数据?(n/y, 默认: n) ——> ')
if is_continue == 'n' or is_continue == 'no' or not is_continue:
break
'''Download all weibo data for a given user'''
def __downloadWeibos(self, userinfos):
# Holds the collected weibo data
weibos_dict = {}
# Required metadata about the target user
num_wbs = userinfos.get('num_wbs')
user_id = userinfos.get('user_id')
num_wb_pages = userinfos.get('num_wb_pages')
# Crawl every page of weibo data
page_block_size = random.randint(1, 5)
page_block_count = 0
for page in tqdm(range(1, num_wb_pages+1)):
url = f'https://weibo.cn/{user_id}?page={page}'
res = self.session.get(url, headers=self.headers)
selector = etree.HTML(res.content)
contents = selector.xpath("//div[@class='c']")
if contents[0].xpath("div/span[@class='ctt']"):
for i in range(0, len(contents)-2):
content = contents[i]
weibo_info = self.__parseWeiboContent(content)
print(list(weibo_info.values())[0][-1])
weibos_dict.update(weibo_info)
# --Save intermediate results every 20 pages
if page % 20 == 0:
self.__save(weibos_dict, user_id)
# --Sleep every few pages to avoid overloading the server and getting blocked
page_block_count += 1
if page_block_count % page_block_size == 0:
time.sleep(random.randint(6, 12))
page_block_size = random.randint(1, 5)
page_block_count = 0
filepath = self.__save(weibos_dict, user_id)
print('用户%s的所有微博数据下载完毕, 数据保存在%s...' % (user_id, filepath))
'''Parse the different types of weibo posts'''
def __parseWeiboContent(self, content):
weibo_info = {}
# Whether this is an original (not forwarded) weibo
is_ori = False if len(content.xpath("div/span[@class='cmt']")) > 3 else True
# Weibo ID
weibo_id = content.xpath('@id')[0][2:]
# Weibo text
if is_ori:
if u'全文' in content.xpath('div//a/text()'):
url = f'https://weibo.cn/comment/{weibo_id}'
res = self.session.get(url, headers=self.headers)
selector = etree.HTML(res.content)
weibo_text_tmp = selector.xpath("//div[@class='c']")[1]
weibo_text = weibo_text_tmp.xpath('string(.)').replace(u'\u200b', '').encode('utf-8', 'ignore').decode('utf-8')
weibo_text = weibo_text[weibo_text.find(':')+1: weibo_text.rfind(weibo_text_tmp.xpath("//span[@class='ct']/text()")[0])]
else:
weibo_text = content.xpath('string(.)').replace(u'\u200b', '').encode('utf-8', 'ignore').decode('utf-8')
weibo_text = weibo_text[:weibo_text.rfind(u'赞')]
else:
if u'全文' in content.xpath('div//a/text()'):
url = f'https://weibo.cn/comment/{weibo_id}'
res = self.session.get(url, headers=self.headers)
selector = etree.HTML(res.content)
weibo_text_tmp = selector.xpath("//div[@class='c']")[1]
weibo_text = weibo_text_tmp.xpath('string(.)').replace(u'\u200b', '').encode('utf-8', 'ignore').decode('utf-8')
weibo_text = weibo_text[weibo_text.find(':')+1: weibo_text.rfind(weibo_text_tmp.xpath("//span[@class='ct']/text()")[0])]
weibo_text = weibo_text[:weibo_text.rfind(u'原文转发')]
else:
weibo_text = content.xpath('string(.)').replace(u'\u200b', '').encode('utf-8', 'ignore').decode('utf-8')
weibo_text = weibo_text[weibo_text.find(':')+1: weibo_text.rfind(u'赞')][:weibo_text.rfind(u'赞')]
# Publication time of the weibo
publish_time = content.xpath("div/span[@class='ct']")[0]
publish_time = publish_time.xpath('string(.)').replace(u'\u200b', '').encode('utf-8', 'ignore').decode('utf-8')
publish_time = publish_time.split(u'来自')[0]
if u'刚刚' in publish_time:
publish_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
elif u'分钟' in publish_time:
passed_minutes = publish_time[:publish_time.find(u'分钟')]
publish_time = (datetime.datetime.now() - datetime.timedelta(int(passed_minutes))).strftime('%Y-%m-%d %H:%M')
elif u'今天' in publish_time:
today = datetime.datetime.now().strftime('%Y-%m-%d')
publish_time = today + ' ' + publish_time[3:]
if len(publish_time) > 16: publish_time = publish_time[:16]
elif u'月' in publish_time:
year = datetime.datetime.now().strftime('%Y')
month = publish_time[0: 2]
day = publish_time[3: 5]
publish_time = year + '-' + month + '-' + day + ' ' + publish_time[7: 12]
else:
publish_time = publish_time[:16]
# Number of likes, forwards and comments
info_from_others = content.xpath('div')[-1].xpath('string(.)').replace(u'\u200b', '').encode('utf-8', 'ignore').decode('utf-8')
info_from_others = info_from_others[info_from_others.rfind(u'赞'):]
info_from_others = re.findall(r'\d+', info_from_others, re.M)
num_likes = int(info_from_others[0])
num_forwards = int(info_from_others[1])
num_comments = int(info_from_others[2])
# Assemble the record
weibo_info[weibo_id] = [is_ori, publish_time, num_likes, num_forwards, num_comments, weibo_text]
# Return the extracted data
return weibo_info
'''Save the weibo data'''
def __save(self, weibos_dict, user_id):
if not os.path.exists(self.savedir):
os.mkdir(self.savedir)
filepath = os.path.join(self.savedir, user_id+'.pkl')
f = open(filepath, 'wb')
pickle.dump(weibos_dict, f)
f.close()
return filepath
'''Simulate login using DecryptLogin'''
@staticmethod
def login(username, password):
lg = login.Login()
_, session = lg.weibo(username, password, 'mobile')
return session
'''run'''
if __name__ == '__main__':
args = parseArgs()
wb_spider = WeiboSpider(args.username, args.password)
wb_spider.start()
``` |
{
"source": "Joe0400Student/fquery",
"score": 3
} |
#### File: Joe0400Student/fquery/interpreter.py
```python
VERSION = "0.1.0-dev"
YEAR = "2021"
from functools import reduce
from zipfile import ZipFile
from io import TextIOWrapper as io_wrap
class UnresolvableVariable(Exception):
pass
class Value:
"""Value Object
This type stores the evaluation of any data
Attributes:
__init__: Constructor
step: Small-step executive function
pr: Human-Readable format string-builder
value: Contained value of the type
"""
def __init__(self, value, environ={}):
"""Creates a Value Object
Parameter:
value -> any: The stored evaluation
environ -> dict:
"""
self.value, self.environment = value, environ
def step(self,local_environment: dict):
""" Iteratively small-step execution
Parameter:
local_environment -> dict: local updates to the stored environment
Returns:
Value-Object
"""
return self
def pr(self):
return f"{self.value}"
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
def apply_all_dts(self,f):
return f(self)
class Operator:
def __init__(self, oper, arg1, arg2, environ={}):
self.oper = oper
self.arg1, self.arg2 = arg1, arg2
self.environment = environ
def step(self, local_environment: dict):
#print("step in operator")
temp_local = local_environment
for k in self.environment:
e = self.environment[k]
temp_local[k] = e
if(not isinstance(self.arg1, Value)):
return Operator(self.oper, self.arg1.step(temp_local), self.arg2, self.environment)
if(not isinstance(self.arg2, Value)):
return Operator(self.oper, self.arg1, self.arg2.step(temp_local), self.environment)
return Value(self.oper(self.arg1.value,self.arg2.value),self.environment)
def pr(self):
return f"{self.arg1.pr()} operator {self.arg2.pr()}"
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.arg1.update_all_variables(kwargs)
self.arg2.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.arg1 = self.arg1.apply_all_dts(f)
self.arg2 = self.arg2.apply_all_dts(f)
return self
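# Minimal illustration of the small-step protocol: once both operands are Value objects,
# a single step() call produces the final Value.
#   Operator(lambda a, b: a + b, Value(1, {}), Value(2, {}), {}).step({})   # -> Value(3, {})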
class Variable:
def __init__(self, name: str, environment: dict):
self.environment = environment
if(name in environment):
self.name = environment[self.name]
self.resolved = True
else:
self.name = name
self.resolved = False
def step(self, local_environment):
for k in self.environment:
local_environment[k] = self.environment[k]
if(self.resolved):
return self.name
else:
if self.name in local_environment:
temp = local_environment[self.name]
temp.update_all_variables(self.environment)
return temp
else:
raise UnresolvableVariable()
def pr(self):
return self.name
def update_all_variables(self,diction):
for k in diction:
self.environment[k] = diction[k]
def apply_all_dts(self,f):
return f(self)
class Apply:
def __init__(self, name: str, assigned, program, environment: dict):
self.name, self.program = name, program
self.environment = environment
self.name = name
self.assigned = assigned
#print(self.assigned.pr())
def step(self, local_environment: dict):
#rint("step in apply")
temp_env = local_environment
for k in self.environment:
temp_env[k] = self.environment[k]
if(not isinstance(self.assigned, Value)):
#self.assigned = self.assigned.step(temp_env)
return Apply(self.name, self.assigned.step(temp_env), self.program, self.environment)
self.program.update_all_variables({self.name:self.assigned})
return self.program
def pr(self):
return f"Apply {self.name}={self.assigned.pr()} on {self.program.pr()}"
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.assigned.update_all_variables(kwargs)
self.program.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.assigned = self.assigned.apply_all_dts(f)
self.program = self.program.apply_all_dts(f)
return self
class If_Else:
def __init__(self, conditional, yes, no, environment: dict):
self.conditional, self.yes, self.no, self.environment = conditional, yes, no, environment
def step(self, local_environment: dict):
#print("step in if_else")
updated_local_env = local_environment
for k in self.environment:
updated_local_env[k] = self.environment[k]
if(isinstance(self.conditional, Value)):
return self.yes if self.conditional.value else self.no
else:
return If_Else(self.conditional.step(updated_local_env),self.yes,self.no,self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.conditional.update_all_variables(kwargs)
self.yes.update_all_variables(kwargs)
self.no.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.conditional = self.conditional.apply_all_dts(f)
self.yes = self.yes.apply_all_dts(f)
self.no = self.no.apply_all_dts(f)
return self
def pr(self):
return f"if {self.conditional.pr()} then {self.yes.pr()} else {self.no.pr()}"
class QuitException(Exception):
pass
class Chain:
def __init__(self, name: str, assigned, program, environment: dict):
self.name, self.program = name, program
self.environment = environment
self.name = name
self.assigned = assigned
#print(self.assigned.pr())
def step(self, local_environment: dict):
#rint("step in apply")
temp_env = local_environment
for k in self.environment:
temp_env[k] = self.environment[k]
name = self.name
ass = self.assigned
def tester(v):
if(isinstance(v,Variable)):
if(v.name == name):
return ass
return v
return self.program.apply_all_dts(tester)
def pr(self):
return f"Apply {self.name}={self.assigned.pr()} on {self.program.pr()}"
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.assigned.update_all_variables(kwargs)
self.program.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.program = self.program.apply_all_dts(f)
self.assigned = self.assigned.apply_all_dts(f)
return self
class FSLoader:
def __init__(self,file_name,descriptor=Value("a",{}),environment={}):
self.file_name = file_name
self.descriptor = descriptor
self.environment=environment
def step(self, local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.file_name, Value)):
return FSLoader(self.file_name.step(temp_env),self.descriptor,{})
if(not isinstance(self.descriptor, Value)):
return FSLoader(self.file_name, self.descriptor.step(temp_env),{})
self.file_name = self.file_name.value
self.descriptor = self.descriptor.value
self.file = ZipFile(self.file_name,self.descriptor)
self.manifest = io_wrap(self.file.open("manifest.txt"))
table_names = map(lambda a: a[:-1], self.manifest.readlines())
self.tables = {line:io_wrap(self.file.open(f"{line}.table")) for line in table_names}
return Loaded_Table(self.file, self.manifest, self.tables,self.environment)
def update_all_variables(self, kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.file_name.update_all_variables(kwargs)
self.descriptor.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.file_name = self.file_name.apply_all_dts(f)
return self
class Loaded_Table:
def __init__(self,file, manifest, tables, environment={}):
self.file = file
self.manifest = manifest
self.tables = tables
self.environment = environment
def step(self, local_environment={}):
raise Exception("Loaded table trying to be propogated back down the call trace, <you shouldnt be seeing this, make an issue on https://github.com/Joe0400Student/fquery>")
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
def apply_all_dts(self,f):
return self
class UnwrapTable:
def __init__(self, file_loader, table_name,environment={}):
self.file_loader = file_loader
self.table_name = table_name
self.environment = environment
def step(self, local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.file_loader, Loaded_Table)):
return UnwrapTable(self.file_loader.step(temp_env),self.table_name,self.environment)
if(not isinstance(self.table_name, Value)):
return UnwrapTable(self.file_loader,self.table_name.step(temp_env),self.environment)
return Value(self.file_loader.tables[self.table_name.value],self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.file_loader.update_all_variables(kwargs)
self.table_name.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.file_loader = self.file_loader.apply_all_dts(f)
self.table_name = self.table_name.apply_all_dts(f)
return self
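# FSLoader/UnwrapTable assume a zip archive laid out like this hypothetical example, with
# manifest.txt listing one table name per line and each table stored as '<name>.table':
#   site.zip
#     manifest.txt   (e.g. "users\norders\n")
#     users.table
#     orders.table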
class Iterator:
def __init__(self, iterator, environment={}):
self.iterator = iterator
self.environment = environment
def step(self, local_environment={}):
return self
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
def apply_all_dts(self,f):
return self
class FullResolvedIterator(Iterator):
def __init__(self,iterator, environment={}):
super().__init__(iterator, environment)
class List(FullResolvedIterator):
def __init__(self, lst, environment={}):
super().__init__(lst, environment)
from itertools import chain
class ConcatenateIterator(Iterator):
def __init__(self, iter1, iter2, environment={}):
super().__init__((iter1,iter2), environment)
def step(self, local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.iterator[0], FullResolvedIterator)):
return ConcatenateIterator(self.iterator[0].step(temp_env), self.iterator[1], self.environment)
if(not isinstance(self.iterator[1], FullResolvedIterator)):
return ConcatenateIterator(self.iterator[0],self.iterator[1].step(temp_env),self.environment)
return FullResolvedIterator(chain(self.iterator[0].iterator, self.iterator[1].iterator),self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
for iterators in self.iterator:
iterators.update_all_variables(kwargs)
def apply_all_dts(self,f):
for iterators in self.iterator:
iterators = iterators.apply_all_dts(f)
return self
class Map(Iterator):
def __init__(self, iter1, lambd, val, environment={}):
super().__init__(iter1, environment)
self.lambd = lambd
self.val = val
def step(self, local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.iterator, Map) and not isinstance(self.iterator, FullResolvedIterator)):
return Map(self.iterator.step(temp_env), self.lambd, self.val, self.environment)
if(not isinstance(self.val, Value)):
return Map(self.iterator, self.lambd, self.val.step(temp_env), self.environment)
if(isinstance(self.iterator, Map)):
return Map(self.iterator.iterator, Chain(self.iterator.val.value,self.iterator.lambd,self.lambd,self.iterator.lambd.environment),self.val,self.environment)
return FullResolvedIterator(map(lambda a: execute(Apply(self.val.value, a, self.lambd, self.lambd.environment), temp_env), self.iterator.iterator),self.environment)
def update_all_variables(self, kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.iterator.update_all_variables(kwargs)
self.lambd.update_all_variables(kwargs)
self.val.update_all_variables(kwargs)
def apply_all_dts(self, f):
self.lambd = self.lambd.apply_all_dts(f)
self.iterator.apply_all_dts(f)
self.val.apply_all_dts(f)
return self
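# A small sketch of how Map is meant to be driven; 'x' is the bound variable name and the
# List elements are already Values:
#   Map(List([Value(1, {}), Value(2, {})], {}),
#       Add(Variable('x', {}), Value(10, {}), {}),
#       Value('x', {}), {})
# Stepping this to completion yields a FullResolvedIterator whose elements evaluate to 11 and 12.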
class Filter(Iterator):
def __init__(self, iter1, lambd, val, environment={}):
super().__init__(iter1, environment)
self.lambd = lambd
self.val = val
def step(self, local_environment={}):
temp_env = local_environment
for k in self.environment:
temp_env[k] = self.environment[k]
if(not isinstance(self.iterator, Filter) and not isinstance(self.iterator, Map) and not isinstance(self.iterator, FullResolvedIterator)):
return Filter(self.iterator.step(temp_env), self.lambd, self.val, self.environment)
if(not isinstance(self.val, Value)):
return Filter(self.iterator, self.lambd, self.val.step(temp_env), self.environment)
if(isinstance(self.iterator, Map)):
return Map(Filter(self.iterator.iterator, Chain(self.val.value, self.iterator.lambd, self.lambd,self.iterator.lambd.environment), self.iterator.val,self.iterator.environment),self.iterator.lambd, self.iterator.val, self.iterator.environment)
if(isinstance(self.iterator, Filter)):
return Filter(self.iterator.iterator, Apply(self.iterator.val.value, Variable(self.val.value,temp_env), And(self.lambd,self.iterator.lambd,temp_env),temp_env), self.val, self.environment)
return FullResolvedIterator(filter(lambda a: execute(Apply(self.val.value,a,self.lambd,temp_env),temp_env).value, self.iterator.iterator),self.environment)
def update_all_variables(self, kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.iterator.update_all_variables(kwargs)
self.lambd.update_all_variables(kwargs)
self.val.update_all_variables(kwargs)
def apply_all_dts(self, f):
self.lambd = self.lambd.apply_all_dts(f)
self.iterator.apply_all_dts(f)
self.val.apply_all_dts(f)
return self
class ToList:
def __init__(self,iterator, environment={}):
self.iterator = iterator
self.environment=environment
def step(self, local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.iterator, FullResolvedIterator)):
return ToList(self.iterator.step(temp_env),self.environment)
if(isinstance(self.iterator, List)):
return self.iterator
return List(list(self.iterator.iterator),self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.iterator.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.iterator = self.iterator.apply_all_dts(f)
return self
class Access:
def __init__(self,array, location, environment={}):
self.array = array
self.location = location
self.environment = environment
def step(self,local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.array, List) and not isinstance(self.array,Iterator)):
return Access(self.array.step(temp_env),self.location, self.environment)
if(isinstance(self.array,Iterator)):
return Access(ToList(self.array, self.environment), self.location,self.environment)
if(not isinstance(self.location, Value)):
return Access(self.array, self.location.step(temp_env),self.environment)
return Value(self.array.iterator[self.location.value],self.array.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.array.update_all_variables(kwargs)
self.location.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.array = self.array.apply_all_dts(f)
self.location = self.location.apply_all_dts(f)
return self
class Print:
def __init__(self,expression,environment={}):
self.expression = expression
self.environment = environment
def step(self, local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.expression, Value) and not isinstance(self.expression, List)):
return Print(self.expression.step(temp_env),self.environment)
if(isinstance(self.expression,List)):
print(f'[{", ".join(map(lambda a: str(execute(a,temp_env).value) ,self.expression.iterator))}]')
return Value(None,self.environment)
if(isinstance(self.expression,NamedTuple)):
for key in self.expression.value:
while(not isinstance(self.expression.value[key],Value)):
self.expression.value[key] = self.expression.value[key].step(temp_env)
print({key:self.expression.value[key].value for key in self.expression.value})
return Value(None,self.environment)
print(self.expression.value)
return Value(None,self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.expression.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.expression = self.expression.apply_all_dts(f)
return self
class Get:
def __init__(self,Input_Response=Value(">:",{}),force_type=lambda a:a,environment={}):
self.force_type = force_type
self.environment = environment
self.input_response = Input_Response
def step(self,local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.input_response,Value)):
return Get(self.input_response.step(temp_env),self.force_type,self.environment)
return Value(self.force_type(input(self.input_response.value)),self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.input_response.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.input_response = self.input_response.apply_all_dts(f)
return self
class Not(Operator):
def __init__(self,arg1,env={}):
super().__init__(lambda a,b: not a, arg1, arg1, env)
class And(Operator):
def __init__(self,arg1,arg2,env={}):
super().__init__(lambda a,b: a and b, arg1, arg2, env)
class Or(Operator):
def __init__(self,arg1,arg2,env={}):
super().__init__(lambda a,b: a or b, arg1, arg2, env)
class Negate(Operator):
def __init__(self,arg1,env={}):
super().__init__(lambda a,b: -a, arg1, arg1, env)
class Equal(Operator):
def __init__(self, arg1, arg2, env={}):
super().__init__(lambda a, b: a == b, arg1, arg2, env)
class Less(Operator):
def __init__(self, arg1, arg2, env={}):
super().__init__(lambda a, b: a < b, arg1, arg2, env)
class Add(Operator):
def __init__(self, arg1, arg2, env={}):
super().__init__(lambda a,b: a+b, arg1, arg2, env)
class Mult(Operator):
def __init__(self, arg1, arg2, env={}):
super().__init__(lambda a,b: a*b, arg1, arg2, env)
class Invert(Operator):
def __init__(self, arg1, env={}):
super().__init__(lambda a,b: 1/a, arg1, arg1, env)
class NamedTuple(Value):
def __init__(self,data,env={}):
if(isinstance(data,list)):
super().__init__({name:None for name in data},env)
else:
super().__init__(data,env)
def step(self,local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
for vals in self.value:
if(not isinstance(self.value[vals],Value)):
data_cpy = self.value
data_cpy[vals] = data_cpy[vals].step(temp_env)
return NamedTuple(data_cpy,self.environment)
return EvaluatedTuple(self.value,self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
for keys in self.value:
self.value[keys].update_all_variables(kwargs)
def apply_all_dts(self,f):
for keys in self.value:
self.value[keys] = self.value[keys].apply_all_dts(f)
return self
class EvaluatedTuple(NamedTuple):
def __init__(self,data,env={}):
print("constructing")
super().__init__(data,env)
def step(self,local_environment={}):
raise Exception(" YOU SHOULD NOT BE ABLE TO SEE THIS! ")
class GetTupleData:
def __init__(self,tup,attribute_name,environment={}):
self.tup = tup
self.attribute_name = attribute_name
self.environment = environment
def step(self,local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.tup,EvaluatedTuple)):
return GetTupleData(self.tup.step(temp_env),self.attribute_name,self.environment)
if(not isinstance(self.attribute_name,Value)):
return GetTupleData(self.tup,self.attribute_name.step(temp_env),self.environment)
if(self.attribute_name.value not in self.tup.value):
raise Exception(f"Member Name {self.attribute_name.value} not in tuple!")
return self.tup.value[self.attribute_name.value]
class UpdateTuple:
def __init__(self,tup,attribute_name,value,eager=True,environment={}):
self.tup = tup
self.attribute_name = attribute_name
self.value = value
self.eager = eager
self.environment = environment
def step(self,local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.tup,NamedTuple)):
return UpdateTuple(self.tup.step(temp_env),self.attribute_name,self.value,self.eager,self.environment)
if(not isinstance(self.attribute_name,Value)):
return UpdateTuple(self.tup,self.attribute_name.step(temp_env),self.value,self.eager,self.environment)
if(self.eager and not isinstance(self.value,Value)):
return UpdateTuple(self.tup,self.attribute_name,self.value.step(temp_env),self.eager,self.environment)
if(self.attribute_name.value not in self.tup.value):
raise Exception(f"Member Name {self.attribute_name.value} not in tuple!")
copy_of_tuple_data = self.tup.value
copy_of_tuple_data[self.attribute_name.value] = self.value
return NamedTuple(copy_of_tuple_data,self.tup.environment)
def execute(program, environment: dict) -> Value:
while(not isinstance(program, Value)):
#print("stepping")
#print(environment['A'].value)
program = program.step(environment)
#print(program.pr())
#input()
return program
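# Usage sketch: execute() just keeps calling step() until the expression
# collapses to a Value. For example,
#   execute(Add(Value(2, {}), Value(40, {}), {}), {})
# should reduce to a Value whose .value is 42, assuming the Operator base
# class defined earlier in this file evaluates both arguments and applies its lambda.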
class Type:
def __init__(self,expression,environment={}):
self.expression = expression
self.environment = environment
def step(self,local_environment={}):
temp_env = local_environment
for var in self.environment:
temp_env[var] = self.environment[var]
if(not isinstance(self.expression, Value)):
return Type(self.expression.step(temp_env),self.environment)
if(isinstance(self.expression,NamedTuple)):
return Value("NamedTuple",self.environment)
return Value(type(self.expression.value),self.environment)
def update_all_variables(self,kwargs):
for k in kwargs:
self.environment[k] = kwargs[k]
self.expression.update_all_variables(kwargs)
def apply_all_dts(self,f):
self.expression = self.expression.apply_all_dts(f)
return self
def main() -> None:
print(f"fang[ver:{VERSION}][{YEAR}] -- Copyright <NAME> - Python 3.10")
environment = {"collatz":
If_Else(
Operator(
lambda a,b: a != b,
Variable('A',{}),
Value(1,{}),
{}
),
If_Else(
Operator(
lambda a,b: a % b == 0,
Variable('A',{}),
Value(2,{}),
{}
),
Apply(
'A',
Operator(
lambda a,b: a // b,
Variable('A',{}),
Value(2,{}),
{}
),
Variable("collatz",{}),
{}
),
Apply(
'A',
Operator(
lambda a,b: a + b,
Operator(
lambda c,d: c*d,
Variable('A',{}),
Value(3,{}),
{}
),
Value(1,{}),
{}
),
Variable("collatz",{}),
{}
),
{}
),
Variable('A',{}),
{}
),
}
execute(Print(Get(Value("please input something here: ",{})),{}),environment)
execute(Print(Apply("A",Value(69,{}),Variable("collatz",{}),{}),{}),environment)
execute(Print(Apply("B",Value(20,{}),Chain("C",Operator(lambda a,b: a*b,Variable("B",{}),Variable("B",{}),{}),Operator(lambda a,b: a+b,Variable("C",{}),Value(2,{}),{}),{}),{}),{}),environment)
execute(Print(Add(Value(69,{}),Value(42000,{}),{}),{}),environment)
execute(Print(GetTupleData(NamedTuple({"first":Value(2,{}),"second":Value(3,{})},{}),Value("second"),{}),{}),environment)
execute(Print(UpdateTuple(UpdateTuple(NamedTuple({"first":Value(2,{}),"second":Value(1,{})},{}),Value("second"),GetTupleData(NamedTuple({"first":Value(2,{}),"second":Value(3,{})},{}),Value("first"),{}),True,{}),Value("first"),GetTupleData(NamedTuple({"first":Value(2,{}),"second":Value(3,{})},{}),Value("second"),{}),True,{}),{}),environment)
execute(Print(List([Value("first"),Value("second"),Value("third")]),{}),environment)
#print(execute(UnwrapTable(FSLoader(Value("./Loaders/table.ftab",{})),Value("table1",{}),{}),environment).value)
execute(Print(Type(Value(None,{}),{}),{}),environment)
execute(Print(ToList(Map(List([Value(3),Value(9),Value(2)]),Mult(Variable("var",{}),Value(2,{}),{}),Value("var",{}),{}),{}),{}),environment)
while True:
selected_text = input(">> ")
try:
main()
except QuitException:
print("Quitting the interpreter, bye!")
except KeyboardInterrupt:
print("\nKeyboard interrupt triggered")
``` |
{
"source": "Joe0708/zhilianRefresh",
"score": 3
} |
#### File: Joe0708/zhilianRefresh/refresh.py
```python
import requests
import argparse
import json
from bs4 import BeautifulSoup
import time
class ZlAccount:
def __init__(self):
self.loginname=''
self.password=''
self.loggedin=False
self.session=requests.Session()
self.session.headers.update({'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'})
#self.session.headers.update({'host':'i.zhaopin.com'})
def __del__(self):
if self.session:
self.session.close()
def set_cookie(self,cookie):
# cookies can be obtained from your Chrome console by the following command: 'document.cookie'
# pass the raw string to this method.
self.session.headers.update({'Cookie':cookie})
def refresh_resumes(self):
s=self.session
resp=s.get('https://i.zhaopin.com')
soup=BeautifulSoup(resp.content,'html.parser')
#print(soup.prettify())
search_result=soup.find_all('a',class_='myLinkA linkRefresh')
if search_result:
link=search_result[0].attrs['url']
params=link.split('?')[1]
kvpairs=params.split('&')
resume={}
resume['resumeId']=kvpairs[1].split('=')[1]
resume['resumenum']=kvpairs[2].split('=')[1]
self.refresh(resume)
else:
print('no resume found.')
#print(resp.content)
def refresh(self,resume):
# resume should be a dict contains two keys:resumeId,resumenum
url='https://i.zhaopin.com/ResumeCenter/MyCenter/RefreshResume'
resume['version']='1'
resume['language']='1'
resume['t']=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) #str(time.time())
print('resume dict: %s'%resume)
r=self.session.get(url,params=resume) # query parameters for a GET request belong in params, not data
soup=BeautifulSoup(r.content,'html.parser')
#print(soup.prettify())
sr=soup.find_all('h3',class_='entH setH')
review=soup.find_all("p",class_='saleP')
if sr:
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ": " + sr[0].text + "," + review[0].text)
else:
print('failed..')
def main():
parser = argparse.ArgumentParser(description='refresh resume on zl')
parser.add_argument('cookiefile',type=str,help='path/to/your/cookie/file')
args = parser.parse_args()
za=ZlAccount()
f=open(args.cookiefile,'r')
cookie=f.readline()
#print(cookie)
f.close()
za.set_cookie(cookie.replace('\n',''))
za.refresh_resumes()
main()
``` |
{
"source": "joe1234wu/fbpcs",
"score": 2
} |
#### File: common/service/pcs_container_service.py
```python
from typing import Optional, Dict, List
from fbpcp.entity.container_instance import ContainerInstance
from fbpcp.error.pcp import PcpError
from fbpcp.service.container import ContainerService
from fbpcp.service.container_aws import AWSContainerService
from fbpcs.common.entity.pcs_container_instance import PCSContainerInstance
from fbpcs.experimental.cloud_logs.log_retriever import CloudProvider, LogRetriever
class PCSContainerService(ContainerService):
def __init__(self, inner_container_service: ContainerService) -> None:
self.inner_container_service: ContainerService = inner_container_service
self.log_retriever: Optional[LogRetriever] = None
if isinstance(self.inner_container_service, AWSContainerService):
self.log_retriever = LogRetriever(CloudProvider.AWS)
def get_region(
self,
) -> str:
return self.inner_container_service.get_region()
def get_cluster(
self,
) -> str:
return self.inner_container_service.get_cluster()
def create_instance(
self,
container_definition: str,
cmd: str,
env_vars: Optional[Dict[str, str]] = None,
) -> ContainerInstance:
instance = self.inner_container_service.create_instance(
container_definition, cmd, env_vars
)
log_url = None
if self.log_retriever:
log_url = self.log_retriever.get_log_url(instance.instance_id)
return PCSContainerInstance.from_container_instance(instance, log_url)
def create_instances(
self,
container_definition: str,
cmds: List[str],
env_vars: Optional[Dict[str, str]] = None,
) -> List[ContainerInstance]:
return [
self.create_instance(container_definition, cmd, env_vars) for cmd in cmds
]
def get_instance(self, instance_id: str) -> Optional[ContainerInstance]:
instance = self.inner_container_service.get_instance(instance_id)
if instance is not None:
log_url = None
if self.log_retriever:
log_url = self.log_retriever.get_log_url(instance_id)
return PCSContainerInstance.from_container_instance(instance, log_url)
def get_instances(
self, instance_ids: List[str]
) -> List[Optional[ContainerInstance]]:
return [self.get_instance(instance_id) for instance_id in instance_ids]
def cancel_instance(self, instance_id: str) -> None:
return self.inner_container_service.cancel_instance(instance_id)
def cancel_instances(self, instance_ids: List[str]) -> List[Optional[PcpError]]:
return self.inner_container_service.cancel_instances(instance_ids)
def get_current_instances_count(self) -> int:
return self.inner_container_service.get_current_instances_count()
```
#### File: experimental/cloud_logs/log_retriever.py
```python
import re
from fbpcs.private_computation.entity.cloud_provider import CloudProvider
class LogRetriever:
"""Retrieves logs for containers under a specific cloud provider.
Private attributes:
_cloud_provider: Cloud Provider for which this log retriever was initialized
"""
def __init__(self, cloud_provider: CloudProvider) -> None:
self._cloud_provider = cloud_provider
def get_log_url(self, container_id: str) -> str:
"""Get the log url for a container
Args:
container_id: identifier for container for which the log URL should be retrieved
Returns:
return: The log URL for said container
Raises:
IndexError: can be raised when using CloudProvider.AWS
NotImplementedError: if anything other than CloudProvider.AWS is used
"""
if self._cloud_provider is CloudProvider.AWS:
return self._get_aws_cloudwatch_log_url(container_id)
else:
raise NotImplementedError(
f"Retrieving log URLs for {self._cloud_provider} is not yet supported."
)
def _get_aws_cloudwatch_log_url(self, container_id: str) -> str:
"""Return a CloudWatch URL given a container id.
Args:
container_id: AWS arn of a container run in ECS
Returns:
return: The Cloudwatch URL for said container
Raises:
IndexError: if container_id is not well-formed
"""
container_id_info = container_id.split(":")
log_region = container_id_info[3]
task_id_info = container_id_info[-1].split("/")
cluster_name = task_id_info[1]
container_name = self._get_container_name(cluster_name, log_region)
task_id = task_id_info[-1]
log_group_name = f"$252Fecs$252F{container_name}"
log_stream_name = f"ecs$252F{container_name}$252F{task_id}"
return (
f"https://{log_region}.console.aws.amazon.com/cloudwatch/home?"
f"region={log_region}#logsV2:log-groups/"
f"log-group/{log_group_name}/"
f"log-events/{log_stream_name}"
)
def _get_container_name(self, cluster_name: str, log_region: str) -> str:
"""Get the container_name. For publisher side log group, if it's created by PCE service,
it will be changed to format "onedocker-container-shared-<region>"
Args:
cluster_name: the name of cluster in format "onedocker-cluster-<tag>"
log_region: the AWS region
Returns:
return: The container_name
Raises:
IndexError: if container_name is not well-formed
"""
container_name = cluster_name.replace("-cluster", "-container")
container_name_parts = container_name.split("-")
# If the name does not have a 32 bit random string inside, return directly
# Otherwise, it means it's a container created by the PCE service,
# and it should be replaced with "-shared-<region>"
if not re.search(r"[0-9a-f]{32}", container_name_parts[-1]):
return container_name
return f"{container_name.rsplit('-', 1)[0]}-shared-{log_region}"
```
#### File: cloud_bridge/data_ingestion/data_transformation_lambda.py
```python
from __future__ import print_function
import base64
import json
import os
import re
from typing import Dict, List, Tuple
# initiate
print("Loading lambda function...")
BROWSER_NAME_REGEXES: List[Tuple[re.Pattern, str]] = [
(re.compile(r".*Chrome.*Mobile Safari/[0-9.]+$"), "Chrome Mobile"),
(re.compile(r".*Chrome.*Safari/[0-9.]+$"), "Chrome Desktop"),
(re.compile(r".*Mobile.*Safari/[0-9.]+$"), "Mobile Safari"),
(re.compile(r".*FBIOS;.*"), "Facebook for iOS"),
(
re.compile(r".*(CPU OS|iPhone OS|CPU iPhone).*Instagram.*"),
"Instagram IAB for iOS",
),
(re.compile(r".*Instagram.*Android.*"), "Instagram IAB for Android"),
(re.compile(r".*FB4A.*"), "Facebook for Android"),
]
DEVICE_OS_REGEXES: List[Tuple[re.Pattern, str]] = [
(re.compile(r".*(CPU OS|iPhone OS|CPU iPhone).*"), "iOS"),
(re.compile(r".*Android.*"), "Android"),
(re.compile(r".*Windows NT.*"), "Windows NT"),
(re.compile(r".*Mac OS X.*"), "Mac OS X"),
]
OS_VERSION_REGEXES: List[re.Pattern] = [
re.compile(r".*(CPU OS|iPhone OS|CPU iPhone) +(\d+)[_\.](\d+)(?:[_\.](\d+))?.*"),
re.compile(r".*(Intel Mac OS X) +(\d+)[_\.](\d+)(?:[_\.](\d+))?.*"),
re.compile(r".*(Android) +(\d+)[_\.](\d+)(?:[_\.](\d+))?.*"),
]
BROWSER_NAME = "browser_name"
DEVICE_OS = "device_os"
DEVICE_OS_VERSION = "device_os_version"
def lambda_handler(
event: Dict[str, List[Dict[str, str]]], context: Dict[str, str]
) -> Dict[str, List[Dict[str, str]]]:
output = []
##### NOTE: this script assumes the schema is correct, no missing items
for record in event["records"]:
row = {}
recordId = record["recordId"]
row["recordId"] = recordId
row["result"] = "Ok"
decoded_data = json.loads(base64.b64decode(record["data"]))
dic = dict(os.environ.items())
debug = "DEBUG" in dic.keys() and dic["DEBUG"] == "true"
if debug:
print(f"Processing record for recordId: {recordId}")
# if loaded as str, load again
if type(decoded_data) is str:
decoded_data = json.loads(decoded_data)
if "serverSideEvent" not in decoded_data.keys():
msg = f"Error: serverSideEvent does not exist for recordId: {recordId}"
print(msg)
continue
row_data = decoded_data["serverSideEvent"]
data_source_id = decoded_data.get("pixelId")
# as of H2 2021, it should only be "website".
action_source = row_data.get("action_source")
timestamp = row_data.get("event_time")
event_type = row_data.get("event_name")
dummy_dict = {}
currency_type = row_data.get("custom_data", dummy_dict).get("currency")
conversion_value = row_data.get("custom_data", dummy_dict).get("value")
email = row_data.get("user_data", dummy_dict).get("em")
device_id = row_data.get("user_data", dummy_dict).get("madid")
phone = row_data.get("user_data", dummy_dict).get("ph")
client_ip_address = row_data.get("user_data", dummy_dict).get(
"client_ip_address"
)
client_user_agent = row_data.get("user_data", dummy_dict).get(
"client_user_agent"
)
click_id = row_data.get("user_data", dummy_dict).get("fbc")
login_id = row_data.get("user_data", dummy_dict).get("fbp")
parsed_user_agent_fields = (
_parse_client_user_agent(client_user_agent) if client_user_agent else {}
)
# app data fields
app_data_fields = [
"advertiser_tracking_enabled",
"application_tracking_enabled",
"consider_views",
"device_token",
"include_dwell_data",
"include_video_data",
"install_referrer",
"installer_package",
"receipt_data",
"url_schemes",
"extinfo",
]
app_data = row_data.get("app_data", dummy_dict)
parsed_app_data = {}
for field in app_data_fields:
if field in app_data:
parsed_app_data[field] = app_data[field]
# make sure not all values are None
if all(
value is None
for value in [
timestamp,
currency_type,
conversion_value,
event_type,
email,
device_id,
phone,
click_id,
login_id,
]
):
msg = f"All essential columns are None/Null. Skip recordId: f{recordId}"
print(msg)
continue
data = {}
user_data = {}
data["data_source_id"] = data_source_id
data["timestamp"] = timestamp
data["currency_type"] = currency_type
data["conversion_value"] = conversion_value
data["event_type"] = event_type
data["action_source"] = action_source
if email:
user_data["email"] = email
if device_id:
user_data["device_id"] = device_id
if phone:
user_data["phone"] = phone
if client_ip_address:
user_data["client_ip_address"] = client_ip_address
if client_user_agent:
user_data["client_user_agent"] = client_user_agent
if click_id:
user_data["click_id"] = click_id
if login_id:
user_data["login_id"] = login_id
if BROWSER_NAME in parsed_user_agent_fields:
user_data[BROWSER_NAME] = parsed_user_agent_fields[BROWSER_NAME]
if DEVICE_OS in parsed_user_agent_fields:
user_data[DEVICE_OS] = parsed_user_agent_fields[DEVICE_OS]
if DEVICE_OS_VERSION in parsed_user_agent_fields:
user_data[DEVICE_OS_VERSION] = parsed_user_agent_fields[DEVICE_OS_VERSION]
data["user_data"] = user_data
data["app_data"] = parsed_app_data
# firehose need data to be b64-encoded
data = json.dumps(data) + "\n"
data = data.encode("utf-8")
row["data"] = base64.b64encode(data)
output.append(row)
print("finished data transformation.")
return {"records": output}
def _parse_client_user_agent(client_user_agent: str) -> Dict[str, str]:
parsed_fields = {}
for (regex, browserName) in BROWSER_NAME_REGEXES:
if regex.match(client_user_agent):
parsed_fields[BROWSER_NAME] = browserName
break
for (regex, deviceOs) in DEVICE_OS_REGEXES:
if regex.match(client_user_agent):
parsed_fields[DEVICE_OS] = deviceOs
break
for regex in OS_VERSION_REGEXES:
matches = regex.match(client_user_agent)
if matches:
groups = list(filter(lambda item: type(item) == str, matches.groups()))
if len(groups) > 1:
parsed_fields[DEVICE_OS_VERSION] = ".".join(groups[1:])
break
return parsed_fields
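# Illustrative call (the user agent string is a typical, made-up example):
#   _parse_client_user_agent(
#       "Mozilla/5.0 (iPhone; CPU iPhone OS 14_2 like Mac OS X) "
#       "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1"
#   )
# matches the regexes above and yields
#   {"browser_name": "Mobile Safari", "device_os": "iOS", "device_os_version": "14.2"}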
```
#### File: kodiak/ffi_codegen/codegen.py
```python
import subprocess
import sys
from dataclasses import dataclass
from typing import Optional
@dataclass
class TypeInfo:
arg_name: str
cpp_name: str
rust_name: str
cpp_clear_type: str
mpc_engine_type: str
@dataclass
class OperatorInfo:
name: str
symbol: str
ret: Optional[str] = None
# TODO: Changing all these `false` to `true` makes it a batch game
BOOLEAN_TYPE = TypeInfo(
arg_name="mpc_bool",
cpp_name="CppMPCBool",
rust_name="bool",
cpp_clear_type="bool",
mpc_engine_type="SecBit<false>",
)
ARITHMETIC_TYPES = [
TypeInfo(
arg_name="mpc_int32",
cpp_name="CppMPCInt32",
rust_name="i32",
cpp_clear_type="int32_t",
mpc_engine_type="SecSignedInt<32, false>",
),
TypeInfo(
arg_name="mpc_int64",
cpp_name="CppMPCInt64",
rust_name="i64",
cpp_clear_type="int64_t",
mpc_engine_type="SecSignedInt<64, false>",
),
TypeInfo(
arg_name="mpc_uint32",
cpp_name="CppMPCUInt32",
rust_name="u32",
cpp_clear_type="uint32_t",
mpc_engine_type="SecUnsignedInt<32, false>",
),
TypeInfo(
arg_name="mpc_uint64",
cpp_name="CppMPCUInt64",
rust_name="u64",
cpp_clear_type="uint64_t",
mpc_engine_type="SecUnsignedInt<64, false>",
),
]
BOOLEAN_OPS = [
OperatorInfo(name="and", symbol="&"),
OperatorInfo(name="or", symbol="||"),
OperatorInfo(name="xor", symbol="^"),
]
ARITHMETIC_OPS = [
OperatorInfo(name="add", symbol="+"),
OperatorInfo(name="sub", symbol="-"),
# OperatorInfo(name="mul", symbol="*"),
# OperatorInfo(name="div", symbol="/"),
]
COMPARISON_OPS = [
# OperatorInfo(name="neq", symbol="!="),
OperatorInfo(name="eq", symbol="==", ret=BOOLEAN_TYPE),
OperatorInfo(name="lt", symbol="<", ret=BOOLEAN_TYPE),
OperatorInfo(name="gt", symbol=">", ret=BOOLEAN_TYPE),
OperatorInfo(name="lte", symbol="<=", ret=BOOLEAN_TYPE),
OperatorInfo(name="gte", symbol=">=", ret=BOOLEAN_TYPE),
]
def get_license_and_generated_header() -> str:
return (
"/*\n"
" * Copyright (c) Meta Platforms, Inc. and affiliates.\n"
" *\n"
" * This source code is licensed under the MIT license found in the\n"
" * LICENSE file in the root directory of this source tree.\n"
# Note: we split this @gen over two lines because otherwise Phabricator
# will think *this* file is generated :)
" */\n\n/* @gen"
"erated file - do not modify directly! */\n\n"
)
def get_h_pragma_and_includes() -> str:
return (
"#pragma once\n\n"
"#include <map>\n"
"#include <memory>\n"
"#include <string>\n\n"
"#include <fbpcf/engine/communication/SocketPartyCommunicationAgentFactory.h>\n"
"#include <fbpcf/frontend/mpcGame.h>\n"
"#include <fbpcf/mpc_std_lib/oram/IWriteOnlyOram.h>\n"
"#include <fbpcf/mpc_std_lib/oram/LinearOramFactory.h>\n"
"#include <fbpcf/scheduler/IScheduler.h>\n"
"#include <fbpcf/scheduler/SchedulerHelper.h>\n"
)
def get_using_declaration(type_info: TypeInfo) -> str:
return (
f"using {type_info.cpp_name} = typename fbpcf::frontend::"
# TODO: Only handles schedulerId=0
f"MpcGame<0>::template {type_info.mpc_engine_type};"
)
def get_kodiak_game_classes() -> str:
return (
"constexpr int32_t PUBLISHER_ROLE = 0;\n"
"constexpr int32_t PARTNER_ROLE = 1;\n\n"
"template <int schedulerId, bool batched = false>\n"
"class KodiakGameDetail : public fbpcf::frontend::MpcGame<schedulerId> {\n"
" public:\n"
" explicit KodiakGameDetail(std::unique_ptr<fbpcf::scheduler::IScheduler> scheduler)\n"
" : fbpcf::frontend::MpcGame<schedulerId>(std::move(scheduler)) {}\n"
"};\n"
# TODO: Adding ', true' as a second template argument makes this a batch game
"class KodiakGame : public KodiakGameDetail<0> {\n"
" public:\n"
" explicit KodiakGame(std::unique_ptr<fbpcf::scheduler::IScheduler> scheduler)\n"
" : KodiakGameDetail<0>(std::move(scheduler)) {}\n"
"};\n"
"std::unique_ptr<KodiakGame> new_kodiak_game(int32_t role, const std::string& host, int16_t port) {\n"
" std::map<int, fbpcf::engine::communication::SocketPartyCommunicationAgentFactory::PartyInfo> partyInfos{"
" {{PUBLISHER_ROLE, {host, port}}, {PARTNER_ROLE, {host, port}}}};\n"
" auto commAgentFactory = std::make_unique<fbpcf::engine::communication::SocketPartyCommunicationAgentFactory>(role, std::move(partyInfos));\n"
" auto scheduler = fbpcf::scheduler::createLazySchedulerWithRealEngine(role, *commAgentFactory);\n"
" return std::make_unique<KodiakGame>(std::move(scheduler));\n"
"}\n"
)
def func_to_header_declaration(f: str) -> str:
return f[: f.index("{") - 1] + ";"
def make_new_func(type_info: TypeInfo) -> str:
cpp_typename = type_info.cpp_name
arg_name = type_info.arg_name
clear_type = type_info.cpp_clear_type
return (
# Signature and funcname
f"std::unique_ptr<{cpp_typename}> new_{arg_name}"
# parameters
f"({clear_type} a, int32_t partyId) {{\n"
# statements
f" return std::make_unique<{cpp_typename}>(a, partyId);\n"
# end of func
"}"
)
def make_reveal_func(type_info: TypeInfo) -> str:
ret_type = type_info.cpp_clear_type
arg_name = type_info.arg_name
cpp_typename = type_info.cpp_name
return (
# Signature and funcname
f"{ret_type} reveal_{arg_name}"
# parameters
f"(const {cpp_typename}& a) {{\n"
# statements
# TODO: Open to *both* parties
f" auto res = a.openToParty(0);\n"
f" return res.getValue();\n"
# end of func
"}"
)
def make_mux_func(type_info: TypeInfo) -> str:
cpp_type = type_info.cpp_name
arg_name = type_info.arg_name
return (
# Signature and funcname
f"std::unique_ptr<{cpp_type}> {arg_name}_mux"
# parameters
f"(const CppMPCBool& choiceBit, const {cpp_type}& trueCase, const {cpp_type}& falseCase) {{\n"
# statements
f" return std::make_unique<{cpp_type}>(trueCase.mux(choiceBit, falseCase));"
# end of func
"}"
)
def make_binop_func(type_info: TypeInfo, op_info: OperatorInfo) -> str:
# If the operator explicitly defines a return type, use that
# otherwise fall back to assuming A <op> A -> A
param_typename = type_info.cpp_name
if op_info.ret is not None:
ret_typename = op_info.ret.cpp_name
else:
ret_typename = param_typename
funcname = f"{type_info.arg_name}_{op_info.name}"
op = op_info.symbol
return (
# Signature and funcname
f"std::unique_ptr<{ret_typename}> {funcname}"
# parameters
f"(const {param_typename}& a, const {param_typename}& b) {{\n"
# statements
f" return std::make_unique<{ret_typename}>(a {op} b);\n"
# end of func
"}"
)
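# For example, make_binop_func(ARITHMETIC_TYPES[0], OperatorInfo(name="add", symbol="+"))
# produces (roughly) the following C++:
#   std::unique_ptr<CppMPCInt32> mpc_int32_add(const CppMPCInt32& a, const CppMPCInt32& b) {
#     return std::make_unique<CppMPCInt32>(a + b);
#   }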
def main() -> None:
if len(sys.argv) != 2:
sys.exit(f"Usage: python3 {sys.argv[0]} outfile")
header_filename = sys.argv[1] + ".h"
output_filename = sys.argv[1] + ".cpp"
with open(header_filename, "w") as f_h, open(output_filename, "w") as f_cpp:
# First write the license, include header, and namespace declarations
# This is the "set up" for the h file
print(get_license_and_generated_header(), file=f_h)
print(get_h_pragma_and_includes(), file=f_h)
print("namespace kodiak_cpp {\n", file=f_h)
for type_info in [BOOLEAN_TYPE] + ARITHMETIC_TYPES:
print(get_using_declaration(type_info), file=f_h)
print(get_kodiak_game_classes(), file=f_h)
# First write the license, include header, and namespace declarations
# This is the "set up" for the cpp file
print(get_license_and_generated_header(), file=f_cpp)
print('#include "fbpcs/kodiak/include/ffi.h"\n', file=f_cpp)
print("#include <memory>\n", file=f_cpp)
print("using namespace kodiak_cpp;\n", file=f_cpp)
# Write all the functions for the boolean type
type_info = BOOLEAN_TYPE
print(f"Gen functions for {type_info.arg_name}")
new_f = make_new_func(type_info)
print(func_to_header_declaration(new_f), file=f_h)
print(new_f, file=f_cpp)
reveal_f = make_reveal_func(type_info)
print(func_to_header_declaration(reveal_f), file=f_h)
print(reveal_f, file=f_cpp)
for operator_info in BOOLEAN_OPS:
binop_f = make_binop_func(type_info, operator_info)
print(func_to_header_declaration(binop_f), file=f_h)
print(binop_f, file=f_cpp)
# Write all the functions for the arithmetic types
for type_info in ARITHMETIC_TYPES:
print(f"Gen functions for {type_info.arg_name}")
new_f = make_new_func(type_info)
print(func_to_header_declaration(new_f), file=f_h)
print(new_f, file=f_cpp)
reveal_f = make_reveal_func(type_info)
print(func_to_header_declaration(reveal_f), file=f_h)
print(reveal_f, file=f_cpp)
mux_f = make_mux_func(type_info)
print(func_to_header_declaration(mux_f), file=f_h)
print(mux_f, file=f_cpp)
for operator_info in ARITHMETIC_OPS:
binop_f = make_binop_func(type_info, operator_info)
print(func_to_header_declaration(binop_f), file=f_h)
print(binop_f, file=f_cpp)
for operator_info in COMPARISON_OPS:
binop_f = make_binop_func(type_info, operator_info)
print(func_to_header_declaration(binop_f), file=f_h)
print(binop_f, file=f_cpp)
print("} // namespace kodiak_cpp", file=f_h)
# Finally, run the auto-formatters on the code
subprocess.run(["clang-format", "-i", header_filename])
subprocess.run(["clang-format", "-i", output_filename])
if __name__ == "__main__":
main()
```
#### File: pc_pre_validation/tests/validation_report_test.py
```python
from unittest import TestCase
from fbpcs.pc_pre_validation.enums import ValidationResult
from fbpcs.pc_pre_validation.validation_report import ValidationReport
class TestValidationReport(TestCase):
def test_get_str_for_report_with_details(self) -> None:
expected_report_str = """Validation Report: test_validator_name
Result: success
Message: test_message
Details:
{
"test_key_1": 5,
"test_key_2": {
"test_key_3": {
"test_key_4": 1
},
"test_key_5": {
"test_key_6": 1,
"test_key_7": 2
}
}
}"""
report = ValidationReport(
validation_result=ValidationResult.SUCCESS,
validator_name="test_validator_name",
message="test_message",
details={
"test_key_1": 5,
"test_key_2": {
"test_key_3": {
"test_key_4": 1,
},
"test_key_5": {
"test_key_6": 1,
"test_key_7": 2,
},
},
},
)
self.assertEqual(expected_report_str, str(report))
def test_get_str_for_report_without_details(self) -> None:
expected_report_str = """Validation Report: test_validator_name
Result: failed
Message: test_message"""
report = ValidationReport(
validation_result=ValidationResult.FAILED,
validator_name="test_validator_name",
message="test_message",
)
self.assertEqual(expected_report_str, str(report))
```
#### File: pid/repository/pid_instance.py
```python
import abc
import threading
from fbpcs.pid.entity.pid_instance import PIDInstance
class PIDInstanceRepository(abc.ABC):
def __init__(self) -> None:
"""
IMPORTANT: after acquiring this lock, and before releasing it, there cannot
be async calls, otherwise it will end up with deadlock.
"""
self.lock = threading.Lock()
@abc.abstractmethod
def create(self, instance: PIDInstance) -> None:
pass
@abc.abstractmethod
def read(self, instance_id: str) -> PIDInstance:
pass
@abc.abstractmethod
def update(self, instance: PIDInstance) -> None:
pass
@abc.abstractmethod
def delete(self, instance_id: str) -> None:
pass
```
#### File: pid_service/tests/test_pid_prepare_stage.py
```python
import unittest
from typing import Tuple, Dict
from unittest.mock import MagicMock, patch, AsyncMock
from fbpcp.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.data_processing.pid_preparer.union_pid_preparer_cpp import (
CppUnionPIDDataPreparerService,
)
from fbpcs.pcf.tests.async_utils import to_sync
from fbpcs.pid.entity.pid_instance import PIDStageStatus
from fbpcs.pid.entity.pid_stages import UnionPIDStage
from fbpcs.pid.service.pid_service.pid_prepare_stage import PIDPrepareStage
from fbpcs.pid.service.pid_service.pid_stage import PIDStage
from fbpcs.pid.service.pid_service.pid_stage_input import PIDStageInput
def data_test_run() -> Tuple[
Dict[str, bool],
Dict[str, bool],
]:
return ({"wait_for_containers": True}, {"wait_for_containers": False})
class TestPIDPrepareStage(unittest.TestCase):
@patch("fbpcs.pid.repository.pid_instance.PIDInstanceRepository")
@to_sync
async def test_prepare(
self,
mock_instance_repo: unittest.mock.MagicMock,
) -> None:
async def _run_sub_test(
wait_for_containers: bool,
expected_container_status: ContainerInstanceStatus,
) -> None:
with patch.object(
CppUnionPIDDataPreparerService, "prepare_on_container_async"
) as mock_prepare_on_container_async, patch.object(
PIDStage, "update_instance_containers"
):
container = ContainerInstance(
instance_id="123",
ip_address="192.0.2.0",
status=expected_container_status,
)
mock_prepare_on_container_async.return_value = container
stage = PIDPrepareStage(
stage=UnionPIDStage.PUBLISHER_PREPARE,
instance_repository=mock_instance_repo,
storage_svc="STORAGE", # pyre-ignore
onedocker_svc="ONEDOCKER", # pyre-ignore
onedocker_binary_config=MagicMock(
task_definition="offline-task:1#container",
tmp_directory="/tmp/",
binary_version="latest",
),
)
res = await stage.prepare(
instance_id="123",
input_path="in",
output_path="out",
num_shards=1,
wait_for_containers=wait_for_containers,
)
self.assertEqual(
PIDStage.get_stage_status_from_containers([container]),
res,
)
data_tests = (
(True, ContainerInstanceStatus.COMPLETED),
(True, ContainerInstanceStatus.FAILED),
(False, ContainerInstanceStatus.STARTED),
)
for data_test in data_tests:
with self.subTest(
wait_for_containers=data_test[0],
expected_container_status=data_test[1],
):
# reset mocks for each subTests
mock_instance_repo.reset_mock()
await _run_sub_test(
data_test[0],
data_test[1],
)
@to_sync
@patch(
"fbpcs.private_computation.service.run_binary_base_service.RunBinaryBaseService.wait_for_containers_async"
)
@patch("fbpcp.service.storage.StorageService")
@patch("fbpcs.pid.repository.pid_instance.PIDInstanceRepository")
@patch("fbpcp.service.onedocker.OneDockerService")
async def test_run(
self,
mock_onedocker_svc: unittest.mock.MagicMock,
mock_instance_repo: unittest.mock.MagicMock,
mock_storage_svc: unittest.mock.MagicMock,
mock_wait_for_containers_async: unittest.mock.MagicMock,
) -> None:
async def _run_sub_test(
wait_for_containers: bool,
) -> None:
ip = "192.0.2.0"
container = ContainerInstance(
instance_id="123", ip_address=ip, status=ContainerInstanceStatus.STARTED
)
mock_onedocker_svc.start_containers = MagicMock(return_value=[container])
mock_onedocker_svc.wait_for_pending_containers = AsyncMock(
return_value=[container]
)
container.status = (
ContainerInstanceStatus.COMPLETED
if wait_for_containers
else ContainerInstanceStatus.STARTED
)
mock_wait_for_containers_async.return_value = [container]
stage = PIDPrepareStage(
stage=UnionPIDStage.PUBLISHER_PREPARE,
instance_repository=mock_instance_repo,
storage_svc=mock_storage_svc,
onedocker_svc=mock_onedocker_svc,
onedocker_binary_config=MagicMock(
task_definition="offline-task:1#container",
tmp_directory="/tmp/",
binary_version="latest",
),
)
instance_id = "444"
stage_input = PIDStageInput(
input_paths=["in"],
output_paths=["out"],
num_shards=2,
instance_id=instance_id,
)
# Basic test: All good
with patch.object(PIDPrepareStage, "files_exist") as mock_fe:
mock_fe.return_value = True
stage = PIDPrepareStage(
stage=UnionPIDStage.PUBLISHER_PREPARE,
instance_repository=mock_instance_repo,
storage_svc=mock_storage_svc,
onedocker_svc=mock_onedocker_svc,
onedocker_binary_config=MagicMock(
task_definition="offline-task:1#container",
tmp_directory="/tmp/",
binary_version="latest",
),
)
status = await stage.run(
stage_input, wait_for_containers=wait_for_containers
)
self.assertEqual(
PIDStageStatus.COMPLETED
if wait_for_containers
else PIDStageStatus.STARTED,
status,
)
self.assertEqual(mock_onedocker_svc.start_containers.call_count, 2)
if wait_for_containers:
self.assertEqual(mock_wait_for_containers_async.call_count, 2)
else:
mock_wait_for_containers_async.assert_not_called()
mock_instance_repo.read.assert_called_with(instance_id)
self.assertEqual(mock_instance_repo.read.call_count, 4)
self.assertEqual(mock_instance_repo.update.call_count, 4)
with patch.object(PIDPrepareStage, "files_exist") as mock_fe, patch.object(
PIDPrepareStage, "prepare"
) as mock_prepare:
mock_fe.return_value = True
status = await stage.run(
stage_input, wait_for_containers=wait_for_containers
)
mock_prepare.assert_called_with(
instance_id, "in", "out", 2, wait_for_containers, None
)
# Input not ready
with patch.object(PIDPrepareStage, "files_exist") as mock_fe:
mock_fe.return_value = False
status = await stage.run(
stage_input, wait_for_containers=wait_for_containers
)
self.assertEqual(PIDStageStatus.FAILED, status)
# Multiple input paths (invariant exception)
with patch.object(PIDPrepareStage, "files_exist") as mock_fe:
with self.assertRaises(ValueError):
mock_fe.return_value = True
stage_input.input_paths = ["in1", "in2"]
stage = PIDPrepareStage(
stage=UnionPIDStage.PUBLISHER_PREPARE,
instance_repository=mock_instance_repo,
storage_svc=mock_storage_svc,
onedocker_svc=mock_onedocker_svc,
onedocker_binary_config=MagicMock(
task_definition="offline-task:1#container",
tmp_directory="/tmp/",
binary_version="latest",
),
)
status = await stage.run(
stage_input, wait_for_containers=wait_for_containers
)
for data_test in data_test_run():
wait_for_containers = data_test["wait_for_containers"]
with self.subTest(
"Subtest with wait_for_containers: {wait_for_containers}",
wait_for_containers=wait_for_containers,
):
# reset mocks for each subTests
mock_onedocker_svc.reset_mock()
mock_instance_repo.reset_mock()
mock_storage_svc.reset_mock()
mock_wait_for_containers_async.reset_mock()
await _run_sub_test(wait_for_containers)
```
#### File: fbpcs/pl_coordinator/pl_instance_runner.py
```python
import logging
from multiprocessing import Process
from time import sleep, time
from typing import Any, Dict, List, Optional, Type
from fbpcs.pl_coordinator.constants import (
MIN_TRIES,
MAX_TRIES,
MIN_NUM_INSTANCES,
MAX_NUM_INSTANCES,
PROCESS_WAIT,
INSTANCE_SLA,
POLL_INTERVAL,
WAIT_VALID_STAGE_TIMEOUT,
WAIT_VALID_STATUS_TIMEOUT,
RETRY_INTERVAL,
CANCEL_STAGE_TIMEOUT,
)
from fbpcs.pl_coordinator.exceptions import (
PLInstanceCalculationException,
IncompatibleStageError,
)
from fbpcs.pl_coordinator.pc_partner_instance import PrivateComputationPartnerInstance
from fbpcs.pl_coordinator.pc_publisher_instance import (
PrivateComputationPublisherInstance,
)
from fbpcs.pl_coordinator.pl_graphapi_utils import PLGraphAPIClient
from fbpcs.private_computation.entity.private_computation_instance import (
AggregationType,
AttributionRule,
)
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationGameType,
)
from fbpcs.private_computation.entity.private_computation_status import (
PrivateComputationInstanceStatus,
)
from fbpcs.private_computation.stage_flows.private_computation_base_stage_flow import (
PrivateComputationBaseStageFlow,
)
class LoggerAdapter(logging.LoggerAdapter):
def __init__(self, logger: logging.Logger, prefix: str) -> None:
super(LoggerAdapter, self).__init__(logger, {})
self.prefix = prefix
def process(self, msg, kwargs):
return "[%s] %s" % (self.prefix, msg), kwargs
def run_instance(
config: Dict[str, Any],
instance_id: str,
input_path: str,
num_mpc_containers: int,
num_pid_containers: int,
stage_flow: Type[PrivateComputationBaseStageFlow],
logger: logging.Logger,
game_type: PrivateComputationGameType,
attribution_rule: Optional[AttributionRule] = None,
aggregation_type: Optional[AggregationType] = None,
concurrency: Optional[int] = None,
num_files_per_mpc_container: Optional[int] = None,
k_anonymity_threshold: Optional[int] = None,
num_tries: Optional[int] = 2, # this is number of tries per stage
dry_run: Optional[bool] = False,
) -> None:
num_tries = num_tries if num_tries is not None else MAX_TRIES
if num_tries < MIN_TRIES or num_tries > MAX_TRIES:
raise ValueError(f"num_tries must be between {MIN_TRIES} and {MAX_TRIES}.")
client = PLGraphAPIClient(config["graphapi"]["access_token"], logger)
instance_runner = PLInstanceRunner(
config,
instance_id,
input_path,
num_mpc_containers,
num_pid_containers,
logger,
client,
num_tries,
game_type,
dry_run,
stage_flow,
attribution_rule,
aggregation_type,
concurrency,
num_files_per_mpc_container,
k_anonymity_threshold,
)
logger.info(f"Running private lift for instance {instance_id}")
instance_runner.run()
def run_instances(
config: Dict[str, Any],
instance_ids: List[str],
input_paths: List[str],
num_shards_list: List[str],
stage_flow: Type[PrivateComputationBaseStageFlow],
logger: logging.Logger,
num_tries: Optional[int] = 2, # this is number of tries per stage
dry_run: Optional[bool] = False,
) -> None:
if len(instance_ids) != len(input_paths):
raise ValueError(
"Number of instances and number of input paths must be the same"
)
if len(input_paths) != len(num_shards_list):
raise ValueError(
"Number of input paths and number of num_shards must be the same"
)
if not MIN_NUM_INSTANCES <= len(instance_ids) <= MAX_NUM_INSTANCES:
raise ValueError(
f"Number of instances must be between {MIN_NUM_INSTANCES} and {MAX_NUM_INSTANCES}"
)
processes = list(
map(
lambda instance_id, input_path, num_shards: Process(
target=run_instance,
kwargs={
"config": config,
"instance_id": instance_id,
"input_path": input_path,
"num_mpc_containers": num_shards,
"num_pid_containers": num_shards,
"stage_flow": stage_flow,
"logger": LoggerAdapter(logger=logger, prefix=instance_id),
"game_type": PrivateComputationGameType.LIFT,
"num_tries": num_tries,
"dry_run": dry_run,
},
),
instance_ids,
input_paths,
num_shards_list,
)
)
for process in processes:
process.start()
sleep(PROCESS_WAIT)
for process in processes:
process.join(INSTANCE_SLA)
class PLInstanceRunner:
"""
Private Lift Partner-Publisher computation for an instance.
"""
def __init__(
self,
config: Dict[str, Any],
instance_id: str,
input_path: str,
num_mpc_containers: int,
num_pid_containers: int,
logger: logging.Logger,
client: PLGraphAPIClient,
num_tries: int,
game_type: PrivateComputationGameType,
dry_run: Optional[bool],
stage_flow: Type[PrivateComputationBaseStageFlow],
attribution_rule: Optional[AttributionRule] = None,
aggregation_type: Optional[AggregationType] = None,
concurrency: Optional[int] = None,
num_files_per_mpc_container: Optional[int] = None,
k_anonymity_threshold: Optional[int] = None,
) -> None:
self.logger = logger
self.instance_id = instance_id
self.publisher = PrivateComputationPublisherInstance(
instance_id, logger, client
)
self.partner = PrivateComputationPartnerInstance(
instance_id=instance_id,
config=config,
input_path=input_path,
game_type=game_type,
attribution_rule=attribution_rule,
aggregation_type=aggregation_type,
concurrency=concurrency,
num_files_per_mpc_container=num_files_per_mpc_container,
k_anonymity_threshold=k_anonymity_threshold,
num_mpc_containers=num_mpc_containers,
num_pid_containers=num_pid_containers,
logger=logger,
)
self.num_tries = num_tries
self.dry_run = dry_run
self.stage_flow = stage_flow
def get_valid_stage(self) -> Optional[PrivateComputationBaseStageFlow]:
if not self.is_finished():
publisher_stage = self.publisher.get_valid_stage(self.stage_flow)
partner_stage = self.partner.get_valid_stage(self.stage_flow)
# expected for all joint stages
if publisher_stage is partner_stage:
return publisher_stage
elif publisher_stage is None:
return partner_stage
elif partner_stage is None:
return publisher_stage
elif publisher_stage is partner_stage.previous_stage:
# if it's not a joint stage, the statuses don't matter at all since
# each party operates independently
# Example: publisher is PREPARE_DATA_FAILED, partner is PREPARE_DATA_COMPLETED
if not publisher_stage.is_joint_stage or (
# it's fine if one party is completed and the other is started
# because the one with the started status just needs to call
# update_instance one more time
# Example: publisher is COMPUTATION_STARTED, partner is COMPUTATION_COMPLETED
self.stage_flow.is_started_status(self.publisher.status)
and self.stage_flow.is_completed_status(self.partner.status)
):
return publisher_stage
elif partner_stage is publisher_stage.previous_stage:
# Example: publisher is PREPARE_DATA_COMPLETED, partner is PREPARE_DATA_FAILED
if not partner_stage.is_joint_stage or (
# Example: publisher is COMPUTATION_COMPLETED, partner is COMPUTATION_STARTED
self.stage_flow.is_started_status(self.partner.status)
and self.stage_flow.is_completed_status(self.publisher.status)
):
return partner_stage
# Example: partner is CREATED, publisher is PID_PREPARE_COMPLETED
# Example: publisher is COMPUTATION COMPLETED, partner is PREPARE_COMPLETED
# Example: publisher is COMPUTATION_COMPLETED, partner is COMPUTATION_FAILED
raise IncompatibleStageError.make_error(
publisher_stage.name, partner_stage.name
)
return None
def wait_valid_stage(self, timeout: int) -> PrivateComputationBaseStageFlow:
self.logger.info("Polling instances expecting valid stage.")
if timeout <= 0:
raise ValueError(f"Timeout must be > 0, not {timeout}")
start_time = time()
while time() < start_time + timeout:
valid_stage = self.get_valid_stage()
if valid_stage is None:
self.logger.info(
f"Valid stage not found. Publisher status: {self.publisher.status}. Partner status: {self.partner.status}"
)
sleep(POLL_INTERVAL)
else:
self.logger.info(f"Valid stage found: {valid_stage}")
return valid_stage
raise PLInstanceCalculationException(
"Timeout error",
f"Waiting for valid stage timed out after {timeout}s.",
"Try running again",
)
def is_finished(self) -> bool:
return self.publisher.is_finished() and self.partner.is_finished()
def run(self) -> None:
tries = 0
while tries < self.num_tries:
tries += 1
try:
if self.is_finished():
self.logger.info(
f"Private Lift run completed for instance {self.instance_id}. View results at {self.partner.output_dir}"
)
return
# in case the publisher has a status of TIMEOUT
self.publisher.wait_valid_status(WAIT_VALID_STATUS_TIMEOUT)
valid_stage = self.wait_valid_stage(WAIT_VALID_STAGE_TIMEOUT)
if valid_stage is not None:
self.run_stage(valid_stage)
# run the next stage
if not self.dry_run:
self.run()
break
except Exception as e:
if tries >= self.num_tries:
raise e
self.logger.error(
f"Error: type: {type(e)}, message: {e}. Retries left: {self.num_tries - tries}."
)
sleep(RETRY_INTERVAL)
def run_stage(self, stage: PrivateComputationBaseStageFlow) -> None:
self.logger.info(f"Running publisher-partner {stage.name}")
# call publisher <STAGE>
self.logger.info(f"Invoking publisher {stage.name}.")
self.publisher.run_stage(stage)
server_ips = None
# if it's a joint stage, it means partner must wait for publisher to provide server ips.
# if it is not a joint stage, publisher and partner can run in parallel
if stage.is_joint_stage:
# keep polling graphapi until publisher status is <STAGE>_STARTED and server_ips are available
self.publisher.wait_stage_start(stage)
server_ips = self.publisher.server_ips
if server_ips is None:
raise ValueError(f"{stage.name} requires server ips but got none.")
self.logger.info(f"Starting partner {stage.name}:")
self.partner.run_stage(stage, server_ips)
self.wait_stage_complete(stage)
def wait_stage_complete(self, stage: PrivateComputationBaseStageFlow) -> None:
start_status = stage.started_status
complete_status = stage.completed_status
fail_status = stage.failed_status
timeout = stage.timeout
start_time = time()
cancel_time = 0
while time() < start_time + timeout:
self.publisher.update_instance()
self.partner.update_instance()
self.logger.info(
f"Publisher status: {self.publisher.status}. Partner status: {self.partner.status}."
)
if (
self.publisher.status is complete_status
and self.partner.status is complete_status
):
self.logger.info(f"Stage {stage.name} is complete.")
return
if (
self.publisher.status
in [fail_status, PrivateComputationInstanceStatus.TIMEOUT]
or self.partner.status is fail_status
):
if (
self.publisher.status
in [fail_status, PrivateComputationInstanceStatus.TIMEOUT]
and self.partner.status is start_status
and cancel_time <= CANCEL_STAGE_TIMEOUT
):
# wait 5 minutes for partner to become fail status on its own
# if not, only perform 'cancel_stage' one time
if cancel_time == CANCEL_STAGE_TIMEOUT:
self.logger.error(f"Canceling partner stage {stage.name}.")
self.partner.cancel_current_stage()
else:
self.logger.info(
f"Waiting to cancel partner stage {stage.name}."
)
# only cancel once
cancel_time += POLL_INTERVAL
else:
raise PLInstanceCalculationException(
f"Stage {stage.name} failed.",
f"Publisher status: {self.publisher.status}. Partner status: {self.partner.status}.",
"Try running again",
)
sleep(POLL_INTERVAL)
raise PLInstanceCalculationException(
f"Stage {stage.name} timed out after {timeout}s. Publisher status: {self.publisher.status}. Partner status: {self.partner.status}.",
"unknown",
"Try running again",
)
```
#### File: fbpcs/private_computation/pc_attribution_runner.py
```python
import json
import logging
from datetime import datetime, timezone
from typing import Type, Optional, Dict, Any
import dateutil.parser
from fbpcs.pl_coordinator.pl_graphapi_utils import (
PLGraphAPIClient,
)
from fbpcs.pl_coordinator.pl_instance_runner import (
run_instance,
)
from fbpcs.private_computation.entity.private_computation_instance import (
AttributionRule,
AggregationType,
PrivateComputationGameType,
)
from fbpcs.private_computation.entity.private_computation_status import (
PrivateComputationInstanceStatus,
)
from fbpcs.private_computation.stage_flows.private_computation_base_stage_flow import (
PrivateComputationBaseStageFlow,
)
class LoggerAdapter(logging.LoggerAdapter):
def __init__(self, logger: logging.Logger, prefix: str) -> None:
super(LoggerAdapter, self).__init__(logger, {})
self.prefix = prefix
def process(self, msg, kwargs):
return "[%s] %s" % (self.prefix, msg), kwargs
# dataset information fields
AD_OBJECT_ID = "ad_object_id"
TARGET_OBJECT_TYPE = "target_object_type"
DATASETS_INFORMATION = "datasets_information"
INSTANCES = "instances"
NUM_SHARDS = "num_shards"
NUM_CONTAINERS = "num_containers"
# instance fields
TIMESTAMP = "timestamp"
ATTRIBUTION_RULE = "attribution_rule"
STATUS = "status"
"""
The input to this function will be the input path, the dataset_id as well as the following params to choose
a specific dataset range to create and run a PA instance on
1) start_date - start date of the FB Opportunity data
2) end_date - end date of the FB Opportunity data
3) attribution_rule - attribution rule for the selected data
4) result_type - result type for the selected data
"""
def run_attribution(
config: Dict[str, Any],
dataset_id: str,
input_path: str,
timestamp: str,
attribution_rule: AttributionRule,
aggregation_type: AggregationType,
concurrency: int,
num_files_per_mpc_container: int,
k_anonymity_threshold: int,
stage_flow: Type[PrivateComputationBaseStageFlow],
logger: logging.Logger,
num_tries: Optional[int] = 2, # this is number of tries per stage
) -> None:
## Step 1: Validation. Function arguments and for private attribution run.
# obtain the values in the dataset info vector.
client = PLGraphAPIClient(config["graphapi"]["access_token"], logger)
datasets_info = _get_attribution_dataset_info(client, dataset_id, logger)
datasets = datasets_info[DATASETS_INFORMATION]
matched_data = {}
attribution_rule_str = attribution_rule.name
attribution_rule_val = attribution_rule.value
instance_id = None
# Validate if input is datetime or timestamp
is_date_format = _iso_date_validator(timestamp)
if is_date_format:
dt = datetime.fromisoformat(timestamp)
else:
dt = datetime.fromtimestamp(int(timestamp), tz=timezone.utc)
print(dt)
# Verify that input has matching dataset info:
# a. attribution rule
# b. timestamp
if len(datasets) == 0:
raise ValueError("Dataset for given parameters and dataset invalid")
for data in datasets:
if data["key"] == attribution_rule_str:
matched_attr = data["value"]
for m_data in matched_attr:
m_time = dateutil.parser.parse(m_data[TIMESTAMP])
if m_time == dt:
matched_data = m_data
break
if len(matched_data) == 0:
raise ValueError("No dataset matching to the information provided")
# Step 2: Validate what instances need to be created vs what already exist
dataset_instance_data = _get_existing_pa_instances(client, dataset_id)
existing_instances = dataset_instance_data["data"]
for inst in existing_instances:
inst_time = dateutil.parser.parse(inst[TIMESTAMP])
print(inst[STATUS])
if (
inst[ATTRIBUTION_RULE] == attribution_rule_val
and inst_time == dt
and inst[STATUS]
!= PrivateComputationInstanceStatus.POST_PROCESSING_HANDLERS_COMPLETED
):
instance_id = inst["id"]
break
if instance_id is None:
instance_id = _create_new_instance(
dataset_id,
int(timestamp),
attribution_rule_val,
client,
logger,
)
instance_data = _get_pa_instance_info(client, instance_id, logger)
num_pid_containers = instance_data[NUM_CONTAINERS]
num_mpc_containers = instance_data[NUM_SHARDS]
## Step 3. Run Instances. Run maximum number of instances in parallel
logger.info(f"Start running instance {instance_id}.")
run_instance(
config,
instance_id,
input_path,
num_pid_containers,
num_mpc_containers,
stage_flow,
logger,
PrivateComputationGameType.ATTRIBUTION,
attribution_rule,
AggregationType.MEASUREMENT,
concurrency,
num_files_per_mpc_container,
k_anonymity_threshold,
num_tries,
)
logger.info(f"Finished running instances {instance_id}.")
def _create_new_instance(
dataset_id: str,
timestamp: int,
attribution_rule: str,
client: PLGraphAPIClient,
logger: logging.Logger,
) -> str:
instance_id = json.loads(
client.create_pa_instance(
dataset_id,
timestamp,
attribution_rule,
2,
).text
)["id"]
logger.info(
f"Created instance {instance_id} for dataset {dataset_id} and attribution rule {attribution_rule}"
)
return instance_id
def get_attribution_dataset_info(
config: Dict[str, Any], dataset_id: str, logger: logging.Logger
) -> str:
client = PLGraphAPIClient(config["graphapi"]["access_token"], logger)
return json.loads(
client.get_attribution_dataset_info(
dataset_id,
[AD_OBJECT_ID, TARGET_OBJECT_TYPE, DATASETS_INFORMATION],
).text
)
def _get_pa_instance_info(
client: PLGraphAPIClient, instance_id: str, logger: logging.Logger
) -> Any:
return json.loads(client.get_instance(instance_id).text)
def _iso_date_validator(timestamp: str) -> Any:
try:
datetime.strptime(timestamp, "%Y-%m-%d")
return True
except Exception:
return False
def _get_attribution_dataset_info(
client: PLGraphAPIClient, dataset_id: str, logger: logging.Logger
) -> Any:
return json.loads(
client.get_attribution_dataset_info(
dataset_id,
[AD_OBJECT_ID, TARGET_OBJECT_TYPE, DATASETS_INFORMATION],
).text
)
def _get_existing_pa_instances(client: PLGraphAPIClient, dataset_id: str) -> Any:
return json.loads(client.get_existing_pa_instances(dataset_id).text)
```
#### File: scripts/tests/test_gen_config.py
```python
import unittest
from unittest.mock import patch
from fbpcs.scripts import gen_config
class TestGenConfig(unittest.TestCase):
def test_prompt(self) -> None:
# Test if Valid replacement exists and --accept_all passed - we use existing
res = gen_config.prompt("key", replacements={"key": "baz"}, accept_all=True)
self.assertEqual(res, "baz")
# Test with an actual value provided
with patch("builtins.input", return_value="foo"):
# 1. No valid replacement
res = gen_config.prompt("key", replacements={"bar": "baz"})
self.assertEqual(res, "foo")
# 2. Valid replacement exists and we override
res = gen_config.prompt("key", replacements={"key": "baz"})
self.assertEqual(res, "foo")
# Test with hitting enter without typing
with patch("builtins.input", return_value=""):
# 1. No valid replacement
res = gen_config.prompt("key", replacements={"bar": "baz"})
self.assertEqual(res, "")
# 2. Valid replacement exists and we keep
res = gen_config.prompt("key", replacements={"key": "baz"})
self.assertEqual(res, "baz")
def test_build_replacements_from_config(self) -> None:
config = {"a": "123", "b": ["1", "2", "3"], "c": {"d": "e"}}
# This will look weird, but basically we expect to keep all "leaf"
# nodes as replacement values, but also including basic lists
expected = {
"a": "123",
"b": ["1", "2", "3"],
"d": "e",
}
res = gen_config.build_replacements_from_config(config)
self.assertEqual(res, expected)
@patch("builtins.input", return_value="new")
def test_update_dict(self, mock_input) -> None:
# Simple replacement (call prompt 1 time)
d = {"key": "REPLACE"}
expected = {"key": "new"}
gen_config.update_dict(d, replace_key="REPLACE")
self.assertEqual(d, expected)
self.assertEqual(mock_input.call_count, 1)
# Replace within a nested dict (call prompt 2 times)
d = {"key": "REPLACE", "key2": {"key3": "REPLACE"}}
expected = {"key": "new", "key2": {"key3": "new"}}
gen_config.update_dict(d, replace_key="REPLACE")
self.assertEqual(d, expected)
self.assertEqual(mock_input.call_count, 3)
# Replace within existing replacement (no input called, so mock_input.call_count does not change)
d = {"key": "REPLACE"}
replacements = {"key": "new"}
expected = {"key": "new"}
gen_config.update_dict(
d, replace_key="REPLACE", replacements=replacements, accept_all=True
)
self.assertEqual(d, expected)
self.assertEqual(mock_input.call_count, 3)
@patch("fbpcp.util.yaml.load", return_value="LOAD")
@patch("fbpcp.util.yaml.dump")
@patch("fbpcs.scripts.gen_config.update_dict")
def test_gen_config(self, mock_update, mock_dump, mock_load) -> None:
args = {
"<input_path>": "foo",
"<new_output_path>": "bar",
"--replace": "REPLACE",
"--accept_all": True,
}
gen_config.gen_config(args)
mock_load.assert_called_once_with("foo")
mock_update.assert_called_once_with("LOAD", "REPLACE", {}, True)
mock_dump.assert_called_once_with("LOAD", "bar")
```
#### File: utils/config_yaml/config_yaml_dict.py
```python
from pathlib import Path
from typing import Any, Dict
from fbpcp.util import yaml
from fbpcs.utils.config_yaml.exceptions import (
ConfigYamlFieldNotFoundError,
ConfigYamlValidationError,
ConfigYamlFileParsingError,
)
from yaml import YAMLError
class ConfigYamlDict(Dict[str, Any]):
"""Wrapper around dict that throws a custom KeyError exception to inform the user
that there is something wrong with their config.yml file."""
def __getitem__(self, key: str) -> Any:
"""Override of dict key access, e.g. x = my_dict[key]"""
try:
val = super().__getitem__(key)
except KeyError:
raise ConfigYamlFieldNotFoundError(key) from None
if val == "TODO":
raise ConfigYamlValidationError(
key,
"TODOs found in config",
"Fill in remaining TODO entries in config.yml",
)
return val
def __setitem__(self, key: str, value: Any) -> None:
"""Override of dict item setting, e.g. my_dict[key] = x.
Specifically, if value is a dict, it converts the dict to ConfigYamlDict
"""
value = self.from_dict(value) if isinstance(value, dict) else value
super().__setitem__(key, value)
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "ConfigYamlDict":
"""Converts a normal dictionary to a ConfigYamlDict"""
my_dict = cls()
for k, v in d.items():
my_dict[k] = v
return my_dict
@classmethod
def from_file(cls, config_file_path: str) -> "ConfigYamlDict":
"""Read a yaml file to a ConfigYamlDict"""
try:
config_dict = yaml.load(Path(config_file_path))
except YAMLError as e:
raise ConfigYamlFileParsingError(config_file_path, str(e))
return ConfigYamlDict.from_dict(config_dict)
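# Usage sketch:
#   cfg = ConfigYamlDict.from_dict({"a": {"b": "TODO"}})
#   cfg["a"]["b"]    # raises ConfigYamlValidationError because of the "TODO" value
#   cfg["missing"]   # raises ConfigYamlFieldNotFoundError instead of a bare KeyError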
``` |
{
"source": "Joe12827/stocksite",
"score": 3
} |
#### File: server/api/example_api.py
```python
from flask_restful import Resource
from flask_restful import request
from flask_restful import reqparse
import json
from .swen_344_db_utils import *
class ExampleApi(Resource):
def get(self):
# NOTE: No need to replicate code; use the util function!
result = exec_get_one("SELECT COUNT(*) FROM courses");
return result
class TestMessage(Resource):
def get(self):
return "Modal components can use onOpened to fetch data dynamically!"
``` |
{
"source": "Joe1sn/CryotoWork",
"score": 3
} |
#### File: CryotoWork/RSA/RSA.py
```python
import binascii
import struct
import sys
from random import randint
class RSA(object):
"""
docstring for RSA
"""
def __init__(self, KeyLen=1024):
self.ChineseMod = False
self.Debug = True
self.n = 0
self.e = 65537
self.p = 0
self.q = 0
self.phi_n = 0
# RSA.test()
#1. Initialization: generate p and q
# p and q must differ, and the bit length of the modulus must match KeyLen
while self.p == self.q or len(bin(self.p*self.q)) != KeyLen+2:
self.p = RSA.PrimerGen(KeyLen//2)
self.q = RSA.PrimerGen(KeyLen//2)
#2. Compute n and phi(n)
self.n = self.p*self.q
self.phi_n = (self.p-1)*(self.q-1)
#3. Derive the private key d (modular inverse of e mod phi(n))
# self.d = RSA.ExtendEuclid(self.e,self.phi_n)
ed = 1
while 1:
# The extended Euclidean algorithm could compute the inverse here,
# but its result may be negative and it was too slow in practice;
# since the public exponent e is prime, a simple search finds d quickly.
if (ed*self.phi_n+1)%self.e==0:
self.d = (ed*self.phi_n+1)//self.e
break
ed+=1
if self.Debug:
print("p>",self.p)
print("q>",self.q)
print("n>",self.n)
print("phi_n>",self.phi_n)
print("e>",self.e)
print("d>",self.d)
# str/bytes to int
#str->byte->number
def Byte2Hex(Msg):
return int.from_bytes((bytes(Msg,encoding='utf8')),byteorder='big',signed=False)
#number->byte->str
def Hex2Byte(num):
num = hex(num)[2:] # strip the leading '0x' from the hex string
result = ""
i = 0
while(i<len(num)): # parse the hex string two digits at a time
result+=chr(int(num[i]+num[i+1],16))
i+=2
return result
# Error reporting
def ErrorCather(self,ErrorMsg_zhCN, ErrorMsg_en):
if self.ChineseMod:
print(ErrorMsg_zhCN) # print the Chinese message
else:
print(ErrorMsg_en) # print the English message
# Modular exponentiation by repeated squaring
def ReModule(b,n,m): #b^n(mod m)
# arguments must be numeric
if str(b).isdigit() and str(n).isdigit() and str(m).isdigit():
# arguments must be greater than 0
if b > 0 and n>0 and m>0:
# scan the exponent n in binary
result=1 # return value
n1=bin(n) # exponent as a binary string
BinList = list(str(n1)[2:][::-1]) # reverse the binary digits
# iterate over the bits
for i in BinList: # square and multiply
# print(result)
if int(i) == 1:
result = (result*b)%m
b = (b*b)%m
else:
b = (b*b)%m
return result
else:
ErrorCather("参数必须大于0","arguments must lager than 0")
return
else:
ErrorCather("参数必须全为整数","arguments must be integers")
return
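# Hedged example (added for illustration): modular exponentiation agrees with Python's built-in pow,
# e.g. RSA.ReModule(5, 117, 19) should return pow(5, 117, 19) == 1.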
# Large prime generation
def PrimerGen(size): # generate a prime of roughly `size` bits
if str(size).isdigit(): # size must be a non-negative integer
while True:
n = randint(1<<(size-1), 1 << (size+1)) # draw a large number around 2^size
# obtained via binary shifts
if n % 2 != 0: # skip even numbers
found = True # result flag
# probabilistic primality testing
for i in range(0, 2): # 2*3 rounds of primality testing in total
# (2 rounds here, 3 inside the check function)
# if a check fails
if RSA.PrimerCheck(n) == False:
found = False
break # give up on this candidate
# all checks passed
if found == True:
return n
else: # size is not a valid integer: report the error and return None
ErrorCather("参数为素数的位数","argument is the bit length of a prime number")
return
# <NAME> primality test
# Fermat's little theorem + quadratic probing
def PrimerCheck(num,times=3): # test num `times` times
if str(num).isdigit():
if num < 3:
return num==2
u = num-1
t = 0
while u%2 ==0: # factor out powers of two
u//=2
t+=1
for i in range(1,times+1): # Fermat-style test rounds
x = randint(2,num-1) # random witness in [2, num-1]
v = RSA.ReModule(x,u,num)
if v==1 or v==num-1: # num is probably prime for this witness
continue
for j in range(t+1):
v = v*v%num
if v==num-1:
break
else:
return False
return True
else:
ErrorCather("参数必须全为整数","arguments must be integers")
# Euclidean algorithm (gcd)
def Ecuild(a,b):
# check that a and b are numeric
if str(a).isdigit() and str(b).isdigit():
if a>0 and b>0: # both a and b must be greater than 0
try: # start the computation
# Algorithm notes:
# the classic Euclidean algorithm assumes a < b, otherwise the values are swapped;
# with Python's tuple assignment, if a > b the first iteration performs that swap,
# and a loop is used instead of recursion to avoid hitting the recursion depth limit.
while a != 0:
a, b = b % a, a
return b
# loop failure
except (TypeError, ValueError):
ErrorCather("循环失败","loop error")
else:
ErrorCather("两参数必须为大于零整数","two arguments must be unsigned integers")
else:
ErrorCather("两参数必须为大于零整数,而不是其他","two arguments must be unsigned integers, argument type error")
# Extended Euclidean algorithm
def ExtendEuclid(a,b):
# validate arguments
if (not str(a).isdigit()) or (not str(b).isdigit()):
ErrorCather("参数为大于0的整数","argument must be integers greater than 0")
return
a,b = int(a),int(b)
if a <=0 or b <=0:
ErrorCather("参数为大于0的整数","argument must be integers greater than 0")
return
else:
# if a < b, swap a and b
if a < b:
a,b = b,a
try:
# iterative computation
# 1. initialize
R,S,T = a,1,0
_R,_S,_T = b,0,1
# 2. main loop
while _R != 0:
q = R // _R # 2-1 integer quotient
# 2-2 compute temporary R', S', T'
tempR = R-q*_R
tempS = S-q*_S
tempT = T-q*_T
# 2-3 update R, S, T and R', S', T'
R, _R = _R, tempR
S, _S = _S, tempS
T, _T = _T, tempT
# 3. loop finished, return the coefficient
return T
# error during computation
except TypeError as e:
ErrorCather("类型错误",e)
# Euler's theorem helper
def Euler(a,n): # computes a^phi(n) - 1
if a%n==0:
print(str(a)+"%"+str(n)+"==0")
return False
phi_n = 0 # accumulator for Euler's totient
for i in range(1,n):
if RSA.Ecuild(i,n)==1:
phi_n+=1
return pow(a,phi_n)-1
# pad messages to a 0x50-byte boundary
def ByteAlign(self,Msg):
# if the length is not a multiple of 0x50, pad with \x00
if (0x50-len(Msg)%0x50) != 0:
rest = (0x50-len(Msg)%0x50)
Msg += "\x00"*rest
return Msg
# alignment check
def AlignCheck(Msg):
if len(Msg)%0x50 != 0:
ErrorCather("参数未对齐","arguments must be aligned to 0x50")
return 0
else:
return 1
# RSA encryption
def Encrypt(self,Msg):
# check the message length
if len(Msg) != 0x50:
ErrorCather("消息长度必须为0x50","Message length must be 0x50")
return
else:
cipher = RSA.ReModule(RSA.Byte2Hex(Msg),self.e,self.n)
return cipher
# RSA decryption
def Decrypt(self,Cipher):
return RSA.Hex2Byte((RSA.ReModule(Cipher,self.d,self.n)))
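# --- Hedged usage sketch (added for illustration) ---
# The 0x50-byte message must stay numerically smaller than the modulus n, which the default
# 1024-bit key guarantees. Pure-Python key generation above may take a little while.
if __name__ == "__main__":
    rsa = RSA()                # KeyLen defaults to 1024 bits
    message = "A" * 0x50       # Encrypt() requires exactly 0x50 characters
    cipher = rsa.Encrypt(message)
    print(rsa.Decrypt(cipher) == message)  # expected: True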
``` |
{
"source": "joe2018/AsiaDataPlatform",
"score": 2
} |
#### File: AsiaDataPlatform/platformproject/form.py
```python
from django import forms
from django.forms import fields,widgets
from platformproject.models import *
from django.core.exceptions import ValidationError
import re
class UserInfo(forms.ModelForm):
user_name = fields.CharField(
min_length=6,
max_length=12,
strip=True,
required=True,
widget=widgets.TextInput(attrs={'placeholder': '用户名为8-12个字符'}),
error_messages={
'required': '用户名不能为空',
'min_length': '用户名最少为6个字符',
'max_length': '用户名最不超过为20个字符'
}
)
user_hashpas = fields.CharField(
required=True,
widget=widgets.PasswordInput(attrs={'placeholder': '请输入密码,必须包含数字,字母,特殊字符'}),
min_length=6,
max_length=12,
error_messages={
'required': '密码不能为空',
'min_length': '密码最少6个字符',
'max_length': '密码不超过12个字符'
}
)
pwd_again = fields.CharField(
widget=widgets.PasswordInput(attrs={ 'placeholder': '请再次输入密码!'}),
required=True,
error_messages={'required': '请再次输入密码!!!!'}
)
#user_key = fields.CharField('识别码', max_length=32)
user_email = fields.EmailField(
widget=widgets.TextInput(attrs={'placeholder': '请输入邮箱'}),
required = True,
error_messages = {'required': '邮箱不能为空',
'invalid': '请输入正确的邮箱格式'}
)
def clean_user_name(self):
username = self.cleaned_data.get('user_name')
users = user.objects.filter(user_name=username).count()
print(users)
if users:
raise ValidationError('用户名重复')
return username
def clean_user_email(self):
email = self.cleaned_data.get('user_email')
mobile_re = re.compile(r'^[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+){0,4}@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+){0,4}$')
if not mobile_re.match(str(email)):
raise ValidationError('邮箱格式错误')
return email
def clean(self):
password1 = self.cleaned_data.get('user_hashpas')
password2 = self.cleaned_data.get('pwd_again')
if password1 and password2 and password1 != password2:
self.add_error('pwd_again','两次输入密码不一致')
return None
else:
return self.cleaned_data
``` |
{
"source": "Joe2357/G-Meet",
"score": 3
} |
#### File: G-Meet/opencv/model.py
```python
import cv2 as cv
import argparse
import sys
import numpy as np
import os.path
import time
print("PYTHON RUNNING")
# Initialize the parameters
confThreshold = 0.5 # Confidence threshold
nmsThreshold = 0.4 # Non-maximum suppression threshold
inpWidth = 416 # Width of network's input image
inpHeight = 416 # Height of network's input image
parser = argparse.ArgumentParser(description='Object Detection using YOLO in OPENCV')
parser.add_argument('--device', default='cpu', help="Device to perform inference on 'cpu' or 'gpu'.")
parser.add_argument('--image', help='Path to image file.')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()
# Load names of classes
classesFile = "./opencv/obj.names"
classes = None
with open(classesFile, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = "./opencv/yolo_n.cfg"
modelWeights = "./opencv/yolo_n_10000.weights"
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
count = 0
exCount = 0
recoredFrame = 0
detectedFrame = False
detectedTime = 0
inferenceTime = 0
totalDetectionTime = 0
from matplotlib import pyplot as plt
x_values = []
y_values = []
picNum = 0
if (args.device == 'cpu'):
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
print('Using CPU device.')
elif (args.device == 'gpu'):
net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
print('Using GPU device.')
# Get the names of the output layers
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
#return [layersNames[i - 1] for i in net.getUnconnectedOutLayers()]
# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom):
# Draw a bounding box.
cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)
label = '%.2f' % conf
# Get the label for the class name and its confidence
if classes:
assert (classId < len(classes))
label = '%s:%s' % (classes[classId], label)
# Display the label at the top of the bounding box
labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
cv.rectangle(frame, (left, top - round(1.5 * labelSize[1])), (left + round(1.5 * labelSize[0]), top + baseLine),
(255, 255, 255), cv.FILLED)
cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
global x_values
global y_values
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
global count
global recoredFrame
global detectedFrame
global detectedTime
global inferenceTime
global totalDetectionTime
global picNum
recoredFrame += 1
if len(classIds) < 3:
y_values.append(1)
totalDetectionTime += inferenceTime
else:
y_values.append(0)
x_values.append(recoredFrame / 10)
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
if recoredFrame % 10 == 0:
if len(x_values) >= 50:
x_values = x_values[len(x_values)-50:]
y_values = y_values[len(y_values)-50:]
plt.clf()
plt.plot(x_values, y_values, 'b')
plt.title("Detecting Graph")
plt.xlabel("Total detected time : %.2f sec" % (totalDetectionTime / 1000))
plt.ylabel("Detect or not")
plt.ylim([-0.1, 1.1])
picNum += 1
newPath = './public/plotImage/room1-graph.png'
plt.savefig(newPath)
print("Total Time : %.2f second" % (totalDetectionTime / 1000))
# print("Postprocess ends")
# Process inputs
winName = 'Deep learning object detection in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
outputFile = "./opencv/yolo_out_py.avi"
if (args.image):
# Open the image file
if not os.path.isfile(args.image):
print("Input image file ", args.image, " doesn't exist")
sys.exit(1)
cap = cv.VideoCapture(args.image)
outputFile = args.image[:-4] + '_yolo_out_py.jpg'
elif (args.video):
# Open the video file
if not os.path.isfile(args.video):
print("Input video file ", args.video, " doesn't exist")
sys.exit(1)
cap = cv.VideoCapture(args.video)
outputFile = args.video[:-4] + '_yolo_out_py.avi'
else:
# Webcam input
cap = cv.VideoCapture(0) # camera index
# Get the video writer initialized to save the output video
if (not args.image):
vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
(round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))
while cv.waitKey(10) < 0:
# print("tempTick: ", tempTick)
# get frame from the video
hasFrame, frame = cap.read()
# Stop the program if reached end of video
if not hasFrame:
print("Done processing !!!")
print("Output file is stored as ", outputFile)
cv.waitKey(3000)
# Release device
cap.release()
break
# Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)
# Sets the input to the network
net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = net.forward(getOutputsNames(net))
# Remove the bounding boxes with low confidence
postprocess(frame, outs)
# Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
t, _ = net.getPerfProfile()
inferenceTime = (t * 1000.0 / cv.getTickFrequency())
label = 'Inference time: %.2f ms' % inferenceTime
cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
# Write the frame with the detection boxes
vid_writer.write(frame.astype(np.uint8))
cv.imshow(winName, frame)
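# Hedged usage note (added for illustration; file names are placeholders): the script is typically
# launched as e.g. `python model.py --device cpu --video sample.mp4`, and it falls back to the
# default webcam when neither --image nor --video is given.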
``` |
{
"source": "joe-23/BOSS-V-algorithm",
"score": 3
} |
#### File: joe-23/BOSS-V-algorithm/structure5_featureoptimization.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import nquad
from itertools import product
from diversipy import lhd_matrix, transform_spread_out
from structure1_utils import WrongCallToMethod, find_indices, check_duplicate
from structure3_bayesianoptimization import CBayesOpt
from structure4_featureselection import CFeatureSelector
debug = False # Print information at debug level (e.g. model parameters and CV score)
# 1. Define Feature optimization class, which uses Bayesian optimization and feature selection to select the best features
#####################################################################################################################
class CFeatureOpt(CBayesOpt):
"""Callable class, based on Bayesian Optimization with a set of sensible defaults, that also performs Sequential Forward Feature Selection (SFS).
Exposes a method, refit_regression(), for fitting the internal regression model via Cross-validation."""
def __init__(self,af,GPkernel,floating=True,target_features=None,greater_is_better=True,random_state=None,verbose=True):
super().__init__(af,GPkernel,greater_is_better,False,True,True,random_state,verbose)
self.sfs = CFeatureSelector(floating=floating,target_features=target_features,random_state=random_state)
self.k = 2 # Exponent of the minima MD function
def fit(self,X,Y,LR_Y=None,X_add=None,LR_Y_add=None,initial_features=[],mandatory_features=[]):
"""
Perform feature selection and fit the Bayesian optimization Gaussian process model.
Input: X - numpy 2D array
matrix of observed data
Input: Y - numpy 1D array
vector of observed evaluations of the objective function, Y = f(X)
Input: LR_Y - numpy 1D array
vector of observed evaluations of the constraint function, LR_Y = c(X); LR_Y will be used instead of Y
Input: X_add - numpy 2D array
matrix of additional observed data to be used in the feature selection procedure
Input: LR_Y_add - numpy 1D array
vector of additional observed evaluations of the constraint function to be used in the feature selection procedure, either LR_Y_add = c(X_add) or LR_Y_add = f(X_add)
Input: initial_features - list
list of indexes (representing features) that are to be excluded from the search
if initial_features == 'all' then no feature selection is performed
Input: mandatory_features - list
list of indexes (representing features) that are not to be removed and are automatically included in the initial_features list
"""
##- Store the original dimension of the space; done once to avoid overwritings
if not hasattr(self,"full_dim"):
self.full_dim = X.shape[1]
##- Store mandatory features used by the transformation functions of the EIC acquisition function
self.mandatory_features = mandatory_features
##- Perform feature selection
if LR_Y is not None:
self.selected_features, score, self.fitted_lm = self.sfs(X,LR_Y,X_add,LR_Y_add,initial_features=initial_features,mandatory_features=self.mandatory_features)
else:
self.selected_features, score, self.fitted_lm = self.sfs(X,Y,X_add,LR_Y_add,initial_features=initial_features,mandatory_features=self.mandatory_features)
if debug and self.verbose:
print("Features: ", self.selected_features)
print("Linear model intercept: ", self.fitted_lm.steps[-1][1].intercept_)
print("Linear model betas: ", self.fitted_lm.steps[-1][1].coef_)
print("CV score = ", score)
##- Fit the Gaussian process model
super().fit(X[:,self.selected_features],Y)
return self
def refit_regression(self,X_train,Y_train):
"""
Re-evaluate the internal regression model by performing grid search (GridSearchCV).
Input: X_train - numpy 2D array
matrix of observed data
Input: Y_train - numpy 1D array
vector of observed evaluations of the objective, Y_train = f(X_train)
Output: score of the best model object - float,
best regression model object - sklearn.pipeline.Pipeline,
parameters of the best model object - dictionary
"""
best_score, best_estimator, best_params = self.sfs.compute_CV(X_train,Y_train)
return best_score, best_estimator, best_params
def acquisition(self,x):
"""
Compute single-parameter acquisition function ready to be used with minimize() method, or for plotting.
Input: x - numpy 1D array
Output: acquisition function evaluated at x - float
"""
x = np.array(x).reshape(1,-1)
af = self.AF(x,self)[0]
coeff = self.__get_coeff(x)
return coeff * af
def __get_coeff(self,x):
"""
Compute multiplicative coefficient for acquisition function at x using minima distribution.
See [Luo (2019)] and [Villafan (2020); Chapter 4.2] for details.
Input: x - numpy 1D array
Output: coefficient for acquisition function evaluated at x - float
"""
##- Set coefficient to zero to prevent the optimization method from drawing this configuration
##- This may affect Confidence Bound acquisition in a bad way, because its values may also be negative:
##- e.g., when minimizing a positive function ( f(x)>0 for every x ) such as Performance models
if hasattr(self,"threshold"):
pred_val = self.fitted_lm.predict(x)
if not self.maximize and pred_val > self.threshold:
return 0.0
if self.maximize and pred_val < self.threshold:
return 0.0
##- Compute coefficient using minima distribution of the linear regression model
if hasattr(self,"mix_af_with_lm"):
return self.__tau_k(x) / self.__I
else:
return 1.0
def __tau_k(self,*x):
"""
Compute the exponential-type Tau function of the linear regression model function.
Input: *x - list
list of elements that will be rebuilt into a numpy 1D array
Output: Nascent minima distribution (MD) function evaluated at x - float
"""
x = np.array(x).reshape(1,-1)
r = self.fitted_lm.predict(x)
r = (r - self.__LR_min) / (self.__LR_max - self.__LR_min)
return np.exp(-self.k*r)
def __get_LR_range(self,bounds):
"""
Compute range of the function which is the numerator of the nascent minima distribution function.
Since we are computing the minima MD of a linear function, the max and min are at the vertices.
Input: bounds - numpy 2D array
matrix of [lower_bound,upper_bound] where indices represent the dimension (starting from 0)
Output: max, min - [float, float]
maximum and minimum value of tau_k() evaluated at the boundaries
"""
bounds = np.array(bounds).reshape(-1,2)
vertices = list(product(*zip(bounds[:,0],bounds[:,1])))
evals = [self.fitted_lm.predict(np.array(vertex).reshape(1,-1)) for vertex in vertices]
return max(evals), min(evals)
def set_EIC_params(self,lb,ub,compute_mean=None,compute_sigma=None):
"""
Set bound parameters and tranformations used by constrained EI (EIC) acquisition function.
These parameters bound the value of the objective: lb < c(x) < ub.
The tranformations compute the constraints mean and std from the objective's mean and std.
The mandatory_features will be used for the computation of the transformations.
Input: lb - float
lower bound for constrained EI (EIC) acquisition function
Input: ub - float
upper bound for constrained EI (EIC) acquisition function
Input: compute_mean - function
function with signature f(mu,x_1,x_2,..,x_m) that transforms the objective mean 'mu' into the constraint function mean, where x_1,x_2,..,x_m are the mandatory configuration features;
if None the constraint is computed on the objective function, and not on a function derived from it
Input: compute_sigma - function
function with signature g(sigma,x_1,x_2,..,x_m) that transforms the objective std 'sigma' into the constraint function std, where x_1,x_2,..,x_m are the mandatory configuration features;
if None the constraint is computed on the objective function, and not on a function derived from it
"""
self.EIC_params_given = True
self.lb = lb
self.ub = ub
self.compute_mean = compute_mean
self.compute_sigma = compute_sigma
def __set_EIC_params(self):
"""
Set EIC params, if they were given.
"""
if hasattr(self,"EIC_params_given"):
##- Compute index of mandatory configuration features among the ones selected by the feature selector
idx = []
for feature_idx in self.mandatory_features:
idx.append(find_indices(self.selected_features,lambda x_idx: x_idx == feature_idx))
if debug:
print("**** indices = {}".format(idx))
##- Redefine functions in order to change their signature
def sub_compute_mean(mu,x):
return self.compute_mean(mu,*x[:,idx])
def sub_compute_sigma(sigma,x):
return self.compute_sigma(sigma,*x[:,idx])
##- Set parameters to be used at the current iteration
super().set_EIC_params(self.lb,self.ub,sub_compute_mean,sub_compute_sigma)
def set_threshold(self,threshold):
"""
Set threshold level coefficient for acquisition function at x to be used by Variants C or D of the algorithm.
See [Villafan (2020)] for details.
Input: threshold - float
"""
self.threshold = threshold
def draw(self,bounds,n_restarts=None,return_prob_bound=False,fill_result=False,mix_models=False):
"""
Draw next observation via Bayesian optimization.
WARNING: fit() method must be called first.
Input: bounds - numpy 2D array
matrix of [lower_bound,upper_bound] where indices represent the dimension (starting from 0)
Input: x0_list - numpy 1D array OR iterable of numpy 1D arrays
optimization starting point
Input: n_restarts - int
number of points to be generated within provided bounds, i.e. number of times to run the minimizer in minimize() method
Input: choose - str
choose = ["lhd","random"]
if "lhd" n_restarts samples are drawn from a space-filling latin hypercube design
if "random" n_restarts samples are drawn from a uniform distribution on the domain
Input: use_derivatives - bool
use derivatives of conditional mean and variance during optimization when True
Input: return_prob_bound - bool
if using a constrained acquisition function, the probability that the constraints are met at the result of this method is returned when True
Input: fill_result - bool
the resulting configuration's irrelevant dimensions are set 0 when True;
in this way the configuration can be evaluated by the objective function if available
Input: mix_models - bool
the linear model is used to modify the prediction from the acquisition maximization when True
Output: result - numpy 1D array
new observation to query
AF_value - float
acquisition function evaluated at result
"""
##- Set EIC params, if they were given
self.__set_EIC_params()
##- Compute tau_k() function normalization constant: its integral; may take long to compute a d-dimensional integral
##- Another way is to normalize the values of tau_k() by normalizing the values of the LR predictive function
if mix_models:
self.mix_af_with_lm = mix_models
##- To normalize tau_k(), either compute its integral
# self.__I = nquad(self.__tau_k,bounds[self.selected_features,:])[0]
# self.__LR_max, self.__LR_min = 1.0, 0.0
##- Or normalize the LR values
self.__I = 1.0
self.__LR_max, self.__LR_min = self.__get_LR_range(bounds[self.selected_features,:])
##- Draw next configuration
if return_prob_bound:
result, AF_value, prob_bound = super().draw(bounds[self.selected_features,:],n_restarts=n_restarts,choose="lhd",return_prob_bound=True)
else:
result, AF_value = super().draw(bounds[self.selected_features,:],n_restarts=n_restarts,choose="lhd",return_prob_bound=False)
##- Fill irrelevant dimensions of resulting observation with 0's
if fill_result:
temp = np.full((1,self.full_dim),0.0)
temp[:,self.selected_features] = result
result = temp.reshape(1,-1)[0]
if return_prob_bound:
return result, AF_value, prob_bound
else:
return result, AF_value
def __call__(self,X,Y,bounds,initial_features=[],mandatory_features=[],max_iter_BO=None,mix_models=False,
return_prob_bound=False,LR_Y=None,X_add=None,LR_Y_add=None):
"""
Callable method that performs one iteration of Feature selection and one of Bayesian optimization.
Input: X - numpy 2D array
matrix of observed data
Input: Y - numpy 1D array
vector of observed evaluations of the objective, Y = f(X)
Input: bounds - numpy 2D array
matrix of [lower_bound,upper_bound] where indices represent the dimension (starting from 0)
Input: initial_features - list
list of indexes (representing features) that are to be excluded from the search
if initial_features == 'all' then no feature selection is performed
Input: mandatory_features - list
list of indexes (representing features) that are not to be removed and are automatically included in the initial_features list
Input: max_iter_BO - int
maximization of acquisition function stopping criterion based on maximum number of iterations
Input: mix_models - bool
the linear model is used to modify the prediction from the acquisition maximization when True
Input: return_prob_bound - bool
if using a constrained acquisition function, the probability that the constraints are met at the result of this method is returned when True
Input: LR_Y - numpy 1D array
vector of observed evaluations of the constraint function, LR_Y = c(X); LR_Y will be used instead of Y
Input: X_add - numpy 2D array
matrix of additional observed data to be used in the feature selection procedure
Input: LR_Y_add - numpy 1D array
vector of additional observed evaluations of the constraint function to be used in the feature selection procedure, either LR_Y_add = c(X_add) or LR_Y_add = f(X_add)
Output: result, AF_value, selected_features, fitted_lm - [numpy 1D array, list, sklearn.linear_model]
result is the suggested configuration
AF_value is the evaluation of the acquisition function at suggested configuration
selected_features are the relevant features selected by the provided SFS
fitted_lm is the linear model that has been fitted on the subset of X with features selected_features
"""
##- Feature selection via fit() method
self.fit(X,Y,LR_Y,X_add,LR_Y_add,initial_features,mandatory_features)
##- Bayesian optimization via draw() method
result = self.draw(bounds,n_restarts=max_iter_BO,return_prob_bound=return_prob_bound,mix_models=mix_models)
if return_prob_bound:
return [result[0], result[1], self.selected_features, self.fitted_lm, result[2]]
else:
return [result[0], result[1], self.selected_features, self.fitted_lm]
def optimize(self,OF,bounds,X=None,n_samples=3,max_iter=100,design="lhd",
threshold=None,mix_models=True,initial_features=[],
max_random_jumps=None,tol=1e-10,plot_acquisition=False):
"""
Iterative algorithm that performs feature selection and Bayesian optimization.
WARNING: Optimization happens on the boundary of the domains:
the values of the irrelevant features are set to 0.
See [Villafan (2020); Chapter 4.2] for details.
Input: OF - function
objective function
Input: bounds - numpy 2D array
bounds must be an array of length d of elements [lower_bound,upper_bound]
Input: X - numpy 2D array
matrix of known observations
Input: n_samples - int
number of starting observations in case X is not given
Input: max_iter - int
stopping criterion based on maximum number of iterations
Input: design - str
design = ["lhd","random"]
if "lhd" n_samples are drawn from a space-filling latin hypercube design
if "random" n_samples are drawn from a uniform distribution on the domain
Input: threshold - float
objective threshold value; this value is enforced on all acquisition functions so that their value is 0 at points where the threshold is not met
Input: mix_models - bool
the linear model is used to modify the prediction from the acquisition maximization when True
Input: initial_features - list
list of indexes (representing features) that are to be excluded from the search
if initial_features == 'all' then no feature selection is performed
Input: max_random_jumps - int
maximum number of random jumps the algorithm can make when sampling too close to already sampled points; if None is given, a default value is chosen accordingly based on the dimensionality of OF
Input: tol - float
tolerance at which to make a random jump
Output: result of the optimization procedure: observation and its evaluation - [numpy 1D array, float]
"""
##- Define the matrix of starting observations
self.x_list, self.y_list = [], []
if X is None:
if design == "lhd":
grid = transform_spread_out(lhd_matrix(n_samples,bounds.shape[0]))
x0_list = bounds[:,0] + (bounds[:,1]-bounds[:,0])*grid
elif design == "random":
x0_list = np.random.uniform(bounds[:, 0],bounds[:, 1],(n_samples,bounds.shape[0]))
else:
raise ValueError("Only 'random' and 'lhd' designs are supported as of now.")
for x in x0_list:
self.x_list.append(x)
self.y_list.append(OF(x))
else:
for x in X:
self.x_list.append(x)
self.y_list.append(OF(x))
X = np.array(self.x_list).reshape(-1,bounds.shape[0])
Y = np.array(self.y_list).reshape(-1,1)
##- Set parameters
if threshold is not None:
self.set_threshold(threshold)
##- Instantiate check_duplicate class
is_duplicate = check_duplicate()
##- Define the list of sampled configurations x and observations; used by plot_convergence() method
self.x_output_list , self.y_output_list = [], []
##- Iterative optimization of the objective function
self.selected_features = initial_features
for i in range(0,max_iter):
##- selected_features is updated everytime fit() method is called
self.fit(X,Y,initial_features=self.selected_features)
##- Select the point where to evaluate OF next via maximization of AF
new_Obs = self.draw(bounds,fill_result=True,mix_models=mix_models)[0]
eval_newObs = OF(new_Obs)
self.x_output_list.append(new_Obs)
self.y_output_list.append(eval_newObs)
##- Select another point randomly in case of duplicate and print to screen
new_Obs, duplicate = is_duplicate(new_Obs,X,bounds,max_random_jumps,tol)
if duplicate and self.verbose:
print("Iteration {}: configuration chosen ramdomly!".format(i+1))
eval_newObs = OF(new_Obs)
##- Print to screen
if self.verbose:
print("Iteration {}: x = {}, f(x) = {}".format(i+1,new_Obs,eval_newObs))
##- Add the newly acquired observation to appropriate lists for plotting
self.x_list.append(new_Obs)
self.y_list.append(eval_newObs)
X = np.array(self.x_list).reshape(-1,bounds.shape[0])
Y = np.array(self.y_list).reshape(-1,1)
##- Plot acquisition function
if plot_acquisition:
self.plot_acquisition(bounds,OF)
best_index = np.argmax(self.y_list) if self.maximize else np.argmin(self.y_list)
best_x = self.x_list[best_index]
best_y = self.y_list[best_index]
return best_x, best_y
def plot_acquisition(self,bounds,OF=None,plot_regression=True):
"""
Plot objective and acquisition functions.
WARNING: fit() method must first be called.
Input: bounds - numpy 2D array
bounds must be an array of length d of elements [lower_bound,upper_bound]
Input: OF - function
objective function
"""
##- Check whether fit() method has been called
if not hasattr(self, "X"):
raise WrongCallToMethod("The method fit() must first be called.")
if self.d == 1:
n_points = 1000
xc = np.linspace(bounds[:,0],bounds[:,1],n_points).reshape(n_points,1)
mu, sigma = self.gp.predict(xc,return_std=True)
mu = mu.reshape(n_points,1) + self.Y_mean
sigma = sigma.reshape(n_points,1)
plt.figure(figsize=(10,7))
plt.subplot(2, 1, 1)
if OF is not None:
yc = np.array(list(map(OF,xc))).reshape(n_points,1)
plt.plot(xc,yc,'b-',lw=2)
if plot_regression:
lr_pred = self.fitted_lm.predict(xc)
plt.plot(xc,lr_pred,'k-',lw=2)
plt.plot(self.X,self.Y + self.Y_mean,'ko',markersize=5,label=u'Observations')
plt.plot(xc,mu,'g-',lw=1.5,label=u'Posterior mean')
plt.fill(np.concatenate([xc,xc[::-1]]),\
np.concatenate([mu - 1.96 * sigma, (mu + 1.96 * sigma)[::-1]]),\
alpha=0.5,fc='g',ec='None',label='95% P.I.')
plt.axvline(x=self.x_list[-1],color='r',lw=2.5)
plt.xlim(*bounds)
plt.ylabel(r'Objective function', fontsize=16)
plt.legend(loc='upper left', fontsize=16)
plt.subplot(2, 1, 2)
yc = np.array(list(map(lambda x: -x, list(map(self.acquisition,xc)))))
yc_normalized = (yc - min(yc))/(max(yc - min(yc))) # normalize acquisition
yc_normalized = yc_normalized.reshape(n_points,1)
plt.plot(xc,yc_normalized,'r-',lw=1.5)
plt.axvline(x=self.x_list[-1],color='r',lw=2.5)
plt.xlim(*bounds)
plt.xlabel(r'x', fontsize=16)
plt.ylabel(r'Acquisition function', fontsize=16)
plt.show(block=True)
else:
print("Plot of acquisition function is {}-dimensional. Thus, it will not be shown.".format(self.d))
# def plot_acquisition(self,bounds,OF=None,plot_regression=True):
# """
# Plot objective and acquisition functions.
# WARNING: fit() method must first be called.
# Input: bounds - numpy 2D array
# bounds must be an array of length d of elements [lower_bound,upper_bound]
# Input: OF - function
# objective function
# """
# ##- Check whether fit() method has been called
# if not hasattr(self, "X"):
# raise WrongCallToMethod("The method fit() must first be called.")
# if self.d == 1:
# n_points = 1000
# xc = np.linspace(bounds[:,0],bounds[:,1],n_points).reshape(n_points,1)
# mu, sigma = self.gp.predict(xc,return_std=True)
# mu = mu.reshape(n_points,1) + self.Y_mean
# sigma = sigma.reshape(n_points,1)
# plt.figure(figsize=(10,7))
# plt.subplot(2, 1, 1)
# if OF is not None:
# yc = np.array(list(map(OF,xc))).reshape(n_points,1)
# plt.plot(xc,yc,'b-',lw=2)
# # if plot_regression:
# # lr_pred = self.fitted_lm.predict(xc)
# # plt.plot(xc,lr_pred,'k-',lw=2)
# plt.plot(self.X,self.Y + self.Y_mean,'ko',markersize=5,label=u'Observations')
# plt.plot(xc,mu,'g-',lw=1.5,label=u'Posterior mean')
# plt.fill(np.concatenate([xc,xc[::-1]]),\
# np.concatenate([mu - 1.96 * sigma, (mu + 1.96 * sigma)[::-1]]),\
# alpha=0.5,fc='g',ec='None',label='95% P.I.')
# # plt.axvline(x=self.x_list[-1],color='r',lw=2.5)
# plt.xlim(*bounds)
# plt.ylabel(r'Objective function', fontsize=16)
# plt.legend(loc='upper left', fontsize=16)
# plt.subplot(2, 1, 2)
# yc = np.array(list(map(lambda x: -x, list(map(self.acquisition,xc)))))
# yc_normalized = (yc - min(yc))/(max(yc - min(yc))) # normalize acquisition
# yc_normalized = yc_normalized.reshape(n_points,1)
# from structure3_bayesianoptimization import CB, SEI
# yc2, yc3 = [], []
# for xxx in xc:
# yc2.append(-1.0 * CB(xxx,self)[0])
# yc3.append(-1.0 * SEI(xxx,self)[0])
# yc2 = np.array(yc2).reshape(n_points,1)
# yc3 = np.array(yc3).reshape(n_points,1)
# yc2_normalized = (yc2 - min(yc2))/(max(yc2 - min(yc2))) # normalize acquisition
# yc3_normalized = (yc3 - min(yc3))/(max(yc3 - min(yc3))) # normalize acquisition
# plt.plot(xc,yc_normalized,'r-',lw=1.5)
# # plt.axvline(x=self.x_list[-1],color='r',lw=2.5)
# plt.plot(xc,yc2_normalized,'b-',lw=1.5)
# plt.plot(xc,yc3_normalized,'g-',lw=1.5)
# plt.xlim(*bounds)
# plt.xlabel(r'x', fontsize=16)
# plt.ylabel(r'Acquisition functions', fontsize=16)
# plt.show(block=True)
# else:
# print("Plot of acquisition function is {}-dimensional. Thus, it will not be shown.".format(self.d))
``` |
{
"source": "joe247/CSE-LABS",
"score": 3
} |
#### File: S6/CS334-NPL/002b_thread (004b).py
```python
import threading
import time
def dummy():
pass
def main():
for i in range(5):
tid = threading.Thread(target=dummy, args=[])
print (f'Created new thread {tid} {threading.get_ident()}')
time.sleep(1)
if __name__ == '__main__':
main()
```
#### File: S6/CS334-NPL/003c_shared_memory (005c).py
```python
import multiprocessing
def square_list(mylist, result, square_sum):
""" function to square a given list """
# append squares of mylist to result array
for idx, num in enumerate(mylist):
result[idx] = num * num
# square_sum value
square_sum.value = sum(result)
# print result Array
print(f'Result (in process p1): {result[:]}')
# print square_sum Value
print(f'Sum of squares (in process p1): {square_sum.value}')
if __name__ == "__main__":
# input list
mylist = [1,2,3,4]
# creating Array of int data type with space for 4 integers
result = multiprocessing.Array('i', 4)
# creating Value of int data type
square_sum = multiprocessing.Value('i')
# creating new process
p1 = multiprocessing.Process(target=square_list, args=(mylist, result, square_sum))
# starting process
p1.start()
# wait until process is finished
p1.join()
# print result array
print(f'Result (in main process): {result[:]}')
# print square_sum Value
print(f'Sum of squares (in main process): {square_sum.value}')
```
#### File: S6/CS334-NPL/005_reader_writer (007).py
```python
import threading
class RWLock:
''' A simple reader-writer lock. Several readers can hold the lock
simultaneously, XOR one writer. Write locks may have priority over reads to
prevent write starvation. '''
def __init__(self):
self.rwlock = 0
self.writers_waiting = 0
self.monitor = threading.Lock()
self.readers_ok = threading.Condition(self.monitor)
self.writers_ok = threading.Condition(self.monitor)
def acquire_read(self):
''' Acquire a read lock. Several threads can hold this type of lock.
It is exclusive with write locks. '''
self.monitor.acquire()
while self.rwlock < 0 or self.writers_waiting:
self.readers_ok.wait()
self.rwlock += 1
self.monitor.release()
def acquire_write(self):
''' Acquire a write lock. Only one thread can hold this lock, and
only when no read locks are also held. '''
self.monitor.acquire()
while self.rwlock != 0:
self.writers_waiting += 1
self.writers_ok.wait()
self.writers_waiting -= 1
self.rwlock = -1
self.monitor.release()
def release(self):
# Release a lock, whether read or write.
self.monitor.acquire()
if self.rwlock < 0:
self.rwlock = 0
else:
self.rwlock -= 1
wake_writers = self.writers_waiting and self.rwlock == 0
wake_readers = self.writers_waiting == 0
self.monitor.release()
if wake_writers:
self.writers_ok.acquire()
self.writers_ok.notify()
self.writers_ok.release()
elif wake_readers:
self.readers_ok.acquire()
self.readers_ok.notifyAll()
self.readers_ok.release()
if __name__ == '__main__':
import time
rwl = RWLock()
class Reader(threading.Thread):
def run(self):
print(self, 'start')
rwl.acquire_read()
print(self, 'acquired')
time.sleep(2)
print(self, 'stop')
rwl.release()
print(self, 'released')
class Writer(threading.Thread):
def run(self):
print(self, 'start')
rwl.acquire_write()
print(self, 'acquired')
time.sleep(5)
print(self, 'stop')
rwl.release()
print(self, 'released')
Reader().start() # Reader-1
time.sleep(0.5)
Reader().start() # Reader-2
time.sleep(0.5)
Writer().start() # Writer-1
time.sleep(0.5)
``` |
{
"source": "joe28965/cuorabot",
"score": 2
} |
#### File: cuorabot_bringup/launch/bringup_launch.py
```python
import os
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration, Command
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
xacro_path = os.path.join(get_package_share_directory('cuorabot_description'), 'urdf', 'cuorabot.urdf.xacro')
# Launch configuration variables specific to simulation
use_sim_time = LaunchConfiguration('use_sim_time')
# Declare the launch arguments
declare_use_sim_time_cmd = DeclareLaunchArgument(
'use_sim_time',
default_value='false',
description='Use simulation (Gazebo) clock if true')
# Specify the actions
start_robot_state_publisher_cmd = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
name='robot_state_publisher',
output='screen',
parameters=[{
'use_sim_time': use_sim_time,
'robot_description':Command(['xacro',' ', xacro_path])
}]
)
control_launch_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(get_package_share_directory('cuorabot_control'), 'launch', 'control_launch.py'))
)
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_use_sim_time_cmd)
# Add any conditioned actions
ld.add_action(start_robot_state_publisher_cmd)
ld.add_action(control_launch_cmd)
return ld
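# Hedged usage note (added for illustration): with the workspace built and sourced, this file would
# typically be started with `ros2 launch cuorabot_bringup bringup_launch.py use_sim_time:=true`.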
``` |
{
"source": "Joe310/GenomeBuilder",
"score": 2
} |
#### File: Joe310/GenomeBuilder/chromosome_builder.py
```python
import time
import mmap
import random
import sys
import re
import argparse
import pickle
import shutil
import unittest
import os
from heapq import merge
class chromosome_builder():
def __init__(self, args=None):
if not args:
args = self.parse_system_args()
self._genome_id = args.id
self._chromosome_id = args.chr_id
self._chromosome_size = args.chr_size
if args.scale == 'k':
self._chromosome_size *= 1000
elif args.scale == 'm':
self._chromosome_size *= 1000000
elif args.scale == 'b':
self._chromosome_size *= 1000000000
if args.alu == 'y':
self._use_alu = True
self._base_alu = args.base_alu
else:
self._use_alu = False
if args.assembly == 'y':
self._use_assembly = True
else:
self._use_assembly = False
self._allele_base_list = ["C", "T", "G", "A"]
self._working_dir = "TMP_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id)
self._ref_genome_file = "ref_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._priv_genome_file = "private_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._reads_file = "reads_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._answer_file = "ans_" + str(self._genome_id) + "_chr_" + str(self._chromosome_id) + ".txt"
self._base_alu_file = "alu_" + str(self._genome_id) + ".txt"
self._overlap_buffer = 5
self._long_variant_rate = .1
self._snp_rate = 0.003
self._ref_str_rate = 0.000075
self._denovo_str_rate = 0.000025
self._str_min_copies = 5
self._str_max_copies = 50
self._str_min_length = 2
self._str_max_length = 5
self._str_mutation_amount = 2
self._ref_cnv_rate = 0.0001
self._denovo_cnv_rate = 0.00001
self._cnv_min_length = 20
self._cnv_max_length = 500
self._cnv_min_copies = 2
self._cnv_max_copies = 10
self._cnv_mutation_amount = 2
self._inv_rate = 0.00001
self._inv_short_min_length = 20
self._inv_short_max_length = 50
self._inv_long_min_length = 50
self._inv_long_max_length = 500
self._ins_rate = 0.0005
self._ins_short_min_length = 1
self._ins_short_max_length = 5
self._ins_long_min_length = 5
self._ins_long_max_length = 200
self._del_rate = 0.0005
self._del_short_min_length = 1
self._del_short_max_length = 5
self._del_long_min_length = 5
self._del_long_max_length = 200
self._alu_mutation_rate = 0.3
self._alu_min_length = 300
self._alu_max_length = 300
if self._use_alu:
self._ref_alu_rate = 0.075
self._denovo_alu_rate = 0.025
else:
self._ref_alu_rate = 0
self._denovo_alu_rate = 0
#reduce the max length of mutations for smaller chromosome sizes
if self._chromosome_size < 500000:
self._nbr_long_inv = 0
self._nbr_long_ins = 0
self._nbr_long_del = 0
self._str_max_copies = 20 #from 50
self._cnv_max_length = 50 #from 500
self._cnv_max_copies = 4 #from 10
else:
self._nbr_long_inv = max(4, int(self._inv_rate * self._chromosome_size * self._long_variant_rate))
self._nbr_long_ins = max(10, int(self._ins_rate * self._chromosome_size * self._long_variant_rate))
self._nbr_long_del = max(10, int(self._ins_rate * self._chromosome_size * self._long_variant_rate))
self._nbr_snp = int(self._snp_rate * self._chromosome_size)
self._nbr_denovo_str = int(self._denovo_str_rate * self._chromosome_size)
self._nbr_denovo_cnv = int(self._denovo_cnv_rate * self._chromosome_size)
self._nbr_ref_alu = int(self._ref_alu_rate * self._chromosome_size / self._alu_max_length)
self._nbr_denovo_alu = int(self._denovo_alu_rate * self._chromosome_size / self._alu_max_length)
self._nbr_ref_str = max(4, int(self._ref_str_rate * self._chromosome_size))
self._nbr_ref_cnv = max(4, int(self._ref_cnv_rate * self._chromosome_size))
self._nbr_short_inv = max(4, int(self._inv_rate * self._chromosome_size * (1 - self._long_variant_rate)))
self._nbr_short_ins = max(10, int(self._ins_rate * self._chromosome_size * (1 - self._long_variant_rate)))
self._nbr_short_del = max(10, int(self._ins_rate * self._chromosome_size * (1 - self._long_variant_rate)))
#mutation_list is used when generating the various mutations for the genomes
self._mutation_list = []
self._str_list = [] #used when mutating STRs in donor genome
self._cnv_list = [] #used when mutating CNVs in donor genome
self._cnv_dict = {}
self._sequencer_coverage = 30
self._sequencer_error_rate = 0.01
self._sequencer_garbage_rate = 0.1
self._sequencer_read_length = 50
self._sequencer_gap_min = 90
self._sequencer_gap_max = 110
def insert_newlines(self, sequence, line_size=80):
return '\n'.join(sequence[i:i+line_size] for i in range(0, len(sequence), line_size)) + '\n'
def write_genome_lines_to_file(self, genome, file_object):
genome = self.insert_newlines(genome, 80)
file_object.write(genome)
def parse_fasta(self, file_name, buffer_size=100000):
"""Gives buffered access to large fasta files so that the entire file doesn't need to be loaded into
memory all at once. Works as a generator, yielding a block of up to buffer_size with each call. For
general use, use:
for sequence in parse_fasta(file_name, buffer_size)
This yields sequences until the end of the file or a '>' character is found, at which point it yields None
Since None is yielded for '>', this can be used with multiple chromosomes separated by '>chr#' in a single
file. To do so, the generator should be initialized before iterating through chromosomes, then as each
chromosome is processed you can anticipate None will be yielded one time to mark the end of the current
chromosome
:param file_name: the file to read in
:param buffer_size: the number of characters to return for each iteration
:returns: Sequences of up to size buffer_size, or None if EOF or '>' is encountered
"""
with open(file_name) as fasta_file:
start_of_file = True
buffer = ""
while True:
for line in fasta_file:
#skip initial documentation lines
if start_of_file and '>' in line:
pass
#each chromosome is marked by a > line, so need to catch this switch
elif not start_of_file and '>' in line:
if len(buffer) == 0:
yield None
else:
#first yield the buffer, then yeild None to flag the end of the chromosome
yield buffer
buffer = ''
yield None
else:
if start_of_file:
start_of_file = False
buffer += line.strip()
if len(buffer) >= buffer_size:
yield buffer[:buffer_size]
buffer = buffer[buffer_size:]
#clear out any remaining buffer when the file is done
if len(buffer) > 0:
yield buffer
buffer = ''
else:
yield None
#this version may give a slight performance boost, but the bugs need to be worked out before it can be used
def parse_fasta_mmap(self, file_name, buffer_size=100000):
with open(file_name, encoding='utf-8') as fasta:
fasta_map = mmap.mmap(fasta.fileno(), 0, access=mmap.ACCESS_READ)
start_of_file = True
buffer = ""
for line in fasta_map:
line = line.decode('utf-8')
#skip initial documentation lines
if start_of_file and '>' in line:
pass
#each chromosome is marked by a > line, so need to catch this switch
elif not start_of_file and '>' in line:
if len(buffer) == 0:
yield None
else:
#first yield the buffer, then yield None to flag the end of the chromosome
yield buffer
buffer = ''
yield None
else:
if start_of_file:
start_of_file = False
buffer += line.strip()
if len(buffer) >= buffer_size:
yield buffer[:buffer_size]
buffer = buffer[buffer_size:]
#clear out any remaining buffer when the file is done
yield buffer
def compare_intervals(self, stt1, end1, stt2, end2, buffer_space=0):
"""
Compares two intervals represented by their start and end posns to check which precedes the other,
or if they overlap. Adds a buffer space around each interval that increases the region in which they
are considered to overlap.
Returns: -1 If interval 1 (stt1 and end1) precedes interval 2, 0 if they overlap, or 1 if interval 2
precedes interval 1
"""
stt1 -= buffer_space
end1 += buffer_space
stt2 -= buffer_space
end2 += buffer_space
if end1 < stt2:
return -1
elif end2 < stt1:
return 1
else:
return 0
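# Hedged examples (added for illustration):
# self.compare_intervals(0, 10, 20, 30) -> -1 (interval 1 precedes interval 2)
# self.compare_intervals(0, 10, 12, 30, buffer_space=5) -> 0 (the 5-position buffers make them overlap)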
def find_empty_ranges(self, range_size, nbr_posns, buffer_size):
"""
Searches for ranges of unused posns in the genome for use when introducing new structural variants.
Finds the number of posns given as a parameter and returns them as a list.
"""
posn_list = []
max_posn = self._chromosome_size - range_size - 1
#Will repeat until enough positions have been found
while len(posn_list) < nbr_posns:
raw_posn_list = []
for posn in posn_list:
raw_posn_list.append(posn)
posn_list = []
#1. Generate 150% needed number of random positions
for i in range(int(nbr_posns)):
raw_posn_list.append(random.randint(0, max_posn))
#2. Sort those positions and then check each to find whether they will overlap a preexisting
#structural variant.
raw_posn_list.sort()
overlap_idx = 0
#first check that there is no overlap among the generated positions
last_end = raw_posn_list[0] + range_size + (2 * buffer_size)
tmp_posn_list = []
tmp_posn_list.append(raw_posn_list[0])
for i in range(1, len(raw_posn_list)):
raw_posn = raw_posn_list[i]
if raw_posn > last_end:
tmp_posn_list.append(raw_posn)
last_end = raw_posn + range_size + (2 * buffer_size)
else:
new_posn = last_end + 1
new_end = new_posn + range_size + (2 * buffer_size)
if new_posn < max_posn:
if i == len(raw_posn_list) - 1 or new_end < raw_posn_list[i+1]:
tmp_posn_list.append(new_posn)
last_end = new_end
raw_posn_list = tmp_posn_list
tmp_posn_list = None
if len(self._mutation_list) == 0:
posn_list = raw_posn_list
else:
#then check that the remaining positions do not overlap existing structural variants
for i in range(len(raw_posn_list)):
raw_posn = raw_posn_list[i]
while overlap_idx < len(self._mutation_list):
ovlp_stt = self._mutation_list[overlap_idx][0]
ovlp_end = self._mutation_list[overlap_idx][1]
compare_result = self.compare_intervals(raw_posn, raw_posn+range_size,
ovlp_stt, ovlp_end,
buffer_size)
#no overlap
if compare_result == -1:
posn_list.append(raw_posn)
break
#attempt to shift this interval down, if that doesn't work, then
#ignore this position as it overlaps a preexisting position
elif compare_result == 0:
if overlap_idx > 0:
prev_end1 = self._mutation_list[overlap_idx-1][1]
prev_end2 = raw_posn_list[i-1]+range_size
prev_end = max(prev_end1, prev_end2)
new_posn = prev_end + (2*buffer_size)
if new_posn + range_size + (2 * buffer_size) < ovlp_stt:
posn_list.append(new_posn)
break
#no overlap was found, move to the next position in the mutation list to check for overlap
elif compare_result == 1:
if overlap_idx < len(self._mutation_list) - 1:
overlap_idx += 1
else:
posn_list.append(raw_posn)
break
#3. If there are too many positions, then randomly removes some to reduce list to proper size
while len(posn_list) > nbr_posns:
del posn_list[random.randint(0, len(posn_list)-1)]
return posn_list
def random_sequence(self, seq_len):
return "".join(random.choice(self._allele_base_list) for i in range(seq_len))
def delete_block(self, sequence, index, size):
"""deletes a block of items from a given sequence
:param sequence: sequence from which to delete items
:param index: the first position to delete
:param size: the total number of positions to delete, may extend beyond the end of the sequence
:returns: modified sequence with block deleted
"""
if index < 0 and index + size > -1:
return sequence[:index]
else:
return sequence[:index] + sequence[index + size:]
def insert_block(self, sequence, index, new_block):
"""inserts a block of items into a given sequence
:param sequence: sequence into which to insert items
:param index: the position before which to begin the insertion, to append to end use index = len(sequence)
:param new_block: the items to be inserted
:returns: modified sequence with block inserted
"""
return sequence[:index] + new_block + sequence[index:]
def overwrite_block(self, sequence, index, new_block):
"""overwrites a block of items in a given sequence
:param sequence: sequence in which to overwrite items
:param index: the position at which to begin overwriting, to append to end use index = len(sequence)
:param new_block: the items which will be written, may extend beyond end of original sequence
:returns: modified sequence with block overwritten
"""
if (index < 0 and index + len(new_block) > -1) or (index + len(new_block) > len(sequence) - 1):
return sequence[:index] + new_block
else:
return sequence[:index] + new_block + sequence[index + len(new_block):]
def invert_block(self, sequence, index, size):
"""inverts a block of items in a given sequence
:param sequence: sequence in which to invert items
:param index: the position at which to begin inversion
:param size: the number of items which will be inverted
:returns: modified sequence with block overwritten, the original block, and the inverted block
"""
if index < 0:
stt_idx = len(sequence) + index
else:
stt_idx = index
end_idx = min(stt_idx + size, len(sequence))
original_block = sequence[stt_idx:end_idx]
inverted_block = original_block[::-1]
sequence_with_inversion = self.overwrite_block(sequence, stt_idx, inverted_block)
return sequence_with_inversion, original_block, inverted_block
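# Hedged examples (added for illustration) for the block helpers above:
# self.delete_block("ABCDEF", 2, 2) -> "ABEF"
# self.insert_block("ABCDEF", 2, "xy") -> "ABxyCDEF"
# self.overwrite_block("ABCDEF", 2, "xy") -> "ABxyEF"
# self.invert_block("ABCDEF", 1, 3) -> ("ADCBEF", "BCD", "DCB")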
def generate_snp_allele(self, orig_allele):
allele_list = ["A", "C", "G", "T"]
allele_list.remove(orig_allele)
return random.choice(allele_list)
def generate_str_base(self, seq_len):
str_seq = ''
while len(str_seq) == 0:
str_seq = self.random_sequence(seq_len)
invalid = True
#ensure the sequence is not all the same allele
for idx in range(1, len(str_seq)):
if str_seq[idx - 1] != str_seq[idx]:
invalid = False
#if the first half of the sequence matches the second half, then consider it invalid
if not invalid and str_seq[:int(len(str_seq)/2)] == str_seq[int(len(str_seq)/2):]:
invalid = True
#if the sequence was invalid, then clear it out and try again
if invalid:
str_seq = ''
return str_seq
def generate_alu_sequence(self, length):
if not self._base_alu or len(self._base_alu) == 0:
raise Exception("No base Alu defined")
new_alu = self._base_alu
len_diff = abs(length - len(new_alu))
for i in range(length - len(new_alu)):
new_alu = self.insert_block(new_alu,
random.randint(0, len(new_alu)-1),
random.choice(self._allele_base_list))
for i in range(len(new_alu) - length):
new_alu = self.delete_block(new_alu,
random.randint(0, len(new_alu)-1),
1)
for k in range(int(len(new_alu)*self._alu_mutation_rate)-len_diff):
alu_posn = random.randint(0, len(new_alu)-1)
snp = self.generate_snp_allele(new_alu[alu_posn])
new_alu = self.overwrite_block(new_alu, alu_posn, snp)
return new_alu
def ranged_length_list(self, min_len, max_len, nbr_items):
"""generates a list of lengths that vary in size between min_len and max_len
:param min_len: smallest value to return
:param max_len: largest value to return, at least 2 items will have this value, must be >= min_len
:param nbr_items: the number of items which will be returned, must be > 0
:returns: a list of lengths with nbr_items items that vary from min_len to max_len
"""
if nbr_items < 1:
raise Exception("Minimum length for the list is 1")
if max_len < min_len:
raise Exception("max_len must be greater than or equal to min_len")
length_list = []
if nbr_items > 1:
max_items = max(2, int(nbr_items/10))
else:
max_items = 1
for i in range(max_items):
length_list.append(max_len)
if nbr_items > 2:
below_max = nbr_items - max_items
length_range = max_len - min_len
for i in range(below_max):
adj_value = random.randint(0, i) / below_max
length_list.append(int(min_len + (length_range * adj_value)))
return length_list
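# Hedged example (added for illustration): ranged_length_list(5, 50, 4) returns a 4-item list whose
# first two entries are 50 (the forced max_len copies) and whose remaining entries lie between
# min_len and max_len.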
def mutate_str(self):
temp_mut_list = []
for j in range(len(self._str_list)):
mutation_amount = random.randint(-self._str_mutation_amount, self._str_mutation_amount)
orig_str = self._str_list[j][1] * self._str_list[j][2]
new_str = self._str_list[j][1] * (self._str_list[j][2] + mutation_amount)
str_stt = self._str_list[j][0]
str_end = self._str_list[j][0] + len(orig_str)
#self._mutation_list.append([str_stt, str_end, 'MUT_STR', new_str])
#self._mutation_list.sort()
temp_mut_list.append([str_stt, str_end, 'MUT_STR', new_str])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def mutate_cnv(self):
new_posn_list = self.find_empty_ranges(self._cnv_max_length,
self._nbr_ref_cnv * self._cnv_mutation_amount,
self._overlap_buffer)
temp_mut_list = []
for i in range(self._nbr_ref_cnv):
cnv_id = self._cnv_list[i][0]
cnv_len = self._cnv_list[i][1]
cnv_posn_list = self._cnv_list[i][2]
mutation_amount = random.randint(-self._cnv_mutation_amount, self._cnv_mutation_amount)
if mutation_amount > 0:
for i in range(mutation_amount):
cnv_posn = new_posn_list.pop(random.randint(0, len(new_posn_list)-1))
cnv_posn_list.append(cnv_posn)
cnv_posn_list.sort()
if mutation_amount < 0:
for j in range(abs(mutation_amount)): #mutation_amount is negative here; range() of a negative number would never loop
next_posn = cnv_posn_list.pop(random.randint(0, len(cnv_posn_list)-1))
cnv_stt = next_posn
cnv_end = next_posn + cnv_len
self._mutation_list.append([cnv_stt, cnv_end, 'DEL_CNV', cnv_id])
for cnv_posn in cnv_posn_list:
#self._mutation_list.append([cnv_posn, cnv_posn + cnv_len, "DONOR_CNV", cnv_id])
#self._mutation_list.sort()
temp_mut_list.append([cnv_posn, cnv_posn + cnv_len, "DONOR_CNV", cnv_id])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_cnv(self, nbr_cnv, variant_tag):
if nbr_cnv < 1:
return
cnv_length_list = self.ranged_length_list(self._cnv_min_length, self._cnv_max_length, nbr_cnv)
cnv_posn_list = self.find_empty_ranges(self._cnv_max_length,
nbr_cnv * self._cnv_max_copies,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_cnv):
posn_list = []
seq_len = cnv_length_list[i]
nbr_copies = random.randint(self._cnv_min_copies, self._cnv_max_copies)
if 'REF' in variant_tag:
cnv_id = i
else:
cnv_id = i + self._nbr_ref_cnv
for j in range(nbr_copies):
cnv_posn = cnv_posn_list.pop(random.randint(0, len(cnv_posn_list)-1))
posn_list.append(cnv_posn)
#self._mutation_list.append([cnv_posn, cnv_posn+seq_len, variant_tag, cnv_id])
temp_mut_list.append([cnv_posn, cnv_posn+seq_len, variant_tag, cnv_id])
self._cnv_list.append([cnv_id, seq_len, posn_list])
#self._mutation_list.sort()
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_alu(self, nbr_alu, variant_tag):
if nbr_alu < 1:
return
alu_length_list = self.ranged_length_list(self._alu_min_length, self._alu_max_length, nbr_alu)
alu_posn_list = self.find_empty_ranges(self._alu_max_length,
nbr_alu,
self._overlap_buffer)
temp_mut_list = []
for j in range(nbr_alu):
alu_stt = alu_posn_list[j]
alu_len = alu_length_list.pop(random.randint(0, len(alu_length_list)-1))
#donor alus are inserted into the genome, so their end_posn in reference to ref genome is their start
if variant_tag == 'DONOR_ALU':
alu_end = alu_stt
else:
alu_end = alu_posn_list[j] + alu_len
#self._mutation_list.append([alu_stt, alu_end, variant_tag, alu_len])
#self._mutation_list.sort()
temp_mut_list.append([alu_stt, alu_end, variant_tag, alu_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_str(self, nbr_str, variant_tag):
if nbr_str < 1:
return
str_posn_list = self.find_empty_ranges(self._str_max_copies * self._str_max_length,
nbr_str,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_str):
seq_len = random.randint(self._str_min_length, self._str_max_length)
nbr_copies = random.randint(self._str_min_copies, self._str_max_copies)
str_seq = self.generate_str_base(seq_len)
str_posn = str_posn_list.pop(random.randint(0, len(str_posn_list)-1))
#self._mutation_list.append([str_posn, str_posn + (seq_len*nbr_copies), variant_tag, str_seq, nbr_copies])
self._str_list.append([str_posn, str_seq, nbr_copies])
#self._mutation_list.sort()
temp_mut_list.append([str_posn, str_posn + (seq_len*nbr_copies), variant_tag, str_seq, nbr_copies])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_inversions(self, nbr_inv, min_len, max_len):
if nbr_inv < 1:
return
inv_length_list = self.ranged_length_list(min_len, max_len, nbr_inv)
inv_posn_list = self.find_empty_ranges(max_len,
nbr_inv,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_inv):
inv_stt = inv_posn_list[i]
inv_len = inv_length_list.pop(random.randint(0, len(inv_length_list)-1))
inv_end = inv_stt + inv_len
#self._mutation_list.append([inv_stt, inv_end, 'INV', inv_len])
#self._mutation_list.sort()
temp_mut_list.append([inv_stt, inv_end, 'INV', inv_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_insertions(self, nbr_ins, min_len, max_len):
if nbr_ins < 1:
return
ins_length_list = self.ranged_length_list(min_len, max_len, nbr_ins)
ins_posn_list = self.find_empty_ranges(max_len,
nbr_ins,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_ins):
ins_stt = ins_posn_list[i]
ins_len = ins_length_list.pop(random.randint(0, len(ins_length_list)-1))
ins_end = ins_stt
#self._mutation_list.append([ins_stt, ins_end, 'INS', ins_len])
#self._mutation_list.sort()
temp_mut_list.append([ins_stt, ins_end, 'INS', ins_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_deletions(self, nbr_del, min_len, max_len):
if nbr_del < 1:
return
del_length_list = self.ranged_length_list(min_len, max_len, nbr_del)
del_posn_list = self.find_empty_ranges(max_len,
nbr_del,
self._overlap_buffer)
temp_mut_list = []
for i in range(nbr_del):
del_stt = del_posn_list[i]
del_len = del_length_list.pop(random.randint(0, len(del_length_list)-1))
del_end = del_stt + del_len
#self._mutation_list.append([del_stt, del_end, 'DEL', del_len])
#self._mutation_list.sort()
temp_mut_list.append([del_stt, del_end, 'DEL', del_len])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
def allocate_snps(self):
if self._nbr_snp < 1:
return
snp_posn_list = self.find_empty_ranges(1, self._nbr_snp, 0)
temp_mut_list = []
for i in range(self._nbr_snp):
snp_stt = snp_posn_list[i]
snp_end = snp_stt + 1
temp_mut_list.append([snp_stt, snp_end, 'SNP', 1])
#self._mutation_list.append([snp_stt, snp_end, 'SNP', 1])
temp_mut_list.sort()
self._mutation_list = list(merge(self._mutation_list, temp_mut_list))
#self._mutation_list.sort()
def generate_ref_genome(self):
"""
Generates a random reference genome with the specified number of chromosomes,
each of length length_chromosome
"""
if self._use_alu:
if not self._base_alu or len(self._base_alu) == 0:
raise Exception("No base Alu defined")
if not os.path.exists(self._base_alu_file):
with open(self._base_alu_file, "w") as alu_file:
alu_file.write(">" + str(self._genome_id) + "\n")
self.write_genome_lines_to_file(self._base_alu, alu_file)
with open(self._ref_genome_file, "w") as ref_file:
if not os.path.exists(self._working_dir):
os.makedirs(self._working_dir)
ref_file.write(">" + str(self._genome_id))
ref_file.write("\n>chr" + str(self._chromosome_id) + "\n")
self._mutation_list = []
if self._use_alu:
print('REF GENOME: Allocating ' + str(self._nbr_ref_alu) + ' Alus')
self.allocate_alu(self._nbr_ref_alu, "REF_ALU")
print('REF GENOME: Allocating ' + str(self._nbr_ref_cnv) + ' CNVs')
self.allocate_cnv(self._nbr_ref_cnv, "REF_CNV")
print('REF GENOME: Allocating ' + str(self._nbr_ref_str) + ' STRs')
self.allocate_str(self._nbr_ref_str, "REF_STR")
buffer_adj = 0
buffer = ''
mut_idx = 0
mut_max_idx = len(self._mutation_list) - 1
buffer_size = 80
count = 0
if len(self._mutation_list) == 0:
mut_idx = -1
while count < self._chromosome_size:
if len(buffer) > buffer_size or mut_idx == -1:
if mut_idx == -1:
skip_distance = self._chromosome_size - count
buffer += self.random_sequence(skip_distance)
count += skip_distance
buffer_size = len(buffer)
self.write_genome_lines_to_file(buffer, ref_file)
else:
self.write_genome_lines_to_file(buffer[:buffer_size], ref_file)
buffer = buffer[buffer_size:]
buffer_adj += buffer_size
elif len(self._mutation_list) > 0 and count < self._mutation_list[mut_idx][0]:
skip_distance = self._mutation_list[mut_idx][0] - count
buffer += self.random_sequence(skip_distance)
count += skip_distance
elif mut_idx != -1:
mut_type = self._mutation_list[mut_idx][2]
if mut_type == 'REF_STR':
str_seq = self._mutation_list[mut_idx][3]
nbr_copies = self._mutation_list[mut_idx][4]
str_seq = str_seq * nbr_copies
#pads either side of str with non matching allele to remove ambiguity
if buffer[-1] == str_seq[-1]:
buffer = buffer[:-1] + self.generate_snp_allele(buffer[-1])
right_padding = self.generate_snp_allele(str_seq[0])
buffer += str_seq + right_padding
count += len(str_seq) + 1
elif mut_type == 'REF_CNV':
cnv_stt = self._mutation_list[mut_idx][0]
cnv_end = self._mutation_list[mut_idx][1]
cnv_len = cnv_end - cnv_stt
cnv_id = self._mutation_list[mut_idx][3]
if cnv_id in self._cnv_dict:
cnv_seq = self._cnv_dict[cnv_id]
else:
cnv_seq = self.random_sequence(cnv_len)
self._cnv_dict[cnv_id] = cnv_seq
buffer += cnv_seq
count += cnv_len
elif mut_type == 'REF_ALU':
alu_len = self._mutation_list[mut_idx][3]
alu_seq = self.generate_alu_sequence(alu_len)
buffer += alu_seq
count += alu_len
if mut_idx < mut_max_idx:
mut_idx += 1
else:
mut_idx = -1 #flags when all mutations have been seen
def generate_donor_genome(self):
with open(self._priv_genome_file, "w") as donor_genome_file:
donor_genome_file.write(">" + str(self._genome_id) + "\n")
donor_genome_file.write(">chr" + str(self._chromosome_id) + "\n")
buffer_size = 100000
fasta_parser = self.parse_fasta(self._ref_genome_file, buffer_size=buffer_size)
#plan out all mutation ranges in reference to the ref genome, storing them in the mutation_list
print('DONOR GENOME: Mutating existing STRs')
self.mutate_str()
print('DONOR GENOME: Mutating existing CNVs')
self.mutate_cnv()
if self._use_alu:
print('DONOR GENOME: Allocating ' + str(self._nbr_denovo_alu) + ' Alus')
self.allocate_alu(self._nbr_denovo_alu, "DONOR_ALU")
print('DONOR GENOME: Allocating ' + str(self._nbr_denovo_cnv) + ' CNVs')
self.allocate_cnv(self._nbr_denovo_cnv, "DONOR_CNV")
print('DONOR GENOME: Allocating ' + str(self._nbr_denovo_str) + ' STRs')
self.allocate_str(self._nbr_denovo_str, "DONOR_STR")
print('DONOR GENOME: Allocating ' + str(self._nbr_long_inv) + ' long inversions')
self.allocate_inversions(self._nbr_long_inv, self._inv_long_min_length, self._inv_long_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_long_ins) + ' long insertions')
self.allocate_insertions(self._nbr_long_ins, self._ins_long_min_length, self._ins_long_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_long_del) + ' long deletions')
self.allocate_deletions(self._nbr_long_del, self._del_long_min_length, self._del_long_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_short_inv) + ' short inversions')
self.allocate_inversions(self._nbr_short_inv, self._inv_short_min_length, self._inv_short_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_short_ins) + ' short insertions')
self.allocate_insertions(self._nbr_short_ins, self._ins_short_min_length, self._ins_short_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_short_del) + ' short deletions')
self.allocate_deletions(self._nbr_short_del, self._del_short_min_length, self._del_short_max_length)
print('DONOR GENOME: Allocating ' + str(self._nbr_snp) + ' SNPs')
self.allocate_snps()
variant_types = ['STR','CNV','ALU','INV','INS','DEL','SNP']
answer_files = {}
for variant in variant_types:
answer_files[variant] = open(os.path.join(self._working_dir, variant + '_ANS_FILE'), 'w')
#read in the reference genome, writing out the donor genome out to file using
#the mutations from the mutation list
with open(self._priv_genome_file, "a") as donor_genome_file:
ref_genome_idx = 0
buffer_adjust = 0
donor_genome = ''
ref_genome = ''
if len(self._mutation_list) > 0:
mut_idx = 0
else:
mut_idx = -1
mut_max_idx = len(self._mutation_list) - 1
while ref_genome_idx + buffer_adjust < self._chromosome_size:
ref_genome = ref_genome[ref_genome_idx:]
buffer_adjust += ref_genome_idx
ref_genome_idx = 0
next_segment = next(fasta_parser)
if next_segment:
ref_genome += next_segment
if mut_idx == -1:
donor_genome += ref_genome[ref_genome_idx:]
ref_genome_idx += len(ref_genome) - ref_genome_idx
elif ref_genome_idx + buffer_adjust != self._mutation_list[mut_idx][0]:
donor_genome += ref_genome[ref_genome_idx:self._mutation_list[mut_idx][0] - buffer_adjust]
ref_genome_idx = self._mutation_list[mut_idx][0] - buffer_adjust
else:
if len(donor_genome) > buffer_size:
self.write_genome_lines_to_file(donor_genome[:buffer_size], donor_genome_file)
donor_genome = donor_genome[buffer_size:]
mut_type = self._mutation_list[mut_idx][2]
ref_genome_stt = self._mutation_list[mut_idx][0] - buffer_adjust
ref_genome_end = self._mutation_list[mut_idx][1] - buffer_adjust
ref_genome_idx = ref_genome_end
if mut_type == 'SNP':
orig_allele = ref_genome[ref_genome_stt]
snp_allele = self.generate_snp_allele(orig_allele)
donor_genome += snp_allele
answer_files['SNP'].write('\n' + str(self._chromosome_id) + ',' + orig_allele +
',' + snp_allele + ',' + str(self._mutation_list[mut_idx][0]))
#the mutation list contains both the original str and the mutated str, so when
#one is encountered the other needs to be pulled and dealt with at the same
#time
elif mut_type == 'MUT_STR':
new_str = self._mutation_list[mut_idx][3]
mut_idx += 1
donor_genome += new_str
answer_files['STR'].write('\n' + str(self._chromosome_id) + ',' + new_str +
',' + str(self._mutation_list[mut_idx][0]))
elif mut_type == 'DONOR_STR':
str_seq = self._mutation_list[mut_idx][3]
nbr_copies = self._mutation_list[mut_idx][4]
str_seq = str_seq * nbr_copies
#pads either side of str with non matching allele to remove ambiguity
left_padding = self.generate_snp_allele(str_seq[-1])
right_padding = self.generate_snp_allele(str_seq[0])
padded_str_seq = left_padding + str_seq + right_padding
donor_genome += padded_str_seq
answer_files['STR'].write('\n' + str(self._chromosome_id) + ',' + str_seq +
',' + str(self._mutation_list[mut_idx][0] + 1))
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + padded_str_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'REF_CNV':
cnv_id = self._mutation_list[mut_idx][3]
cnv_seq = ref_genome[ref_genome_stt:ref_genome_end]
donor_genome += cnv_seq
answer_files['CNV'].write('\n' + str(self._chromosome_id) + ',' + str(cnv_id) +
',' + str(self._mutation_list[mut_idx][0]) + ',' + cnv_seq)
#assumes DEL_CNV is always followed by an entry for the REF_CNV, so the mut_idx is incremented
elif mut_type == 'DEL_CNV':
mut_idx += 1
del_len = self._mutation_list[mut_idx][1] - self._mutation_list[mut_idx][0]
del_seq = ref_genome[ref_genome_stt:ref_genome_end]
answer_files['DEL'].write('\n' + str(self._chromosome_id) + ',' + del_seq + ',' +
str(self._mutation_list[mut_idx][0]))
#every non deleted CNV will have a DONOR_CNV entry (some will only have DONOR_CNV, no REF_CNV)
elif mut_type == 'DONOR_CNV':
cnv_stt = self._mutation_list[mut_idx][0]
cnv_end = self._mutation_list[mut_idx][1]
cnv_len = cnv_end - cnv_stt
cnv_id = self._mutation_list[mut_idx][3]
if cnv_id in self._cnv_dict:
cnv_seq = self._cnv_dict[cnv_id]
else:
cnv_seq = ref_genome[ref_genome_stt:ref_genome_end]
self._cnv_dict[cnv_id] = cnv_seq
donor_genome += cnv_seq
if mut_idx < mut_max_idx and self._mutation_list[mut_idx+1][2] == 'REF_CNV' and \
self._mutation_list[mut_idx][0] == self._mutation_list[mut_idx+1][0]:
mut_idx += 1
else:
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + cnv_seq + ',' +
str(self._mutation_list[mut_idx][0]))
answer_files['CNV'].write('\n' + str(self._chromosome_id) + ',' + str(cnv_id) +
',' + str(self._mutation_list[mut_idx][0]) + ',' + cnv_seq)
elif mut_type == 'REF_ALU':
alu_stt = self._mutation_list[mut_idx][0]
alu_end = self._mutation_list[mut_idx][1]
alu_len = alu_end - alu_stt
alu_seq = ref_genome[ref_genome_stt:ref_genome_end]
donor_genome += alu_seq
answer_files['ALU'].write('\n' + str(self._chromosome_id) + ',' + alu_seq + ',' +
str(self._mutation_list[mut_idx][0]))
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + alu_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'DONOR_ALU':
alu_len = self._mutation_list[mut_idx][3]
alu_seq = self.generate_alu_sequence(alu_len)
donor_genome += alu_seq
answer_files['ALU'].write('\n' + str(self._chromosome_id) + ',' + alu_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'INV':
orig_block = ref_genome[ref_genome_stt:ref_genome_end]
inv_block = orig_block[::-1]
donor_genome += inv_block
answer_files['INV'].write('\n' + str(self._chromosome_id) + ',' + orig_block + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'INS':
ins_len = self._mutation_list[mut_idx][3]
ins_seq = self.random_sequence(ins_len)
donor_genome += ins_seq
answer_files['INS'].write('\n' + str(self._chromosome_id) + ',' + ins_seq + ',' +
str(self._mutation_list[mut_idx][0]))
elif mut_type == 'DEL':
del_seq = ref_genome[ref_genome_stt:ref_genome_end]
answer_files['DEL'].write('\n' + str(self._chromosome_id) + ',' + del_seq + ',' +
str(self._mutation_list[mut_idx][0]))
if mut_idx < mut_max_idx:
mut_idx += 1
else:
mut_idx = -1 #flags when all mutations have been seen
writeable = int(len(donor_genome) / 80)
if writeable >= 1:
self.write_genome_lines_to_file(donor_genome[:writeable*80], donor_genome_file)
donor_genome = donor_genome[writeable*80:]
self.write_genome_lines_to_file(donor_genome, donor_genome_file)
for key in answer_files:
answer_files[key].close()
with open(self._answer_file, 'w') as main_ans:
main_ans.write(">" + str(self._genome_id) + "\n")
main_ans.write(">chr" + str(self._chromosome_id))
for variant in variant_types:
with open(os.path.join(self._working_dir, variant + '_ANS_FILE'), 'r') as temp_file:
if variant == 'CNV':
main_ans.write("\n>CNV")
cnv_dict = {}
for line in temp_file:
line = line.strip()
if line:
line_array = line.split(',')
cnv_id = line_array[1]
cnv_posn = line_array[2]
cnv_seq = line_array[3]
if cnv_id in cnv_dict:
cnv_seq, cnv_posn_list = cnv_dict[cnv_id]
else:
cnv_posn_list = []
cnv_posn_list.append(cnv_posn)
cnv_dict[cnv_id] = (cnv_seq, cnv_posn_list)
cnv_list = []
for key in cnv_dict:
cnv_seq, cnv_posn_list = cnv_dict[key]
cnv_posn_list.sort()
cnv_list.append([cnv_seq, cnv_posn_list])
cnv_list.sort(key=lambda l: l[1][0])
for cnv_seq, cnv_posn_list in cnv_list:
main_ans.write('\n' + str(self._chromosome_id) + ',' + cnv_seq)
for posn in cnv_posn_list:
main_ans.write(',' + str(posn))
else:
main_ans.write("\n>" + variant)
for line in temp_file:
line = line.strip()
if line:
main_ans.write('\n' + line)
os.remove(os.path.join(self._working_dir, variant + '_ANS_FILE'))
shutil.rmtree(self._working_dir)
def add_sequencer_errors(self, read_sequence):
error_list = []
for i in range(len(read_sequence)):
if random.random() < self._sequencer_error_rate:
error_list.append(i)
for i in error_list:
orig_allele = read_sequence[i]
error_allele = self.generate_snp_allele(orig_allele)
read_sequence = self.overwrite_block(read_sequence, i, error_allele)
return read_sequence
def create_read_pair(self, donor_genome, left_stt, right_stt):
left_read = donor_genome[left_stt:left_stt+self._sequencer_read_length]
right_read = donor_genome[right_stt:right_stt+self._sequencer_read_length]
#only one is flipped so they are always in opposing directions with their
#overall direction
if random.random() > .5:
right_read = right_read[::-1]
else:
left_read = left_read[::-1]
return left_read, right_read
def generate_reads(self):
with open(self._reads_file, "w") as reads_file:
reads_file.write(">" + str(self._genome_id))
with open(self._priv_genome_file, "r") as donor_genome_file:
# skip the first two '>' labels in the donor genome file
donor_genome_file.readline()
donor_genome_file.readline()
temp_file_name_list = []
donor_genome = ""
for line in donor_genome_file:
if ">" in line:
break
donor_genome += str(line).strip()
nbr_reads = int(self._chromosome_size * self._sequencer_coverage / self._sequencer_read_length)
nbr_pairs = int(nbr_reads / 2)
write_list = []
for i in range(nbr_pairs):
if random.random() < self._sequencer_garbage_rate:
left_read = self.random_sequence(self._sequencer_read_length)
right_read = self.random_sequence(self._sequencer_read_length)
else:
gap_len = random.randint(self._sequencer_gap_min, self._sequencer_gap_max)
total_len = 2 * self._sequencer_read_length + gap_len
left_stt = random.randint(0, self._chromosome_size - total_len - 1)
right_stt = left_stt + self._sequencer_read_length + gap_len
left_read, right_read = self.create_read_pair(donor_genome, left_stt, right_stt)
left_read = self.add_sequencer_errors(left_read)
right_read = self.add_sequencer_errors(right_read)
reads_file.write('\n' + left_read + ',' + right_read)
def parse_system_args(self):
parser = argparse.ArgumentParser(
description="This script generates a reference and donor genome as a set "
"of files. The files can be used for various computational "
"genetics purposes. The following files are created: 1) "
"reference genome \'ref_*.txt\' 2) mutated donor genome "
"\'private_*.txt\' 3) paired-end reads \'reads_*.txt\'"
"from donor genome 4) mutation answer key \'ans_*.txt\'"
)
parser.add_argument(
"--id",
type=str,
default='test',
help="The name or ID of this genome for identification purposes. The "
"genome id will be reflected in the generated file names."
)
parser.add_argument(
"--chr_id",
type=int,
default='1',
help="The id number for this chromosome, defaults to 1."
)
parser.add_argument(
"--chr_size",
type=int,
default='10',
help="The size of each chromosome, multiplied by -s (scaling factor). Change "
"scale with -s option"
)
parser.add_argument(
"-s", "--scale",
type=str,
choices=["k", "m", "b"],
default="k",
help="the amount to scale chromosome-size by. k: thousands, m: millions,"
" b: billions. By default, scale is k (thousands)."
)
parser.add_argument(
"--alu",
type=str,
choices=["y", "n"],
default="n",
help="whether to include Alus in the genome."
)
parser.add_argument(
"--assembly",
type=str,
choices=["y", "n"],
default="n",
help="whether to generate output for assembly (no reference genome)."
)
return parser.parse_args()
class TestClass(unittest.TestCase):
def setUp(self):
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = 10
args.scale = 'k'
args.alu = 'y'
args.assembly = 'n'
args.base_alu = random_sequence(300)
self.gen = chromosome_builder(args)
def test_compare_intervals(self):
self.assertEqual(0, self.gen.compare_intervals(0, 1, 1, 2, buffer_space=0))
self.assertEqual(-1, self.gen.compare_intervals(0, 0, 1, 10, buffer_space=0))
self.assertEqual(1, self.gen.compare_intervals(11, 12, 1, 10, buffer_space=0))
self.assertEqual(0, self.gen.compare_intervals(0, 0, 1, 10, buffer_space=1))
self.assertEqual(0, self.gen.compare_intervals(1, 10, 0, 0, buffer_space=1))
self.assertEqual(0, self.gen.compare_intervals(11, 12, 1, 10, buffer_space=1))
self.assertEqual(0, self.gen.compare_intervals(0, 4, 1, 2, buffer_space=0))
self.assertEqual(0, self.gen.compare_intervals(0, 4, 1, 2, buffer_space=5))
self.assertEqual(0, self.gen.compare_intervals(1, 4, 0, 2, buffer_space=0))
self.assertEqual(0, self.gen.compare_intervals(1, 4, 1, 4, buffer_space=0))
def test_find_empty_ranges(self):
self.gen._mutation_list.append([1000, 10000, 'MUT_STR', 9000])
posn_list = self.gen.find_empty_ranges(10, 25, 5)
self.assertEqual(len(posn_list), 25)
for posn in posn_list:
self.assertTrue(posn < 995 and posn >= 0)
self.gen._mutation_list = []
self.gen._mutation_list.append([0, 9000, 'MUT_STR', 9000])
posn_list = self.gen.find_empty_ranges(10, 25, 5)
self.assertEqual(len(posn_list), 25)
for posn in posn_list:
self.assertTrue(posn <= 10000 and posn > 9005)
self.gen._mutation_list = []
self.gen._mutation_list.append([9000, 10000, 'MUT_STR', 1000])
self.gen._mutation_list.append([0, 1000, 'MUT_STR', 1000])
self.gen._mutation_list.sort()
posn_list = self.gen.find_empty_ranges(10, 100, 5)
self.assertEqual(len(posn_list), 100)
for posn in posn_list:
self.assertTrue(posn < 8995 and posn > 1005)
self.gen._mutation_list = []
self.gen._mutation_list.append([0, 1000, 'MUT_STR', 1000])
self.gen._mutation_list.append([1100, 2000, 'MUT_STR', 900])
self.gen._mutation_list.append([4000, 5000, 'MUT_STR', 1000])
self.gen._mutation_list.append([7000, 8000, 'MUT_STR', 1000])
self.gen._mutation_list.append([9000, 10000, 'MUT_STR', 1000])
self.gen._mutation_list.sort()
posn_list = self.gen.find_empty_ranges(10, 25, 5)
self.assertEqual(len(posn_list), 25)
for posn in posn_list:
self.assertTrue( (posn > 1005 and posn < 1095) or
(posn > 2005 and posn < 3995) or
(posn > 5005 and posn < 6995) or
(posn > 8005 and posn < 8995) )
self.gen._mutation_list = []
posn_list = self.gen.find_empty_ranges(50, 4, 5)
for posn in posn_list:
self.assertTrue(posn > 0 and posn < 10000)
def test_random_sequence(self):
rand_seq = self.gen.random_sequence(10)
self.assertEqual(len(rand_seq), 10)
for allele in rand_seq:
self.assertTrue(allele in ['A','C','G','T'])
def test_delete_block(self):
sequence = 'THIS IS A TEST SEQUENCE'
sequence = self.gen.delete_block(sequence, 5, 3)
self.assertEqual(sequence, 'THIS A TEST SEQUENCE')
sequence = self.gen.delete_block(sequence, 0, 1)
self.assertEqual(sequence, 'HIS A TEST SEQUENCE')
sequence = self.gen.delete_block(sequence, -1, 1)
self.assertEqual(sequence, 'HIS A TEST SEQUENC')
sequence = self.gen.delete_block(sequence, -2, 1)
self.assertEqual(sequence, 'HIS A TEST SEQUEC')
sequence = self.gen.delete_block(sequence, -2, 3)
self.assertEqual(sequence, 'HIS A TEST SEQU')
def test_insert_block(self):
sequence = 'HIS A TEST SEQUE'
sequence = self.gen.insert_block(sequence, -1, 'EC')
self.assertEqual(sequence, 'HIS A TEST SEQUECE')
sequence = self.gen.insert_block(sequence, -2, 'N')
self.assertEqual(sequence, 'HIS A TEST SEQUENCE')
sequence = self.gen.insert_block(sequence, 0, 'T')
self.assertEqual(sequence, 'THIS A TEST SEQUENCE')
sequence = self.gen.insert_block(sequence, 5, 'IS ')
self.assertEqual(sequence, 'THIS IS A TEST SEQUENCE')
sequence = self.gen.insert_block(sequence, len(sequence), '!')
self.assertEqual(sequence, 'THIS IS A TEST SEQUENCE!')
def test_overwrite_block(self):
sequence = 'THIS IS A TEST SEQUENCE!'
sequence = self.gen.overwrite_block(sequence, 2, 'AT')
self.assertEqual(sequence, 'THAT IS A TEST SEQUENCE!')
sequence = self.gen.overwrite_block(sequence, 0, 'W')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE!')
sequence = self.gen.overwrite_block(sequence, -1, '?')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE?')
sequence = self.gen.overwrite_block(sequence, -1, '?!?')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE?!?')
sequence = self.gen.overwrite_block(sequence, len(sequence), '!')
self.assertEqual(sequence, 'WHAT IS A TEST SEQUENCE?!?!')
def test_invert_block(self):
sequence = 'THIS IS A TEST SEQUENCE'
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, 0, 1)
self.assertEqual(orig_block, 'T')
self.assertEqual(inverted_block, 'T')
self.assertEqual(sequence, 'THIS IS A TEST SEQUENCE')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, 0, 2)
self.assertEqual(orig_block, 'TH')
self.assertEqual(inverted_block, 'HT')
self.assertEqual(sequence, 'HTIS IS A TEST SEQUENCE')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, -1, 1)
self.assertEqual(orig_block, 'E')
self.assertEqual(inverted_block, 'E')
self.assertEqual(sequence, 'HTIS IS A TEST SEQUENCE')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, -2, 2)
self.assertEqual(orig_block, 'CE')
self.assertEqual(inverted_block, 'EC')
self.assertEqual(sequence, 'HTIS IS A TEST SEQUENEC')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, 5, 4)
self.assertEqual(orig_block, 'IS A')
self.assertEqual(inverted_block, 'A SI')
self.assertEqual(sequence, 'HTIS A SI TEST SEQUENEC')
sequence, orig_block, inverted_block = self.gen.invert_block(sequence, len(sequence) - 2, 2)
self.assertEqual(orig_block, 'EC')
self.assertEqual(inverted_block, 'CE')
self.assertEqual(sequence, 'HTIS A SI TEST SEQUENCE')
def test_generate_snp_allele(self):
snp_allele = self.gen.generate_snp_allele('A')
self.assertTrue(snp_allele in ['T','G','C'])
snp_allele = self.gen.generate_snp_allele('T')
self.assertTrue(snp_allele in ['A','G','C'])
snp_allele = self.gen.generate_snp_allele('G')
self.assertTrue(snp_allele in ['T','A','C'])
snp_allele = self.gen.generate_snp_allele('C')
self.assertTrue(snp_allele in ['T','G','A'])
def test_generate_str_base(self):
str_base = self.gen.generate_str_base(2)
self.assertTrue(str_base[0] != str_base[1])
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
str_base = self.gen.generate_str_base(3)
self.assertTrue((str_base[0] != str_base[1]) or
(str_base[1] != str_base[2]) or
(str_base[0] != str_base[2]) )
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
str_base = self.gen.generate_str_base(4)
self.assertEqual(len(str_base), 4)
self.assertTrue(str_base[:2] != str_base[2:])
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
str_base = self.gen.generate_str_base(5)
self.assertEqual(len(str_base), 5)
for allele in str_base:
self.assertTrue(allele in ['T','A','C','G'])
def test_generate_alu_sequence(self):
alu_seq = self.gen.generate_alu_sequence(300)
self.assertTrue(len(alu_seq) == 300)
self.assertTrue(alu_seq != self.gen._base_alu)
for allele in alu_seq:
self.assertTrue(allele in ['T','A','C','G'])
alu_seq = self.gen.generate_alu_sequence(295)
self.assertTrue(len(alu_seq) == 295)
self.assertTrue(alu_seq != self.gen._base_alu[:295])
self.assertTrue(alu_seq != self.gen._base_alu[4:])
for allele in alu_seq:
self.assertTrue(allele in ['T','A','C','G'])
alu_seq = self.gen.generate_alu_sequence(305)
self.assertTrue(len(alu_seq) == 305)
self.assertTrue(alu_seq != self.gen._base_alu)
for allele in alu_seq:
self.assertTrue(allele in ['T','A','C','G'])
def test_ranged_length_list(self):
self.assertRaises(Exception, self.gen.ranged_length_list, 10, 10, 0)
self.assertRaises(Exception, self.gen.ranged_length_list, 1, 0, 1)
length_list = self.gen.ranged_length_list(10, 10, 10)
self.assertEqual(10, len(length_list))
for item in length_list:
self.assertEqual(item, 10)
length_list = self.gen.ranged_length_list(10, 20, 10)
self.assertEqual(10, len(length_list))
max_count = 0
for item in length_list:
if item == 20:
max_count += 1
self.assertTrue(item <= 20 and item >= 10)
self.assertEqual(max_count, 2)
length_list = self.gen.ranged_length_list(10, 20, 1)
self.assertEqual(1, len(length_list))
self.assertTrue(length_list[0] == 20)
length_list = self.gen.ranged_length_list(10, 20, 2)
self.assertEqual(2, len(length_list))
self.assertTrue(length_list[0] == 20 and length_list[1] == 20)
length_list = self.gen.ranged_length_list(10, 20, 3)
self.assertEqual(3, len(length_list))
self.assertTrue(length_list[0] == 20 and length_list[1] == 20 and length_list[2] == 10)
def test_write_genome_lines_to_file(self):
length_list = [0,1,79,80,81]
for i in length_list:
with open('test_file', 'w') as test_file:
self.gen.write_genome_lines_to_file(self.gen._base_alu, test_file)
with open('test_file', 'r') as test_file:
base_alu = ''
for line in test_file:
base_alu += str(line).strip()
self.assertEqual(base_alu, self.gen._base_alu)
os.remove('test_file')
def test_parse_fasta(self):
nbr_chr_list = [1,2,3]
length_list = [1,79,80,81]
for nbr_chr in nbr_chr_list:
for next_len in length_list:
file_name = 'test_file_' + str(nbr_chr) + '_' + str(next_len)
with open(file_name, 'w') as test_file:
test_file.write('>test')
for chr in range(1, nbr_chr + 1):
test_file.write('\n>chr' + str(chr) + '\n')
self.gen.write_genome_lines_to_file(self.gen._base_alu, test_file)
for sequence in self.gen.parse_fasta(file_name):
if sequence:
base_alu = sequence
self.assertEqual(base_alu, self.gen._base_alu)
else:
break
os.remove(file_name)
def test_create_read_pair(self):
donor_genome = self.gen.random_sequence(200)
left_read, right_read = self.gen.create_read_pair(donor_genome, 0, 0)
self.assertEqual(left_read, right_read[::-1])
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
left_stt = random.randint(0, 200 - self.gen._sequencer_read_length - 1)
right_stt = random.randint(0, 200 - self.gen._sequencer_read_length - 1)
left_read, right_read = self.gen.create_read_pair(donor_genome, left_stt, right_stt)
self.assertEqual(len(left_read), len(right_read))
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
left_stt = 0
right_stt = 200 - self.gen._sequencer_read_length - 1
left_read, right_read = self.gen.create_read_pair(donor_genome, left_stt, right_stt)
self.assertEqual(len(left_read), len(right_read))
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
def test_add_sequencer_errors(self):
error_found = False
donor_genome = self.gen.random_sequence(200)
left_read, right_read = self.gen.create_read_pair(donor_genome, 0, 0)
self.assertEqual(left_read, right_read[::-1])
self.assertTrue( (left_read in donor_genome and right_read[::-1] in donor_genome)
or
(right_read in donor_genome and left_read[::-1] in donor_genome) )
for i in range(25):
left_read_w_error = self.gen.add_sequencer_errors(left_read)
right_read_w_error = self.gen.add_sequencer_errors(right_read)
if left_read != left_read_w_error or right_read != right_read_w_error:
error_found = True
self.assertTrue(error_found)
def test_generate_ref_genome(self):
for alu in ['y', 'n']:
for test_args in [[10, 'k'], [100, 'k']]:
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = test_args[0]
args.scale = test_args[1]
args.alu = alu
args.assembly = 'n'
args.base_alu = random_sequence(300)
self.gen = chromosome_builder(args)
self.gen._alu_min_length = 300
self.gen._alu_max_length = 300
self.gen._alu_mutation_rate = 0.3
self.gen.generate_ref_genome()
ref_genome = next(self.gen.parse_fasta(self.gen._ref_genome_file))
cnv_dict = {}
cnv_count = 0
str_count = 0
for mutation in self.gen._mutation_list:
if mutation[2] == 'REF_STR':
str_count += 1
self.assertTrue(ref_genome[mutation[0]:mutation[1]] == mutation[3]*mutation[4])
elif mutation[2] == 'REF_CNV':
if mutation[3] in cnv_dict:
self.assertTrue(cnv_dict[mutation[3]] == ref_genome[mutation[0]:mutation[1]])
else:
cnv_count += 1
cnv_dict[mutation[3]] = ref_genome[mutation[0]:mutation[1]]
elif mutation[2] == 'REF_ALU':
base_alu = self.gen._base_alu
match_count = 0
for i in range(len(base_alu)):
if self.gen._base_alu[i] == ref_genome[mutation[0]+i]:
match_count += 1
self.assertTrue((match_count / len(base_alu)) > (.99 - self.gen._alu_mutation_rate))
self.assertEqual(cnv_count, self.gen._nbr_ref_cnv)
self.assertEqual(str_count, self.gen._nbr_ref_str)
def test_generate_donor_genome(self):
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = 10
args.scale = 'k'
args.alu = 'n'
args.assembly = 'n'
args.base_alu = random_sequence(300)
self.gen = chromosome_builder(args)
self.gen._nbr_snp = 0
self.gen._nbr_denovo_str = 0
self.gen._nbr_denovo_cnv = 0
self.gen._nbr_long_inv = 0
self.gen._nbr_long_ins = 0
self.gen._nbr_long_del = 0
self.gen._nbr_ref_alu = 0
self.gen._nbr_denovo_alu = 0
self.gen._nbr_ref_str = 0
self.gen._nbr_ref_cnv = 0
self.gen._nbr_short_inv = 0
self.gen._nbr_short_ins = 0
self.gen._nbr_short_del = 0
self.gen._cnv_mutation_amount = 0
self.gen._str_mutation_amount = 0
self.gen.generate_ref_genome()
self.gen.generate_donor_genome()
ref_genome = ''
for sequence in self.gen.parse_fasta(self.gen._ref_genome_file):
if sequence:
ref_genome += sequence
else:
break
donor_genome = ''
for sequence in self.gen.parse_fasta(self.gen._priv_genome_file):
if sequence:
donor_genome += sequence
else:
break
self.assertEqual(ref_genome, donor_genome)
self.assertEqual(len(ref_genome), 10000)
for alu in ['y', 'n']:
for test_args in [[10, 'k'], [100, 'k'], [150, 'k']]:
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = test_args[0]
args.scale = test_args[1]
args.alu = alu
args.assembly = 'n'
args.base_alu = random_sequence(300)
if args.scale == 'k':
expected_size = test_args[0] * 1000
elif args.scale == 'm':
expected_size = test_args[0] * 1000000
self.gen = chromosome_builder(args)
self.gen._alu_min_length = 300
self.gen._alu_max_length = 300
self.gen._alu_mutation_rate = 0.3
self.gen.generate_ref_genome()
self.gen.generate_donor_genome()
ref_genome = ''
for sequence in self.gen.parse_fasta(self.gen._ref_genome_file):
if sequence:
ref_genome += sequence
else:
break
donor_genome = ''
for sequence in self.gen.parse_fasta(self.gen._priv_genome_file):
if sequence:
donor_genome += sequence
else:
break
last_end = 0
self.assertEqual(expected_size, len(ref_genome))
for i in range(len(self.gen._mutation_list)):
mutation = self.gen._mutation_list[i]
self.assertTrue(ref_genome[last_end:mutation[0]] in donor_genome)
last_end = mutation[1]
mut_type = mutation[2]
range_stt = max(0, mutation[0]-20)
range_end = min(len(ref_genome)-1, mutation[0]+20)
gapped_range_end = min(len(ref_genome)-1, mutation[1]+20)
if mut_type == 'SNP':
self.assertTrue(ref_genome[range_stt:range_end] not in donor_genome, msg='SNP ' + str(mutation[0]))
elif mut_type == 'MUT_STR':
new_str = mutation[3]
self.assertTrue(new_str in donor_genome, msg='MUT_STR ' + str(mutation[0]))
elif mut_type == 'DONOR_STR':
str_seq = mutation[3]
nbr_copies = mutation[4]
new_str = str_seq * nbr_copies
self.assertTrue(new_str in donor_genome, msg='DONOR_STR ' + str(mutation[0]))
elif mut_type == 'REF_ALU':
self.assertTrue(ref_genome[mutation[0]:mutation[1]] in donor_genome, msg='REF_ALU ' + str(mutation[0]))
elif mut_type == 'REF_CNV':
self.assertTrue(ref_genome[mutation[0]:mutation[1]] in donor_genome, msg='REF_CNV ' + str(mutation[0]))
elif mut_type == 'DONOR_ALU':
self.assertTrue(ref_genome[range_stt:gapped_range_end] not in donor_genome, msg='DONOR_ALU ' + str(mutation[0]))
elif mut_type == 'INV':
inv_seq = ref_genome[mutation[0]:mutation[1]]
inv_seq = inv_seq[::-1]
self.assertTrue(inv_seq in donor_genome, msg='INV ' + str(mutation[0]))
elif mut_type == 'INS':
self.assertTrue(ref_genome[range_stt:range_end] not in donor_genome, msg='INS ' + str(mutation[0]))
elif mut_type == 'DEL':
self.assertTrue(ref_genome[range_stt:gapped_range_end] not in donor_genome, msg='DEL ' + str(mutation[0]))
def test_generate_reads(self):
pass
def test_mutate_str(self):
pass
def test_mutate_cnv(self):
pass
def test_allocate_cnv(self):
#, nbr_cnv, variant_tag):
pass
def test_allocate_alu(self):
#, nbr_alu, variant_tag):
pass
def test_allocate_str(self):
#, nbr_str, variant_tag):
pass
def test_allocate_inversions(self):
#, nbr_inv, min_len, max_len):
pass
def test_allocate_insertions(self):
#, nbr_ins, min_len, max_len):
pass
def test_allocate_deletions(self):
#, nbr_del, min_len, max_len):
pass
def test_allocate_snps(self):
pass
def random_sequence(seq_len):
return "".join(random.choice(['A','C','G','T']) for i in range(seq_len))
class TestSetting():
def __init__(self):
self.id = None
self.num_chr = None
self.chr_size = None
self.scale = None
self.alu = None
self.assembly = None
self.base_alu = None
if __name__ == '__main__':
unittest.main(exit=False) #exit=False so the timing benchmark below still runs
test_results = []
for alu in ['y']:#, 'n']:
for test in [[100, 'k']]:#, [1, 'm']]:
args = TestSetting()
args.id = 'test'
args.chr_id = 1
args.chr_size = test[0]
args.scale = test[1]
args.alu = alu
args.assembly = 'n'
args.base_alu = random_sequence(300)
start = time.perf_counter() #time.clock() was removed in Python 3.8
gen = chromosome_builder(args)
print('generating ref genome length: ' + str(test[0]) + test[1])
gen.generate_ref_genome()
print('generating donor genome')
gen.generate_donor_genome()
print('generating reads')
gen.generate_reads()
test_results.append('Test: ' + str(test[0]) + test[1] + ' time: ' + str(time.perf_counter() - start))
for res in test_results:
print(res)
``` |
{
"source": "joe3d1998/GraphFlow",
"score": 2
} |
#### File: core/utils/bert_utils.py
```python
from collections import defaultdict, namedtuple
import torch
# When using the sliding window trick for long sequences,
# we take the representation of each token with maximal context.
# Take average of the BERT embeddings of these BPE sub-tokens
# as the embedding for the word.
# Take *weighted* average of the word embeddings through all layers.
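# Shape sketch (inferred from the two functions below): question hidden states arrive as
# (num_layers, batch, turn, chunk, token, bert_dim) and context states as
# (num_layers, batch, chunk, token, bert_dim); the chunk/token axes are folded back onto
# document word positions, giving (..., max_doc_len, bert_dim) before the optional mean over layers.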
def extract_bert_ques_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, turn_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, turn_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = []
for t, para_feature in enumerate(ex_feature): # Turn
para_token_count = defaultdict(int)
for j, chunk_feature in enumerate(para_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, t, doc_word_idx] += all_encoder_layers[:, i, t, j, k]
para_token_count[doc_word_idx] += 1
ex_token_count.append(para_token_count)
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for t, para_token_count in enumerate(ex_token_count):
for doc_word_idx, count in para_token_count.items():
out_features[:, i, t, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def extract_bert_ctx_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
num_layers, batch_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
out_features = torch.Tensor(num_layers, batch_size, max_doc_len, bert_dim).fill_(0)
device = all_encoder_layers.get_device() if all_encoder_layers.is_cuda else None
if device is not None:
out_features = out_features.to(device)
token_count = []
# Map BERT tokens to doc words
for i, ex_feature in enumerate(features): # Example
ex_token_count = defaultdict(int)
for j, chunk_feature in enumerate(ex_feature): # Chunk
for k in chunk_feature.token_is_max_context: # Token
if chunk_feature.token_is_max_context[k]:
doc_word_idx = chunk_feature.token_to_orig_map[k]
out_features[:, i, doc_word_idx] += all_encoder_layers[:, i, j, k]
ex_token_count[doc_word_idx] += 1
token_count.append(ex_token_count)
for i, ex_token_count in enumerate(token_count):
for doc_word_idx, count in ex_token_count.items():
out_features[:, i, doc_word_idx] /= count
# Average through all layers
if not weighted_avg:
out_features = torch.mean(out_features, 0)
return out_features
def convert_text_to_bert_features(text, bert_tokenizer, max_seq_length, doc_stride):
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
tok_to_orig_index = []
all_doc_tokens = []
for (i, token) in enumerate(text):
sub_tokens = bert_tokenizer.wordpiece_tokenizer.tokenize(token.lower())
for sub_ in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_)
# The -2 accounts for [CLS] and [SEP]
max_tokens_for_doc = max_seq_length - 2
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
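# Worked example (hypothetical numbers): with 10 sub-tokens, max_tokens_for_doc = 6 and
# doc_stride = 3, the loop above yields doc_spans = [(0, 6), (3, 6), (6, 4)]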
out_features = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
feature = BertInputFeatures(
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
out_features.append(feature)
return out_features
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
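# Concretely, with both spans of length 5: span B scores min(4, 0) + 0.01 * 5 = 0.05 and
# span C scores min(1, 3) + 0.01 * 5 = 1.05 for 'bought', so span C is kept.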
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class BertInputFeatures(object):
"""A single set of BERT features of data."""
def __init__(self,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids):
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
```
#### File: core/utils/generic_utils.py
```python
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None, device=None):
''' Sinusoid position encoding table '''
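# Standard formulation implemented below:
# PE(pos, 2i) = sin(pos / 10000^(2i / d_hid)), PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_hid))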
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
sinusoid_table = torch.Tensor(sinusoid_table)
return sinusoid_table.to(device) if device else sinusoid_table
def get_range_vector(size, device):
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
is meant to avoid copy data from CPU to GPU.
"""
if device.type == 'cuda':
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
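# e.g. get_range_vector(4, torch.device('cpu')) -> tensor([0, 1, 2, 3])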
def to_cuda(x, device=None):
if device:
x = x.to(device)
return x
def batched_diag(x, device=None):
# Input: a 2D tensor
# Output: a 3D tensor
x_diag = torch.zeros(x.size(0), x.size(1), x.size(1))
_ = x_diag.as_strided(x.size(), [x_diag.stride(0), x_diag.size(2) + 1]).copy_(x)
return to_cuda(x_diag, device)
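# Builds a (batch, N) 0/1 mask where row i has x[i] leading ones (x holds per-example lengths)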
def create_mask(x, N, device=None):
x = x.data
mask = np.zeros((x.size(0), N))
for i in range(x.size(0)):
mask[i, :x[i]] = 1
return to_cuda(torch.Tensor(mask), device)
def get_config(config_path="config.yml"):
with open(config_path, "r") as setting:
config = yaml.load(setting, Loader=yaml.SafeLoader) #explicit Loader avoids PyYAML's unsafe default and 5.1+ warning
return config
``` |
{
"source": "joe42/python-holidays",
"score": 3
} |
#### File: holidays/countries/brazil.py
```python
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, TU
from holidays.constants import JAN, MAR, APR, MAY, JUN, JUL, AUG, SEP, OCT, \
NOV, DEC
from holidays.holiday_base import HolidayBase
class Brazil(HolidayBase):
"""
https://pt.wikipedia.org/wiki/Feriados_no_Brasil
"""
STATES = ['AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT',
'MS', 'MG', 'PA', 'PB', 'PE', 'PI', 'PR', 'RJ', 'RN', 'RS', 'RO',
'RR', 'SC', 'SP', 'SE', 'TO']
def __init__(self, **kwargs):
self.country = 'BR'
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
self[date(year, JAN, 1)] = "Ano novo"
self[date(year, APR, 21)] = "Tiradentes"
self[date(year, MAY, 1)] = "Dia Mundial do Trabalho"
self[date(year, SEP, 7)] = "Independência do Brasil"
self[date(year, OCT, 12)] = "Nossa Senhora Aparecida"
self[date(year, NOV, 2)] = "Finados"
self[date(year, NOV, 15)] = "Proclamação da República"
# Christmas Day
self[date(year, DEC, 25)] = "Natal"
self[easter(year) - rd(days=2)] = "Sexta-feira Santa"
self[easter(year)] = "Páscoa"
self[easter(year) + rd(days=60)] = "Corpus Christi"
quaresma = easter(year) - rd(days=46)
self[quaresma] = "Quarta-feira de cinzas (Início da Quaresma)"
self[quaresma - rd(weekday=TU(-1))] = "Carnaval"
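# e.g. in 2021 Easter falls on April 4, so Quaresma starts on February 17 (Ash Wednesday)
# and Carnaval lands on February 16, the preceding Tuesday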
if self.state == 'AC':
self[date(year, JAN, 23)] = "Dia do evangélico"
self[date(year, JUN, 15)] = "Aniversário do Acre"
self[date(year, SEP, 5)] = "Dia da Amazônia"
self[date(year, NOV, 17)] = "Assinatura do Tratado de" \
" Petrópolis"
if self.state == 'AL':
self[date(year, JUN, 24)] = "São João"
self[date(year, JUN, 29)] = "São Pedro"
self[date(year, SEP, 16)] = "Emancipação política de Alagoas"
self[date(year, NOV, 20)] = "Consciência Negra"
if self.state == 'AP':
self[date(year, MAR, 19)] = "Dia de São José"
self[date(year, JUL, 25)] = "São Tiago"
self[date(year, OCT, 5)] = "Criação do estado"
self[date(year, NOV, 20)] = "Consciência Negra"
if self.state == 'AM':
self[date(year, SEP, 5)] = "Elevação do Amazonas" \
" à categoria de província"
self[date(year, NOV, 20)] = "Consciência Negra"
self[date(year, DEC, 8)] = "Dia de Nossa Senhora da Conceição"
if self.state == 'BA':
self[date(year, JUL, 2)] = "Independência da Bahia"
if self.state == 'CE':
self[date(year, MAR, 19)] = "São José"
self[date(year, MAR, 25)] = "Data Magna do Ceará"
if self.state == 'DF':
self[date(year, APR, 21)] = "Fundação de Brasília"
self[date(year, NOV, 30)] = "Dia do Evangélico"
if self.state == 'ES':
self[date(year, OCT, 28)] = "Dia do Servidor Público"
if self.state == 'GO':
self[date(year, OCT, 28)] = "Dia do Servidor Público"
if self.state == 'MA':
self[date(year, JUL, 28)] = "Adesão do Maranhão" \
" à independência do Brasil"
self[date(year, DEC, 8)] = "Dia de Nossa Senhora da Conceição"
if self.state == 'MT':
self[date(year, NOV, 20)] = "Consciência Negra"
if self.state == 'MS':
self[date(year, OCT, 11)] = "Criação do estado"
if self.state == 'MG':
self[date(year, APR, 21)] = "Data Magna de MG"
if self.state == 'PA':
self[date(year, AUG, 15)] = "Adesão do Grão-Pará" \
" à independência do Brasil"
if self.state == 'PB':
self[date(year, AUG, 5)] = "Fundação do Estado"
if self.state == 'PE':
self[date(year, MAR, 6)] = "Revolução Pernambucana (Data Magna)"
self[date(year, JUN, 24)] = "São João"
if self.state == 'PI':
self[date(year, MAR, 13)] = "Dia da Batalha do Jenipapo"
self[date(year, OCT, 19)] = "Dia do Piauí"
if self.state == 'PR':
self[date(year, DEC, 19)] = "Emancipação do Paraná"
if self.state == 'RJ':
self[date(year, APR, 23)] = "Dia de São Jorge"
self[date(year, OCT, 28)] = "Dia do Funcionário Público"
self[date(year, NOV, 20)] = "Zumbi dos Palmares"
if self.state == 'RN':
self[date(year, JUN, 29)] = "Dia de São Pedro"
self[date(year, OCT, 3)] = "Mártires de Cunhaú e Uruaçu"
if self.state == 'RS':
self[date(year, SEP, 20)] = "Revolução Farroupilha"
if self.state == 'RO':
self[date(year, JAN, 4)] = "Criação do estado"
self[date(year, JUN, 18)] = "Dia do Evangélico"
if self.state == 'RR':
self[date(year, OCT, 5)] = "Criação de Roraima"
if self.state == 'SC':
self[date(year, AUG, 11)] = "Criação da capitania," \
" separando-se de SP"
if self.state == 'SP':
self[date(year, JUL, 9)] = "Revolução Constitucionalista de 1932"
if self.state == 'SE':
self[date(year, JUL, 8)] = "Autonomia política de Sergipe"
if self.state == 'TO':
self[date(year, JAN, 1)] = "Instalação de Tocantins"
self[date(year, SEP, 8)] = "Nossa Senhora da Natividade"
self[date(year, OCT, 5)] = "Criação de Tocantins"
class BR(Brazil):
pass
class BRA(Brazil):
pass
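# Usage sketch (assumes HolidayBase's dict-like lazy population, defined elsewhere in the package):
# br = Brazil(state='SP')
# date(2021, 9, 7) in br # True -> "Independência do Brasil"
# date(2021, 7, 9) in br # True only for SP ("Revolução Constitucionalista de 1932")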
```
#### File: holidays/countries/italy.py
```python
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO
from holidays.constants import JAN, FEB, MAR, APR, MAY, JUN, JUL, AUG, SEP, \
OCT, \
NOV, DEC
from holidays.holiday_base import HolidayBase
class Italy(HolidayBase):
PROVINCES = ['AN', 'AO', 'BA', 'BL', 'BO',
'BZ', 'BS', 'CB', 'CT', 'Cesena',
'CH', 'CS', 'KR', 'EN', 'FE', 'FI',
'FC', 'Forli', 'FR', 'GE', 'GO', 'IS',
'SP', 'LT', 'MN', 'MS', 'MI',
'MO', 'MB', 'NA', 'PD', 'PA',
'PR', 'PG', 'PE', 'PC', 'PI',
'PD', 'PT', 'RA', 'RE',
'RI', 'RN', 'RM', 'RO', 'SA',
'SR', 'TE', 'TO', 'TS', 'Pesaro', 'PU',
'Urbino', 'VE', 'VC', 'VI']
def __init__(self, **kwargs):
self.country = 'IT'
self.prov = kwargs.pop('prov', kwargs.pop('state', ''))
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, JAN, 1)] = "Capodanno"
self[date(year, JAN, 6)] = "Epifania del Signore"
self[easter(year)] = "Pasqua di Resurrezione"
self[easter(year) + rd(weekday=MO)] = "Lunedì dell'Angelo"
if year >= 1946:
self[date(year, APR, 25)] = "Festa della Liberazione"
self[date(year, MAY, 1)] = "Festa dei Lavoratori"
if year >= 1948:
self[date(year, JUN, 2)] = "Festa della Repubblica"
self[date(year, AUG, 15)] = "Assunzione della Vergine"
self[date(year, NOV, 1)] = "Tutti i Santi"
self[date(year, DEC, 8)] = "Immacolata Concezione"
self[date(year, DEC, 25)] = "Natale"
self[date(year, DEC, 26)] = "Santo Stefano"
# Provinces holidays
if self.prov:
if self.prov == 'AN':
self[date(year, MAY, 4)] = "San Ciriaco"
elif self.prov == 'AO':
self[date(year, SEP, 7)] = "San Grato"
elif self.prov == 'BA':
self[date(year, DEC, 6)] = "San Nicola"
elif self.prov == 'BL':
self[date(year, NOV, 11)] = "San Martino"
elif self.prov == 'BO':
self[date(year, OCT, 4)] = "San Petronio"
elif self.prov == 'BZ':
self[date(year, AUG, 15)] = "Maria Santissima Assunta"
elif self.prov == 'BS':
self[date(year, FEB, 15)] = "Santi Faustino e Giovita"
elif self.prov == 'CB':
self[date(year, APR, 23)] = "San Giorgio"
elif self.prov == 'CT':
self[date(year, FEB, 5)] = "Sant'Agata"
elif self.prov in ('FC', 'Cesena'):
self[date(year, JUN, 24)] = "San Giovanni Battista"
if self.prov in ('FC', 'Forlì'):
self[date(year, FEB, 4)] = "Madonna del Fuoco"
elif self.prov == 'CH':
self[date(year, MAY, 11)] = "San Giustino di Chieti"
elif self.prov == 'CS':
self[date(year, FEB, 12)] = "Madonna del Pilerio"
elif self.prov == 'KR':
self[date(year, OCT, 9)] = "<NAME>"
elif self.prov == 'EN':
self[date(year, JUL, 2)] = "Madonna della Visitazione"
elif self.prov == 'FE':
self[date(year, APR, 23)] = "San Giorgio"
elif self.prov == 'FI':
self[date(year, JUN, 24)] = "San Giovanni Battista"
elif self.prov == 'FR':
self[date(year, JUN, 20)] = "San Silverio"
elif self.prov == 'GE':
self[date(year, JUN, 24)] = "San Giovanni Battista"
elif self.prov == 'GO':
self[date(year, MAR, 16)] = "Santi Ilario e Taziano"
elif self.prov == 'IS':
self[date(year, MAY, 19)] = "San Pietro Celestino"
elif self.prov == 'SP':
self[date(year, MAR, 19)] = "San Giuseppe"
elif self.prov == 'LT':
self[date(year, APR, 25)] = "San <NAME>"
elif self.prov == 'ME':
self[date(year, JUN, 3)] = "Madonna della Lettera"
elif self.prov == 'MI':
self[date(year, DEC, 7)] = "Sant'Ambrogio"
elif self.prov == 'MN':
self[date(year, MAR, 18)] = "<NAME>"
elif self.prov == 'MS':
self[date(year, OCT, 4)] = "San Francesco d'Assisi"
elif self.prov == 'MO':
self[date(year, JAN, 31)] = "San Geminiano"
elif self.prov == 'MB':
self[date(year, JUN, 24)] = "San Giovanni Battista"
elif self.prov == 'NA':
self[date(year, SEP, 19)] = "San Gennaro"
elif self.prov == 'PD':
self[date(year, JUN, 13)] = "<NAME>"
elif self.prov == 'PA':
self[date(year, JUL, 15)] = "San Giovanni"
elif self.prov == 'PR':
self[date(year, JAN, 13)] = "<NAME>"
elif self.prov == 'PG':
self[date(year, JAN, 29)] = "Sant'Ercolano e <NAME>"
elif self.prov == 'PC':
self[date(year, JUL, 4)] = "<NAME>"
elif self.prov == 'RM':
self[date(year, JUN, 29)] = "Santi Pietro e Paolo"
elif self.prov == 'TO':
self[date(year, JUN, 24)] = "San Giovanni Battista"
elif self.prov == 'TS':
self[date(year, NOV, 3)] = "San Giusto"
elif self.prov == 'VI':
self[date(year, APR, 25)] = "San Marco"
# TODO: add missing provinces' holidays:
# 'Pisa', 'Pordenone', 'Potenza', 'Ravenna',
# '<NAME>', 'Rieti', 'Rimini', 'Rovigo',
# 'Salerno', 'Siracusa', 'Teramo', 'Torino', 'Urbino',
# 'Venezia'
class IT(Italy):
pass
class ITA(Italy):
pass
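
# --- Usage sketch (editor's addition, not part of the original file) --------
# Shows how the province handling above is meant to be used. It assumes the
# surrounding `holidays` package is importable and that HolidayBase (not shown
# here) accepts the usual `years` keyword and supports `date in holidays_obj`
# membership tests, as in the published library.
if __name__ == "__main__":
    milan = Italy(prov='MI', years=[2021])
    print(date(2021, JUN, 2) in milan)   # expected True  -> "Festa della Repubblica"
    print(date(2021, DEC, 7) in milan)   # expected True  -> "Sant'Ambrogio" (MI only)
    print(date(2021, DEC, 7) in IT(years=[2021]))  # expected False -> no province given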
``` |
{
"source": "joe5343281/PyComplex",
"score": 4
} |
#### File: joe5343281/PyComplex/pycomplex.py
```python
import math
import re
class Complex():
def __init__(self, real=0, imaginary=0):
self.r = real
self.i = imaginary
def __add__(self, other):
"""It can make operator, +, be used for adding two Complex object"""
r = self.r + other.r
i = self.i + other.i
return Complex(r, i)
def __sub__(self, other):
"""Above"""
r = self.r - other.r
i = self.i - other.i
return Complex(r, i)
def __mul__(self, other):
"""(a+bi)*(c+di) = a*c-b*d + (a*d+b*c)i"""
a = self.r
b = self.i
c = other.r
d = other.i
r = a * c - b * d
i = a * d + b * c
return Complex(r, i)
def __truediv__(self, other):
a = self.r
b = self.i
c = other.r
d = other.i
r = (a * c + b * d) / (c * c + d * d)
i = (b * c - a * d) / (c * c + d * d)
return Complex(r, i)
def conj(self):
return Complex(self.r, -1 * self.i)
def ln(self):
a = self.r
b = self.i
r = a * a + b * b
w = (1/2)*math.log(r)
        # atan2 keeps the sign of the argument when the imaginary part is
        # negative; acos(a / |z|) only ever returns values in [0, pi].
        x = math.atan2(b, a)
return Complex(w, x)
def exp(self):
a = self.r
b = self.i
r = math.exp(a)
return Complex(r * math.cos(b), r * math.sin(b))
def rad(self):
a = self.r
b = self.i
        # atan2 covers all four quadrants, while acos(a / |z|) would collapse
        # negative angles into [0, pi].
        rad = math.atan2(b, a)
        return rad
def parser(self, comp):
        # Raw string avoids invalid-escape warnings; '[-+]' (rather than the
        # original '[-/+]') and '\d+' keep int() from receiving an empty match.
        # Accepts strings such as '3+4i' or '-2-5i'.
        m = re.match(r'^([-+]?\d+)([-+]\d+)i$', comp)
        r = int(m.group(1))
        i = int(m.group(2))
        return Complex(r, i)
    def toString(self):
        # Format the imaginary part with an explicit sign so that, for example,
        # Complex(3, 4) renders as '3+4i' instead of '34i'.
        return '{}{:+}i'.format(self.r, self.i)
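
# --- Usage sketch (editor's addition, not part of the original file) --------
# Demonstrates the arithmetic operators and the exp/ln round trip. The printed
# strings assume the signed toString() above; the numeric results follow from
# the formulas in the class itself.
if __name__ == "__main__":
    z1 = Complex(3, 4)
    z2 = Complex(1, -2)
    print((z1 + z2).toString())      # 4+2i
    print((z1 * z2).toString())      # 11-2i
    print((z1 / z2).toString())      # -1.0+2.0i
    print(round(z1.rad(), 4))        # 0.9273, i.e. atan2(4, 3)
    print(z1.ln().exp().toString())  # approximately 3+4i (floating-point round trip)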
``` |
{
"source": "Joe606/movie",
"score": 3
} |
#### File: douban_movie/spiders/movie.py
```python
import scrapy
from douban_movie.items import DoubanMovieItem
import time
class Toscrapemovie(scrapy.Spider):
name = 'scrape_movie'
def start_requests(self):
urls = ['https://movie.douban.com/tag/#/',]
for url in urls:
            yield scrapy.Request(url=url, callback=self.parse, meta={'depth': 0})  # mark crawl depth via meta
def parse(self,response):
print(response.text)
item = DoubanMovieItem()
selector = response.xpath('//div[@class="list-wp"]/a')
item['name'] = selector.xpath('p/span[@class="title"]/text()').getall()
item['score'] = selector.xpath('p/span[@class="rate"]/text()').getall()
item['pic'] = selector.xpath('div/span/img/@src').getall()
item['link'] = selector.xpath('@href').getall()
yield item
print(type(item['link']),len(item['link']))
for link in item['link']:
yield response.follow(link,callback=self.next_parse,meta={'depth':1})
'''
for url_next in selector.xpath('@href'):
yield response.follow(url_next,callback=self.next_parse)
print('next page')
'''
def next_parse(self,response):
item2 = DoubanMovieItem()
print(item2)
item2['_name'] = response.xpath('//div[@id="content"]/h1/span[@property="v:itemreviewed"]/text()').getall()
item2['_year'] = response.xpath('//div[@id="content"]/h1/span[@class="year"]/text()').getall()
item2['_pic'] = response.xpath('//div[@id="mainpic"]/a/img/@src').getall()
print(item2)
selector_next = response.xpath('//div[@id="info"]')
item2['_director'] = selector_next.xpath('//span[@class="attrs"]/a/text()').getall()[:2]
item2['_writer'] = selector_next.xpath('//span[@class="attrs"]/a/text()').getall()[2:]
item2['_cast'] = selector_next.xpath('//span[@class="actor"]/span[@class="attrs"]/span/a/text()').getall()
item2['_type'] = selector_next.xpath('span[@property="v:genre"]/text()').getall()
item2['_country'] = selector_next.xpath('text()').getall()[7]
item2['_language'] = selector_next.xpath('text()').getall()[9]
item2['_premiere'] = selector_next.xpath('span[@property="v:initialReleaseDate"]/text()').getall()
item2['_episode'] = selector_next.xpath('text()').getall()[13]
item2['_runningtime'] = selector_next.xpath('text()').getall()[15]
item2['_plot'] = response.xpath('//span[@property="v:summary"]/text()').getall()
return item2
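
# Editor's note (not part of the original spider): the positional indexing in
# next_parse (getall()[7], [9], [13], [15]) breaks as soon as Douban adds or
# removes a line in the "info" block. A sturdier pattern is to anchor on the
# label text, e.g. something along these lines (label strings unverified):
#   selector_next.xpath(
#       'span[contains(text(), "制片国家/地区")]/following-sibling::text()[1]').get()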
``` |
{
"source": "Joe606/scrape_sportshoes",
"score": 3
} |
#### File: Joe606/scrape_sportshoes/to_mysql.py
```python
# -*- coding: utf-8 -*-
import pymysql
import time
import json
import os
# create a table called all_comments
def new_table():
sql = '''
create table all_comments
(num int(10) auto_increment not null primary key,id text,content text,score text,productColor text,productSize text);
'''
cur.execute(sql)
db.commit()
# load data from a text file into the new table in the database
def read_to_database():
i = 0
for c in y:
i += 1
sql = '''insert into all_comments
(id,content,score,productColor,productSize) values
('%s','%s','%s','%s','%s');''' %(c['id'],c['content'],c['score'],c['productColor'],c['productSize'])
print(sql)
cur.execute(sql)
db.commit()
print(i)
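
# --- Safer variant (editor's sketch, not part of the original script) -------
# Formatting values straight into the SQL string (as above) breaks on quotes
# inside the comment text and is vulnerable to SQL injection. pymysql supports
# parameterized queries, so a drop-in alternative could look like this,
# reusing the module-level `cur` and `db` the same way read_to_database does:
def read_to_database_safe(comments):
    sql = ('insert into all_comments (id, content, score, productColor, productSize) '
           'values (%s, %s, %s, %s, %s);')
    for c in comments:
        cur.execute(sql, (c['id'], c['content'], c['score'],
                          c['productColor'], c['productSize']))
    db.commit()
# e.g. call read_to_database_safe(y) in place of read_to_database() below.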
if __name__ == '__main__':
db = pymysql.connect(host='localhost',user='root',passwd='<PASSWORD>',database='男运动鞋')
cur = db.cursor()
print(os.getcwd())
new_table()
for j in range(1,100):
f = open('shoes{}.txt'.format(j),'r+',encoding='utf-8')
x = json.load(f)
y = x['comments']
print(type(y))
read_to_database()
cur.close()
    db.close()
``` |
{
"source": "joe6302413/APS",
"score": 3
} |
#### File: joe6302413/APS/gaussianmodule.py
```python
import numpy as np
# from scipy.optimize import curve_fit, shgo
# from scipy import integrate
# from os.path import split,join
# from scipy.signal import savgol_filter
__version__='1.0'
class DFT:
def __init__(self,basis,):
``` |