metadata | text
---|---
{
"source": "JPFrancoia/tpb-lite",
"score": 3
} |
#### File: tpblite/models/torrents.py
```python
from typing import Optional
import re
import unicodedata
import bs4 as BeautifulSoup
import requests
from .utils import headers
# TODO: write better comments
def fileSizeStrToInt(size_str: str) -> int:
"""Converts file size given in *iB format to bytes integer"""
unit_dict = {"KiB": (2 ** 10), "MiB": (2 ** 20), "GiB": (2 ** 30), "TiB": (2 ** 40)}
try:
num = float(size_str[:-3])
unit = size_str[-3:]
return int(num * unit_dict[unit])
except Exception as e:
raise AttributeError(
"Cannot determine filesize: {0}, error: {1}".format(size_str, e)
)
class Torrent:
"""
Abstract class to contain info about a torrent:
magnet link, file size, number of seeds, number of leeches, etc.
"""
# TODO: type hint html_row, it's a BeautifulSoup object
def __init__(self, html_row):
self._html_row = html_row
self.upload_date, self.filesize, self.byte_size, self.uploader = (
self._getFileInfo()
)
# TODO: type hint
self._info: Optional[str] = None
def __str__(self) -> str:
return "{0}, S: {1}, L: {2}, {3}".format(
self.title, self.seeds, self.leeches, self.filesize
)
def __repr__(self) -> str:
return "<Torrent object: {}>".format(self.title)
@property
def url(self) -> str:
return self._html_row.find("a", class_="detLink")["href"]
@property
def title(self) -> str:
return self._html_row.find("a", class_="detLink").string
@property
def magnetlink(self) -> str:
tag = self._html_row.find("a", href=(re.compile("magnet")))
link = tag.get("href")
return link
@property
def seeds(self) -> int:
taglist = self._html_row.find_all("td", align="right")
return int(taglist[0].string)
@property
def leeches(self) -> int:
taglist = self._html_row.find_all("td", align="right")
return int(taglist[1].string)
# TODO: handle exceptions if request fails
@property
def info(self) -> str:
if self._info is None:
request = requests.get(str(self.url), headers=headers())
soup = BeautifulSoup.BeautifulSoup(request.text, features="html.parser")
self._info = soup.find("div", {"class": "nfo"}).text
return self._info
def _getFileInfo(self):
text = self._html_row.find("font", class_="detDesc").get_text()
t = text.split(",")
uptime = unicodedata.normalize("NFKD", t[0].replace("Uploaded ", "").strip())
size = unicodedata.normalize("NFKD", t[1].replace("Size ", "").strip())
byte_size = fileSizeStrToInt(size)
uploader = unicodedata.normalize("NFKD", t[2].replace("ULed by ", "").strip())
return uptime, size, byte_size, uploader
class Torrents:
"""
Torrents object, takes the query response HTML and parses it into
a list of Torrent objects. Has methods to select items from
the torrent list.
"""
def __init__(self, html_source):
self._html_source = html_source
self.list = self._createTorrentList()
def __str__(self):
return "Torrents object: {} torrents".format(len(self.list))
def __repr__(self):
return "<Torrents object: {} torrents>".format(len(self.list))
def __iter__(self):
return iter(self.list)
def __len__(self):
return len(self.list)
def __getitem__(self, index):
return self.list[index]
def _createTorrentList(self):
soup = BeautifulSoup.BeautifulSoup(self._html_source, features="html.parser")
if soup.body is None:
raise ConnectionError("Could not determine torrents (empty html body)")
rows = soup.body.find_all("tr")
torrents = []
for row in rows:
if len(row.find_all("td", {"class": "vertTh"})) == 1:
torrents.append(Torrent(row))
return torrents
def getBestTorrent(self, min_seeds=30, min_filesize="1 GiB", max_filesize="4 GiB"):
"""Filters torrent list based on some constraints, then returns highest seeded torrent
:param min_seeds (int): minimum seed number filter
:param min_filesize (str): minimum filesize in XiB form, eg. GiB
:param max_filesize (str): maximum filesize in XiB form, eg. GiB
:return Torrent Object: Torrent with highest seed number, will return None if all are filtered out"""
if not isinstance(min_filesize, int):
min_filesize = fileSizeStrToInt(min_filesize)
if not isinstance(max_filesize, int):
max_filesize = fileSizeStrToInt(max_filesize)
filtered_list = filter(
lambda x: self._filterTorrent(x, min_seeds, min_filesize, max_filesize),
self.list,
)
sorted_list = sorted(filtered_list, key=lambda x: x.seeds, reverse=True)
if len(sorted_list) > 0:
return sorted_list[0]
else:
print("No torrents found given criteria")
return None
def _filterTorrent(self, torrent, min_seeds, min_filesize, max_filesize):
if (
(torrent.seeds < min_seeds)
or (torrent.byte_size < min_filesize)
or (torrent.byte_size > max_filesize)
):
return False
else:
return True
``` |
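A minimal usage sketch of the size handling above, assuming the `tpblite` package is installed; it checks `fileSizeStrToInt` and derives the same byte thresholds that `getBestTorrent` converts by default.

```python
# Sketch only: assumes the tpblite package from the file above is importable.
from tpblite.models.torrents import fileSizeStrToInt

# "1 GiB" -> float("1 ") * 2**30; the unit is taken from the last three characters.
assert fileSizeStrToInt("1 GiB") == 2 ** 30
assert fileSizeStrToInt("512 MiB") == 512 * 2 ** 20

# The default thresholds getBestTorrent converts before filtering.
min_size = fileSizeStrToInt("1 GiB")
max_size = fileSizeStrToInt("4 GiB")
print(min_size, max_size)
```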
{
"source": "jpfreeley/performance_email_notification",
"score": 2
} |
#### File: jpfreeley/performance_email_notification/email_notifications.py
```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from perfreporter.data_manager import DataManager
from report_builder import ReportBuilder
class EmailNotification:
def __init__(self, arguments):
self.args = arguments
self.data_manager = DataManager(arguments)
self.report_builder = ReportBuilder()
self.smtp_config = {'host': arguments['smpt_host'],
'port': arguments['smpt_port'],
'user': arguments['smpt_user'],
'password': arguments['<PASSWORD>']}
def email_notification(self):
tests_data, last_test_data, baseline, violation, compare_with_thresholds = self.data_manager.get_api_test_info()
email_body, charts, date = self.report_builder.create_api_email_body(tests_data, last_test_data, baseline,
self.args['comparison_metric'],
violation, compare_with_thresholds)
self.send_email(self.smtp_config, self.args['user_list'], email_body, charts, date)
def ui_email_notification(self):
tests_data, last_test_data = self.data_manager.get_ui_test_info()
email_body, charts, date = self.report_builder.create_ui_email_body(tests_data, last_test_data)
self.send_email(self.smtp_config, self.args['user_list'], email_body, charts, date)
def send_email(self, smtp_config, user_list, email_body, charts, date):
s = smtplib.SMTP_SSL(host=smtp_config['host'], port=int(smtp_config['port']))
s.ehlo()
s.login(smtp_config['user'], smtp_config['password'])
subject = "[" + str(self.args['notification_type']) + "] "
subject += "Test results for \"" + str(self.args['test'])
subject += "\". Users count: " + str(self.args['users']) + ". From " + str(date) + "."
for user in user_list:
if all(i in user for i in ["<mailto:", "|"]):
user = user.split("|")[1].replace(">", "").replace("<", "")
msg_root = MIMEMultipart('related')
msg_root['Subject'] = subject
msg_root['From'] = smtp_config['user']
msg_root['To'] = user
msg_alternative = MIMEMultipart('alternative')
msg_alternative.attach(MIMEText(email_body, 'html'))
msg_root.attach(msg_alternative)
for chart in charts:
msg_root.attach(chart)
s.sendmail(smtp_config['user'], user, msg_root.as_string())
s.quit()
``` |
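`send_email` expects `email_body` to be HTML and `charts` to be MIME parts it can attach directly to the `related` container. A minimal sketch of building such a message with an inline image referenced by Content-ID (the image bytes and names here are placeholders, not from the project):

```python
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

html_body = '<html><body><p>Test results</p><img src="cid:chart0"></body></html>'

# Placeholder bytes; real code would read a rendered chart image instead.
chart = MIMEImage(b"\x89PNG\r\n\x1a\nplaceholder-bytes", _subtype="png")
chart.add_header("Content-ID", "<chart0>")
chart.add_header("Content-Disposition", "inline", filename="chart0.png")

msg_root = MIMEMultipart("related")
msg_root["Subject"] = "[all] Test results"
msg_alternative = MIMEMultipart("alternative")
msg_alternative.attach(MIMEText(html_body, "html"))
msg_root.attach(msg_alternative)
msg_root.attach(chart)  # same shape as the "for chart in charts" loop in send_email
print(msg_root.as_string()[:300])
```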
{
"source": "jpfr/oss-fuzz",
"score": 2
} |
#### File: infra/cifuzz/docker.py
```python
import os
import sys
# pylint: disable=wrong-import-position,import-error
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import utils
BASE_BUILDER_TAG = 'gcr.io/oss-fuzz-base/base-builder'
BASE_RUNNER_TAG = 'gcr.io/oss-fuzz-base/base-runner'
MSAN_LIBS_BUILDER_TAG = 'gcr.io/oss-fuzz-base/msan-libs-builder'
PROJECT_TAG_PREFIX = 'gcr.io/oss-fuzz/'
# Default fuzz configuration.
DEFAULT_ENGINE = 'libfuzzer'
DEFAULT_ARCHITECTURE = 'x86_64'
_DEFAULT_DOCKER_RUN_ARGS = [
'--cap-add', 'SYS_PTRACE', '-e', 'FUZZING_ENGINE=' + DEFAULT_ENGINE, '-e',
'ARCHITECTURE=' + DEFAULT_ARCHITECTURE, '-e', 'CIFUZZ=True'
]
EXTERNAL_PROJECT_IMAGE = 'external-project'
_DEFAULT_DOCKER_RUN_COMMAND = [
'docker',
'run',
'--rm',
'--privileged',
]
def get_project_image_name(project):
"""Returns the name of the project builder image for |project_name|."""
# TODO(ochang): We may need unique names to support parallel fuzzing.
if project:
return PROJECT_TAG_PREFIX + project
return EXTERNAL_PROJECT_IMAGE
def delete_images(images):
"""Deletes |images|."""
command = ['docker', 'rmi', '-f'] + images
utils.execute(command)
utils.execute(['docker', 'builder', 'prune', '-f'])
def get_base_docker_run_args(workspace, sanitizer='address', language='c++'):
"""Returns arguments that should be passed to every invocation of 'docker
run'."""
docker_args = _DEFAULT_DOCKER_RUN_ARGS.copy()
docker_args += [
'-e', f'SANITIZER={sanitizer}', '-e', f'FUZZING_LANGUAGE={language}',
'-e', 'OUT=' + workspace.out
]
docker_container = utils.get_container_name()
if docker_container:
# Don't map specific volumes if in a docker container, it breaks when
# running a sibling container.
docker_args += ['--volumes-from', docker_container]
else:
docker_args += _get_args_mapping_host_path_to_container(workspace.workspace)
return docker_args, docker_container
def get_base_docker_run_command(workspace, sanitizer='address', language='c++'):
"""Returns part of the command that should be used everytime 'docker run' is
invoked."""
docker_args, docker_container = get_base_docker_run_args(
workspace, sanitizer, language)
command = _DEFAULT_DOCKER_RUN_COMMAND.copy() + docker_args
return command, docker_container
def _get_args_mapping_host_path_to_container(host_path, container_path=None):
"""Get arguments to docker run that will map |host_path| a path on the host to
a path in the container. If |container_path| is specified, that path is mapped
to. If not, then |host_path| is mapped to itself in the container."""
# WARNING: Do not use this function when running in production (and
# --volumes-from) is used for mapping volumes. It will break production.
container_path = host_path if container_path is None else container_path
return ['-v', f'{host_path}:{container_path}']
class Workspace:
"""Class representing the workspace directory."""
def __init__(self, config):
self.workspace = config.workspace
def initialize_dir(self, directory): # pylint: disable=no-self-use
"""Creates directory if it doesn't already exist, otherwise does nothing."""
os.makedirs(directory, exist_ok=True)
@property
def out(self):
"""The out directory used for storing the fuzzer build built by
build_fuzzers."""
# Don't use 'out' because it needs to be used by artifacts.
return os.path.join(self.workspace, 'build-out')
@property
def work(self):
"""The directory used as the work directory for the fuzzer build/run."""
return os.path.join(self.workspace, 'work')
@property
def artifacts(self):
"""The directory used to store artifacts for download by CI-system users."""
# This is hardcoded by a lot of clients, so we need to use this.
return os.path.join(self.workspace, 'out', 'artifacts')
@property
def clusterfuzz_build(self):
"""The directory where builds from ClusterFuzz are stored."""
return os.path.join(self.workspace, 'cifuzz-prev-build')
@property
def clusterfuzz_coverage(self):
"""The directory where builds from ClusterFuzz are stored."""
return os.path.join(self.workspace, 'cifuzz-prev-coverage')
@property
def coverage_report(self):
"""The directory where coverage reports generated by cifuzz are put."""
return os.path.join(self.workspace, 'cifuzz-coverage')
@property
def corpora(self):
"""The directory where corpora from ClusterFuzz are stored."""
return os.path.join(self.workspace, 'cifuzz-corpus')
``` |
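A standalone sketch of the argument list that `get_base_docker_run_args` plus `_DEFAULT_DOCKER_RUN_COMMAND` produce when not running inside a container; the workspace paths are illustrative and no docker command is executed.

```python
workspace_dir = "/tmp/workspace"              # illustrative
workspace_out = workspace_dir + "/build-out"  # what Workspace.out would return

docker_args = [
    "--cap-add", "SYS_PTRACE",
    "-e", "FUZZING_ENGINE=libfuzzer",
    "-e", "ARCHITECTURE=x86_64",
    "-e", "CIFUZZ=True",
    "-e", "SANITIZER=address",
    "-e", "FUZZING_LANGUAGE=c++",
    "-e", "OUT=" + workspace_out,
    # Outside a container, the workspace is mapped to itself.
    "-v", f"{workspace_dir}:{workspace_dir}",
]
command = ["docker", "run", "--rm", "--privileged"] + docker_args
print(" ".join(command))
```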
{
"source": "jpf/tmnt_wikipedia_bot",
"score": 3
} |
#### File: tmnt_wikipedia_bot/lib/images.py
```python
import subprocess
import sys
from lib.constants import LOGO_PATH
from lib.constants import CHROME_PATH
from PIL import Image, ImageChops
def getLogo(title: str, chrome=CHROME_PATH):
title = title.replace(" ", "_")
# TODO: Generate logo locally, stop hitting glench.com (sorry glench)
chrome_cmd = (
f"{chrome} "
"--headless "
"--disable-gpu "
"--screenshot "
"--window-size=1280,600 "
f'"http://glench.com/tmnt/#{title}"'
)
retcode = subprocess.run(chrome_cmd, shell=True).returncode
if retcode != 0:
sys.stderr.write(f"Chrome subprocess exited with code {retcode}")
sys.exit(1)
screenshot_path = "screenshot.png"
logo_path = _cropLogo(screenshot_path)
return logo_path
def _trimWhitespace(im):
# calculate bbox of image area
bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
# crop the image and store sizes as variables
croppedImage = im.crop(bbox)
croppedImageWidth = croppedImage.size[0]
croppedImageHeight = croppedImage.size[1]
# calculate size of output image based on width of cropped image,
# 1:1.9 aspect ratio, and side margin pixels
SIDE_MARGIN = 30
outputImageWidth = croppedImageWidth + (SIDE_MARGIN * 2)
outputImageHeight = int(outputImageWidth * 0.52632)
outputImageSize = tuple([outputImageWidth, outputImageHeight])
# create empty image
outputImage = Image.new(im.mode, outputImageSize, im.getpixel((0, 0)))
# calculate positioning of cropped image on empty background, paste
x = SIDE_MARGIN
y = int((outputImageHeight - croppedImageHeight) / 2)
outputImage.paste(croppedImage, (x, y))
return outputImage
def _cropOffTopAndBottom(image_path: str):
im = Image.open(image_path)
w, h = im.size
return im.crop((0, 175, w, h - 100))
def _cropLogo(im):
logo_path = LOGO_PATH
im = _cropOffTopAndBottom(im)
im = _trimWhitespace(im)
im.save(logo_path)
return logo_path
``` |
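The trim-and-pad behaviour of `_trimWhitespace` can be seen on a synthetic image, independent of Chrome and `lib.constants`; a sketch using the same Pillow calls:

```python
from PIL import Image, ImageChops

# Synthetic "screenshot": white canvas with a dark block standing in for the logo.
im = Image.new("RGB", (200, 100), "white")
im.paste(Image.new("RGB", (60, 20), "black"), (70, 40))

bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
cropped = im.crop(diff.getbbox())

SIDE_MARGIN = 30
out_w = cropped.size[0] + SIDE_MARGIN * 2
out_h = int(out_w * 0.52632)  # 1:1.9 aspect ratio, as in the original
out = Image.new(im.mode, (out_w, out_h), im.getpixel((0, 0)))
out.paste(cropped, (SIDE_MARGIN, (out_h - cropped.size[1]) // 2))
print(cropped.size, out.size)  # (60, 20) (120, 63)
```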
{
"source": "jpfxgood/bkp",
"score": 2
} |
#### File: bkp/bkp_core/bkp_conf.py
```python
import os
import sys
def save_config( bkp_config, config_file, for_restart = False ):
""" save the configuration to the file object passed as a parameter """
print("bucket =", bkp_config["bucket"], file=config_file)
print("dirs = ", ";".join(bkp_config["dirs"]), file=config_file)
print("exclude_files = ", bkp_config["exclude_files"], file=config_file)
print("exclude_dirs = ",";".join(bkp_config["exclude_dirs"]), file=config_file)
print("log_email = ",bkp_config["log_email"], file=config_file)
print("error_email = ",bkp_config["error_email"], file=config_file)
print("threads = ",bkp_config["threads"], file=config_file)
print("ssh_username = ",bkp_config["ssh_username"], file=config_file)
print("ssh_password = ",bkp_config["ssh_password"], file=config_file)
if for_restart:
print("start_time = ",bkp_config["start_time"], file=config_file)
print("end_time = ",bkp_config["end_time"], file=config_file)
print("end_config = True", file=config_file)
return 0
def configure ():
""" prompt for configuration parameters to build initial ~/.bkp/bkp_config """
bkp_config = {}
bkp_config["bucket"] = input("Enter the name of your Amazon S3 bucket, file path, or ssh path:")
bkp_config["dirs"] = input("Enter a semicolon (;) delimited list of directories to backup (will include subdirectories):").split(";")
bkp_config["exclude_files"] = input("Enter a python regular expression to exclude matching file names:")
bkp_config["exclude_dirs"] = input("Enter a semicolon (;) delimited list of directories to exclude (including subdirectories):").split(";")
bkp_config["log_email"] = input("Enter an e-mail address to send log files to:")
bkp_config["error_email"] = input("Enter an e-mail address to send errors to:")
bkp_config["ssh_username"] = input("Enter your ssh user name:")
bkp_config["ssh_password"] = input("Enter your ssh password:")
bkp_config["threads"] = input("Enter the number of threads to use for transfers:")
bkp_dir = os.path.expanduser("~/.bkp")
if not os.path.exists(bkp_dir):
os.mkdir(bkp_dir)
save_config(bkp_config,open(os.path.join(bkp_dir,"bkp_config"), "w"))
return 0
def config( config_file, verbose = False ):
""" load configuration for a backup from a config file """
bkp_config = {}
config_path = os.path.expanduser(config_file)
for l in open(config_path,"r"):
l = l.strip()
if l :
key, value = l.split("=",1)
key = key.strip().lower()
if key == "end_config":
break
value = value.strip()
if key in ["dirs","exclude_dirs"]:
value = [f.strip() for f in value.split(";")]
if verbose:
print("config key =",key,"value =", value, file=sys.stderr)
bkp_config[key] = value
bucket = bkp_config["bucket"]
if not (bucket.startswith("ssh://") or bucket.startswith("file://") or bucket.startswith("s3://")):
bkp_config["bucket"] = "s3://"+bucket
return bkp_config
```
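A round-trip sketch of the `key = value` format written by `save_config` and read back by `config`; it assumes `bkp_core` is importable and uses throwaway values and a temp file rather than `~/.bkp/bkp_config`.

```python
import os
import tempfile
from bkp_core import bkp_conf

settings = {
    "bucket": "s3://my-backups",
    "dirs": ["/home/me/docs", "/home/me/src"],
    "exclude_files": r".*\.tmp$",
    "exclude_dirs": ["/home/me/src/build"],
    "log_email": "logs@example.com",
    "error_email": "errors@example.com",
    "threads": "5",
    "ssh_username": "me",
    "ssh_password": "secret",
}

fd, path = tempfile.mkstemp()
with os.fdopen(fd, "w") as config_file:
    bkp_conf.save_config(settings, config_file)

loaded = bkp_conf.config(path)
print(loaded["bucket"], loaded["dirs"])  # dirs comes back as a list again
os.remove(path)
```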
#### File: bkp/bkp_core/fs_mod.py
```python
from bkp_core import s3_mod
from bkp_core import ssh_mod
from bkp_core import file_mod
from bkp_core import bkp_conf
import re
def fs_utime( remote_path, times, get_config = lambda: {} ):
""" use the appropriate function to set the access and modified times on a file """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_utime( remote_path, times, get_config )
elif remote_path.startswith("s3://"):
# Not implemented for s3, however s3 defaults to copying all
# file attributes so we don't have to do it for our use cases
return
elif remote_path.startswith("file://"):
return file_mod.file_utime( remote_path, times )
elif re.match(r"\w*://.*",remote_path):
raise Exception("fs_utime: Unknown remote file system",remote_path)
else:
return file_mod.file_utime( remote_path, times )
def fs_get( remote_path, local_path, get_config = lambda: {} ):
""" use the appropriate function to copy a file from the remote_path to the local_path """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_get( remote_path, local_path, get_config )
elif remote_path.startswith("s3://"):
return s3_mod.s3_get( remote_path, local_path )
elif remote_path.startswith("file://"):
return file_mod.file_get( remote_path, local_path )
elif re.match(r"\w*://.*",remote_path):
raise Exception("fs_get: Unknown remote file system",remote_path)
else:
return file_mod.file_get( remote_path, local_path )
def fs_put( local_path, remote_path, get_config = lambda: {}, verbose = False ):
""" use the appropriate function to copy a file from the local_path to the remote_path """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_put( local_path, remote_path, get_config, verbose)
elif remote_path.startswith("s3://"):
return s3_mod.s3_put( local_path, remote_path)
elif remote_path.startswith("file://"):
return file_mod.file_put( local_path, remote_path )
elif re.match(r"\w*://.*",remote_path):
raise Exception("fs_put: Unknown remote file system",remote_path)
else:
return file_mod.file_put( local_path, remote_path )
def fs_ls( remote_path, recurse=False, get_config = lambda: {} ):
""" use the appropriate function to get a file listing of the path """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_ls( remote_path, recurse, get_config)
elif remote_path.startswith("s3://"):
return s3_mod.s3_ls( remote_path, recurse)
elif remote_path.startswith("file://"):
return file_mod.file_ls( remote_path, recurse )
elif re.match(r"\w*://.*",remote_path):
raise Exception("fs_ls: Unknown remote file system",remote_path)
else:
return file_mod.file_ls( remote_path, recurse )
def fs_del( remote_path, recurse=False, get_config = lambda: {} ):
""" use the appropriate function to delete a file or directory at the path """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_del( remote_path, recurse, get_config)
elif remote_path.startswith("s3://"):
return s3_mod.s3_del( remote_path, recurse)
elif remote_path.startswith("file://"):
return file_mod.file_del( remote_path, recurse )
elif re.match(r"\w*://.*",remote_path):
raise Exception("fs_del: Unknown remote file system",remote_path)
else:
return file_mod.file_del( remote_path, recurse )
def fs_stat( remote_path, get_config = lambda: {} ):
""" return tuple ( mtime, size ) for a path to a file, returns (-1,-1) if doesn't exist resolution of mtime is seconds """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_stat( remote_path, get_config )
elif remote_path.startswith("file://"):
return file_mod.file_stat( remote_path )
elif remote_path.startswith("s3://"):
return s3_mod.s3_stat( remote_path )
elif re.match(r"\w*://.*",remote_path):
raise Exception("fs_stat: Unknown remote file system", remote_path )
else:
return file_mod.file_stat( remote_path )
def fs_test( remote_path, verbose = False, get_config = lambda: {} ):
""" use the appropriate function to test if file system is accessable, does NOT mean the path exists just that a host is listening """
if remote_path.startswith("ssh://"):
return ssh_mod.ssh_test( remote_path, verbose, get_config)
elif remote_path.startswith("s3://"):
return s3_mod.s3_test( remote_path, verbose )
else:
# We assume the filesystem is always available
return True
```
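The dispatch above keys purely off the path prefix, so a usage sketch only needs local paths; unknown schemes hit the `re.match(r"\w*://.*", ...)` branch and raise. Assumes `bkp_core` is importable.

```python
import os
import tempfile
from bkp_core import fs_mod

tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "example.txt")
with open(path, "w") as f:
    f.write("hello")

# Plain paths fall through to file_mod.
print("stat:", fs_mod.fs_stat(path))   # (mtime, size)
print("ls:", fs_mod.fs_ls(tmpdir))

# Unrecognized schemes raise rather than silently falling back.
try:
    fs_mod.fs_stat("ftp://example.com/file.txt")
except Exception as e:
    print("unsupported scheme:", e)
```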
#### File: bkp/bkp_core/logger.py
```python
import sys
import os
import traceback
import queue
import threading
class Logger:
def __init__( self ):
self.logger_thread = None
self.logger_stop = False
self.logger_queue = queue.Queue()
def perform_log( self ):
""" read from the restore logging queue and print messages to stderr """
while not self.stopped():
line = self.get()
if line:
try:
print(line, file=sys.stderr)
except:
print("Invalid Log Line!", file=sys.stderr)
def start_logger( self, action = None ):
""" start the restore logger thread """
if not action:
action = self.perform_log
self.logger_thread = threading.Thread(target=action)
self.logger_thread.start()
def stop_logger( self ):
""" stop the restore logger """
self.logger_stop = True
def wait_for_logger( self ):
""" wait until the restore log queue is empty """
if not self.logger_queue.empty():
self.logger_queue.join()
self.stop_logger()
if self.logger_thread and self.logger_thread.is_alive():
self.logger_thread.join()
self.logger_thread = None
self.logger_stop = False
self.logger_queue = queue.Queue()
def log( self, msg ):
""" log a message to the restore logger """
self.logger_queue.put(msg)
def get( self ):
""" get a message off the queue """
try:
line = self.logger_queue.get(True,1)
self.logger_queue.task_done()
except queue.Empty:
line = None
return line
def stopped( self ):
""" test to see if we need to stop """
return self.logger_stop
```
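A minimal usage sketch of `Logger`: messages are queued from any thread, printed to stderr by the background thread, and `wait_for_logger` drains the queue before stopping it.

```python
from bkp_core.logger import Logger  # assumes bkp_core is importable

log = Logger()
log.start_logger()
for i in range(3):
    log.log("message %d" % i)
log.wait_for_logger()  # blocks until the queue is empty, then joins the thread
```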
#### File: bkp/bkp_core/sync_mod.py
```python
import sys
import os
import re
import traceback
import queue
import threading
import platform
import time
import io
from bkp_core.fs_mod import fs_get,fs_put,fs_ls,fs_stat,fs_test,fs_utime
from bkp_core.util import put_contents
from bkp_core.logger import Logger
class WorkerParams:
""" worker params """
def __init__(self, method, from_path, to_path, mtime = 0.0):
""" set up the copy from and to paths for the worker """
self.from_path = from_path
self.to_path = to_path
self.method = method
self.mtime = mtime
class SyncJob:
def __init__( self, config ):
self.dryrun = False
self.verbose = False
self.init( config )
def init( self, config ):
""" initialize our internal state for a new run """
self.config = config
self.work_queue = queue.Queue()
self.machine_path = ""
self.errors_count = 0
self.worker_thread_pool = []
self.processed_files = {}
self.processed_dirs = {}
self.pending_markers = []
self.worker_stop = False
self.remote_processed_files = {}
if not os.path.exists(os.path.expanduser("~/.sync")):
os.mkdir(os.path.expanduser("~/.sync"))
self.remote_processed_files_name = os.path.expanduser("~/.sync/.sync.processed")
self.logger = Logger()
def set_dryrun( self, dr ):
""" set the dryrun flag to true to prevent real actions in s3 """
self.dryrun = dr
def set_verbose( self, vb ):
""" set the verbose flag to true to enable extended output """
self.verbose = vb
def process_sync( self ):
""" thread body for worker thread, loop processing the queue until killed """
start_time = time.time()
while not self.worker_stop:
try:
# every 5 minutes dump a stack trace if verbose
if time.time() - start_time > 300:
start_time = time.time()
params = self.work_queue.get(True,1)
try:
if not self.dryrun:
if self.verbose:
self.logger.log( "Starting transfer: %s to %s"%(params.from_path, params.to_path) )
if params.method == fs_put:
params.method( params.from_path, params.to_path, lambda: self.config, self.verbose)
fs_utime( params.to_path, (params.mtime, params.mtime), lambda: self.config)
else:
params.method( params.from_path, params.to_path, lambda: self.config )
os.utime( params.to_path, (params.mtime, params.mtime))
if self.verbose:
self.logger.log( "Transferred: %s to %s"%(params.from_path, params.to_path) )
self.work_queue.task_done()
except:
tb = traceback.format_exc()
self.logger.log( "Failed Transfer: %s to %s error %s"%(params.from_path, params.to_path, tb) )
self.errors_count += 1
self.work_queue.task_done()
except queue.Empty:
continue
except:
self.work_queue.task_done()
self.logger.log(traceback.format_exc())
continue
def start_workers(self):
""" start the workers in the pool """
num_threads = int(self.config["threads"])
while num_threads:
t = threading.Thread(target=self.process_sync)
t.start()
self.worker_thread_pool.append(t)
num_threads = num_threads - 1
def stop_workers(self):
""" stop the workers """
self.worker_stop = True
def wait_for_workers(self):
""" wait for the worker queue to be empty """
if not self.work_queue.empty():
self.work_queue.join()
self.stop_workers()
for t in self.worker_thread_pool:
if t.is_alive():
t.join()
self.worker_stop = False
self.worker_thread_pool = []
self.work_queue = queue.Queue()
def sync_directory( self, path ):
""" enqueue the files to be synced for a given directory path, apply filters on datetime, pattern, non-hidden files only, recurse visible subdirs """
# save off remote directory recursive listing
remote_files = fs_ls(self.machine_path+path,True, lambda: self.config)
for (dirpath, dirnames, filenames) in os.walk(path):
if self.verbose:
self.logger.log("Scanning dirpath= %s"%(dirpath))
# if exclude_dirs is contained in any of the paths then return
exclude_dir = False
for e in self.config["exclude_dirs"]:
if e and re.search(e,dirpath):
exclude_dir = True
break
if exclude_dir:
if self.verbose:
self.logger.log("Excluding dirpath= %s because of e= %s"%(dirpath,e))
continue
# get rid of hidden directories
while True:
deleted = False
didx = 0
for d in dirnames:
if d[0] == ".":
if self.verbose:
self.logger.log("Deleting hidden directory = %s"%(d))
del dirnames[didx]
deleted = True
break
didx = didx + 1
if not deleted:
break
# stat the sentinel file .sync to avoid sloshing files around
sync_marker_path = self.machine_path + os.path.abspath(dirpath)
sync_marker_node = ".sync."+ platform.node()
sync_marker = os.path.join( sync_marker_path, sync_marker_node )
sync_mtime,sync_size = fs_stat(sync_marker,lambda: self.config)
# process files in the directory enqueueing included files for sync
for f in filenames:
# if it is a hidden file skip it
if f[0] == ".":
if self.verbose:
self.logger.log("Skipping hidden file = %s"%(f))
continue
# if it is excluded file skip it
if self.config["exclude_files"] and re.match(self.config["exclude_files"],f):
if self.verbose:
self.logger.log("Excluding file = %s Because of pattern= %s"%(f,self.config["exclude_files"]))
continue
# build the absolute path for the file and it's sync path
local_path = os.path.join(os.path.abspath(dirpath),f)
remote_path = self.machine_path + local_path
# if the file is in the time range for this sync then queue it for sync
s = os.lstat(local_path)
mtime, size = fs_stat(remote_path,lambda: self.config)
self.processed_files[remote_path] = True
if s.st_mtime < mtime and (mtime - s.st_mtime) >= 1.0:
if self.verbose:
self.logger.log("Enqueuing get for %s,%s timediff %f"%(remote_path,local_path, mtime - s.st_mtime))
self.work_queue.put(WorkerParams( fs_get, remote_path, local_path, mtime ))
elif s.st_mtime > mtime and (s.st_mtime - mtime) >= 1.0:
if self.verbose:
self.logger.log("Enqueuing put for %s,%s timediff %f"%(local_path,remote_path,s.st_mtime - mtime))
self.work_queue.put(WorkerParams( fs_put, local_path, remote_path, s.st_mtime ))
else:
if self.verbose:
self.logger.log("Not Enqueuing copy work for %s because time is the same or not greater than last sync"%(local_path))
# drop a marker file on the remote host
self.pending_markers.append((sync_marker_path,sync_marker_node))
self.processed_dirs[sync_marker_path] = True
if self.verbose:
self.logger.log("Checking for files only present on the server")
self.logger.log(remote_files)
#loop over remote files and handle any that haven't already been synced
for line in io.StringIO(remote_files):
fdate,ftime,size,fpath = re.split("\s+",line,3)
fpath = fpath.strip()
if not fpath in self.processed_files:
lpath = fpath[len(self.machine_path):]
ldir,lnode = os.path.split(lpath)
fdir,fnode = os.path.split(fpath)
# if exclude_dirs is contained in any of the paths then return
exclude_dir = False
for e in self.config["exclude_dirs"]:
if (e and re.search(e,ldir)) or re.match(".*/\..*",ldir):
exclude_dir = True
break
if exclude_dir:
if self.verbose:
self.logger.log("Excluding dirpath= %s because of e= %s"%(ldir,e))
continue
# if it is a hidden file skip it
if lnode[0] == ".":
if self.verbose:
self.logger.log("Skipping hidden file = %s"%(lnode))
continue
# if it is excluded file skip it
if self.config["exclude_files"] and re.match(self.config["exclude_files"],lnode):
if self.verbose:
self.logger.log("Excluding file = %s Because of pattern= %s"%(lnode,self.config["exclude_files"]))
continue
# if it was processed in the past don't fetch it just mark it as processed
# it was deleted on the client otherwise enqueue a get
if not fpath in self.remote_processed_files:
if self.verbose:
self.logger.log("Enqueuing get for %s,%s"%(fpath,lpath))
mtime, size = fs_stat(fpath,lambda: self.config)
self.work_queue.put(WorkerParams( fs_get, fpath, lpath, mtime))
else:
if self.verbose:
self.logger.log("Not enqueuing get for %s becase it was deleted on client"%(fpath))
self.processed_files[fpath] = True
if not fdir in self.processed_dirs:
self.processed_dirs[fdir] = True
self.pending_markers.append((fdir,".sync."+platform.node()))
return
def synchronize(self):
""" driver to perform syncrhonize """
try:
# initialize our internal state for a new run
self.init( self.config )
# the sync target a given machine will be target
self.machine_path = self.config["target"]
# if there is no connection to the target then exit
if not fs_test( self.machine_path, self.verbose, lambda: self.config ):
return 0
# get the remote processed files so we can check for deletes
if os.path.exists(self.remote_processed_files_name):
for line in open(self.remote_processed_files_name):
self.remote_processed_files[line.strip()] = True
# start the logger thread
self.logger.start_logger()
# fire up the worker threads
self.start_workers()
# loop over the paths provided and add them to the work queue
for d in self.config["dirs"]:
self.sync_directory( d )
# wait for queue to empty
self.wait_for_workers()
# drop all our sync markers after any copies complete
for sync_marker_path,sync_marker_node in self.pending_markers:
put_contents(sync_marker_path,sync_marker_node, "synchronized %s"%time.ctime(),self.dryrun,lambda: self.config, self.verbose)
# write out the processed files
if not self.dryrun:
processed_out = open(self.remote_processed_files_name,"w")
for fpath in self.processed_files.keys():
print(fpath, file=processed_out)
processed_out.close()
# wait for the logger to finish
self.logger.wait_for_logger()
except:
self.stop_workers()
self.logger.stop_logger()
raise
if self.errors_count:
return 1
else:
return 0
```
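The heart of `sync_directory` is the mtime comparison that picks a transfer direction with a one-second tolerance; a standalone sketch of that rule (the helper name and values are mine, not from the module):

```python
def sync_action(local_mtime: float, remote_mtime: float) -> str:
    """Mirror of the enqueue decision in SyncJob.sync_directory."""
    if local_mtime < remote_mtime and (remote_mtime - local_mtime) >= 1.0:
        return "get"   # remote is newer: enqueue fs_get
    if local_mtime > remote_mtime and (local_mtime - remote_mtime) >= 1.0:
        return "put"   # local is newer: enqueue fs_put
    return "skip"      # within one second: treated as already in sync

print(sync_action(1000.0, 1005.0))  # get
print(sync_action(1005.0, 1000.0))  # put
print(sync_action(1000.2, 1000.9))  # skip
```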
#### File: bkp/bkp_core/util.py
```python
from email.mime.text import MIMEText
from bkp_core import fs_mod
import sys
import os
import tempfile
import re
import traceback
import platform
import datetime
import subprocess
import time
def get_contents( path, name, verbose = False, get_config=lambda: {} ):
""" fetch the contents of an s3 file and return it's contents as a string """
t_file_fh, t_file_name = tempfile.mkstemp()
os.close(t_file_fh)
try:
fs_mod.fs_get( path+"/"+name, t_file_name, get_config )
except:
if verbose:
print("get_contents exception:",traceback.format_exc(), file=sys.stderr)
return ""
contents = open(t_file_name,"r").read()
os.remove(t_file_name)
return contents
def put_contents( path, name, contents, dryrun = False, get_config=lambda: {}, verbose=False ):
""" put the contents string to the s3 file at path, name """
t_file_fh, t_file_name = tempfile.mkstemp()
os.close(t_file_fh)
print(contents, file=open(t_file_name,"w"))
if not dryrun:
fs_mod.fs_put( t_file_name, path+"/"+name, get_config, verbose )
if not path.startswith("s3://"):
t = time.time()
fs_mod.fs_utime( path+"/"+name, (t,t), get_config )
os.remove(t_file_name)
return
def send_email( mime_msg ):
""" given a mime message with From: To: Subject: headers """
cmd = "/usr/sbin/ssmtp %s"%mime_msg['To']
p = subprocess.Popen(cmd,
shell=True,
bufsize=1024,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True)
output = p.communicate(input=mime_msg.as_string())[0]
result = p.wait()
if result:
raise Exception(cmd,output,result)
return output
def mail_error( error, log_file=None, verbose = False, get_config=lambda: {} ):
""" e-mail an error report to the error e-mail account """
return mail_log( error, log_file, True, verbose, get_config=get_config )
def mail_log( log, log_file=None, is_error = False, verbose = False, get_config=lambda: {} ):
""" e-mail a log file to the log e-mail account """
tries = 3
log_text = ""
while tries:
try:
if log != None:
msg = MIMEText(log)
elif log_file != None:
log_text = re.sub("^smtp_.*$|^ssh_.*$","",log_file.read(),flags=re.M)
msg = MIMEText(log_text[:2*pow(2,20)])
else:
return 0
if is_error:
if verbose:
print("E-mailing log file with errors", file=sys.stderr)
msg['Subject'] = "bkp error: %s "%(platform.node())
msg['From'] = get_config()["error_email"]
msg['To'] = get_config()["error_email"]
else:
if verbose:
print("E-mailing log file with no errors", file=sys.stderr)
msg['Subject'] = "bkp complete: %s"%(platform.node())
msg['From'] = get_config()["log_email"]
msg['To'] = get_config()["log_email"]
msg['Date'] = datetime.datetime.now().strftime( "%m/%d/%Y %H:%M" )
send_email(msg)
return 0
except:
time.sleep(tries*10.0)
tries = tries - 1
if not tries:
if is_error:
print("Error couldn't send via e-mail", file=sys.stderr)
else:
print("Success couldn't send via e-mail", file=sys.stderr)
if log:
print(log, file=sys.stderr)
if log_text:
print(log_text, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
raise
```
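A small round-trip sketch for `put_contents`/`get_contents` against a plain local directory (no s3 or ssh credentials needed); note that `put_contents` writes the string with `print`, so a trailing newline comes back.

```python
import tempfile
from bkp_core import util  # assumes bkp_core is importable

base = tempfile.mkdtemp()
util.put_contents(base, "note.txt", "hello backup")
print(repr(util.get_contents(base, "note.txt")))  # 'hello backup\n'
```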
#### File: bkp/tests/test_bkp_mod.py
```python
from bkp_core import bkp_mod
from bkp_core import bkp_conf
from bkp_core import fs_mod
from bkp_core import util
from bkp_test_util import bkp_testdir
import platform
import os
import sys
import time
from io import StringIO
import math
def do_bkp_test( t_dir, base_path ):
""" driver to test backup on different targets """
bkp_config = {}
bkp_config["bucket"] = base_path
bkp_config["dirs"] = [t_dir["local_path"]]
bkp_config["exclude_files"] = r"local_3\.txt"
bkp_config["exclude_dirs"] = ["not_subdir_path"]
bkp_config["log_email"] = t_dir["test_email"]
bkp_config["error_email"] = t_dir["test_email"]
bkp_config["ssh_username"] = t_dir["ssh_username"]
bkp_config["ssh_password"] = t_dir["ssh_password"]
bkp_config["threads"] = "5"
end_time = time.time()
bkp_job = bkp_mod.BackupJob(bkp_config)
bkp_job.set_verbose(True)
assert(not bkp_job.backup())
machine_path = bkp_config["bucket"]+"/bkp/"+platform.node()
next = util.get_contents( machine_path, "next", False, lambda: bkp_config )
if next:
bkp_time = float(next)
else:
bkp_time = 0.0
assert(math.floor(bkp_time) >= math.floor(end_time))
backed_up_count = 0
backedup = bkp_mod.get_backedup_files(machine_path, bkp_config)
for lpath,dates in list(backedup.items()):
date_count = 0
for d in dates:
assert(os.path.basename(lpath) in t_dir["local_files"])
assert("local_3.txt" not in lpath)
assert("not_subdir_path" not in lpath)
date_count += 1
backed_up_count += 1
assert(date_count == 1)
assert(backed_up_count == len(t_dir["local_files"])-1)
backups = bkp_mod.get_backups( machine_path, bkp_config )
backed_up_count = 0
backup_count = 0
for bkp in backups:
assert(math.floor(bkp.time) >= math.floor(end_time))
bkp_log = util.get_contents(machine_path,bkp.timestamp+"/bkp/bkp."+bkp.timestamp+".log",False, lambda: bkp_config)
backup_count += 1
past_config = False
for l in StringIO(bkp_log):
if not past_config:
name,value = l.strip().split("=",1)
name = name.strip()
value = value.strip()
if name == "end_config" and value == "True":
past_config = True
continue
if name in ["dirs","exclude_dirs"]:
value = [f.strip() for f in value.split(";")]
if name not in ["start_time","end_time"]:
assert(bkp_config[name] == value)
else:
local_path,remote_path,status,msg = l.split(";",3)
assert(status != "error")
assert(os.path.basename(local_path) in t_dir["local_files"])
assert("local_3.txt" not in local_path)
assert("not_subdir_path" not in local_path)
backed_up_count += 1
assert(backed_up_count == len(t_dir["local_files"])-1)
assert(backup_count == 1)
print("Overwrite the first local file!",file=open(os.path.join(t_dir["local_path"],t_dir["local_files"][0]),"w"))
end_time = time.time()
bkp_job_1 = bkp_mod.BackupJob(bkp_config)
bkp_job_1.set_verbose(True)
time.sleep(1) # guarantee at least 1 second between backup jobs
assert(not bkp_job_1.backup())
next = util.get_contents( machine_path, "next", False, lambda: bkp_config )
if next:
second_bkp_time = float(next)
else:
second_bkp_time = 0.0
assert(math.floor(second_bkp_time) >= math.floor(end_time))
assert(math.floor(second_bkp_time) > math.floor(bkp_time))
backed_up_count = 0
backedup = bkp_mod.get_backedup_files(machine_path, bkp_config)
for lpath,dates in list(backedup.items()):
date_count = 0
for d in dates:
assert(os.path.basename(lpath) in t_dir["local_files"])
assert("local_3.txt" not in lpath)
assert("not_subdir_path" not in lpath)
date_count += 1
backed_up_count += 1
assert(date_count == 1 or os.path.basename(lpath) == t_dir["local_files"][0])
assert(backed_up_count == len(t_dir["local_files"]))
backups = bkp_mod.get_backups( machine_path, bkp_config )
backed_up_count = 0
backup_count = 0
for bkp in backups:
assert(math.floor(bkp.time) >= math.floor(bkp_time) or math.floor(bkp.time) >= math.floor(second_bkp_time))
bkp_log = util.get_contents(machine_path,bkp.timestamp+"/bkp/bkp."+bkp.timestamp+".log",False, lambda: bkp_config )
backup_count += 1
past_config = False
for l in StringIO(bkp_log):
if not past_config:
name,value = l.strip().split("=",1)
name = name.strip()
value = value.strip()
if name == "end_config" and value == "True":
past_config = True
continue
if name in ["dirs","exclude_dirs"]:
value = [f.strip() for f in value.split(";")]
if name not in ["start_time","end_time"]:
assert(bkp_config[name] == value)
else:
local_path,remote_path,status,msg = l.split(";",3)
assert(status != "error")
assert(os.path.basename(local_path) in t_dir["local_files"])
assert("local_3.txt" not in local_path)
assert("not_subdir_path" not in local_path)
backed_up_count += 1
assert(backed_up_count == len(t_dir["local_files"]))
assert(backup_count == 2)
time.sleep(1) # guarantee at least 1 second between backup jobs
assert(not bkp_job_1.backup())
backups = bkp_mod.get_backups( machine_path, bkp_config )
assert(len(backups) == 3)
old_stdout = sys.stdout
file_list = StringIO()
sys.stdout = file_list
try:
bkp_mod.list( bkp_config, True )
sys.stdout = old_stdout
finally:
sys.stdout = old_stdout
for l in file_list:
parts = l.strip().split(" ",5)
assert(os.path.basename(parts[-1]) in t_dir["local_files"])
bkp_mod.compact(bkp_config,False, True )
backups = bkp_mod.get_backups( machine_path, bkp_config )
assert(len(backups) == 2)
def test_bkp_mod_fs(bkp_testdir):
""" test suite for the bkp_mod module covering file system functionality """
do_bkp_test(bkp_testdir, bkp_testdir["file_basepath"])
def test_bkp_mod_ssh(bkp_testdir):
""" test suite for the bkp_mod module covering ssh functionality """
do_bkp_test(bkp_testdir, bkp_testdir["ssh_basepath"])
def test_bkp_mod_s3(bkp_testdir):
""" test suite for the bkp_mod module covering s3 functionality """
do_bkp_test(bkp_testdir, bkp_testdir["s3_basepath"])
``` |
{
"source": "jpfxgood/dashboard",
"score": 3
} |
#### File: dashboard/data_sources/odbc_data.py
```python
import locale
locale.setlocale(locale.LC_ALL,'')
import sys
import os
import glob
import gzip
import re
import pyodbc
import keyring
from datetime import datetime,timedelta
from data_sources.data_table import DataTable,Column,Cell,blank_type,string_type,float_type,int_type,date_type,format_string,format_float,format_date,format_int,synchronized
class ODBCDataTable( DataTable ):
""" class that collects data from the response to a specific sql query on an odbc connected database and populates tables based on a field map """
def __init__(self,refresh_minutes=1,sql_spec=None,sql_query=None,sql_map=None):
""" Initalize the ODBCDataTable object pass in a sql_spec to connect to the database of the form odbc://user@server/driver/database:port, a sql_query to be executed, and a field map of the form [[sql_column_name, data_table_column_name],..] indicating the columns to collect from the result """
self.sql_spec = sql_spec
self.sql_query = sql_query
self.sql_map = sql_map
DataTable.__init__(self,None,
"ODBCDataTable query:%s,database:%s,fieldmap:%s,refreshed every %d minutes"%(
sql_query,sql_spec,sql_map,refresh_minutes),
refresh_minutes)
self.refresh()
@synchronized
def refresh( self ):
""" refresh the table from the query """
username,server,driver,database,port = re.match(r"odbc://([a-z_][a-z0-9_-]*\${0,1})@([^/]*)/([^/]*)/([^:]*):{0,1}(\d*){0,1}",self.sql_spec).groups()
password = keyring.get_password(self.sql_spec, username)
if not password:
return
conn = pyodbc.connect("DRIVER={%s};DATABASE=%s;UID=%s;PWD=%s;SERVER=%s;PORT=%s;"%(driver,database,username,password,server,port))
if not conn:
return
result = conn.execute(self.sql_query)
for row in result:
for sql_column,data_column in self.sql_map:
value = getattr(row,sql_column)
if not self.has_column(data_column):
self.add_column(Column(name=data_column))
c = self.get_column(data_column)
if isinstance(value,datetime):
cc = Cell(date_type,value,format_date)
elif isinstance(value,int):
cc = Cell(int_type,value,format_int)
elif isinstance(value,float):
cc = Cell(float_type,value,format_float)
elif isinstance(value,str):
cc = Cell(string_type,value,format_string)
else:
cc = Cell(string_type,str(value),format_string)
c.put(c.size(),cc)
self.changed()
DataTable.refresh(self)
```
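A standalone sketch of how the `sql_spec` string decomposes under the regex used in `refresh`; the spec value is illustrative, and in real use the password is then looked up with `keyring.get_password(sql_spec, username)`.

```python
import re

sql_spec = "odbc://report_user@db.example.com/PostgreSQL Unicode/sales:5432"
pattern = r"odbc://([a-z_][a-z0-9_-]*\${0,1})@([^/]*)/([^/]*)/([^:]*):{0,1}(\d*){0,1}"
username, server, driver, database, port = re.match(pattern, sql_spec).groups()
print(username, server, driver, database, port)
# -> report_user db.example.com PostgreSQL Unicode sales 5432
```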
#### File: dashboard/data_sources/remote_data.py
```python
import sys
import os
import re
from datetime import datetime
import threading
import time
import csv
import json
from io import StringIO
from paramiko.client import SSHClient
import keyring
from functools import wraps
from data_sources.data_table import DataTable,Cell,Column,from_json,to_json,synchronized
from dashboard.version import __version__
def sync_connection(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
with self.connection_lock:
return method(self, *args, **kwargs)
return wrapper
def sync_manager(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
with self.manager_lock:
return method(self, *args, **kwargs)
return wrapper
class Connection():
def __init__(self, owner=None, ssh_client=None, session=None, stdin=None, stdout=None, stderr=None ):
""" provides protocol to the table server """
self.ssh_client = ssh_client
self.session = session
self.clients = []
self.stdout_lines = []
self.stderr_lines = []
self.owner = owner
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.connection_lock = threading.RLock()
self.reader_lock = threading.RLock()
self.stdout_reader_thread = threading.Thread(target=self.reader,args=(self.stdout,self.stdout_lines))
self.stdout_reader_thread.start()
self.stderr_reader_thread = threading.Thread(target=self.reader,args=(self.stderr,self.stderr_lines))
self.stderr_reader_thread.start()
def reader( self, stream, lines ):
""" worker thread that reads from stdout and pushes data onto stdout_lines and stderr_lines """
while not self.session.exit_status_ready():
line = stream.readline()
with self.reader_lock:
lines.append(line)
with self.reader_lock:
lines += stream.readlines()
@sync_connection
def get_stdout_line( self ):
""" fetch a line from the queue of stdout_lines """
while True:
with self.reader_lock:
if len(self.stdout_lines):
return self.stdout_lines.pop(0)
time.sleep(1)
@sync_connection
def get_stderr_line( self ):
""" fetch a line from the queue of stderr_lines """
while True:
with self.reader_lock:
if len(self.stderr_lines):
return self.stderr_lines.pop(0)
time.sleep(1)
@sync_connection
def open( self, client ):
""" register this client as a user of this connection """
if client not in self.clients:
self.clients.append(client)
@sync_connection
def table(self, table_def):
""" send request to create a new remote table, returns loaded response """
print("table:%s"%table_def,file=self.stdin,flush=True)
return self.get_stdout_line()
@sync_connection
def refresh(self, table_name):
""" send request to refresh a remote table named table_name and return response """
print("refresh:%s"%table_name,file=self.stdin,flush=True)
return self.get_stdout_line()
@sync_connection
def get(self, table_name):
""" send request to fetch table_name and return response """
print("get:%s"%table_name,file=self.stdin,flush=True)
return self.get_stdout_line()
@sync_connection
def exit(self):
""" terminate the server and clean up this connection """
print("exit",file=self.stdin,flush=True)
return ""
@sync_connection
def close(self,client):
""" close this client's use of this connection """
if client in self.clients:
self.clients.remove(client)
class ConnectionManager():
def __init__(self):
""" manages connections to servers and their initial setup """
self.connections = {}
self.manager_lock = threading.RLock()
def __del__(self):
""" just in case clean up any connections """
self.shutdown()
@sync_manager
def shutdown( self ):
""" shut down the connection manager and close all the pooled connections """
for cn in self.connections:
self.connections[cn].exit()
self.connections[cn].ssh_client.close()
self.connections = {}
@sync_manager
def connect(self,ssh_spec,client):
""" create a connection to a server """
if ssh_spec in self.connections:
connection = self.connections[ssh_spec]
connection.open(client)
return connection
username,server,port = re.match(r"ssh://([a-z_][a-z0-9_-]*\${0,1})@([^:]*):{0,1}(\d*){0,1}",ssh_spec).groups()
password = keyring.get_password(server,username)
if not password:
return None
ssh_client = SSHClient()
ssh_client.load_system_host_keys()
local_keys = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(local_keys):
try:
ssh_client.load_host_keys(local_keys)
except:
pass
ssh_client.connect( hostname=server, port=int(port if port else 22), username=username, password=password)
if self.setup( ssh_client ):
session,stdin,stdout,stderror = self.start_command(ssh_client,"~/.local/bin/dashboard --server")
connection = Connection(self,ssh_client,session,stdin,stdout,stderror)
connection.open(client)
self.connections[ssh_spec] = connection
return connection
raise Exception("Setup of remote dashboard failed")
@sync_manager
def start_command(self, ssh_client, command ):
""" start a command and return a tuple with (channel,stdin,stdout,stderr) for running process """
transport = ssh_client.get_transport()
session = transport.open_session()
session.exec_command(command)
stdout = session.makefile("r",1)
stderr = session.makefile_stderr("r",1)
stdin = session.makefile_stdin("w",1)
return (session,stdin,stdout,stderr)
@sync_manager
def run_command(self, ssh_client, command ):
""" run a command wait for it to exit and return the output (retcode,stdout_str,stderr_str) """
session,stdin,stdout,stderr = self.start_command(ssh_client,command)
stderr_output = StringIO()
stdout_output = StringIO()
while not session.exit_status_ready():
stdout_output.write(stdout.readline())
stderr_output.write(stderr.readline())
stdout_output.write("".join(stdout.readlines()))
stderr_output.write("".join(stderr.readlines()))
exit_status = session.recv_exit_status()
return (exit_status,stdout_output.getvalue(),stderr_output.getvalue())
@sync_manager
def setup(self, ssh_client ):
""" check to see that dashboard is installed and install it if needed """
exit_status,stdout_str,stderr_str = self.run_command(ssh_client,"~/.local/bin/dashboard --version")
stdout_str = stdout_str.strip()
if stdout_str.startswith("dashboard version"):
if stdout_str.endswith(__version__):
return True
exit_status,stdout_str,stderr_str = self.run_command(ssh_client,'python3 -m pip install --upgrade "terminal-dashboard==%s"'%(__version__))
if exit_status:
raise Exception(exit_status,stdout_str,stderr_str)
return True
_connection_manager = None
def get_connection_manager():
""" return the connection manager create one if it doesn't exit """
global _connection_manager
if not _connection_manager:
_connection_manager = ConnectionManager()
return _connection_manager
def shutdown_connection_manager():
""" shut down the connection manager if it was ever started """
global _connection_manager
if _connection_manager:
_connection_manager.shutdown()
_connection_manager = None
class RemoteDataTable( DataTable ):
def __init__(self,ssh_spec=None,table_def=None,name=None,refresh_minutes=1):
""" accepts an ssh_spec to connect to of the form ssh://username@server_name:port_number, a json string with the definition for the remote table, the local name for this table, and the number of minutes for refresh """
DataTable.__init__(self,None,name,refresh_minutes)
self.ssh_spec = ssh_spec
self.table_def = table_def
self.connection = None
self.refresh()
@synchronized
def refresh(self):
""" create a connection to the remote dashboard table server and refresh our internal state """
if not self.connection:
cm = get_connection_manager()
connection = cm.connect(self.ssh_spec,self)
if not connection:
return
self.connection = connection
response = self.connection.table(json.dumps(self.table_def))
if not response.startswith("loaded:%s"%self.table_def["name"]):
return
table_data = self.connection.get(self.table_def["name"])
name,json_blob = table_data.split(":",1)
dt = from_json(StringIO(json_blob))
rows,cols = dt.get_bounds()
for idx in range(cols):
self.replace_column(idx,dt.get_column(idx))
self.changed()
DataTable.refresh(self)
```
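The client side of the line protocol spoken to the remote `dashboard --server` process, as an illustration only: requests are single lines, and the responses shown are just the prefixes the client code checks for (the `table_def` shape is an assumption).

```python
import json

table_def = {"name": "syslog"}  # assumed minimal shape; real defs carry the table type and parameters

# Client -> server, one request per line (written to the channel's stdin):
print("table:%s" % json.dumps(table_def))  # create/load the remote table
print("refresh:syslog")                    # ask the server to refresh it
print("get:syslog")                        # fetch the serialized table
print("exit")                              # shut the server down

# Server -> client, one line per request; the client checks for:
#   loaded:syslog            (reply to "table:", tested with startswith)
#   syslog:{...json blob...} (reply to "get:", split on the first ":")
```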
#### File: dashboard/data_sources/syslog_data.py
```python
import locale
locale.setlocale(locale.LC_ALL,'')
import sys
import os
import glob
import gzip
import re
from datetime import datetime,timedelta
from data_sources.data_table import DataTable,Column,Cell,blank_type,string_type,float_type,int_type,date_type,format_string,format_float,format_date,format_int,synchronized
class SyslogDataTable( DataTable ):
""" class that collects a time based aggregation of data from the syslog into a data_table """
def __init__(self,syslog_glob="/var/log/syslog*",num_hours=24,bucket_hours=1,refresh_minutes=10,start_time=None):
""" Initialize the SyslogDataTable with a file glob pattern to collect the syslogs on this machine, a timespan to aggregate for, aggregation bucket in hours, a refresh interval for updating in minutes """
self.syslog_glob = syslog_glob
self.num_hours = num_hours
self.bucket_hours = bucket_hours
self.start_time = start_time
DataTable.__init__(self,None,
"Syslog Data: %s for the last %d hours in %d hour buckets, refreshed every %d minutes"%(
self.syslog_glob,
self.num_hours,
self.bucket_hours,
refresh_minutes),refresh_minutes)
self.refresh()
@synchronized
def refresh( self ):
""" refresh or rebuild tables """
if self.start_time:
year,month,day,hour,minute,second = self.start_time
current_time = datetime(year,month,day,hour,minute,second)
else:
current_time = datetime.now()
start_time = current_time - timedelta( hours = self.num_hours )
syslog_files = glob.glob(self.syslog_glob)
time_column = Column(name="Time Stamps")
bucket_time = start_time
idx = 0
while bucket_time < current_time:
time_column.put(idx,Cell(date_type,bucket_time,format_date))
bucket_time = bucket_time + timedelta( hours = self.bucket_hours )
idx += 1
time_column.put(idx,Cell(date_type,current_time,format_date))
def bucket_idx( timestamp ):
if timestamp < start_time or timestamp > current_time:
return -1
for idx in range(time_column.size()):
if time_column.get(idx).get_value() >= timestamp:
return idx
else:
return -1
errors_column = Column(name="Errors by Time")
warnings_column = Column(name="Warnings by Time")
messages_column = Column(name="Messages by Time")
services_column = Column(name="Services")
errors_service_column = Column(name="Errors by Service")
warnings_service_column = Column(name="Warnings by Service")
messages_service_column = Column(name="Messages by Service")
def service_idx( service ):
for idx in range(services_column.size()):
if services_column.get(idx).get_value() == service:
return idx
else:
return -1
def put_or_sum( column, idx, value ):
current_value = 0
if idx < column.size():
c = column.get(idx)
if c.get_type() != blank_type:
current_value = int(c.get_value())
column.put(idx,Cell(int_type,current_value+value,format_int))
for slf in syslog_files:
if slf.endswith(".gz"):
slf_f = gzip.open(slf,"rt",encoding="utf-8")
else:
slf_f = open(slf,"r",encoding="utf-8")
for line in slf_f:
line = line.strip()
m = re.match(r"(\w\w\w\s+\d+\s\d\d:\d\d:\d\d)\s[a-z0-9\-]*\s([a-zA-Z0-9\-\_\.]*)[\[\]0-9]*:\s*(.*)",line)
if m:
log_date = re.sub(r"\s+"," ","%d "%current_time.year + m.group(1))
log_process = m.group(2)
log_message = m.group(3)
log_datetime = datetime.strptime(log_date,"%Y %b %d %H:%M:%S")
b_idx = bucket_idx( log_datetime )
if b_idx >= 0:
s_idx = service_idx( log_process )
if s_idx < 0:
s_idx = services_column.size()
services_column.put(s_idx,Cell(string_type,log_process,format_string))
put_or_sum(messages_column,b_idx,1)
put_or_sum(messages_service_column,s_idx,1)
is_error = re.search(r"[Ee]rror|ERROR",log_message)
is_warning = re.search(r"[Ww]arning|WARNING",log_message)
error_count = 0
warning_count = 0
if is_error and not is_warning:
error_count = 1
elif is_warning:
warning_count = 1
put_or_sum(errors_column,b_idx,error_count)
put_or_sum(errors_service_column,s_idx,error_count)
put_or_sum(warnings_column,b_idx,warning_count)
put_or_sum(warnings_service_column,s_idx,warning_count)
columns = [time_column,errors_column,warnings_column,messages_column,services_column,
errors_service_column,warnings_service_column,messages_service_column]
for c in columns:
if self.has_column(c.get_name()):
self.replace_column(self.map_column(c.get_name()),c)
else:
self.add_column(c)
self.changed()
DataTable.refresh(self)
```
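A standalone sketch of the syslog line parsing in `refresh`: the regex splits timestamp, service, and message, and the current year is prepended because syslog timestamps omit it (the sample line is illustrative).

```python
import re
from datetime import datetime

line = "Aug 24 09:49:01 myhost cron[1234]: (root) CMD (command -v debian-sa1)"
m = re.match(r"(\w\w\w\s+\d+\s\d\d:\d\d:\d\d)\s[a-z0-9\-]*\s([a-zA-Z0-9\-\_\.]*)[\[\]0-9]*:\s*(.*)", line)
log_date, log_process, log_message = m.groups()

log_date = re.sub(r"\s+", " ", "%d " % datetime.now().year + log_date)
log_datetime = datetime.strptime(log_date, "%Y %b %d %H:%M:%S")
print(log_datetime, log_process, log_message)
```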
#### File: dashboard/tests/test_canvas.py
```python
from char_draw import canvas
import curses
import curses.ascii
import os
import time
from dashboard_test_util import screen_size,dashboard_test_case
def test_Canvas(request,capsys):
with capsys.disabled():
def main(stdscr):
screen_size(40,100)
stdscr.clear()
stdscr.refresh()
python_path = os.path.dirname(os.path.dirname(request.fspath))
c = canvas.Canvas(stdscr)
max_x,max_y = c.get_maxxy()
iy = 0
for ix in range(8,max_x):
c.put_pixel(ix,iy,c.red)
iy = (iy + 1)%max_y
dashboard_test_case(stdscr,"put_pixel",python_path)
c.clear()
ix = 0
iy = 0
for ic in range(0,min(max_x,max_y)//10):
c.line(max_x//2,0,ix,iy,c.cyan)
ix = (ix+15)%max_x
iy = (iy+10)%max_y
dashboard_test_case(stdscr,"line",python_path)
c.clear()
c.circle(max_x//2,max_y//2,min(max_x,max_y)//3,c.white,False)
dashboard_test_case(stdscr,"circle_not_filled",python_path)
c.clear()
c.circle(max_x//2,max_y//2,min(max_x,max_y)//3,curses.color_pair(20),True)
dashboard_test_case(stdscr,"circle_filled",python_path)
c.clear()
a = 0
a1 = 23
for ac in range(20):
c.arc(max_x//2,max_y//2,min(max_x,max_y)//3,a,a1,c.white,False)
a = a1
a1 = a1 + ac*5
if a1 > 360:
break
dashboard_test_case(stdscr,"arc_not_filled",python_path)
c.clear()
a = 0
a1 = 23
for ac in range(20):
c.arc(max_x//2,max_y//2,min(max_x,max_y)//3,a,a1,c.white,True)
a = a1
a1 = a1 + ac*5
if a1 > 360:
break
dashboard_test_case(stdscr,"arc_filled",python_path)
c.clear()
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*5
y = iy + ic*5
c.rect(x,y,x+5,y+5,curses.color_pair(9+(ic*10)),False)
dashboard_test_case(stdscr,"rect_not_filled",python_path)
c.clear()
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*5
y = iy + ic*5
c.rect(x,y,x+5,y+5,curses.color_pair(9+(ic*10)),True)
dashboard_test_case(stdscr,"rect_filled",python_path)
c.clear()
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*5
y = iy + ic*5
c.textat(x,y,curses.color_pair(9+(ic*10)),"Test message %d"%ic)
dashboard_test_case(stdscr,"textat",python_path)
c.clear()
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*16
y = iy + ic*10
c.polyline([(x,y),(x+16,y+16),(x+8,y),(x,y+16),(x+16,y+8),(x,y+8),(x+8,y+16)],curses.color_pair(9+(ic*10)))
dashboard_test_case(stdscr,"polyline",python_path)
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*16
y = iy + ic*10
c.polygon([(x,y),(x+16,y+8),(x+16,y+16),(x+8,y+16),(x,y+8)],curses.color_pair(9+(ic*10)),False)
dashboard_test_case(stdscr,"polygon_not_filled",python_path)
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*16
y = iy + ic*10
c.polygon([(x,y),(x+16,y+8),(x+16,y+16),(x+8,y+16),(x,y+8)],curses.color_pair(9+(ic*10)),True)
dashboard_test_case(stdscr,"polygon_filled",python_path)
ix = 0
iy = 0
for ic in range(0,5):
x = ix + ic*16
y = iy + ic*10
c.polygon([(x,y),(x+16,y+8),(x+8,y+8),(x+8,y+16),(x,y+8)],curses.color_pair(9+(ic*10)),True)
dashboard_test_case(stdscr,"polygon_concave_filled",python_path)
curses.wrapper(main)
```
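The test above renders shapes through the project's `canvas.Canvas` wrapper and compares each screen against a stored snapshot via `dashboard_test_case`. As a hedged illustration of the underlying idea only, capturing a curses window's contents so a test can diff them against a golden copy, here is a stdlib-only sketch that uses none of the project's helpers (all names are invented):
```python
import curses

def capture_window(win):
    # Read back what the window currently displays, row by row; instr() returns bytes.
    max_y, max_x = win.getmaxyx()
    lines = []
    for y in range(max_y):
        lines.append(win.instr(y, 0, max_x).decode("ascii", "replace").rstrip())
    return lines

def demo(stdscr):
    stdscr.clear()
    stdscr.addstr(0, 0, "hello snapshot")
    stdscr.refresh()
    snapshot = capture_window(stdscr)
    # In a real test this list would be compared to a stored golden snapshot.
    assert snapshot[0].startswith("hello snapshot")

if __name__ == "__main__":
    curses.wrapper(demo)
```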
#### File: dashboard/tests/test_dashboard.py
```python
from char_draw import canvas,display_list,graph
from data_sources.data_table import DataTable,Column,Cell,string_type,float_type,int_type,date_type,blank_type,format_string,format_date,format_float,format_int
from dashboard import dashboard
import curses
import curses.ascii
import os
import time
from datetime import datetime,timedelta
from dashboard_test_util import screen_size,dashboard_test_case
def test_Dashboard(request,capsys):
with capsys.disabled():
def main(stdscr):
screen_size(40,100)
stdscr.clear()
stdscr.refresh()
python_path = os.path.dirname(os.path.dirname(request.fspath))
c_names = ["X-Series","Pie Labels","Metric 1","Metric 2","Metric 3","Metric 4","Metric 5","Metric 6"]
d = DataTable()
for c in c_names:
d.add_column(Column(name=c))
for idx in range(0,10):
d.put(idx,"X-Series",Cell(int_type,idx*10,format_int))
d.put(idx,"Pie Labels",Cell(string_type,"Group %d"%idx,format_string))
d.put(idx,"Metric 1",Cell(float_type,50.0+(idx*20),format_float))
d.put(idx,"Metric 2",Cell(float_type,75.0+(idx*30),format_float))
d.put(idx,"Metric 3",Cell(float_type,100.0+(idx*40),format_float))
d.put(idx,"Metric 4",Cell(float_type,123.0+(idx*23),format_float))
d.put(idx,"Metric 5",Cell(float_type,143+(idx*33),format_float))
d.put(idx,"Metric 6",Cell(float_type,171+(idx*51),format_float))
c = canvas.Canvas(stdscr)
max_x,max_y = c.get_maxxy()
db = dashboard.Dashboard(stdscr,None,0)
p = dashboard.Page(stdscr)
pp = dashboard.Panel()
g = graph.BarGraph(d,"X-Series",["Metric 1","Metric 3","Metric 5"],"Metric Units",None,c,0,"Basic Bar Graph")
pp.add_graph(g)
g = graph.LineGraph(d,"X-Series",["Metric 2","Metric 4","Metric 6"],"Metric Units",None,c,False,"Basic Line Graph")
pp.add_graph(g)
p.add_panel(pp)
db.add_page(p)
p = dashboard.Page(stdscr)
pp = dashboard.Panel()
g = graph.PieGraph(d,"Pie Labels",["Metric 3"],None,c,"Basic Pie Graph")
pp.add_graph(g)
g = graph.TableGraph(d,"Pie Labels",["Metric 1","Metric 2","Metric 3","Metric 4","Metric 5","Metric 6"],None,c,"Basic Table")
pp.add_graph(g)
p.add_panel(pp)
db.add_page(p)
# force the timestamp to be the same so the screen diffs will match
d.refresh_timestamp = datetime(2020,8,24,9,49,0,0).timestamp()
d.changed()
db.main([])
dashboard_test_case(stdscr,"db_basic_dashboard",python_path)
db.main([curses.KEY_NPAGE])
dashboard_test_case(stdscr,"db_basic_dashboard_1",python_path)
db.main([9]) # tab
dashboard_test_case(stdscr,"db_basic_dashboard_2",python_path)
db.main([curses.KEY_HOME])
dashboard_test_case(stdscr,"db_basic_dashboard_3",python_path)
db.main([curses.KEY_ENTER])
dashboard_test_case(stdscr,"db_basic_dashboard_4",python_path)
db.main([27,-1]) # esc to exit zoom and redraw
dashboard_test_case(stdscr,"db_basic_dashboard_5",python_path)
curses.wrapper(main)
``` |
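The dashboard test drives the UI by handing lists of key codes to `db.main([...])` rather than reading live keystrokes, then snapshots the screen after each batch. A hedged, stdlib-only sketch of that injected-keystroke pattern (every name below is invented and none of the project's classes are used):
```python
import curses

class TinyApp:
    def __init__(self, stdscr):
        self.stdscr = stdscr
        self.page = 0

    def handle(self, key):
        if key == curses.KEY_NPAGE:
            self.page += 1
        elif key == curses.KEY_PPAGE and self.page > 0:
            self.page -= 1

    def main(self, scripted_keys=None):
        # When a script is supplied, consume it instead of calling getch(),
        # then return so the caller can snapshot the final screen.
        keys = list(scripted_keys or [])
        while True:
            self.stdscr.erase()
            self.stdscr.addstr(0, 0, "page %d" % self.page)
            self.stdscr.refresh()
            if scripted_keys is not None and not keys:
                return
            key = keys.pop(0) if keys else self.stdscr.getch()
            if key == 27:  # ESC quits an interactive run
                return
            self.handle(key)

if __name__ == "__main__":
    curses.wrapper(lambda scr: TinyApp(scr).main([curses.KEY_NPAGE, curses.KEY_NPAGE]))
```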
{
"source": "jpfxgood/my_radio",
"score": 3
} |
#### File: jpfxgood/my_radio/voice.py
```python
import sys
import os
import urllib2
import urllib
import time
import hashlib
cached_words = 0
tts_words = 0
def wavcache_hash( text, voice ):
h = hashlib.md5()
h.update(voice)
h.update(text.encode('utf-8','ignore'))
return os.path.join("wavcache",h.hexdigest()+".wav")
def wavcache_get( text, voice ):
hname = wavcache_hash( text, voice )
if not os.path.exists("wavcache"):
os.mkdir("wavcache")
return ""
if os.path.exists(hname):
return hname
else:
return ""
def wavcache_put( text, voice, wavfile ):
hname = wavcache_hash( text, voice )
open(hname, "wb").write(open(wavfile,"rb").read())
remote = False
local = "text2wave"
if remote:
accountId = None
password = <PASSWORD>
def parse_response( response ):
retvals = {}
parts = response.split()
for p in parts:
if "=" in p:
namevalue = p.split("=",1)
retvals[namevalue[0]] = namevalue[1][1:-1]
return retvals
def try_request( url, args=None, tries = 3):
while tries:
try:
if args:
r = urllib2.urlopen(url,
urllib.urlencode (args))
else:
r = urllib2.urlopen(url)
response = r.read()
r.close()
return response
except:
time.sleep(0.1)
tries -= 1
raise Exception("Failed after 3 attempts! ")
def neospeech_request ( args ):
return parse_response(try_request('https://tts.neospeech.com/rest_1_1.php',args))
def text2wav( text, wavfile, verbose = False, voice="julie" ):
global tts_words
global cached_words
wc = len(text.split(" "))
cache_name = wavcache_get( text, voice )
if cache_name:
cached_words = cached_words + wc
open(wavfile,"wb").write(open(cache_name,"rb").read())
if verbose:
print "cached_words, tts_words:",cached_words,tts_words
return
tts_words = tts_words + wc
if verbose:
print "cached_words, tts_words:",cached_words,tts_words
voicemap = { "kate":"TTS_KATE_DB", "julie":"TTS_JULIE_DB", "paul":"TTS_PAUL_DB", "bridget":"TTS_KATE_DB" }
ret = neospeech_request((( 'method','ConvertSimple'),
('email','<EMAIL>'),
('accountId',accountId),
('loginKey','LoginKey'),
('loginPassword',password),
('voice',voicemap[voice]),
('outputFormat','FORMAT_WAV'),
('sampleRate','16'),
('text',text.encode('utf-8','ignore'))))
if verbose:
print ret
if ret['resultCode'] != '0':
raise Exception("ConvertSimple failed! "+str(ret)+" "+voice+" "+text)
url = None
conversionNumber = ret['conversionNumber']
while not url:
ret = neospeech_request((( 'method','GetConversionStatus'),
('email','<EMAIL>'),
('accountId',accountId),
('conversionNumber',conversionNumber)))
if verbose:
print ret
if ret['resultCode'] != '0':
raise Exception("GetConversionStatus failed! "+str(ret)+" "+voice+" "+text)
if ret['statusCode'] == '5':
raise Exception("Conversion failed! "+str(ret)+" "+voice+" "+text)
if ret['statusCode'] == '4':
url = ret['downloadUrl']
else:
time.sleep(0.1)
if verbose:
print url
open(wavfile,"wb").write(try_request(url))
wavcache_put(text,voice,wavfile)
else:
if local == "text2wave":
import subprocess
def text2wav( text, wavfile, verbose = False, voice="julie" ):
global tts_words
global cached_words
wc = len(text.split(" "))
voicemap = { "kate":"voice_rab_diphone", "julie":"voice_ked_diphone", "paul":"voice_kal_diphone", "bridget":"voice_kal_diphone" }
cache_name = wavcache_get( text, voice )
if cache_name:
cached_words = cached_words + wc
open(wavfile,"wb").write(open(cache_name,"rb").read())
if verbose:
print "cached_words, tts_words:",cached_words,tts_words
return
tts_words = tts_words + wc
if verbose:
print "cached_words, tts_words:",cached_words,tts_words
open(wavfile+".txt","w").write(text.encode('utf-8','ignore'))
cmd = ["/bin/bash","-c","text2wave -o '%s' -eval '(%s)'< '%s.txt'" % (wavfile, voicemap[voice], wavfile)]
if verbose:
ret = subprocess.call(cmd)
else:
ret = subprocess.call(cmd,
stdin = open("/dev/null","r"),
stdout = open("/dev/null","w"),
stderr = open("/dev/null","w"),
close_fds = True)
# ret = ttsapi.toSpeech( "james-mediacenter", text.encode('utf-8','ignore'), voicemap[voice], wavfile )
if verbose:
print ret, voicemap[voice], voice, wavfile
wavcache_put(text,voice,wavfile)
else:
import ttsapi
def text2wav( text, wavfile, verbose = False, voice="julie" ):
global tts_words
global cached_words
wc = len(text.split(" "))
voicemap = { "kate":100, "julie":100, "paul":101, "bridget":500 }
cache_name = wavcache_get( text, voice )
if cache_name:
cached_words = cached_words + wc
open(wavfile,"wb").write(open(cache_name,"rb").read())
if verbose:
print "cached_words, tts_words:",cached_words,tts_words
return
tts_words = tts_words + wc
if verbose:
print "cached_words, tts_words:",cached_words,tts_words
ret = ttsapi.toSpeech( "james-mediacenter", text.encode('utf-8','ignore'), voicemap[voice], wavfile )
if verbose:
print ret, voicemap[voice], voice, wavfile
wavcache_put(text,voice,wavfile)
if __name__ == '__main__':
text2wav("This is a my radio test output!", "bridget_test.wav", True, "bridget")
text2wav("This is a my radio test output!", "paul_test.wav", True, "paul")
text2wav("This is a my radio test output!", "julie_test.wav", True, "julie")
text2wav("This is a my radio test output!", "kate_test.wav", True, "kate")
``` |
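`voice.py` above is Python 2 code (`urllib2`, `print` statements) and keys its WAV cache on an MD5 of the voice name plus the text. A hedged Python 3 sketch of the same hash-keyed cache idea; the directory name matches the original but everything else is illustrative:
```python
import hashlib
import os

CACHE_DIR = "wavcache"  # same directory name as above; the helper names are invented

def cache_path(text, voice):
    # Cache files are named by an MD5 of the voice id and the text, so a repeated
    # phrase in the same voice is synthesized only once.
    h = hashlib.md5()
    h.update(voice.encode("utf-8"))
    h.update(text.encode("utf-8", "ignore"))
    return os.path.join(CACHE_DIR, h.hexdigest() + ".wav")

def cache_get(text, voice):
    os.makedirs(CACHE_DIR, exist_ok=True)
    path = cache_path(text, voice)
    return path if os.path.exists(path) else None

def cache_put(text, voice, wav_bytes):
    os.makedirs(CACHE_DIR, exist_ok=True)
    with open(cache_path(text, voice), "wb") as f:
        f.write(wav_bytes)
```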
{
"source": "jpfxgood/ped",
"score": 3
} |
#### File: ped/ped_core/editor_common.py
```python
import curses
import curses.ascii
import sys
import os
import shutil
import tempfile
import re
import gc
from ped_dialog.prompt_dialog import prompt
from ped_dialog.message_dialog import message
from ped_dialog.replace_dialog import replace,confirm_replace
from ped_dialog.confirm_dialog import confirm
from ped_dialog import file_dialog
from ped_core import undo
from ped_core import python_mode
from ped_core import java_mode
from ped_core import cpp_mode
from ped_core import guess_mode
import copy
from ped_core import clipboard
import subprocess
from ped_core import cmd_names
from ped_core import keytab
from ped_core import keymap
from ped_core import extension_manager
from ped_core import changes
import traceback
import locale
import codecs
import tempfile
import threading
import logging
import time
locale.setlocale(locale.LC_ALL,'')
def_encoding = locale.getpreferredencoding()
class EditLine:
""" Interface for each editable line in a file, a fly-weight object """
def __init__(self):
""" should initialize any content or references to external objects """
pass
def length(self):
""" return the length of the line """
pass
def flush(self):
""" flush cached length if you have one """
pass
def getContent(self):
""" should return line representing this line in the source file """
pass
class FileLine(EditLine):
""" Instance of a line in a file that hasn't been changed, stored on disk """
def __init__(self, parent, pos, len = -1 ):
""" FileLine(s) are pointers to a line on disk the EditFile reference and offset are stored """
EditLine.__init__(self)
self.parent = parent
self.pos = pos
self.len = len
def length(self):
""" return length of line """
if self.len < 0:
self.len = len(self.parent.expand_tabs(self.getContent()))
return self.len
def flush(self):
""" flush cached length """
self.len = -1
def getContent(self):
""" gets the file from its parent, seeks to position, reads line and returns it """
working = self.parent.getWorking()
working.seek(self.pos,0)
txt = working.readline().rstrip()
return txt
def __del__(self):
self.parent = None
class MemLine(EditLine):
""" Instance of a line in memory that has been edited """
def __init__(self, content ):
""" MemLine(s) are in memory strings that represent a line that has been edited, it is initialized from the original file content"""
EditLine.__init__(self)
self.content = content
def length(self):
""" return the length of the content """
return len(self.content)
def flush(self):
""" flush cached length """
pass
def getContent(self):
""" just return the string reference """
return self.content
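# Design note (illustrative comment, not part of the original module): unmodified lines stay as
# FileLine objects that only remember an offset into the temp copy on disk, and a line is promoted
# to an in-memory MemLine the first time it is edited, so large files cost one small object per
# line rather than one in-memory string per line.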
class ReadOnlyError(Exception):
""" Exception when modification to readonly file attempted """
pass
class EditFile:
""" Object that manages one file that is open for editing,
lines are either pointers to lines on disk, or in-memory copies
for edited lines """
default_readonly = False
default_backuproot = "~"
def __init__(self, filename=None ):
""" takes an optional filename to either load or create """
# store the filename
self.filename = filename
# the root of the backup directory
self.backuproot = EditFile.default_backuproot
# set the default tab stops
self.tabs = [ 4, 8 ]
# set the changed flag to false
self.changed = False
# read only flag
self.readonly = EditFile.default_readonly
# undo manager
self.undo_mgr = undo.UndoManager()
# change manager
self.change_mgr = changes.ChangeManager()
# modification reference incremented for each change
self.modref = 0
# the file object
self.working = None
# the lines in this file
self.lines = []
# load the file
if filename:
self.load()
def __copy__(self):
""" override copy so that copying manages file handles and intelligently copies the lists """
result = EditFile()
result.filename = self.filename
result.tabs = self.tabs
result.changed = self.changed
result.readonly = True
result.undo_mgr = copy.copy(self.undo_mgr)
result.change_mgr = copy.copy(self.change_mgr)
result.modref = self.modref
result.lines = []
for l in self.lines:
if isinstance(l,MemLine):
result.lines.append(copy.deepcopy(l))
elif isinstance(l,FileLine):
result.lines.append(FileLine(result,l.pos,l.len))
result.working = None
if self.working:
result.working = open(self.working.name,"r",buffering=1,encoding="utf-8")
return result
def __del__(self):
""" make sure we close file when we are destroyed """
self.undo_mgr = None
self.change_mgr = None
self.close()
def set_tabs(self, tabs ):
""" set the tab stops for this file to something new """
if tabs != self.tabs:
self.tabs = tabs
for l in self.lines:
l.flush()
def get_tabs(self):
""" return the list of tab stops """
return self.tabs
def getWorking(self):
""" return the file object """
return self.working
def getModref(self):
""" modref is a serial number that is incremented for each change to a file, used to detect changes externally """
return self.modref
def setUndoMgr(self,undo_mgr):
""" sets the undo manager object for this EditFile, undo manager is used to record undo records to enable undo in the editor """
self.undo_mgr = undo_mgr
def getUndoMgr(self):
""" returns our undo_manager """
return self.undo_mgr
def isChanged(self):
""" true if there are unsaved changes, false otherwise """
return self.changed
def isReadOnly(self):
""" true if the file is read only, false otherwise """
return self.readonly
def setReadOnly(self,flag = True):
""" mark this file as read only """
self.readonly = flag
def getFilename(self):
""" get the filename for this file """
return self.filename
def setFilename(self,filename):
""" set the filename for this object """
self.filename = filename
def numLines(self):
""" get the number of lines in this file """
return len(self.lines)
def open(self):
""" open the file or create it if it doesn't exist """
abs_name = os.path.abspath(self.filename)
abs_path = os.path.dirname(abs_name)
if os.path.exists(abs_name):
self.working = tempfile.NamedTemporaryFile(mode="w+",buffering=1,encoding="utf-8",prefix="ped_",dir=EditFile.get_backup_dir(self.backuproot))
shutil.copyfile(abs_name,self.working.name)
if not self.readonly:
self.setReadOnly(not os.access(abs_name,os.W_OK))
elif not self.readonly:
self.working = tempfile.NamedTemporaryFile(mode="w+",buffering=1,encoding="utf-8",prefix="ped_",dir=EditFile.get_backup_dir(self.backuproot))
else:
raise Exception("File %s does not exist!"%(self.filename))
self.filename = abs_name
self.working.seek(0,0)
def isModifiedOnDisk(self):
""" return true if the file we're editing has been modified since we started """
if os.path.exists(self.filename):
disk_stat = os.stat(self.filename)
temp_stat = os.stat(self.working.name)
return disk_stat.st_mtime > temp_stat.st_mtime
else:
return False
def close(self):
""" close the file """
if self.working:
self.working.close()
self.working = None
self.lines = None
def load(self):
""" open the file and load the lines into the array """
self.open()
self.lines = []
pos = 0
lidx = 0
while True:
line = self.working.readline()
if not line:
break
line = line.rstrip()
lidx = lidx + 1
self.lines.append(FileLine(self,pos,len(self.expand_tabs(line))))
pos = self.working.tell()
while len(self.lines) and not self.lines[-1].getContent().strip():
del self.lines[-1]
if not len(self.lines):
self.lines.append(MemLine(""))
self.changed = False
self.modref = 0
def hasChanges(self,view):
""" return true if there are pending screen updates """
return self.change_mgr.has_changes(view)
def isLineChanged(self,view,line):
""" return true if a particular line is changed """
if self.change_mgr and line < len(self.lines):
return self.change_mgr.is_changed(view,line)
else:
return True
def flushChanges(self,view):
""" reset the change tracking for full screen redraw events """
if self.change_mgr:
self.change_mgr.flush(view)
def _deleteLine(self,line,changed = True):
""" delete a line """
if self.undo_mgr:
self.undo_mgr.get_transaction().push(self._insertLine,(line,self.lines[line],self.changed))
del self.lines[line]
self.changed = changed
self.modref += 1
if self.change_mgr:
self.change_mgr.changed(line,len(self.lines))
def _insertLine(self,line,lineObj,changed = True):
""" insert a line """
if self.undo_mgr:
self.undo_mgr.get_transaction().push(self._deleteLine,(line,self.changed))
self.lines.insert(line,lineObj)
self.changed = changed
self.modref += 1
if self.change_mgr:
self.change_mgr.changed(line,len(self.lines))
def _replaceLine(self,line,lineObj,changed = True):
""" replace a line """
if self.undo_mgr:
self.undo_mgr.get_transaction().push(self._replaceLine,(line,self.lines[line],self.changed))
self.lines[line] = lineObj
self.changed = changed
self.modref += 1
if self.change_mgr:
self.change_mgr.changed(line,line)
def _appendLine(self,lineObj,changed = True):
""" add a line """
if self.undo_mgr:
self.undo_mgr.get_transaction().push(self._deleteLine,(len(self.lines),self.changed))
self.lines.append(lineObj)
self.changed = changed
self.modref += 1
if self.change_mgr:
self.change_mgr.changed(len(self.lines)-1,len(self.lines)-1)
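    # Illustrative note on the undo pattern used by _deleteLine/_insertLine/_replaceLine/_appendLine
    # above (comment only): each low-level mutation pushes its own inverse onto the current undo
    # transaction, e.g.
    #   _insertLine(5, obj)  pushes  _deleteLine(5)
    #   _deleteLine(5)       pushes  _insertLine(5, <old line object>)
    # so undoing a transaction simply replays those inverses in reverse order.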
def touchLine(self, line_start, line_end):
""" touch a line so it will redraw"""
if self.change_mgr:
self.change_mgr.changed(min(line_start,line_end),max(line_start,line_end))
def length(self, line ):
""" return the length of the line """
if line < len(self.lines):
return self.lines[line].length()
else:
return 0
def getLine( self, line, pad = 0, trim = False ):
""" get a line """
if line < len(self.lines):
orig = self.lines[line].getContent()
else:
orig = ""
if trim:
orig = orig.rstrip()
if pad > len(orig):
orig = orig + ' '*(pad-len(orig))
return self.expand_tabs(orig)
def getLines( self, line_start = 0, line_end = -1):
""" get a list of a range of lines """
if line_end < 0:
line_end = len(self.lines)
if line_end > len(self.lines):
line_end = len(self.lines)
lines = []
while line_start < line_end:
lines.append(self.expand_tabs(self.lines[line_start].getContent()))
line_start += 1
return lines
def deleteLine( self, line ):
""" delete a line, high level interface """
if self.isReadOnly():
raise ReadOnlyError()
if line < len(self.lines):
self._deleteLine(line)
def insertLine( self, line, content ):
""" insert a line, high level interface """
if self.isReadOnly():
raise ReadOnlyError()
if line >= len(self.lines):
lidx = len(self.lines)
while lidx <= line:
self._appendLine(MemLine(""))
lidx += 1
self._insertLine(line,MemLine(content))
def replaceLine( self, line, content ):
""" replace a line, high level interface """
if self.isReadOnly():
raise ReadOnlyError()
if line >= len(self.lines):
lidx = len(self.lines)
while lidx <= line:
self._appendLine(MemLine(""))
lidx += 1
self._replaceLine(line, MemLine(content))
@staticmethod
def get_backup_dir( base = "~" ):
""" get the backup directory, create it if it doesn't exist """
base = os.path.expanduser(base)
if not os.path.exists(base):
base = os.path.expanduser("~")
pedbackup = os.path.join(base,".pedbackup")
if not os.path.exists(pedbackup):
os.mkdir(pedbackup)
return pedbackup
@staticmethod
def make_backup_dir( filename, base = "~" ):
""" make a backup directory under ~/.pedbackup for filename and return it's name """
pedbackup = EditFile.get_backup_dir( base )
(filepath,rest) = os.path.split(os.path.abspath(filename))
for part in filepath.split("/"):
if part:
pedbackup = os.path.join(pedbackup,part)
if not os.path.exists(pedbackup):
os.mkdir(pedbackup)
return os.path.join(pedbackup,rest)
def save( self, filename = None ):
""" save the file, if filename is passed it'll be saved to that filename and reopened """
if filename:
if filename == self.filename and self.isReadOnly():
raise ReadOnlyError()
o = open(filename,"w",buffering=1,encoding="utf-8")
for l in self.lines:
txt = l.getContent()+'\n'
o.write(txt)
o.close()
self.close()
self.filename = filename
self.load()
else:
if self.isReadOnly():
raise ReadOnlyError()
if not self.changed:
return
o = open(self.filename+".sav","w",buffering=1,encoding="utf-8")
for l in self.lines:
txt = l.getContent()+'\n'
o.write(txt)
o.close()
self.working.close()
if os.path.exists(self.filename):
fstat = os.stat(self.filename)
backup_path = EditFile.make_backup_dir(self.filename,self.backuproot)
retval = shutil.move(self.filename,backup_path)
os.rename(self.filename+".sav",self.filename)
os.chmod(self.filename,fstat.st_mode)
else:
os.rename(self.filename+".sav",self.filename)
self.load()
def get_tab_stop(self, idx, before=False ):
""" return the next tab stop before or after a given offset """
prev = 0
for stop in self.tabs:
if stop > idx:
if before:
return prev
else:
return stop
prev = stop
incr = self.tabs[-1]-self.tabs[-2]
while stop <= idx:
prev = stop
stop += incr
if before:
return prev
else:
return stop
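    # Worked example for get_tab_stop above (illustrative comment only): with the default
    # self.tabs == [4, 8] the increment past the last explicit stop is 8 - 4 == 4, so the
    # stops continue 4, 8, 12, 16, ...
    #   get_tab_stop(2)              -> 4
    #   get_tab_stop(9)              -> 12
    #   get_tab_stop(9, before=True) -> 8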
def expand_tabs(self, content ):
""" expand tabs in a line """
idx = 0
while idx < len(content):
if content[idx] == '\t':
stop = self.get_tab_stop(idx)
content = content[0:idx] + ' '*(stop-idx) + content[idx+1:]
idx += (stop-idx)
else:
idx += 1
return content
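# Illustrative usage sketch for EditFile (comments only, not executed; "/tmp/notes.txt" is an
# invented path):
#   ef = EditFile("/tmp/notes.txt")    # lines start out as FileLine pointers into a temp copy
#   ef.replaceLine(0, "hello\tworld")  # edited lines are promoted to in-memory MemLine objects
#   ef.getLine(0)                      # -> "hello   world", the tab expanded to the stop at column 8
#   ef.save()                          # writes <name>.sav, backs up the original, then renames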
class Editor:
""" class that implements the text editor, operates on a file abstraction EditFile """
modes = [python_mode,cpp_mode,java_mode,guess_mode]
def __init__(self, parent, scr, filename, workfile = None, showname = True, wrap = False ):
""" takes parent curses screen we're popped up over, scr our curses window, filename we should edit, optionally an already open EditFile """
if workfile:
self.workfile = workfile
else:
self.workfile = EditFile(filename)
self.workfile.change_mgr.add_view(self)
self.undo_mgr = self.workfile.getUndoMgr()
self.parent = parent
self.scr = scr
if scr:
self.max_y,self.max_x = self.scr.getmaxyx()
else:
self.max_y = 0
self.max_x = 0
self.line = 0
self.pos = 0
self.vpos = 0
self.left = 0
self.prev_cmd = cmd_names.CMD_NOP
self.cmd_id = cmd_names.CMD_NOP
self.home_count = 0
self.end_count = 0
self.line_mark = False
self.span_mark = False
self.rect_mark = False
self.search_mark = False
self.mark_pos_start = 0
self.mark_line_start = 0
self.last_search = None
self.last_search_dir = True
self.mode = None
self.showname = showname
self.wrap = wrap
self.wrap_lines = []
self.unwrap_lines = []
self.wrap_modref = -1
self.wrap_width = -1
self.show_cursor = True
self.prev_pos = (0,0)
self.focus = True
self.invalidate_all()
curses.raw()
curses.meta(1)
def __copy__(self):
""" override to just copy the editor state and not the underlying file object """
result = Editor(self.parent,self.scr,None,self.workfile,self.showname,self.wrap)
result.line = self.line
result.pos = self.pos
result.vpos = self.vpos
result.left = self.left
result.prev_cmd = self.prev_cmd
result.cmd_id = self.cmd_id
result.home_count = self.home_count
result.end_count = self.end_count
result.line_mark = self.line_mark
result.span_mark = self.span_mark
result.rect_mark = self.rect_mark
result.search_mark = self.search_mark
result.mark_pos_start = self.mark_pos_start
result.mark_line_start = self.mark_line_start
result.last_search = self.last_search
result.last_search_dir = self.last_search_dir
result.mode = self.mode
result.wrap_lines = copy.copy(self.wrap_lines)
result.unwrap_lines = copy.copy(self.unwrap_lines)
result.wrap_modref = self.wrap_modref
result.wrap_width = self.wrap_width
result.show_cursor = self.show_cursor
result.focus = self.focus
result.prev_pos = copy.copy(self.prev_pos)
return result
def __del__(self):
""" if we're closing then clean some stuff up """
# let the mode clean up if it needs to
if self.workfile and self.workfile.change_mgr:
self.workfile.change_mgr.remove_view(self)
if self.mode:
self.mode.finish(self)
self.mode = None
self.workfile = None
self.undo_mgr = None
def close(self):
""" by default it is a no-op but editors overriding this can hook the close to clean things up """
pass
def pushUndo(self):
""" push an undo action onto the current transaction """
self.undo_mgr.get_transaction().push(self.applyUndo,(self.line,
self.pos,
self.vpos,
self.left,
self.prev_cmd,
self.cmd_id,
self.home_count,
self.end_count,
self.line_mark,
self.span_mark,
self.rect_mark,
self.search_mark,
self.mark_pos_start,
self.mark_line_start,
self.last_search,
self.last_search_dir,
clipboard.clip,
clipboard.clip_type,
self.show_cursor,
self.focus,
self.wrap))
def applyUndo(self,*args):
""" called by undo to unwind one undo action """
( self.line,
self.pos,
self.vpos,
self.left,
self.prev_cmd,
self.cmd_id,
self.home_count,
self.end_count,
self.line_mark,
self.span_mark,
self.rect_mark,
self.search_mark,
self.mark_pos_start,
self.mark_line_start,
self.last_search,
self.last_search_dir,
clipboard.clip,
clipboard.clip_type,
self.show_cursor,
self.focus,
self.wrap ) = args
self.invalidate_screen()
self.invalidate_mark()
def undo(self):
""" undo the last transaction, actually undoes the open transaction and the prior closed one """
line = self.line
left = self.left
self.undo_mgr.undo_transaction() # undo the one we're in... probably empty
self.undo_mgr.undo_transaction() # undo the previous one... probably not empty
if self.line != line or self.left != left:
self.invalidate_screen()
def setWin(self,win):
""" install a new window to render to """
self.scr = win
def getModref(self):
""" return the current modref of this editor """
return self.workfile.getModref()
def getWorkfile(self):
""" return the workfile that this editor is attached to """
return self.workfile
def getFilename(self):
""" return the filename for this editor """
return self.workfile.getFilename()
def getUndoMgr(self):
""" get the undo manager that we're using """
return self.undo_mgr
def isChanged(self):
""" returns true if the file we're working on has unsaved changes """
return self.workfile.isChanged()
def isLineChanged(self, line, display=True ):
""" return true if line is changed for the current revisions """
if self.workfile:
if display:
return self.workfile.isLineChanged( self, self.filePos(line,0)[0])
else:
return self.workfile.isLineChanged( self, line )
else:
return True
def flushChanges( self ):
""" flush change tracking to show we're done updating """
if self.workfile:
self.workfile.flushChanges(self)
def isMark(self):
""" returns true if there is a mark set """
return (self.line_mark or self.span_mark or self.rect_mark or self.search_mark)
def getCurrentLine(self,display=False):
""" returns the current line in the file """
        return self.getContent(self.getLine(display),display=display)
def getPos(self,display=False):
""" get the character position in the current line that we're at """
if self.wrap:
if not display:
r_line,r_pos = self.filePos(self.line+self.vpos,self.left+self.pos)
return r_pos
return self.left+self.pos
def getLine(self,display=False):
""" get the line that we're on in the current file """
if self.wrap:
if not display:
r_line,r_pos = self.filePos(self.line+self.vpos,0)
return r_line
return self.line+self.vpos
def filePos(self, line, pos ):
""" translate display line, pos to file line, pos """
if self.wrap:
if line < len(self.wrap_lines):
return (self.wrap_lines[line][0],self.wrap_lines[line][1]+pos)
else:
return (self.numLines()+(line-len(self.wrap_lines)),pos)
else:
return (line,pos)
def scrPos(self, line, pos ):
""" translate file pos to screen pos """
if self.wrap:
nlines = len(self.unwrap_lines)
if line >= nlines:
r_line,r_pos = self.scrPos(self.numLines()-1,self.getLength(self.numLines()-1)-1)
return (r_line+(line-self.numLines())+1,pos)
sline = self.unwrap_lines[line]
while sline < len(self.wrap_lines) and self.wrap_lines[sline][0] == line:
if pos >= self.wrap_lines[sline][1] and pos < self.wrap_lines[sline][2]:
return (sline,pos-self.wrap_lines[sline][1])
sline = sline + 1
else:
return (sline-1,pos - self.wrap_lines[sline-1][1])
else:
return (line,pos)
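    # Illustrative example of the wrap-mode coordinate maps above (comment only): with
    # wrap_width == 10, a single 25-character file line 0 is split into three display rows,
    # so wrap_lines == [(0, 0, 10), (0, 10, 20), (0, 20, 25)] and unwrap_lines == [0].
    #   filePos(2, 3)  -> (0, 23)   display row 2, column 3 is file line 0, offset 23
    #   scrPos(0, 23)  -> (2, 3)    and scrPos maps it back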
def getContent(self, line, pad = 0, trim= False, display=False ):
""" get a line from the file """
if self.wrap:
if display:
orig = ""
if line < len(self.wrap_lines):
orig = self.workfile.getLine(self.wrap_lines[line][0])[self.wrap_lines[line][1]:self.wrap_lines[line][2]]
if trim:
orig = orig.rstrip()
if pad > len(orig):
orig = orig + ' '*(pad-len(orig))
return orig
orig = self.workfile.getLine(line,pad,trim)
return orig
def getLength(self, line, display=False ):
""" get the length of a line """
length = 0
if self.wrap and display:
if line < len(self.wrap_lines):
length = self.workfile.length(self.wrap_lines[line][0])
else:
length = self.workfile.length(line)
return length
def numLines(self,display=False):
""" get the number of lines in the editor """
if self.wrap and display:
return len(self.wrap_lines)
return self.workfile.numLines()
def rewrap(self, force = False):
""" compute the wrapped line array """
if self.wrap and (force or self.workfile.getModref() != self.wrap_modref or self.wrap_width != self.max_x):
self.wrap_modref = self.workfile.getModref()
self.wrap_width = self.max_x
self.wrap_lines = []
self.unwrap_lines = []
for l in range(0,self.workfile.numLines()):
line_len = self.workfile.length(l)
start = 0
self.unwrap_lines.append(len(self.wrap_lines))
if not line_len:
self.wrap_lines.append((l,0,0))
else:
while start < line_len:
self.wrap_lines.append((l,start,min(line_len,start+self.wrap_width)))
start += self.wrap_width
self.invalidate_after_cursor()
def addstr(self,row,col,str,attr = curses.A_NORMAL):
""" write properly encoded string to screen location """
try:
return self.scr.addstr(row,col,codecs.encode(str,"utf-8"),attr)
except:
return 0
def window_pos(self,line,pos):
sc_line,sc_pos = self.scrPos(line,pos)
return((sc_line-self.line)+1,sc_pos-self.left)
def showcursor(self,state):
""" set flag to turn cursor on or off """
old_cursor_state = self.show_cursor
self.show_cursor = state
return old_cursor_state
def setfocus(self,state):
""" set this editor to have focus or not """
old_focus_state = self.focus
self.focus = state
return old_focus_state
def draw_cursor(self):
""" worker function to draw the current cursor position """
if self.show_cursor:
line = self.getLine()
pos = self.getPos()
if pos < self.getLength(line):
cursor_ch = self.getContent(line)[pos]
else:
cursor_ch = ' '
sc_line,sc_pos = self.window_pos(line,pos)
self.addstr(sc_line,sc_pos,cursor_ch,curses.A_REVERSE)
def draw_mark(self):
""" worker function to draw the marked section of the file """
if not self.isMark():
return
(mark_top,mark_left) = self.scrPos(self.mark_line_start,self.mark_pos_start)
mark_line_start = mark_top
mark_pos_start = mark_left
mark_right = self.getPos(True)
mark_bottom = self.getLine(True)
if (self.rect_mark or self.line_mark or (self.span_mark and mark_top == mark_bottom)) and mark_left > mark_right:
mark = mark_left
mark_left = mark_right
mark_right = mark
if mark_top > mark_bottom:
mark = mark_bottom
mark_bottom = mark_top
mark_top = mark
if mark_top < self.line:
mark_top = self.line
if self.span_mark:
mark_left = 0
mark_right = mark_right + 1
s_left = mark_left - self.left
s_left = max(0,s_left)
s_right = mark_right - self.left
s_right = min(self.max_x,s_right)
s_top = (mark_top - self.line)+1
s_top = max(1,s_top)
s_bottom = (mark_bottom - self.line)+1
s_bottom = min(self.max_y-1,s_bottom)
mark_left = max(mark_left,self.left)
mark_right = min(mark_right,self.left+self.max_x)
if self.line_mark:
s_right = self.max_x
mark_right = self.left+s_right
mark_left = max(mark_left,self.left)
if s_top == s_bottom:
if s_right > s_left:
self.addstr(s_top,
s_left,
self.getContent(mark_top,
mark_right,
True,
True)[mark_left:mark_right],
curses.A_REVERSE)
elif self.rect_mark:
if mark_top < self.line:
mark_top = self.line
while s_top <= s_bottom:
self.addstr(s_top,
s_left,
self.getContent(mark_top,
mark_right,
True,
True)[mark_left:mark_right],
curses.A_REVERSE)
s_top += 1
mark_top += 1
elif self.span_mark:
cur_line = mark_top
while s_top <= s_bottom:
if cur_line == mark_top:
offset = s_left
width = self.max_x-offset
self.addstr(s_top,
offset,
self.getContent(cur_line,
self.left+offset+width,
True,
True)[self.left+offset:self.left+offset+width],
curses.A_REVERSE)
elif cur_line == mark_bottom:
self.addstr(s_top,
0,
self.getContent(cur_line,
self.getPos(True),
True,
True)[self.left:self.getPos(True)+1],
curses.A_REVERSE)
else:
self.addstr(s_top,
0,
self.getContent(cur_line,
self.left+self.max_x,
True,
True)[self.left:self.left+self.max_x],
curses.A_REVERSE)
s_top += 1
cur_line += 1
elif self.line_mark:
cur_line = mark_top
while s_top <= s_bottom:
self.addstr(s_top,
0,
self.getContent(cur_line,
self.left+self.max_x,
True,
True)[self.left:self.left+self.max_x],
curses.A_REVERSE)
s_top += 1
cur_line += 1
def resize(self):
""" resize the editor to fill the window """
if self.scr:
self.max_y,self.max_x = self.scr.getmaxyx()
self.rewrap()
bottom_y = max(min((self.numLines(True)-1)-self.line,(self.max_y-2)),0)
if self.vpos > bottom_y:
self.vpos = bottom_y
right_x = self.max_x-1
if self.pos > right_x:
self.left += self.pos-right_x
self.pos = right_x
self.invalidate_screen()
def move(self):
""" update the previous cursor position from the current """
self.prev_pos = (self.getLine(),self.getPos())
def prevPos(self):
""" get the previous cursor position """
return self.prev_pos
def redraw(self):
""" redraw the editor as needed """
try:
if not self.scr or keymap.is_playback():
return
self.max_y,self.max_x = self.scr.getmaxyx()
self.scr.keypad(1)
if self.workfile.isChanged():
changed = "*"
elif self.workfile.isReadOnly():
changed = "R"
else:
changed = " "
if self.mode:
changed = changed + " " + self.mode.name()
filename = self.workfile.getFilename()
if not self.showname:
filename = ""
status = "%d : %d : %d : %s : %s : %s"%(self.numLines(),self.getLine(),self.getPos(),changed,filename, "REC" if keymap.is_recording() else "PBK" if keymap.is_playback() else " " )
if len(status) < self.max_x:
status += (self.max_x-len(status))*' '
if self.focus:
self.addstr(0,0,status[0:self.max_x],curses.A_REVERSE|curses.A_BOLD)
else:
self.addstr(0,0,status[0:self.max_x],curses.A_REVERSE)
# if the mode is rendering then don't do the default rendering as well
mode_redraw = False
if self.mode:
mode_redraw = self.mode.redraw(self)
if not mode_redraw:
cursor_line,cursor_pos = self.window_pos(*self.prevPos())
y = 1
lidx = self.line
while lidx < self.line+(self.max_y-1):
try:
line_changed = self.isLineChanged(lidx)
is_cursor_line = (y == cursor_line)
if line_changed or is_cursor_line:
l = self.getContent(lidx,self.left+self.max_x,True,True)
if line_changed:
self.addstr(y,0,l[self.left:self.left+self.max_x])
else:
self.addstr(y,cursor_pos,l[self.left+cursor_pos])
except Exception as e:
pass
y = y + 1
lidx = lidx + 1
self.draw_mark()
self.move()
self.draw_cursor()
if mode_redraw:
self.flushChanges()
except:
raise
def insert(self, c ):
""" insert a character or string at the cursor position """
self.pushUndo()
if self.isMark():
self.copy_marked(True,True) # delete the marked block first then insert
orig = self.getContent(self.getLine()).rstrip()
offset = self.getPos()
pad = ""
if offset > len(orig):
pad = " "*(offset - len(orig))
orig = orig[0:offset] + pad + c + orig[offset:]
insert_line = self.getLine()
self.workfile.replaceLine(insert_line,orig)
self.rewrap()
self.goto(insert_line,offset+len(c))
def delc(self):
""" deletes one character at the cursor position """
self.pushUndo()
if self.isMark():
self.copy_marked(True,True) # delete the marked block instead and return
return
orig = self.getContent(self.getLine())
offset = self.getPos()
if offset > len(orig):
return
elif offset == len(orig):
next_idx = self.getLine()+1
if next_idx > self.numLines():
return
next = self.getContent(next_idx)
orig = orig[0:offset] + next
self.workfile.replaceLine(self.getLine(),orig)
self.workfile.deleteLine(next_idx)
else:
orig = orig[0:offset]+orig[offset+1:]
self.workfile.replaceLine(self.getLine(),orig)
self.rewrap()
def backspace(self):
""" delete a character at the cursor and move back one character """
self.pushUndo()
if self.isMark():
self.copy_marked(True,True) # delete the marked block instead and return
return
line = self.getLine()
pos = self.getPos()
if pos:
if pos <= self.getLength(line):
self.goto(line,pos-1)
self.delc()
else:
self.goto(line,pos-1)
elif line:
pos = self.getLength(line-1)-1
self.goto(line-1,pos)
self.delc()
def goto(self,line, pos ):
""" goto a line in the file and position the cursor to pos offset in the line """
self.pushUndo()
self.invalidate_mark()
if line < 0:
line = 0
if pos < 0:
pos = 0
(line,pos) = self.scrPos(line,pos)
if line >= self.line and line <= self.line+(self.max_y-2):
self.vpos = line - self.line
elif line < self.line:
self.line = line
self.vpos = 0
self.invalidate_screen()
elif line > self.line+(self.max_y-2):
self.line = line - (self.max_y-2)
self.vpos = (self.max_y-2)
self.invalidate_screen()
if pos >= self.left and pos < self.left+(self.max_x-1):
self.pos = pos - self.left
elif pos >= self.left+(self.max_x-1):
self.left = pos-(self.max_x-1)
self.pos = self.max_x-1
self.invalidate_screen()
else:
self.left = pos
self.pos = 0
self.invalidate_screen()
def endln(self):
""" go to the end of a line """
self.pushUndo()
self.invalidate_mark()
orig = self.getContent(self.getLine())
offset = len(orig)
self.goto(self.getLine(),offset)
def endpg(self):
""" go to the end of a page """
self.pushUndo()
self.invalidate_mark()
ldisp = (self.numLines(True)-1)-self.line
self.vpos = min(self.max_y-2,ldisp)
def endfile(self):
""" go to the end of the file """
self.pushUndo()
ldisp = (self.numLines(True)-1)-self.line
if ldisp < self.max_y-2:
return
self.line = (self.numLines(True)-1) - (self.max_y-2)
self.vpos = min(self.max_y-2,ldisp)
self.invalidate_screen()
def end(self):
""" once go to end of line, twice end of page, thrice end of file """
self.pushUndo()
if self.cmd_id == cmd_names.CMD_END and self.prev_cmd == cmd_names.CMD_END:
self.end_count += 1
self.end_count = self.end_count % 3
else:
self.end_count = 0
if self.end_count == 0:
self.endln()
elif self.end_count == 1:
self.endpg()
self.endln()
elif self.end_count == 2:
self.endfile()
self.endln()
def home(self):
""" once to to start of line, twice start of page, thrice start of file """
self.pushUndo()
self.invalidate_mark()
if self.cmd_id == cmd_names.CMD_HOME and self.prev_cmd == cmd_names.CMD_HOME:
self.home_count += 1
self.home_count = self.home_count % 3
else:
self.home_count = 0
if self.home_count == 0:
self.goto(self.getLine(),0)
elif self.home_count == 1:
self.vpos = 0
elif self.home_count == 2:
self.line = 0
self.invalidate_screen()
def pageup(self):
""" go back one page in the file """
self.pushUndo()
self.invalidate_mark()
offset = self.line - (self.max_y-2)
if offset < 0:
offset = 0
self.line = offset
self.invalidate_screen()
def pagedown(self):
""" go forward one page in the file """
self.pushUndo()
self.invalidate_mark()
offset = self.line + (self.max_y-2)
if offset > self.numLines(True)-1:
return
self.line = offset
ldisp = (self.numLines(True)-1)-self.line
if self.vpos > ldisp:
self.vpos = ldisp
self.invalidate_screen()
def cup(self):
""" go back one line in the file """
self.pushUndo()
self.invalidate_mark()
if self.vpos:
self.vpos -= 1
elif self.line:
self.line -= 1
self.invalidate_screen()
self.goto(self.getLine(),self.getPos())
def cdown(self,rept = 1):
""" go forward one or rept lines in the file """
self.pushUndo()
self.invalidate_mark()
while rept:
if self.vpos < min((self.numLines(True)-1)-self.line,(self.max_y-2)):
self.vpos += 1
elif self.line <= self.numLines(True)-self.max_y:
self.line += 1
self.invalidate_screen()
rept = rept - 1
self.goto(self.getLine(),self.getPos())
def prev_word( self ):
""" scan left until you get to the previous word """
self.pushUndo()
orig = self.getContent(self.getLine()).rstrip()
pos = self.getPos()
if pos >= len(orig):
pos = len(orig)-1
if pos and pos < len(orig):
pos -= 1
while pos and orig[pos] == ' ':
pos -= 1
while pos and orig[pos-1] != ' ':
pos -= 1
elif pos >= len(orig):
pos = len(orig)
else:
pos = 0
self.goto(self.getLine(),pos)
def next_word( self ):
""" scan left until you get to the previous word """
self.pushUndo()
orig = self.getContent(self.getLine()).rstrip()
pos = self.getPos()
if pos < len(orig):
if orig[pos] == ' ':
while pos < len(orig) and orig[pos] == ' ':
pos += 1
else:
while pos < len(orig) and orig[pos] != ' ':
pos += 1
while pos < len(orig) and orig[pos] == ' ':
pos += 1
else:
pos = len(orig)
self.goto(self.getLine(),pos)
def cleft(self,rept = 1):
""" go back one or rept characters in the current line """
self.pushUndo()
pos = self.getPos()
line = self.getLine()
if pos >= rept:
self.goto(line,pos-rept)
return
if self.wrap:
if line:
offset = self.getLength(line-1)-(rept-pos)
self.goto(line-1,offset)
else:
self.goto(line,0)
def cright(self,rept = 1):
""" go forward one or rept characters in the current line """
self.pushUndo()
pos = self.getPos()
line = self.getLine()
if self.wrap:
llen = self.getLength(line)
if pos + rept < llen:
self.goto(line,pos+rept)
return
if line < self.numLines()-1:
self.goto(line+1,llen-(pos+rept))
return
self.goto(line,llen)
else:
self.goto(line,pos+rept)
def scroll_left(self):
""" scroll the page left without moving the current cursor position """
self.pushUndo()
if self.left:
self.left -= 1
self.invalidate_screen()
def scroll_right(self):
""" scroll the page right without moving the current cursor position """
self.pushUndo()
self.left += 1
self.invalidate_screen()
def searchagain(self):
""" repeat the previous search if any """
self.pushUndo()
self.invalidate_mark()
if self.isMark():
if not self.last_search_dir:
self.goto(self.mark_line_start,self.mark_pos_start)
self.mark_span()
if self.last_search:
return self.search(self.last_search,self.last_search_dir,True)
else:
return False
def search(self, pattern, down = True, next = True):
""" search for a regular expression forward or back if next is set then skip one before matching """
self.pushUndo()
self.invalidate_mark()
self.last_search = pattern
self.last_search_dir = down
first_line = self.getLine()
line = first_line
if down:
while line < self.numLines():
content = self.getContent(line)
if line == first_line:
content = content[self.getPos():]
offset = self.getPos()
else:
offset = 0
match = None
try:
match = re.search(pattern,content)
except:
pass
if match:
if self.isMark():
self.mark_span()
self.goto(line,match.start()+offset)
self.mark_span()
self.goto(line,match.end()+offset-1)
self.search_mark = True
return True
line += 1
else:
while line >= 0:
content = self.getContent(line)
if line == first_line:
content = content[:self.getPos()]
match = None
try:
match = re.search(pattern,content)
except:
pass
last_match = None
offset = 0
while match:
last_match = match
last_offset = offset
offset += match.end()
match = re.search(pattern,content[offset:])
if last_match:
if self.isMark():
self.mark_span()
self.goto(line,last_match.start()+last_offset)
self.mark_span()
self.goto(line,last_match.end()+last_offset-1)
self.search_mark = True
return True
line -= 1
return False
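    # Note on the backward branch above (illustrative comment): re.search always returns the
    # first match, so the backward case repeatedly re-searches the rest of the line and keeps
    # the final hit; for pattern "ab" in "ab ab ab" with the cursor at end of line, the matches
    # start at columns 0, 3 and 6 and the one at column 6 is selected.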
def invalidate_mark(self):
""" touch the marked lines so that they'll redraw when we change the shape of the mark or do a copy or paste """
if self.isMark():
self.workfile.touchLine(self.mark_line_start, self.getLine())
if self.search_mark:
self.span_mark = False
self.search_mark = False
def invalidate_all(self):
""" touch all the lines in the file so everything will redraw """
self.workfile.touchLine(0,self.workfile.numLines())
def invalidate_screen(self):
""" touch all the lines on the screen so everything will redraw """
line,pos = self.filePos(self.line,self.left)
self.workfile.touchLine(line,line+self.max_y)
def invalidate_after_cursor(self):
""" touch all the lines from the current position to the end of the screen """
line,pos = self.filePos(self.line,self.left)
self.workfile.touchLine(self.getLine(),line+self.max_y)
def has_changes(self):
""" return true if there are any pending changes """
return self.workfile.hasChanges(self)
def mark_span(self):
""" mark a span of characters that can start and end in the middle of a line """
self.pushUndo()
self.invalidate_mark()
if not self.span_mark:
self.span_mark = True
self.rect_mark = False
self.line_mark = False
self.mark_pos_start = self.getPos()
self.mark_line_start = self.getLine()
else:
self.span_mark = False
def mark_rect(self):
""" mark a rectangular or column selection across lines """
# no column cut in wrapped mode, it doesn't make sense
if self.wrap:
return
self.pushUndo()
self.invalidate_mark()
if not self.rect_mark:
self.rect_mark = True
self.span_mark = False
self.line_mark = False
self.mark_pos_start = self.getPos()
self.mark_line_start = self.getLine()
else:
self.rect_mark = False
def mark_lines(self):
""" mark whole lines """
self.pushUndo()
self.invalidate_mark()
if not self.line_mark:
self.line_mark = True
self.span_mark = False
self.rect_mark = False
self.mark_pos_start = 0
self.mark_line_start = self.getLine()
else:
self.line_mark = False
def get_marked(self, delete=False, nocopy = False):
""" returns marked text as tuple ( cliptype, [list of clipped] ) returns () if no mark """
if not self.isMark():
return ()
self.pushUndo()
if delete:
self.invalidate_screen()
mark_pos_start = self.mark_pos_start
mark_line_start = self.mark_line_start
mark_pos_end = self.getPos()
mark_line_end = self.getLine()
if mark_line_start > mark_line_end:
mark = mark_line_start
mark_line_start = mark_line_end
mark_line_end = mark
mark = mark_pos_start
mark_pos_start = mark_pos_end
mark_pos_end = mark
elif mark_line_start == mark_line_end and mark_pos_start > mark_pos_end:
mark = mark_pos_start
mark_pos_start = mark_pos_end
mark_pos_end = mark
clip = []
clip_type = clipboard.LINE_CLIP
line_idx = mark_line_start
if self.line_mark:
if not nocopy:
clip_type = clipboard.LINE_CLIP
while line_idx <= mark_line_end:
clip.append(self.getContent(line_idx))
line_idx += 1
if delete:
line_idx = mark_line_start
while line_idx <= mark_line_end:
self.workfile.deleteLine(mark_line_start)
line_idx += 1
self.rewrap()
elif self.span_mark:
if not nocopy:
clip_type = clipboard.SPAN_CLIP
if line_idx == mark_line_end:
clip.append(self.getContent(line_idx)[mark_pos_start:mark_pos_end+1])
else:
clip.append(self.getContent(line_idx)[mark_pos_start:]+'\n')
line_idx += 1
while line_idx < mark_line_end:
clip.append(self.getContent(line_idx)+'\n')
line_idx += 1
clip.append(self.getContent(line_idx)[0:mark_pos_end+1])
if delete:
line_idx = mark_line_start
if line_idx == mark_line_end:
orig = self.getContent(line_idx)
orig = orig[0:mark_pos_start] + orig[mark_pos_end+1:]
self.workfile.replaceLine(line_idx,orig)
self.rewrap()
else:
first_line = self.getContent(mark_line_start)
last_line = self.getContent(mark_line_end)
while line_idx <= mark_line_end:
self.workfile.deleteLine(mark_line_start)
line_idx += 1
self.workfile.insertLine(mark_line_start,first_line[0:mark_pos_start] + last_line[mark_pos_end+1:])
self.rewrap()
elif self.rect_mark:
if not nocopy:
clip_type = clipboard.RECT_CLIP
while line_idx <= mark_line_end:
clip.append(self.getContent(line_idx,mark_pos_end,True)[mark_pos_start:mark_pos_end+1])
line_idx += 1
if delete:
line_idx = mark_line_start
while line_idx <= mark_line_end:
orig = self.getContent(line_idx,mark_pos_end,True)
self.workfile.replaceLine(line_idx,orig[0:mark_pos_start]+orig[mark_pos_end+1:])
line_idx += 1
# sync the x clipboard
self.transfer_clipboard()
if self.line_mark:
self.line_mark = False
if self.rect_mark:
self.rect_mark = False
if self.span_mark:
self.span_mark = False
if delete:
self.goto(mark_line_start,mark_pos_start)
self.invalidate_screen()
return (clip_type, clip)
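    # Illustrative note on the clip shapes returned above (comment only): a LINE_CLIP is a list
    # of whole lines, a SPAN_CLIP keeps an embedded '\n' on every element except the last so
    # paste() knows where to split, and a RECT_CLIP is a list of column slices of equal width.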
def copy_marked(self,delete=False,nocopy = False):
""" copy the marked text to the clipboard, delete== True means cut, nocopy == True will just delete """
if not self.isMark():
return
self.pushUndo()
cp = self.get_marked(delete,nocopy)
if cp and not (delete and nocopy):
clipboard.clip_type = cp[0]
clipboard.clip = cp[1]
def paste(self):
""" paste the current clip at the cursor position """
if clipboard.clip:
# no column cut or paste when in wrap mode
if self.wrap and clipboard.clip_type == clipboard.RECT_CLIP:
return
self.pushUndo()
if self.isMark():
self.copy_marked(True,True) # delete the marked block first then insert
if clipboard.clip_type == clipboard.LINE_CLIP:
target = self.getLine()
pos = self.getPos()
for line in clipboard.clip:
self.workfile.insertLine(target,line)
target += 1
self.rewrap()
self.goto(target,pos)
elif clipboard.clip_type == clipboard.SPAN_CLIP:
target = self.getLine()
pos = self.getPos()
idx = 0
for line in clipboard.clip:
orig = self.getContent(target,pos,True)
if (not line) or line[-1] == '\n':
line = line.rstrip()
if not idx:
self.workfile.replaceLine(target,orig[0:pos]+line)
self.workfile.insertLine(target+1,orig[pos:])
self.rewrap()
self.goto(target, pos+len(line))
target += 1
else:
self.workfile.insertLine(target,line)
self.rewrap()
self.goto(target, len(line))
target += 1
else:
if not idx:
self.workfile.replaceLine(target,orig[0:pos]+line+orig[pos:])
self.rewrap()
self.goto(target, pos+len(line))
else:
self.workfile.replaceLine(target,line+orig)
self.rewrap()
self.goto(target, len(line))
idx += 1
elif clipboard.clip_type == clipboard.RECT_CLIP:
target = self.getLine()
pos = self.getPos()
for line in clipboard.clip:
orig = self.getContent(target,self.getPos(),True)
self.workfile.replaceLine(target,orig[0:self.getPos()]+line+orig[self.getPos():])
target += 1
self.rewrap()
self.goto(target,pos)
def cr(self):
""" insert a carriage return, split the current line at cursor """
self.pushUndo()
orig = self.getContent(self.getLine(),self.getPos(),True)
self.workfile.replaceLine(self.getLine(),orig[0:self.getPos()])
self.workfile.insertLine(self.getLine()+1,orig[self.getPos():])
self.rewrap()
self.goto(self.getLine()+1,0)
def instab(self, line, pos, move_cursor = True ):
""" insert a tab at a line and position """
orig = self.getContent(line,pos,True)
stop = self.workfile.get_tab_stop(pos)
orig = orig[0:pos] + ' '*(stop-(pos)) + orig[pos:]
self.workfile.replaceLine(line,orig)
self.rewrap()
if move_cursor:
self.goto(line,stop)
def tab(self):
""" tab in the correct distance to the next tab stop """
self.pushUndo()
if self.isMark() and self.line_mark:
oline = self.getLine()
opos = self.getPos()
mark_line_start = self.mark_line_start
mark_line_end = oline
if mark_line_start > mark_line_end:
mark = mark_line_start
mark_line_start = mark_line_end
mark_line_end = mark
while mark_line_start <= mark_line_end:
self.instab( mark_line_start, 0, False )
mark_line_start += 1
self.goto(oline,opos)
else:
self.instab( self.getLine(), self.getPos() )
def deltab(self, line, pos, move_cursor = True ):
""" remove a tab from the line at position provided optionally move the cursor """
orig = self.getContent(line,pos+1,True)
idx = pos
start = 0
stop = 0
while idx:
while idx and orig[idx] != ' ':
idx -= 1
start = idx
stop = self.workfile.get_tab_stop(idx,True)
while idx and idx >= stop:
if orig[idx] != ' ':
break
idx -= 1
else:
if start > stop:
break
if start > stop:
orig = orig[0:stop]+orig[start+1:]
self.workfile.replaceLine(line,orig)
self.rewrap()
if move_cursor:
self.goto(line,stop)
def btab(self):
""" remove white space to the previous tab stop, or shift the line back to the previous tab stop """
self.pushUndo()
if self.isMark() and self.line_mark:
mark_line_start = self.mark_line_start
mark_line_end = self.getLine()
if mark_line_start > mark_line_end:
mark = mark_line_start
mark_line_start = mark_line_end
mark_line_end = mark
while mark_line_start <= mark_line_end:
self.deltab( mark_line_start, self.workfile.get_tab_stop(0), False )
mark_line_start += 1
else:
self.deltab( self.getLine(), self.getPos() )
def prmt_goto(self):
""" prompt for a line to go to and go there """
self.invalidate_screen()
goto_line = prompt(self.parent,"Goto","Enter line number 0-%d :"%(self.numLines()-1),10,name="goto")
if goto_line:
self.goto(int(goto_line),self.getPos())
def saveas(self):
""" open the file dialog and enter or point to a file and then save this buffer to that path """
f = file_dialog.FileDialog(self.parent,"Save file as")
choices = f.main()
if choices and choices["file"]:
self.workfile.save(os.path.join(choices["dir"],choices["file"]))
self.undo_mgr.flush_undo()
self.invalidate_all()
gc.collect()
def save(self):
""" save the current buffer """
if self.workfile.isModifiedOnDisk():
if not confirm(self.parent, "File has changed on disk, overwrite?"):
self.invalidate_screen()
self.redraw()
return
self.workfile.save()
self.undo_mgr.flush_undo()
self.goto(self.getLine(),self.getPos())
self.invalidate_all()
self.redraw()
gc.collect()
def prmt_search(self,down=True):
""" prompt for a search string then search for it and either put up a message that it was not found or position the cursor to the occurrance """
self.invalidate_screen()
if down:
title = "Search Forward"
else:
title = "Search Backward"
pattern = prompt(self.parent,title,"Pattern: ",-1,name="search")
if pattern:
if not self.search(pattern,down):
message(self.parent,"Search","Pattern not found.")
def prmt_replace(self):
""" prompt for search pattern and replacement string, then confirm replacment or replace all for the occurrences until no more are found """
(pattern,rep) = replace(self.parent)
if pattern and rep:
found = self.search(pattern)
replace_all = False
do_replace = False
while found:
self.redraw()
self.scr.refresh()
if not replace_all:
answer = confirm_replace(self.parent)
self.invalidate_screen()
if answer == 1:
do_replace = True
elif answer == 2:
do_replace = False
elif answer == 3:
replace_all = True
elif answer == 4:
message(self.parent,"Canceled","Replace canceled.")
return
if do_replace or replace_all:
self.insert(rep)
found = self.searchagain()
else:
message(self.parent,"Replace","Pattern not found.")
self.invalidate_screen()
def prmt_searchagain(self):
""" search again and put up a message if no more are found """
self.invalidate_screen()
if not self.searchagain():
if self.isMark():
self.mark_span()
message(self.parent,"Search","Pattern not found.")
def transfer_clipboard(self, from_xclip = False):
""" use xclip to transfer out clipboard to x or vice/versa """
if os.path.exists("/dev/clipboard"):
if from_xclip:
clipboard.clip = []
clipboard.clip_type = clipboard.SPAN_CLIP
for line in open("/dev/clipboard","r",buffering=1,encoding="utf-8"):
clipboard.clip.append(line)
else:
                cld = open("/dev/clipboard","w",buffering=1,encoding="utf-8")
for line in clipboard.clip:
cld.write(line)
cld.close()
elif os.path.exists("/usr/bin/xclip"):
cmd = [ "xclip", ]
if from_xclip:
cmd += ["-out","-selection","clipboard"]
else:
cmd += ["-in","-selection","clipboard"]
try:
proc = subprocess.Popen( cmd, encoding="utf-8", stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
if from_xclip:
clipboard.clip = []
clipboard.clip_type = clipboard.SPAN_CLIP
for l in proc.stdout:
clipboard.clip.append(l)
else:
for l in clipboard.clip:
print(l.rstrip(), file=proc.stdin)
proc.stdout.close()
proc.stdin.close()
proc.stderr.close()
proc.wait()
except:
pass
def toggle_wrap(self):
""" toggle wrapping for this editor """
# don't toggle wrapping while we're marking a rectangle
if self.rect_mark:
return
self.pushUndo()
oline = self.getLine()
opos = self.getPos()
self.wrap = not self.wrap
self.rewrap(True)
self.invalidate_all()
self.goto(oline,opos)
def handle(self,ch):
""" main character handler dispatches keystrokes to execute editor commands returns characters meant to be processed
by containing manager or dialog """
self.prev_cmd = self.cmd_id
if isinstance(ch,int):
self.cmd_id, ret = keymap.mapkey( self.scr, keymap.keymap_editor, ch )
else:
self.cmd_id, ret = keymap.mapseq( keymap.keymap_editor, ch )
if extension_manager.is_extension(self.cmd_id):
if not extension_manager.invoke_extension( self.cmd_id, self, ch ):
return ret
if self.cmd_id == cmd_names.CMD_RETURNKEY:
if ret in [keytab.KEYTAB_NOKEY,keytab.KEYTAB_REFRESH,keytab.KEYTAB_RESIZE]:
self.cmd_id = self.prev_cmd
elif self.cmd_id == cmd_names.CMD_INSERT:
self.insert(chr(ret))
ret = keytab.KEYTAB_NOKEY
elif self.cmd_id == cmd_names.CMD_MARKSPAN:
self.mark_span()
elif self.cmd_id == cmd_names.CMD_MARKRECT:
self.mark_rect()
elif self.cmd_id == cmd_names.CMD_COPYMARKED:
self.copy_marked()
elif self.cmd_id == cmd_names.CMD_PRMTGOTO:
self.prmt_goto()
elif self.cmd_id == cmd_names.CMD_BACKSPACE:
self.backspace()
elif self.cmd_id == cmd_names.CMD_FILENAME:
if self.getFilename():
message(self.parent,"Filename",self.getFilename())
elif self.cmd_id == cmd_names.CMD_CUTMARKED:
self.copy_marked(True)
elif self.cmd_id == cmd_names.CMD_PASTE:
self.paste()
elif self.cmd_id == cmd_names.CMD_MARKLINES:
self.mark_lines()
elif self.cmd_id == cmd_names.CMD_CR:
self.cr()
elif self.cmd_id == cmd_names.CMD_TAB:
self.tab()
elif self.cmd_id == cmd_names.CMD_SAVE:
self.save()
elif self.cmd_id == cmd_names.CMD_SAVEAS:
self.saveas()
elif self.cmd_id == cmd_names.CMD_UNDO:
self.undo()
elif self.cmd_id == cmd_names.CMD_TOGGLEWRAP:
self.toggle_wrap()
elif self.cmd_id == cmd_names.CMD_MARKCOPYLINE:
if not self.isMark():
self.mark_lines()
self.copy_marked()
elif self.cmd_id == cmd_names.CMD_MARKCUTLINE:
if not self.isMark():
self.mark_lines()
self.copy_marked(True)
elif self.cmd_id == cmd_names.CMD_BTAB:
self.btab()
elif self.cmd_id == cmd_names.CMD_PREVWORD:
self.prev_word()
elif self.cmd_id == cmd_names.CMD_NEXTWORD:
self.next_word()
elif self.cmd_id == cmd_names.CMD_HOME1:
self.pushUndo()
self.prev_cmd = cmd_names.CMD_HOME
self.cmd_id = cmd_names.CMD_HOME
self.home_count = 0
self.home()
self.home()
self.home()
elif self.cmd_id == cmd_names.CMD_END1:
self.pushUndo()
self.prev_cmd = cmd_names.CMD_END
self.cmd_id = cmd_names.CMD_END
self.end_count = 0
self.end()
self.end()
self.end()
elif self.cmd_id == cmd_names.CMD_UP:
self.cup()
elif self.cmd_id == cmd_names.CMD_DOWN:
self.cdown()
elif self.cmd_id == cmd_names.CMD_LEFT:
self.cleft()
elif self.cmd_id == cmd_names.CMD_RIGHT:
self.cright()
elif self.cmd_id == cmd_names.CMD_DELC:
self.delc()
elif self.cmd_id == cmd_names.CMD_HOME:
self.home()
elif self.cmd_id == cmd_names.CMD_END:
self.end()
elif self.cmd_id == cmd_names.CMD_PAGEUP:
self.pageup()
elif self.cmd_id == cmd_names.CMD_PAGEDOWN:
self.pagedown()
elif self.cmd_id == cmd_names.CMD_PRMTSEARCH:
self.prmt_search()
elif self.cmd_id == cmd_names.CMD_PRMTREPLACE:
self.prmt_replace()
elif self.cmd_id == cmd_names.CMD_TRANSFERCLIPIN:
self.transfer_clipboard(False)
elif self.cmd_id == cmd_names.CMD_TRANSFERCLIPOUT:
self.transfer_clipboard(True)
elif self.cmd_id == cmd_names.CMD_PRMTSEARCHBACK:
self.prmt_search(False)
elif self.cmd_id == cmd_names.CMD_SEARCHAGAIN:
self.prmt_searchagain()
elif self.cmd_id == cmd_names.CMD_TOGGLERECORD:
keymap.toggle_recording()
elif self.cmd_id == cmd_names.CMD_PLAYBACK:
keymap.start_playback()
return ret
def main(self,blocking = True, start_ch = None):
""" main driver loop for editor, if blocking = False exits on each keystroke to allow embedding,
start_ch is a character read externally that hould be processed on startup """
curses.curs_set(0)
self.rewrap()
self.scr.nodelay(1)
self.scr.notimeout(0)
self.scr.timeout(0)
while (1):
if not self.scr:
return 27
if not self.mode:
for m in Editor.modes:
if m.detect_mode(self):
self.mode = m
self.getWorkfile().set_tabs(m.get_tabs(self))
break
else:
self.mode = None
self.redraw()
if start_ch:
ch = start_ch
start_ch = None
else:
ch = keymap.getch(self.scr)
try:
self.undo_mgr.new_transaction()
if self.mode:
ch = self.mode.handle(self,ch)
modref = self.workfile.getModref()
ret_seq = self.handle(ch)
if self.wrap and modref != self.workfile.getModref():
self.rewrap()
if ret_seq or not blocking:
return ret_seq
except ReadOnlyError as e:
message(self.parent,"Read Only File Error","Changes not allowed.")
if not blocking:
return keytab.KEYTAB_REFRESH
class StreamThread:
""" Thread to read from a stream blocking as needed adding lines to the owning EditFile object """
def __init__(self, ef, stream ):
self.ef = ef
self.stream = stream
self.thread = None
self.read_worker_stop = False
def __del__( self ):
self.stop_stream()
def start_stream( self ):
self.thread = threading.Thread(target = self.read_worker)
self.thread.start()
def wait(self):
if self.thread:
self.thread.join()
def stop_stream( self ):
self.read_worker_stop = True
if self.thread and self.thread.is_alive():
self.thread.join()
if self.stream:
self.stream.close()
self.stream = None
self.thread = None
self.read_worker_stop = False
def read_worker( self ):
pos = 0
lidx = 0
while not self.read_worker_stop:
line = self.stream.readline()
if not line:
break
try:
self.ef.lines_lock.acquire()
self.ef.modref += 1
self.ef.working.seek(0,2)
self.ef.working.write(line)
line = line.rstrip()
self.ef.lines.append(FileLine(self.ef,pos,len(self.ef.expand_tabs(line))))
if self.ef.change_mgr:
self.ef.change_mgr.changed(lidx,lidx)
pos = self.ef.working.tell()
lidx += 1
finally:
self.ef.lines_lock.release()
time.sleep( 0 )
try:
self.ef.lines_lock.acquire()
while len(self.ef.lines) and not self.ef.lines[-1].getContent().strip():
del self.ef.lines[-1]
if not len(self.ef.lines):
self.ef.lines.append(MemLine(""))
finally:
self.ef.lines_lock.release()
self.ef.changed = False
self.ef.modref = 0
if self.stream:
self.stream.close()
self.stream = None
self.thread = None
self.read_worker_stop = False
class StreamFile(EditFile):
""" Class reads a stream to the end and writes it to a temp file which
is opened and loaded read only, used for capturing the output of
shell commands to a read-only editor """
def __init__(self,name,stream,wait=False):
""" takes name which is a display name for this stream, stream is the input stream to read """
self.stream = stream
self.stream_thread = None
self.lines_lock = threading.Lock()
self.wait = wait
# store the filename
self.filename = name
# the root of the backup directory
self.backuproot = EditFile.default_backuproot
# set the default tab stops
self.tabs = [ 4, 8 ]
# set the changed flag to false
self.changed = False
# read only flag
self.readonly = EditFile.default_readonly
# undo manager
self.undo_mgr = undo.UndoManager()
# change manager
self.change_mgr = changes.ChangeManager()
# modification reference incremented for each change
self.modref = 0
# the file object
self.working = None
# the lines in this file
self.lines = []
# load the file
self.load()
def __del__(self):
""" clean up stream thread and stream """
self.stream_thread = None
EditFile.__del__(self)
def open(self):
""" override of the open method, starts a thread that reads the stream into a tempfile which then becomes the file for the editor """
if self.stream and not self.working:
self.working = tempfile.NamedTemporaryFile(mode="w+")
self.setReadOnly(True)
self.stream_thread = StreamThread(self,self.stream)
self.stream_thread.start_stream()
if self.wait:
self.stream_thread.wait()
self.stream_thread = None
return
else:
EditFile.open(self)
def load(self):
if self.stream and not self.working:
self.open()
return
else:
EditFile.load(self)
def close(self):
""" override of close method, make sure the stream gets closed """
if self.stream_thread:
self.stream_thread.stop_stream()
self.stream = None
self.stream_thread = None
elif self.stream:
self.stream.close()
self.stream = None
EditFile.close(self)
def save( self, filename = None ):
""" save the file, if filename is passed it'll be saved to that filename and reopened """
if filename:
if filename == self.filename and self.isReadOnly():
raise ReadOnlyError()
try:
self.lines_lock.acquire()
o = open(filename,"w",buffering=1,encoding="utf-8")
for l in self.lines:
txt = l.getContent()+'\n'
o.write(txt)
o.close()
finally:
self.lines_lock.release()
self.close()
self.filename = filename
self.load()
else:
if self.isReadOnly():
raise ReadOnlyError()
def set_tabs(self, tabs):
try:
self.lines_lock.acquire()
EditFile.set_tabs(self, tabs)
finally:
self.lines_lock.release()
def numLines(self):
try:
self.lines_lock.acquire()
return EditFile.numLines(self)
finally:
self.lines_lock.release()
def length(self,line):
try:
self.lines_lock.acquire()
return EditFile.length(self,line)
finally:
self.lines_lock.release()
def getLine(self, line, pad = 0, trim = False ):
try:
self.lines_lock.acquire()
return EditFile.getLine(self,line,pad,trim)
finally:
self.lines_lock.release()
def getLines(self, line_start = 0, line_end = -1 ):
try:
self.lines_lock.acquire()
return EditFile.getLines(self,line_start,line_end)
finally:
self.lines_lock.release()
class StreamEditor(Editor):
""" this is a read only editor that wraps a stream it has a select
option for use when embedding in a control to select lines
from the stream """
def __init__(self, par, scr, name, stream, select = False, line_re = None, follow = False, wait = False, workfile=None ):
""" takes parent curses screen, screen to render to, name for
stream, stream to read in, and select to indicate if
line selection is requested """
self.select = select
self.line_re = line_re
self.follow = follow
self.wait = wait
self.o_nlines = 0
if workfile:
self.sfile = workfile
else:
self.sfile = StreamFile(name,stream,self.wait)
Editor.__init__(self, par, scr, self.sfile.getFilename(), self.sfile)
def __copy__(self):
""" override to just copy the editor state and not the underlying file object """
result = StreamEditor(self.parent,self.scr,None,None,self.select, self.line_re, self.follow, self.wait, self.workfile)
result.o_nlines = 0
result.line = self.line
result.pos = self.pos
result.vpos = self.vpos
result.left = self.left
result.prev_cmd = self.prev_cmd
result.cmd_id = self.cmd_id
result.home_count = self.home_count
result.end_count = self.end_count
result.line_mark = self.line_mark
result.span_mark = self.span_mark
result.rect_mark = self.rect_mark
result.search_mark = self.search_mark
result.mark_pos_start = self.mark_pos_start
result.mark_line_start = self.mark_line_start
result.last_search = self.last_search
result.last_search_dir = self.last_search_dir
result.mode = self.mode
result.wrap_lines = copy.copy(self.wrap_lines)
result.unwrap_lines = copy.copy(self.unwrap_lines)
result.wrap_modref = self.wrap_modref
result.wrap_width = self.wrap_width
result.show_cursor = self.show_cursor
result.focus = self.focus
result.prev_pos = copy.copy(self.prev_pos)
return result
def __del__(self):
""" clean up the StreamFile """
self.sfile = None
def close(self):
""" close and shut down the stream file """
if self.sfile:
self.sfile.close()
self.sfile = None
def handle(self,ch):
""" override normal keystroke handling if in select mode
and move about doing selection and return on enter """
if self.follow:
nlines = self.numLines()
if self.o_nlines != nlines:
self.endfile()
self.o_nlines = nlines
if ch in keymap.keydef_map and keymap.keydef_map[ch][-1] == keytab.KEYTAB_CTRLF:
self.follow = not self.follow
return keytab.KEYTAB_NOKEY
elif not self.select:
return Editor.handle(self,ch)
o_line = self.getLine()
if isinstance(ch,int):
ch = keymap.get_keyseq( self.scr, ch )
ret_ch = keytab.KEYTAB_NOKEY
direction = 0
if ch in [keytab.KEYTAB_F02,keytab.KEYTAB_F04,keytab.KEYTAB_F10,keytab.KEYTAB_F01,keytab.KEYTAB_RESIZE, keytab.KEYTAB_CR, keytab.KEYTAB_BTAB, keytab.KEYTAB_TAB, keytab.KEYTAB_ESC]:
ret_ch = ch
elif ch == keytab.KEYTAB_UP:
self.cup()
direction = -1
elif ch == keytab.KEYTAB_DOWN:
self.cdown()
direction = 1
elif ch == keytab.KEYTAB_LEFT:
self.scroll_left()
elif ch == keytab.KEYTAB_RIGHT:
self.scroll_right()
elif ch == keytab.KEYTAB_BACKSPACE:
direction = -1
self.cup()
elif ch == keytab.KEYTAB_HOME:
self.pushUndo()
self.left = 0
self.pos = 0
self.line = 0
self.vpos = 0
direction = 1
elif ch == keytab.KEYTAB_END:
self.endfile()
direction = -1
elif ch == keytab.KEYTAB_PAGEUP:
self.pageup()
direction = -1
elif ch == keytab.KEYTAB_PAGEDOWN:
self.pagedown()
direction = 1
elif ch == keytab.KEYTAB_F05:
self.prmt_search()
direction = 1
elif ch == keytab.KEYTAB_F17: # shift f5:
self.prmt_search(False)
direction = -1
elif ch == keytab.KEYTAB_F03:
self.prmt_searchagain()
if self.last_search_dir:
direction = 1
else:
direction = -1
if self.line_re and direction:
if direction > 0:
while True:
if re.search(self.line_re, self.getCurrentLine()):
self.line = self.getLine()
self.vpos = 0
break
line = self.getLine()
self.cdown()
if line == self.getLine():
self.undo_mgr.undo_transaction()
break
elif direction < 0:
while True:
if re.search(self.line_re, self.getCurrentLine()):
self.line = self.getLine()
self.vpos = 0
break
line = self.getLine()
self.cup()
if line == self.getLine():
self.undo_mgr.undo_transaction()
break
if self.getLine() != o_line or not self.isMark():
if self.isMark():
self.mark_lines()
self.mark_lines()
return ret_ch
class ReadonlyEditor(Editor):
""" editor subclass implements read only editor for viewing files """
def __init__(self, par, scr, name, showname = True):
""" parent curses screen, screen to render to, filename to open """
self.showname = showname
sfile = EditFile(name)
sfile.setReadOnly()
Editor.__init__(self, par, scr, name, sfile, showname)
def getFilename(self):
""" override getFilename so we can return None to indicate no file stuff should be done """
if self.showname:
return Editor.getFilename(self)
else:
return None
def handle(self,ch):
""" handle override to only do read only actions to the file """
o_line = self.getLine()
if isinstance(ch,int):
ch = keymap.get_keyseq( self.scr, ch )
ret_ch = keytab.KEYTAB_NOKEY
if ch in [keytab.KEYTAB_F02,keytab.KEYTAB_F04,keytab.KEYTAB_F10,keytab.KEYTAB_F01,keytab.KEYTAB_RESIZE, keytab.KEYTAB_CR, keytab.KEYTAB_BTAB, keytab.KEYTAB_TAB, keytab.KEYTAB_ESC]:
ret_ch = ch
elif ch == keytab.KEYTAB_CTRLW: # ctrl-w (toggle wrap in readonly editor)
self.toggle_wrap()
elif ch == keytab.KEYTAB_UP:
self.cup()
elif ch == keytab.KEYTAB_DOWN:
self.cdown()
elif ch == keytab.KEYTAB_LEFT:
self.scroll_left()
elif ch == keytab.KEYTAB_RIGHT:
self.scroll_right()
elif ch == keytab.KEYTAB_BACKSPACE:
self.cup()
elif ch == keytab.KEYTAB_HOME:
self.home()
elif ch == keytab.KEYTAB_END:
self.end()
elif ch == keytab.KEYTAB_PAGEUP:
self.pageup()
elif ch == keytab.KEYTAB_PAGEDOWN:
self.pagedown()
elif ch == keytab.KEYTAB_F05:
self.prmt_search()
elif ch == keytab.KEYTAB_F17: # shift f5:
self.prmt_search(False)
elif ch == keytab.KEYTAB_F03:
self.prmt_searchagain()
if self.getLine() != o_line or not self.isMark():
if self.isMark():
self.mark_lines()
self.mark_lines()
return ret_ch
def main(stdscr):
""" test driver for the editor """
open("test.txt","w").write("""This is line one
This is line two
\tThis is line three
\t\tThis is line four
This is line five
aaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbb
cccccccccccccccc
dddddddddddddddd
eeeeeeeeeeeeeeee
ffffffffffffffff
gggggggggggggggg
hhhhhhhhhhhhhhhh
iiiiiiiiiiiiiiii
jjjjjjjjjjjjjjjj
kkkkkkkkkkkkkkkk
llllllllllllllll
mmmmmmmmmmmmmmmm
Aa23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789A
Ba23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789B
Ca23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789C
Da23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789D
Ea23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789E
Fa23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789F
Ga23456789b23456789c23456789d23456789d23456789e23456789f23456789g23456789h23456789G
asdkfjlkjaslkfjj
asfdkljsa;dfkljas;dflksajdf;laskdfjas;kfdljas;dlkfjas safkjsf;kljsf
askdfj;sa asdkfj as;lkfjs fksadfjs;lkfj asdjdfkljsaf
al;slkfdj asdlkfj asdlfkj asldkfj asdf;lkj as;lkdfj
as;ldkfj adsflk asdlfkj aslfkj
aslfj adflkj alkjasdflk aksdfj
asdflj asldkfj asdflkj asldkfj aslkfj
aslfdkjalksfjd aslfjd asdlfkj ;askfdj alskdfj
asldfkj ksaldfj slkdfj kasdfj
asdflkja aljkjjlk asdkfljlaksfjd aslkdjf alskdjf alskdfj
aslfkj alkjdfslkj aldkfj alskdfj asldkfj
asldfj aslkdfj alskdfj alkdfj aslkdfj aslkdfj""")
e1 = Editor(stdscr, curses.newwin(0,0),"test.txt",None,True,True)
e1.main()
if __name__ == '__main__':
curses.wrapper(main)
```
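The StreamFile and StreamEditor classes above exist to capture the output of shell commands into a read-only editor view. The sketch below wires a command's stdout into a StreamEditor the same way the test suite further down does; the command string and window geometry are assumptions chosen for illustration, not part of ped.
```python
# Illustrative sketch only: run a shell command and browse its output
# read-only in a StreamEditor.
import curses
import subprocess
from ped_core import editor_common

def view_command_output(stdscr):
    max_y, max_x = stdscr.getmaxyx()
    proc = subprocess.Popen("ls -l", shell=True, encoding="utf-8",
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    se = editor_common.StreamEditor(stdscr,
                                    stdscr.subwin(max_y, max_x, 0, 0),
                                    "ls -l", proc.stdout)
    se.main()   # blocking driver loop; returns when the user exits the view
    se.close()

if __name__ == '__main__':
    curses.wrapper(view_command_output)
```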
#### File: ped/ped_core/extension_manager.py
```python
import sys
import os
from ped_core import cmd_names
from ped_core import keymap
from ped_core import keytab
extensions = {}
# extension modules are python modules with the following two entry points
# ped_ext_info which takes no arguments and returns a tuple ( cmd_name, keymap_name {"EDITOR","DIALOG","MANAGER"}, keytab_key_name, keytab_key_name_ret, ext_name )
# cmd_name can be the same as an existing name which will be an override or it can be a new name
# keymap_name and keytab_key_name should be None if it is an override, if not it needs to be specified
# ped_ext_invoke which takes arguments ( cmd_id, Dialog or EditorManager or Editor target, key ordinal we entered on )
# and returns True to continue cmd processing or False to exit cmd processing; this allows augmenting commands
# with a prior action
def register_extensions():
""" search the directory ~/.pedextension for python modules that implement ped_ext_info, ped_ext_invoke methods """
ped_extension_path = "~/.pedextension"
if "PED_EXTENSION_PATH" in os.environ:
ped_extension_path = os.environ["PED_EXTENSION_PATH"]
ext_dir = os.path.expanduser( ped_extension_path )
if os.path.exists(ext_dir) and os.path.isdir(ext_dir):
pwd = os.getcwd()
os.chdir(ext_dir)
sys_path = list(sys.path)
sys.path.append(ext_dir)
for f in os.listdir(ext_dir):
if f.endswith(".py"):
ext_mod = __import__(f[:-3])
if hasattr(ext_mod,"ped_ext_info") and hasattr(ext_mod,"ped_ext_invoke"):
cm_name, km_name, km_key, km_ret_key, ex_name = ext_mod.ped_ext_info()
if cm_name in cmd_names.name_to_cmd:
extensions[cm_name] = ext_mod
else:
new_cmd = max(cmd_names.name_to_cmd.values())+1
cmd_names.cmd_to_name[new_cmd] = cm_name
cmd_names.name_to_cmd[cm_name] = new_cmd
if km_name == "EDITOR":
keymap.keymap_editor[keytab.name_to_key[km_key]] = ( new_cmd, keytab.name_to_key[km_ret_key] )
elif km_name == "MANAGER":
keymap.keymap_manager[keytab.name_to_key[km_key]] = ( new_cmd, keytab.name_to_key[km_ret_key] )
elif km_name == "DIALOG":
keymap.keymap_dialog[keytab.name_to_key[km_key]] = ( new_cmd, keytab.name_to_key[km_ret_key] )
extensions[cm_name] = ext_mod
os.chdir(pwd)
sys.path = sys_path
def is_extension( cmd_id ):
""" test to see if there is an extension for a command id """
return (cmd_names.cmd_to_name[cmd_id] in extensions)
def invoke_extension( cmd_id, target, ch ):
""" invoke the extension returns true if processing should continue for augmenting or false if processing should exit for overrides """
# target could be Editor, EditorManager, or Dialog object, plugin needs to know
return extensions[cmd_names.cmd_to_name[cmd_id]].ped_ext_invoke( cmd_id, target, ch )
```
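The comments at the top of this module describe the two entry points an extension must export. The bundled dummy extension shown later illustrates the override case; the sketch below shows what registering a brand-new command bound to a key might look like. The command name, the F6 binding, and the editor action are assumptions for illustration only.
```python
# Hypothetical extension module, e.g. saved as ~/.pedextension/shout_ext.py.
# Everything here (command name, F6 binding, action taken) is illustrative;
# only the two entry points are required by extension_manager.

def ped_ext_info():
    # (cmd_name, keymap_name, keytab_key_name, keytab_key_name_ret, ext_name)
    return ("CMD_SHOUT", "EDITOR", "KEYTAB_F06", "KEYTAB_NOKEY", "shout_extension")

def ped_ext_invoke(cmd_id, target, ch):
    # target is the Editor instance because keymap_name above is "EDITOR"
    target.insert("!")   # trivial action: insert a character at the cursor
    return False         # False stops further command processing (we handled it)
```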
#### File: ped/ped_core/keytab.py
```python
import curses
KEYTAB_NOKEY=chr(0)
KEYTAB_ALTA=chr(27)+'a'
KEYTAB_ALTB=chr(27)+'b'
KEYTAB_ALTC=chr(27)+'c'
KEYTAB_ALTD=chr(27)+'d'
KEYTAB_ALTE=chr(27)+'e'
KEYTAB_ALTF=chr(27)+'f'
KEYTAB_ALTG=chr(27)+'g'
KEYTAB_ALTH=chr(27)+'h'
KEYTAB_ALTI=chr(27)+'i'
KEYTAB_ALTJ=chr(27)+'j'
KEYTAB_ALTK=chr(27)+'k'
KEYTAB_ALTL=chr(27)+'l'
KEYTAB_ALTM=chr(27)+'m'
KEYTAB_ALTN=chr(27)+'n'
KEYTAB_ALTo=chr(27)+'o'
KEYTAB_ALTO=chr(27)+'O'
KEYTAB_ALTP=chr(27)+'p'
KEYTAB_ALTQ=chr(27)+'q'
KEYTAB_ALTR=chr(27)+'r'
KEYTAB_ALTS=chr(27)+'s'
KEYTAB_ALTT=chr(27)+'t'
KEYTAB_ALTU=chr(27)+'u'
KEYTAB_ALTV=chr(27)+'v'
KEYTAB_ALTW=chr(27)+'w'
KEYTAB_ALTX=chr(27)+'x'
KEYTAB_ALTY=chr(27)+'y'
KEYTAB_ALTZ=chr(27)+'z'
KEYTAB_BACKSPACE="backspace"
KEYTAB_BACKSPACE=chr(8)
KEYTAB_BACKTAB=chr(27)+'[Z'
KEYTAB_BTAB="btab"
KEYTAB_CR=chr(10)
KEYTAB_CTRLA=chr(1)
KEYTAB_CTRLB=chr(2)
KEYTAB_CTRLC=chr(3)
KEYTAB_CTRLD=chr(4)
KEYTAB_CTRLE=chr(5)
KEYTAB_CTRLF=chr(6)
KEYTAB_CTRLG=chr(7)
KEYTAB_CTRLH=chr(8)
KEYTAB_CTRLI=chr(9)
KEYTAB_CTRLJ=chr(10)
KEYTAB_CTRLK=chr(11)
KEYTAB_CTRLL=chr(12)
KEYTAB_CTRLM=chr(13)
KEYTAB_CTRLN=chr(14)
KEYTAB_CTRLO=chr(15)
KEYTAB_CTRLP=chr(16)
KEYTAB_CTRLQ=chr(17)
KEYTAB_CTRLR=chr(18)
KEYTAB_CTRLS=chr(19)
KEYTAB_CTRLT=chr(20)
KEYTAB_CTRLU=chr(21)
KEYTAB_CTRLV=chr(22)
KEYTAB_CTRLW=chr(23)
KEYTAB_CTRLX=chr(24)
KEYTAB_CTRLY=chr(25)
KEYTAB_CTRLZ=chr(26)
KEYTAB_CTRLLEFT='ctrl-left'
KEYTAB_CTRLRIGHT='ctrl-right'
KEYTAB_CTRLHOME='ctrl-home'
KEYTAB_CTRLEND='ctrl-end'
KEYTAB_DELC="delc"
KEYTAB_DLGCANCEL="cancel"
KEYTAB_DLGNOP=KEYTAB_NOKEY
KEYTAB_DLGOK="ok"
KEYTAB_DOWN="down"
KEYTAB_END="end"
KEYTAB_ESC=chr(27)
KEYTAB_F00="fk00"
KEYTAB_F01="fk01"
KEYTAB_F02="fk02"
KEYTAB_F03="fk03"
KEYTAB_F04="fk04"
KEYTAB_F05="fk05"
KEYTAB_F06="fk06"
KEYTAB_F07="fk07"
KEYTAB_F08="fk08"
KEYTAB_F09="fk09"
KEYTAB_F10="fk10"
KEYTAB_F11="fk11"
KEYTAB_F12="fk12"
KEYTAB_F13="fk13"
KEYTAB_F14="fk14"
KEYTAB_F15="fk15"
KEYTAB_F16="fk16"
KEYTAB_F17="fk17"
KEYTAB_F18="fk18"
KEYTAB_F19="fk19"
KEYTAB_F20="fk20"
KEYTAB_F21="fk21"
KEYTAB_F22="fk22"
KEYTAB_F23="fk23"
KEYTAB_F24="fk24"
KEYTAB_F25="fk25"
KEYTAB_F26="fk26"
KEYTAB_F27="fk27"
KEYTAB_F28="fk28"
KEYTAB_F29="fk29"
KEYTAB_F30="fk30"
KEYTAB_F31="fk31"
KEYTAB_F32="fk32"
KEYTAB_F33="fk33"
KEYTAB_F34="fk34"
KEYTAB_F35="fk35"
KEYTAB_F36="fk36"
KEYTAB_F37="fk37"
KEYTAB_F38="fk38"
KEYTAB_F39="fk39"
KEYTAB_F40="fk40"
KEYTAB_F41="fk41"
KEYTAB_F42="fk42"
KEYTAB_F43="fk43"
KEYTAB_F44="fk44"
KEYTAB_F45="fk45"
KEYTAB_F46="fk46"
KEYTAB_F47="fk47"
KEYTAB_F48="fk48"
KEYTAB_F49="fk49"
KEYTAB_F50="fk50"
KEYTAB_F51="fk51"
KEYTAB_F52="fk52"
KEYTAB_F53="fk53"
KEYTAB_F54="fk54"
KEYTAB_F55="fk55"
KEYTAB_F56="fk56"
KEYTAB_F57="fk57"
KEYTAB_F58="fk58"
KEYTAB_F59="fk59"
KEYTAB_F60="fk60"
KEYTAB_F61="fk61"
KEYTAB_F62="fk62"
KEYTAB_F63="fk63"
KEYTAB_HOME="home"
KEYTAB_INSERT="insert"
KEYTAB_KEYPADPLUS=chr(27)+'Ok'
KEYTAB_KEYTPADMINUS=chr(27)+'Om'
KEYTAB_LEFT="left"
KEYTAB_PAGEDOWN="pagedown"
KEYTAB_PAGEUP="pageup"
KEYTAB_REFRESH="refresh"
KEYTAB_RESIZE="resize"
KEYTAB_RIGHT="right"
KEYTAB_SPACE=' '
KEYTAB_TAB=chr(9)
KEYTAB_UP="up"
KEYTAB_MOUSE="mouse"
name_to_key = {
"KEYTAB_ALTA" : KEYTAB_ALTA,
"KEYTAB_ALTB" : KEYTAB_ALTB,
"KEYTAB_ALTC" : KEYTAB_ALTC,
"KEYTAB_ALTD" : KEYTAB_ALTD,
"KEYTAB_ALTE" : KEYTAB_ALTE,
"KEYTAB_ALTF" : KEYTAB_ALTF,
"KEYTAB_ALTG" : KEYTAB_ALTG,
"KEYTAB_ALTH" : KEYTAB_ALTH,
"KEYTAB_ALTI" : KEYTAB_ALTI,
"KEYTAB_ALTJ" : KEYTAB_ALTJ,
"KEYTAB_ALTK" : KEYTAB_ALTK,
"KEYTAB_ALTL" : KEYTAB_ALTL,
"KEYTAB_ALTM" : KEYTAB_ALTM,
"KEYTAB_ALTN" : KEYTAB_ALTN,
"KEYTAB_ALTo" : KEYTAB_ALTo,
"KEYTAB_ALTO" : KEYTAB_ALTO,
"KEYTAB_ALTP" : KEYTAB_ALTP,
"KEYTAB_ALTQ" : KEYTAB_ALTQ,
"KEYTAB_ALTR" : KEYTAB_ALTR,
"KEYTAB_ALTS" : KEYTAB_ALTS,
"KEYTAB_ALTT" : KEYTAB_ALTT,
"KEYTAB_ALTU" : KEYTAB_ALTU,
"KEYTAB_ALTV" : KEYTAB_ALTV,
"KEYTAB_ALTW" : KEYTAB_ALTW,
"KEYTAB_ALTX" : KEYTAB_ALTX,
"KEYTAB_ALTY" : KEYTAB_ALTY,
"KEYTAB_ALTZ" : KEYTAB_ALTZ,
"KEYTAB_BACKSPACE" : KEYTAB_BACKSPACE,
"KEYTAB_BACKSPACE" : KEYTAB_BACKSPACE,
"KEYTAB_BACKTAB" : KEYTAB_BACKTAB,
"KEYTAB_BTAB" : KEYTAB_BTAB,
"KEYTAB_CR" : KEYTAB_CR,
"KEYTAB_CTRLA" : KEYTAB_CTRLA,
"KEYTAB_CTRLB" : KEYTAB_CTRLB,
"KEYTAB_CTRLC" : KEYTAB_CTRLC,
"KEYTAB_CTRLD" : KEYTAB_CTRLD,
"KEYTAB_CTRLE" : KEYTAB_CTRLE,
"KEYTAB_CTRLF" : KEYTAB_CTRLF,
"KEYTAB_CTRLG" : KEYTAB_CTRLG,
"KEYTAB_CTRLH" : KEYTAB_CTRLH,
"KEYTAB_CTRLI" : KEYTAB_CTRLI,
"KEYTAB_CTRLJ" : KEYTAB_CTRLJ,
"KEYTAB_CTRLK" : KEYTAB_CTRLK,
"KEYTAB_CTRLL" : KEYTAB_CTRLL,
"KEYTAB_CTRLM" : KEYTAB_CTRLM,
"KEYTAB_CTRLN" : KEYTAB_CTRLN,
"KEYTAB_CTRLO" : KEYTAB_CTRLO,
"KEYTAB_CTRLP" : KEYTAB_CTRLP,
"KEYTAB_CTRLQ" : KEYTAB_CTRLQ,
"KEYTAB_CTRLR" : KEYTAB_CTRLR,
"KEYTAB_CTRLS" : KEYTAB_CTRLS,
"KEYTAB_CTRLT" : KEYTAB_CTRLT,
"KEYTAB_CTRLU" : KEYTAB_CTRLU,
"KEYTAB_CTRLV" : KEYTAB_CTRLV,
"KEYTAB_CTRLW" : KEYTAB_CTRLW,
"KEYTAB_CTRLX" : KEYTAB_CTRLX,
"KEYTAB_CTRLY" : KEYTAB_CTRLY,
"KEYTAB_CTRLZ" : KEYTAB_CTRLZ,
"KEYTAB_CTRLLEFT" : KEYTAB_CTRLLEFT,
"KEYTAB_CTRLRIGHT" : KEYTAB_CTRLRIGHT,
"KEYTAB_CTRLHOME" : KEYTAB_CTRLHOME,
"KEYTAB_CTRLEND" : KEYTAB_CTRLEND,
"KEYTAB_DELC" : KEYTAB_DELC,
"KEYTAB_DLGCANCEL" : KEYTAB_DLGCANCEL,
"KEYTAB_DLGNOP" : KEYTAB_DLGNOP,
"KEYTAB_DLGOK" : KEYTAB_DLGOK,
"KEYTAB_DOWN" : KEYTAB_DOWN,
"KEYTAB_END" : KEYTAB_END,
"KEYTAB_ESC" : KEYTAB_ESC,
"KEYTAB_F00" : KEYTAB_F00,
"KEYTAB_F01" : KEYTAB_F01,
"KEYTAB_F02" : KEYTAB_F02,
"KEYTAB_F03" : KEYTAB_F03,
"KEYTAB_F04" : KEYTAB_F04,
"KEYTAB_F05" : KEYTAB_F05,
"KEYTAB_F06" : KEYTAB_F06,
"KEYTAB_F07" : KEYTAB_F07,
"KEYTAB_F08" : KEYTAB_F08,
"KEYTAB_F09" : KEYTAB_F09,
"KEYTAB_F10" : KEYTAB_F10,
"KEYTAB_F11" : KEYTAB_F11,
"KEYTAB_F12" : KEYTAB_F12,
"KEYTAB_F13" : KEYTAB_F13,
"KEYTAB_F14" : KEYTAB_F14,
"KEYTAB_F15" : KEYTAB_F15,
"KEYTAB_F16" : KEYTAB_F16,
"KEYTAB_F17" : KEYTAB_F17,
"KEYTAB_F18" : KEYTAB_F18,
"KEYTAB_F19" : KEYTAB_F19,
"KEYTAB_F20" : KEYTAB_F20,
"KEYTAB_F21" : KEYTAB_F21,
"KEYTAB_F22" : KEYTAB_F22,
"KEYTAB_F23" : KEYTAB_F23,
"KEYTAB_F24" : KEYTAB_F24,
"KEYTAB_F25" : KEYTAB_F25,
"KEYTAB_F26" : KEYTAB_F26,
"KEYTAB_F27" : KEYTAB_F27,
"KEYTAB_F28" : KEYTAB_F28,
"KEYTAB_F29" : KEYTAB_F29,
"KEYTAB_F30" : KEYTAB_F30,
"KEYTAB_F31" : KEYTAB_F31,
"KEYTAB_F32" : KEYTAB_F32,
"KEYTAB_F33" : KEYTAB_F33,
"KEYTAB_F34" : KEYTAB_F34,
"KEYTAB_F35" : KEYTAB_F35,
"KEYTAB_F36" : KEYTAB_F36,
"KEYTAB_F37" : KEYTAB_F37,
"KEYTAB_F38" : KEYTAB_F38,
"KEYTAB_F39" : KEYTAB_F39,
"KEYTAB_F40" : KEYTAB_F40,
"KEYTAB_F41" : KEYTAB_F41,
"KEYTAB_F42" : KEYTAB_F42,
"KEYTAB_F43" : KEYTAB_F43,
"KEYTAB_F44" : KEYTAB_F44,
"KEYTAB_F45" : KEYTAB_F45,
"KEYTAB_F46" : KEYTAB_F46,
"KEYTAB_F47" : KEYTAB_F47,
"KEYTAB_F48" : KEYTAB_F48,
"KEYTAB_F49" : KEYTAB_F49,
"KEYTAB_F50" : KEYTAB_F50,
"KEYTAB_F51" : KEYTAB_F51,
"KEYTAB_F52" : KEYTAB_F52,
"KEYTAB_F53" : KEYTAB_F53,
"KEYTAB_F54" : KEYTAB_F54,
"KEYTAB_F55" : KEYTAB_F55,
"KEYTAB_F56" : KEYTAB_F56,
"KEYTAB_F57" : KEYTAB_F57,
"KEYTAB_F58" : KEYTAB_F58,
"KEYTAB_F59" : KEYTAB_F59,
"KEYTAB_F60" : KEYTAB_F60,
"KEYTAB_F61" : KEYTAB_F61,
"KEYTAB_F62" : KEYTAB_F62,
"KEYTAB_F63" : KEYTAB_F63,
"KEYTAB_HOME" : KEYTAB_HOME,
"KEYTAB_INSERT" : KEYTAB_INSERT,
"KEYTAB_KEYPADPLUS" : KEYTAB_KEYPADPLUS,
"KEYTAB_KEYTPADMINUS" : KEYTAB_KEYTPADMINUS,
"KEYTAB_LEFT" : KEYTAB_LEFT,
"KEYTAB_NOKEY" : KEYTAB_NOKEY,
"KEYTAB_PAGEDOWN" : KEYTAB_PAGEDOWN,
"KEYTAB_PAGEUP" : KEYTAB_PAGEUP,
"KEYTAB_REFRESH" : KEYTAB_REFRESH,
"KEYTAB_RESIZE" : KEYTAB_RESIZE,
"KEYTAB_RIGHT" : KEYTAB_RIGHT,
"KEYTAB_SPACE" : KEYTAB_SPACE,
"KEYTAB_TAB" : KEYTAB_TAB,
"KEYTAB_UP" : KEYTAB_UP,
"KEYTAB_MOUSE" : KEYTAB_MOUSE,
}
key_to_name = {}
for name,key in list(name_to_key.items()):
key_to_name[key] = name
keydef = [
((0,),KEYTAB_NOKEY),
((27,-1,),KEYTAB_ESC),
((27,ord('a'),-1),KEYTAB_ALTA),
((27,ord('b'),-1),KEYTAB_ALTB),
((27,ord('c'),-1),KEYTAB_ALTC),
((27,ord('d'),-1),KEYTAB_ALTD),
((27,ord('e'),-1),KEYTAB_ALTE),
((27,ord('f'),-1),KEYTAB_ALTF),
((27,ord('g'),-1),KEYTAB_ALTG),
((27,ord('h'),-1),KEYTAB_ALTH),
((27,ord('i'),-1),KEYTAB_ALTI),
((27,ord('j'),-1),KEYTAB_ALTJ),
((27,ord('k'),-1),KEYTAB_ALTK),
((27,ord('l'),-1),KEYTAB_ALTL),
((27,ord('m'),-1),KEYTAB_ALTM),
((27,ord('n'),-1),KEYTAB_ALTN),
((27,ord('o'),-1),KEYTAB_ALTo),
((27,ord('p'),-1),KEYTAB_ALTP),
((27,ord('q'),-1),KEYTAB_ALTQ),
((27,ord('r'),-1),KEYTAB_ALTR),
((27,ord('s'),-1),KEYTAB_ALTS),
((27,ord('t'),-1),KEYTAB_ALTT),
((27,ord('u'),-1),KEYTAB_ALTU),
((27,ord('v'),-1),KEYTAB_ALTV),
((27,ord('w'),-1),KEYTAB_ALTW),
((27,ord('x'),-1),KEYTAB_ALTX),
((27,ord('y'),-1),KEYTAB_ALTY),
((27,ord('z'),-1),KEYTAB_ALTZ),
((27,ord('A'),-1),KEYTAB_ALTA),
((27,ord('B'),-1),KEYTAB_ALTB),
((27,ord('C'),-1),KEYTAB_ALTC),
((27,ord('D'),-1),KEYTAB_ALTD),
((27,ord('E'),-1),KEYTAB_ALTE),
((27,ord('F'),-1),KEYTAB_ALTF),
((27,ord('G'),-1),KEYTAB_ALTG),
((27,ord('H'),-1),KEYTAB_ALTH),
((27,ord('I'),-1),KEYTAB_ALTI),
((27,ord('J'),-1),KEYTAB_ALTJ),
((27,ord('K'),-1),KEYTAB_ALTK),
((27,ord('L'),-1),KEYTAB_ALTL),
((27,ord('M'),-1),KEYTAB_ALTM),
((27,ord('N'),-1),KEYTAB_ALTN),
((27,ord('O'),-1),KEYTAB_ALTO),
((27,ord('P'),-1),KEYTAB_ALTP),
((27,ord('Q'),-1),KEYTAB_ALTQ),
((27,ord('R'),-1),KEYTAB_ALTR),
((27,ord('S'),-1),KEYTAB_ALTS),
((27,ord('T'),-1),KEYTAB_ALTT),
((27,ord('U'),-1),KEYTAB_ALTU),
((27,ord('V'),-1),KEYTAB_ALTV),
((27,ord('W'),-1),KEYTAB_ALTW),
((27,ord('X'),-1),KEYTAB_ALTX),
((27,ord('Y'),-1),KEYTAB_ALTY),
((27,ord('Z'),-1),KEYTAB_ALTZ),
((curses.KEY_BACKSPACE,-1),KEYTAB_BACKSPACE),
((8,-1),KEYTAB_BACKSPACE),
((127,-1),KEYTAB_BACKSPACE),
((27,ord('['),ord('Z'),-1),KEYTAB_BACKTAB),
((curses.KEY_BTAB,-1),KEYTAB_BTAB),
((10,-1),KEYTAB_CR),
((1,-1),KEYTAB_CTRLA),
((2,-1),KEYTAB_CTRLB),
((3,-1),KEYTAB_CTRLC),
((4,-1),KEYTAB_CTRLD),
((5,-1),KEYTAB_CTRLE),
((6,-1),KEYTAB_CTRLF),
((7,-1),KEYTAB_CTRLG),
((8,-1),KEYTAB_CTRLH),
((9,-1),KEYTAB_CTRLI),
((10,-1),KEYTAB_CTRLJ),
((11,-1),KEYTAB_CTRLK),
((12,-1),KEYTAB_CTRLL),
((13,-1),KEYTAB_CTRLM),
((14,-1),KEYTAB_CTRLN),
((15,-1),KEYTAB_CTRLO),
((16,-1),KEYTAB_CTRLP),
((17,-1),KEYTAB_CTRLQ),
((18,-1),KEYTAB_CTRLR),
((19,-1),KEYTAB_CTRLS),
((20,-1),KEYTAB_CTRLT),
((21,-1),KEYTAB_CTRLU),
((22,-1),KEYTAB_CTRLV),
((23,-1),KEYTAB_CTRLW),
((24,-1),KEYTAB_CTRLX),
((25,-1),KEYTAB_CTRLY),
((26,-1),KEYTAB_CTRLZ),
((545,-1),KEYTAB_CTRLLEFT),
((560,-1),KEYTAB_CTRLRIGHT),
((530,-1),KEYTAB_CTRLHOME),
((525,-1),KEYTAB_CTRLEND),
((curses.KEY_DC,-1),KEYTAB_DELC),
((curses.KEY_DOWN,-1),KEYTAB_DOWN),
((curses.KEY_END,-1),KEYTAB_END),
((curses.KEY_F0,-1),KEYTAB_F00),
((curses.KEY_F1,-1),KEYTAB_F01),
((curses.KEY_F2,-1),KEYTAB_F02),
((curses.KEY_F3,-1),KEYTAB_F03),
((curses.KEY_F4,-1),KEYTAB_F04),
((curses.KEY_F5,-1),KEYTAB_F05),
((curses.KEY_F6,-1),KEYTAB_F06),
((curses.KEY_F7,-1),KEYTAB_F07),
((curses.KEY_F8,-1),KEYTAB_F08),
((curses.KEY_F9,-1),KEYTAB_F09),
((curses.KEY_F10,-1),KEYTAB_F10),
((curses.KEY_F11,-1),KEYTAB_F11),
((curses.KEY_F12,-1),KEYTAB_F12),
((curses.KEY_F13,-1),KEYTAB_F13),
((curses.KEY_F14,-1),KEYTAB_F14),
((curses.KEY_F15,-1),KEYTAB_F15),
((curses.KEY_F16,-1),KEYTAB_F16),
((curses.KEY_F17,-1),KEYTAB_F17),
((curses.KEY_F18,-1),KEYTAB_F18),
((curses.KEY_F19,-1),KEYTAB_F19),
((curses.KEY_F20,-1),KEYTAB_F20),
((curses.KEY_F21,-1),KEYTAB_F21),
((curses.KEY_F22,-1),KEYTAB_F22),
((curses.KEY_F23,-1),KEYTAB_F23),
((curses.KEY_F24,-1),KEYTAB_F24),
((curses.KEY_F25,-1),KEYTAB_F25),
((curses.KEY_F26,-1),KEYTAB_F26),
((curses.KEY_F27,-1),KEYTAB_F27),
((curses.KEY_F28,-1),KEYTAB_F28),
((curses.KEY_F29,-1),KEYTAB_F29),
((curses.KEY_F30,-1),KEYTAB_F30),
((curses.KEY_F31,-1),KEYTAB_F31),
((curses.KEY_F32,-1),KEYTAB_F32),
((curses.KEY_F33,-1),KEYTAB_F33),
((curses.KEY_F34,-1),KEYTAB_F34),
((curses.KEY_F35,-1),KEYTAB_F35),
((curses.KEY_F36,-1),KEYTAB_F36),
((curses.KEY_F37,-1),KEYTAB_F37),
((curses.KEY_F38,-1),KEYTAB_F38),
((curses.KEY_F39,-1),KEYTAB_F39),
((curses.KEY_F40,-1),KEYTAB_F40),
((curses.KEY_F41,-1),KEYTAB_F41),
((curses.KEY_F42,-1),KEYTAB_F42),
((curses.KEY_F43,-1),KEYTAB_F43),
((curses.KEY_F44,-1),KEYTAB_F44),
((curses.KEY_F45,-1),KEYTAB_F45),
((curses.KEY_F46,-1),KEYTAB_F46),
((curses.KEY_F47,-1),KEYTAB_F47),
((curses.KEY_F48,-1),KEYTAB_F48),
((curses.KEY_F49,-1),KEYTAB_F49),
((curses.KEY_F50,-1),KEYTAB_F50),
((curses.KEY_F51,-1),KEYTAB_F51),
((curses.KEY_F52,-1),KEYTAB_F52),
((curses.KEY_F53,-1),KEYTAB_F53),
((curses.KEY_F54,-1),KEYTAB_F54),
((curses.KEY_F55,-1),KEYTAB_F55),
((curses.KEY_F56,-1),KEYTAB_F56),
((curses.KEY_F57,-1),KEYTAB_F57),
((curses.KEY_F58,-1),KEYTAB_F58),
((curses.KEY_F59,-1),KEYTAB_F59),
((curses.KEY_F60,-1),KEYTAB_F60),
((curses.KEY_F61,-1),KEYTAB_F61),
((curses.KEY_F62,-1),KEYTAB_F62),
((curses.KEY_F63,-1),KEYTAB_F63),
((curses.KEY_HOME,-1),KEYTAB_HOME),
((curses.KEY_IC,-1),KEYTAB_INSERT),
((27,ord('O'),ord('k'),-1),KEYTAB_KEYPADPLUS),
((27,ord('O'),ord('m'),-1),KEYTAB_KEYTPADMINUS),
((curses.KEY_LEFT,-1),KEYTAB_LEFT),
((curses.KEY_NPAGE,-1),KEYTAB_PAGEDOWN),
((curses.KEY_PPAGE,-1),KEYTAB_PAGEUP),
((curses.KEY_RESIZE,-1),KEYTAB_RESIZE),
((curses.KEY_RIGHT,-1),KEYTAB_RIGHT),
((ord(' '),-1),KEYTAB_SPACE),
((9,-1),KEYTAB_TAB),
((curses.KEY_UP,-1),KEYTAB_UP),
((curses.KEY_MOUSE,-1),KEYTAB_MOUSE),
]
```
#### File: ped/ped_core/undo.py
```python
import copy
class UndoAction:
""" represents one undoable action """
def __init__(self, func, tpl ):
""" constructed with an undo func which is a function to call on undo
and tpl which is a tuple of arguments to pass to the function
a shallow copy of the tuple is made preserving the state """
self.func = func
self.tpl = copy.copy(tpl)
def __del__(self):
self.func = None
self.tpl = None
def undo(self):
""" undo this action by invoking the function and passing the tuple
to it """
self.func(*self.tpl)
class UndoTransaction:
""" represents a collection of undo actions that should be undone together
as one transaction """
def __init__(self, manager):
""" constructed with a reference to the UndoManager that this
transaction belongs to """
self.manager = manager
self.list = []
def __del__(self):
self.manager = None
self.list = None
def isEmpty(self):
""" tests to see if this transaction has no actions in it """
return not self.list
def push(self, func, tpl ):
""" pushes a new undo action onto this transaction """
if not self.manager.inUndo():
self.list.append(UndoAction(func,tpl))
def undo(self):
""" undoes all of the actions in this transaction """
while self.list:
self.list[-1].undo()
del self.list[-1]
self.list = []
class UndoManager:
""" manager for a list of undo transactions for a given context """
def __init__(self):
""" construct an UndoManager, no arguments """
self.transactions = []
self.inundo = False
def __del__(self):
self.transactions = None
def inUndo(self):
""" returns true if currently executing an undo, used to prevent recursion during undo """
return self.inundo
def new_transaction(self):
""" start a new transaction or return the current empty transaction """
if not self.transactions or not self.transactions[-1].isEmpty():
self.transactions.append(UndoTransaction(self))
return self.transactions[-1]
def get_transaction(self):
""" return the current transaction if any """
if self.transactions:
return self.transactions[-1]
else:
return self.new_transaction()
def undo_transaction(self):
""" undo the current transaction """
if self.transactions:
self.inundo = True
self.transactions[-1].undo()
del self.transactions[-1]
self.inundo = False
return not self.transactions
def flush_undo(self):
""" free the transactions and forget all the undo information """
self.transactions = []
```
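As a quick illustration of how these pieces fit together, the following standalone sketch (not part of ped) pushes an action that knows how to restore a list element, mutates the list, and then rolls the whole transaction back.
```python
# Illustrative sketch of the undo module: push an action that knows how to
# restore state, mutate the state, then undo the whole transaction.
from ped_core import undo

items = ["alpha", "beta"]
mgr = undo.UndoManager()

mgr.new_transaction()                                          # open a transaction
mgr.get_transaction().push(items.__setitem__, (1, items[1]))   # how to put "beta" back
items[1] = "gamma"                                             # the actual change

mgr.undo_transaction()        # replays the pushed action: items[1] = "beta"
assert items == ["alpha", "beta"]
```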
#### File: ped/ped_extensions/dummy_extension.py
```python
from ped_core.editor_common import Editor
from ped_dialog.message_dialog import message
import time
def ped_ext_info():
""" return registration information for extension_manager """
return ( "CMD_HELP", None, None, None, "test_extension" )
def ped_ext_invoke( cmd_id, target, ch ):
""" do our thing with the target object """
message( target.scr, "Help Extension", "On our way to help!",False )
time.sleep(5)
return True
```
#### File: ped/tests/test_editor_common.py
```python
from ped_core import editor_common
import io
import pprint
import os
import curses
import curses.ascii
import time
import re
from ped_core import keymap
from ped_core import keytab
from ped_core import clipboard
from ped_test_util import read_str, match_attr, undo_all, window_pos, play_macro, validate_mark, validate_screen, editor_test_suite
import subprocess
def test_memline():
m = editor_common.MemLine( "01234567890123456789" )
assert( m.length() == 20 )
assert( m.getContent() == "01234567890123456789" )
def test_EditFile(testdir):
lines_to_test = ["This is the first line","This is the second line","This is the third line","This is the last line"]
testfile = testdir.makefile(".txt",lines_to_test[0],lines_to_test[1],lines_to_test[2],lines_to_test[3])
fn = str(testfile)
ef = editor_common.EditFile( fn )
assert(ef.get_tabs() == [ 4, 8 ] )
ef.set_tabs( [ 8, 16] )
assert(ef.get_tabs() == [ 8, 16 ] )
w = ef.getWorking()
assert( not w.closed )
assert( ef.getModref() == 0)
assert( isinstance(ef.getUndoMgr(), editor_common.undo.UndoManager ))
assert( not ef.isChanged() )
assert( not ef.isReadOnly() )
ef.setReadOnly( True )
assert( ef.isReadOnly() )
ef.setReadOnly( False )
assert( not ef.isReadOnly() )
assert( ef.getFilename() == fn )
ef.setFilename( "bogus.txt" )
assert( ef.getFilename() == "bogus.txt" )
ef.setFilename( fn )
assert( ef.getFilename() == fn )
fls = ef.getLines()
assert( ef.numLines() == 4 )
ef.close()
assert( ef.getWorking() == None )
ef.load()
w = ef.getWorking()
assert( not w.closed )
for line in range(0,len(lines_to_test)):
assert(ef.length(line) == len(lines_to_test[line]))
fl = ef.getLine(line)
assert(fl.rstrip() == lines_to_test[line])
assert(fls[line].rstrip() == lines_to_test[line])
fls = ef.getLines(1,3)
assert(len(fls) == 2 )
assert(fls[0].rstrip() == lines_to_test[1] and fls[1].rstrip() == lines_to_test[2])
ef.deleteLine(1)
fls = ef.getLines()
assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[2] and fls[2].rstrip() == lines_to_test[3] )
assert(ef.numLines() == 3 )
assert(ef.getModref() == 1 )
assert(ef.isChanged() )
um = ef.getUndoMgr()
um.undo_transaction()
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(ef.numLines() == 4)
assert(ef.getModref() == 2)
assert(not ef.isChanged() )
new_test_line = "This is the line for insert"
ef.insertLine(2,new_test_line)
fls = ef.getLines()
assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[1] and fls[2].rstrip() == new_test_line and fls[3].rstrip() == lines_to_test[2] and fls[4].rstrip() == lines_to_test[3] )
assert(ef.numLines() == 5 )
assert(ef.getModref() == 3 )
assert(ef.isChanged() )
um = ef.getUndoMgr()
um.undo_transaction()
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(ef.numLines() == 4)
assert(ef.getModref() == 4)
assert(not ef.isChanged() )
ef.replaceLine(3,new_test_line)
fls = ef.getLines()
assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[1] and fls[2].rstrip() == lines_to_test[2] and fls[3].rstrip() == new_test_line )
assert(ef.numLines() == 4 )
assert(ef.getModref() == 5 )
assert(ef.isChanged() )
um = ef.getUndoMgr()
um.undo_transaction()
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(ef.numLines() == 4)
assert(ef.getModref() == 6)
assert(not ef.isChanged() )
fd = str(testdir.tmpdir)
backup_filepath = ef.make_backup_dir( fn, fd )
assert(os.path.exists(os.path.dirname(backup_filepath)))
ef.insertLine(10,new_test_line)
ef.backuproot = fd
ef.save()
assert(os.path.exists(backup_filepath))
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(fls[10].rstrip() == new_test_line)
newname = os.path.join(fd,"1_"+os.path.basename(fn))
ef.save(newname)
assert(os.path.exists(newname))
ef.close()
ef.load()
assert(ef.getFilename() == newname)
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(fls[10].rstrip() == new_test_line)
assert(ef.get_tab_stop(4) == 8)
assert(ef.get_tab_stop(10) == 16 )
assert(ef.get_tab_stop(10,True) == 8)
tabby_string = "01234\t56789012\t3456789"
expanded_string = "01234 56789012 3456789"
assert(ef.expand_tabs(tabby_string) == expanded_string)
def test_Editor_unwrapped(testdir,capsys):
with capsys.disabled():
curses.wrapper(editor_test_suite,testdir,False,None)
def test_Editor_wrapped(testdir,capsys):
with capsys.disabled():
curses.wrapper(editor_test_suite,testdir,True,None)
def test_StreamEditor(testdir,capsys):
with capsys.disabled():
def main(stdscr,testdir):
max_y,max_x = stdscr.getmaxyx()
generator_lines = [
"for i in range(0,1000000):",
" print('Line %d of test file'%i)",
]
generator_script = testdir.makepyfile(**{ "generator": "\n".join(generator_lines)})
cmd = 'python3 %s'%str(generator_script)
se = editor_common.StreamEditor(stdscr,stdscr.subwin(max_y,max_x,0,0),"Test Stream",subprocess.Popen(cmd,
shell=True,
bufsize=1024,
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout)
starting_num_lines = se.numLines()
time.sleep(1)
for i in range(0,100):
se.main(False)
assert(se.getContent(i) == 'Line %d of test file'%i)
current_line = se.getLine()
se.main(False,6) # ctrl-f
for i in range(0,200):
se.main(False)
assert(se.follow == True and se.getLine() > current_line)
se.main(False,6) # ctrl-f
current_line = se.getLine()
for i in range(0,200):
se.main(False)
assert(se.follow == False and se.getLine() == current_line)
play_macro(se, [keytab.KEYTAB_ALTO,keytab.KEYTAB_TAB,keytab.KEYTAB_DOWN]+list("testout.out")+[keytab.KEYTAB_CR,keytab.KEYTAB_CR])
assert(se.getFilename().endswith("testout.out") and os.path.exists(se.getFilename()))
se.close()
curses.wrapper(main,testdir)
```
#### File: ped/tests/test_python_mode.py
```python
from ped_core import editor_manager
from ped_core import editor_common
import curses
import curses.ascii
from ped_core import keytab
from ped_test_util import read_str,validate_screen,editor_test_suite,play_macro,screen_size,match_attr,wait_for_screen
def test_python_mode(testdir,capsys):
with capsys.disabled():
def main(stdscr):
lines_to_test = [
"import editor_manager",
"import editor_common",
"import sys",
"",
"# This is a comment",
"def main( args ):",
" if args[0] == 'test':",
" print( 'This is a test' )",
" return args[1]",
"",
"if __name__ == '__main__':",
" main(sys.argv) # This line ends with a comment"
]
args = { "python_test":"\n".join(lines_to_test)}
testfile = testdir.makefile(".py", **args)
green = curses.color_pair(1)
red = curses.color_pair(2)
cyan = curses.color_pair(3)
white = curses.color_pair(4)
ed = editor_common.Editor(stdscr,None,str(testfile))
ed.setWin(stdscr.subwin(ed.max_y,ed.max_x,0,0))
validate_screen(ed)
assert(ed.mode and ed.mode.name() == "python_mode")
match_list = [(0,0,6,cyan),(4,0,19,red),(6,18,5,green),(11,19,31,red)]
for line,pos,width,attr in match_list:
assert(match_attr(ed.scr,line+1,pos,1,width,attr))
ed.goto(6,0)
ed.endln()
ed.main(False,10)
assert(ed.getLine() == 7 and ed.getPos() == 8)
ed.insert('foo = "A double quoted string"')
wait_for_screen(ed)
assert(match_attr(ed.scr,8,8,1,3,white))
assert(match_attr(ed.scr,8,14,1,24,green))
curses.wrapper(main)
```
#### File: ped/tests/test_ssh_dialog.py
```python
import curses
import curses.ascii
from ped_core import keytab
from ped_test_util import read_str,validate_dialog,editor_test_suite,play_macro,screen_size,match_attr
from ped_ssh_dialog.ssh_dialog import SSHFileDialog
from ped_ssh_dialog.ssh_mod import ssh_put, ssh_del, ssh_stat
from ped_dialog import dialog
import pytest
import os
import time
@pytest.fixture(scope="function")
def sftp_testdir(request,testdir):
sftp_basepath = os.environ.get("SSH_DIALOG_BASEPATH",None)
sftp_username = os.environ.get("SSH_DIALOG_USERNAME",None)
sftp_password = os.environ.get("SSH_DIALOG_PASSWORD",None)
assert sftp_basepath and sftp_username and sftp_password,"SSH_DIALOG environment not set"
local_files = []
remote_files = []
local_file_names = []
remote_file_names = []
for i in range(0,5):
args = { "local_%d"%(i):"\n".join(["local_%d test line %d"%(i,j) for j in range(0,200)])}
local_files.append(testdir.makefile(".txt",**args))
args = { "remote_%d"%(i):"\n".join(["local_%d test line %d"%(i,j) for j in range(0,200)])}
remote_files.append(testdir.makefile(".txt",**args))
for f in remote_files:
ssh_put( str(f), sftp_basepath+str(f),lambda : { "ssh_username" : sftp_username, "ssh_password" : sftp_password}, False )
remote_file_names.append(f.basename)
f.remove()
for f in local_files:
local_file_names.append(f.basename)
def cleanup_sftp_testdir():
        ssh_del( sftp_basepath+str(testdir.tmpdir.parts()[1]),True, lambda : { "ssh_username" : sftp_username, "ssh_password" : sftp_password })
request.addfinalizer(cleanup_sftp_testdir)
return {"ssh_username" : sftp_username,
"ssh_password" : <PASSWORD>,
"ssh_basepath": sftp_basepath+str(testdir.tmpdir),
"local_path": str(testdir.tmpdir),
"local_files" : local_file_names,
"remote_files" : remote_file_names,
"testdir" : testdir }
def test_ssh_dialog(sftp_testdir,capsys):
with capsys.disabled():
def main(stdscr):
screen_size( 30, 100 )
d = SSHFileDialog(stdscr, title = "SFTP File Manager",
remote_path=sftp_testdir["ssh_basepath"],
ssh_username=sftp_testdir["ssh_username"],
                ssh_password=sftp_testdir["ssh_password"],
local_path=sftp_testdir["local_path"])
d.main(False,True)
validate_dialog(d)
d.main(False,True,keytab.KEYTAB_TAB)
d.main(False,True,keytab.KEYTAB_TAB)
d.main(False,True,keytab.KEYTAB_TAB)
assert(d.focus_list[d.current][1].name == "ssh_files")
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
(ch,values) = d.main(False,True,keytab.KEYTAB_CR)
selection,file_list = values["ssh_files"]
assert(file_list[selection] == sftp_testdir["remote_files"][2] and values["ssh_file"] == sftp_testdir["remote_files"][2] and values["local_file"] == sftp_testdir["remote_files"][2])
d.goto(d.get_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(os.path.exists(os.path.join(str(sftp_testdir["testdir"].tmpdir),sftp_testdir["remote_files"][2])))
d.goto(d.file_list)
assert(d.focus_list[d.current][1].name == "local_files")
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
(ch,values) = d.main(False,True,keytab.KEYTAB_CR)
selection,file_list = values["local_files"]
assert(file_list[selection] == sftp_testdir["local_files"][2] and values["ssh_file"] == sftp_testdir["local_files"][2] and values["local_file"] == sftp_testdir["local_files"][2])
d.goto(d.put_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ssh_stat( values["ssh_dir"]+"/"+values["ssh_file"],lambda : { 'ssh_username':sftp_testdir['ssh_username'], 'ssh_password':sftp_testdir['ssh_password'] }) != (-1,-1))
d.goto(d.open_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ch == dialog.Component.CMP_KEY_OK)
d.goto(d.cancel_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ch == dialog.Component.CMP_KEY_CANCEL)
curses.wrapper(main)
``` |
{
"source": "jpg013/CS5570-project",
"score": 2
} |
#### File: jpg013/CS5570-project/build_graphs.py
```python
from history import History
from history_query_builder import HistoryQueryBuilder
from data_generation import DataGeneration
from recovery_engine import RecoveryEngine
from serializable_or_not import serializable_or_not
from app_config import AppConfig
def build_graphs():
data_graph = build_data_graph()
tx_graph = build_transaction_graph()
return {
'transaction_graph': tx_graph,
'data_set_graph': data_graph
}
def build_data_graph():
iter_count = 100
results = []
data_items = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
for idx in range(10):
AppConfig.set('data_set', set(data_items[0:idx+1]))
not_recoverable_count = 0
not_aca_count = 0
not_strict_count = 0
for x in range(iter_count):
data_generation = DataGeneration()
history = History(data_generation.generate_transactions())
history.interleave_transaction_schedule()
recovery_engine = RecoveryEngine(history)
report = recovery_engine.get_report()
if report.is_not_recoverable():
not_recoverable_count += 1
if report.is_not_aca():
not_aca_count += 1
if report.is_not_strict():
not_strict_count += 1
results.append({
'not_recoverable_percent': (not_recoverable_count/iter_count) * 100,
'not_cacadeless_percent': (not_aca_count/iter_count) * 100,
'not_strict_percent': (not_strict_count/iter_count) * 100,
})
AppConfig.restore_defaults()
return results
def build_transaction_graph():
iter_count = 100
results = []
for idx in range(10):
AppConfig.set('transaction_count', { 'min': 1, 'max': idx+1 })
not_recoverable_count = 0
not_aca_count = 0
not_strict_count = 0
for x in range(iter_count):
data_generation = DataGeneration()
history = History(data_generation.generate_transactions())
history.interleave_transaction_schedule()
recovery_engine = RecoveryEngine(history)
report = recovery_engine.get_report()
if report.is_not_recoverable():
not_recoverable_count += 1
if report.is_not_aca():
not_aca_count += 1
if report.is_not_strict():
not_strict_count += 1
results.append({
'not_recoverable_percent': (not_recoverable_count/iter_count) * 100,
'not_cacadeless_percent': (not_aca_count/iter_count) * 100,
'not_strict_percent': (not_strict_count/iter_count) * 100,
})
AppConfig.restore_defaults()
return results
```
#### File: jpg013/CS5570-project/data_generation.py
```python
import random
from app_config import AppConfig
import sys
from transaction import Transaction
from data_operation import DataOperation, type_switcher, OperationType
class DataGeneration():
"""DataGeneration is a helper class with several utility methods
for generating random data sets for transactions and data operations"""
def __init__(self):
self.transaction_count = AppConfig.get('transaction_count')
self.data_set = AppConfig.get('data_set')
def get_transaction_data_cardinality(self):
# Return the number of transaction data items for the history
return random.randint(1, len(self.data_set))
def generate_data_item(self):
# Return data item value
return random.sample(self.data_set, 1)[0]
def make_transaction_data_sample(self):
# returns list of data_items
data_sample = []
data_cardinality = self.get_transaction_data_cardinality()
while len(data_sample) < data_cardinality:
data = self.generate_data_item()
if data not in data_sample:
data_sample.append(data)
return data_sample
def generate_transactions(self):
# Generate random number of transactions with data items
transaction_cardinality = self.get_transaction_cardinality()
transactions = []
for idx in range(transaction_cardinality):
tx = Transaction(idx+1)
for x in self.generate_tx_data_operations(tx, self.make_transaction_data_sample()):
tx.add_data_operation(x)
transactions.append(tx)
return transactions
def get_transaction_cardinality(self):
# Return the number of transactions for the history
return random.randint(
self.transaction_count.get("min"),
self.transaction_count.get("max")
)
def generate_tx_data_operations(self, transaction, data_items):
if len(data_items) < 1:
raise ValueError('data_items list must have at least one item')
data_operations = []
for item in data_items:
# Generate random data Operations
data_operations = data_operations + list(map(lambda type: DataOperation(type, transaction, item), self.generate_read_writes_types()))
# Randomly shuffle all the data operations.
random.shuffle(data_operations)
# Add a commit / abort data operation at the end
data_operations.append(DataOperation(self.generate_commit_abort_type(), transaction, None))
return data_operations
# Helper method to randomly generate list of read/write data operations
def generate_read_writes_types(self):
# Each data item may have a read, write, or both
count = random.randint(1, 2)
        if count == 1:
return [type_switcher.get(random.randint(0,1))]
else:
return [OperationType.READ, OperationType.WRITE]
def generate_commit_abort_type(self):
return type_switcher.get(random.randint(2,3))
``` |
{
"source": "jpgacrama/DeepLearning",
"score": 2
} |
#### File: DeepLearning/pt_framework/c14e1_seq2seq_translate.py
```python
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.text \
import text_to_word_sequence
from tensorflow.keras.preprocessing.sequence \
import pad_sequences
import numpy as np
import random
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
EPOCHS = 20
BATCH_SIZE = 128
MAX_WORDS = 10000
READ_LINES = 60000
LAYER_SIZE = 256
EMBEDDING_WIDTH = 128
TEST_PERCENT = 0.2
SAMPLE_SIZE = 20
OOV_WORD = 'UNK'
PAD_INDEX = 0
OOV_INDEX = 1
START_INDEX = MAX_WORDS - 2
STOP_INDEX = MAX_WORDS - 1
MAX_LENGTH = 60
SRC_DEST_FILE_NAME = '../data/fra.txt'
# Function to read file.
def read_file_combined(file_name, max_len):
file = open(file_name, 'r', encoding='utf-8')
src_word_sequences = []
dest_word_sequences = []
for i, line in enumerate(file):
if i == READ_LINES:
break
pair = line.split('\t')
word_sequence = text_to_word_sequence(pair[1])
src_word_sequence = word_sequence[0:max_len]
src_word_sequences.append(src_word_sequence)
word_sequence = text_to_word_sequence(pair[0])
dest_word_sequence = word_sequence[0:max_len]
dest_word_sequences.append(dest_word_sequence)
file.close()
return src_word_sequences, dest_word_sequences
# Functions to tokenize and un-tokenize sequences.
def tokenize(sequences):
# "MAX_WORDS-2" used to reserve two indices
# for START and STOP.
tokenizer = Tokenizer(num_words=MAX_WORDS-2,
oov_token=OOV_WORD)
tokenizer.fit_on_texts(sequences)
token_sequences = tokenizer.texts_to_sequences(sequences)
return tokenizer, token_sequences
def tokens_to_words(tokenizer, seq):
word_seq = []
for index in seq:
if index == PAD_INDEX:
word_seq.append('PAD')
elif index == OOV_INDEX:
word_seq.append(OOV_WORD)
elif index == START_INDEX:
word_seq.append('START')
elif index == STOP_INDEX:
word_seq.append('STOP')
else:
word_seq.append(tokenizer.sequences_to_texts(
[[index]])[0])
print(word_seq)
# Read file and tokenize.
src_seq, dest_seq = read_file_combined(SRC_DEST_FILE_NAME,
MAX_LENGTH)
src_tokenizer, src_token_seq = tokenize(src_seq)
dest_tokenizer, dest_token_seq = tokenize(dest_seq)
# Prepare training data.
dest_target_token_seq = [x + [STOP_INDEX] for x in dest_token_seq]
dest_input_token_seq = [[START_INDEX] + x for x in
dest_target_token_seq]
src_input_data = pad_sequences(src_token_seq)
dest_input_data = pad_sequences(dest_input_token_seq,
padding='post')
dest_target_data = pad_sequences(
dest_target_token_seq, padding='post', maxlen
= len(dest_input_data[0]))
# Convert to same precision as model.
src_input_data = src_input_data.astype(np.int64)
dest_input_data = dest_input_data.astype(np.int64)
dest_target_data = dest_target_data.astype(np.int64)
# Split into training and test set.
rows = len(src_input_data[:,0])
all_indices = list(range(rows))
test_rows = int(rows * TEST_PERCENT)
test_indices = random.sample(all_indices, test_rows)
train_indices = [x for x in all_indices if x not in test_indices]
train_src_input_data = src_input_data[train_indices]
train_dest_input_data = dest_input_data[train_indices]
train_dest_target_data = dest_target_data[train_indices]
test_src_input_data = src_input_data[test_indices]
test_dest_input_data = dest_input_data[test_indices]
test_dest_target_data = dest_target_data[test_indices]
# Create a sample of the test set that we will inspect in detail.
test_indices = list(range(test_rows))
sample_indices = random.sample(test_indices, SAMPLE_SIZE)
sample_input_data = test_src_input_data[sample_indices]
sample_target_data = test_dest_target_data[sample_indices]
# Create Dataset objects.
trainset = TensorDataset(torch.from_numpy(train_src_input_data),
torch.from_numpy(train_dest_input_data),
torch.from_numpy(train_dest_target_data))
testset = TensorDataset(torch.from_numpy(test_src_input_data),
torch.from_numpy(test_dest_input_data),
torch.from_numpy(test_dest_target_data))
# Define models.
class EncoderModel(nn.Module):
def __init__(self):
super().__init__()
self.embedding_layer = nn.Embedding(MAX_WORDS, EMBEDDING_WIDTH)
nn.init.uniform_(self.embedding_layer.weight, -0.05, 0.05) # Default is -1, 1.
self.lstm_layers = nn.LSTM(EMBEDDING_WIDTH, LAYER_SIZE, num_layers=2, batch_first=True)
def forward(self, inputs):
x = self.embedding_layer(inputs)
x = self.lstm_layers(x)
return x[1]
class DecoderModel(nn.Module):
def __init__(self):
super().__init__()
self.state = None
self.use_state = False
self.embedding_layer = nn.Embedding(MAX_WORDS, EMBEDDING_WIDTH)
nn.init.uniform_(self.embedding_layer.weight, -0.05, 0.05) # Default is -1, 1.
self.lstm_layers = nn.LSTM(EMBEDDING_WIDTH, LAYER_SIZE, num_layers=2, batch_first=True)
self.output_layer = nn.Linear(LAYER_SIZE, MAX_WORDS)
def forward(self, inputs):
x = self.embedding_layer(inputs)
if(self.use_state):
x = self.lstm_layers(x, self.state)
else:
x = self.lstm_layers(x)
self.state = (x[1][0].detach().clone(), x[1][1].detach().clone()) # Store most recent internal state.
x = self.output_layer(x[0])
return x
# Functions to provide explicit control of LSTM state.
def set_state(self, state):
self.state = state
self.use_state = True
return
def get_state(self):
return self.state
def clear_state(self):
self.use_state = False
return
encoder_model = EncoderModel()
decoder_model = DecoderModel()
# Loss functions and optimizer.
encoder_optimizer = torch.optim.RMSprop(encoder_model.parameters(), lr=0.001)
decoder_optimizer = torch.optim.RMSprop(decoder_model.parameters(), lr=0.001)
loss_function = nn.CrossEntropyLoss()
# Using a custom training loop instead of our standard training function.
# Transfer model to GPU.
encoder_model.to(device)
decoder_model.to(device)
trainloader = DataLoader(dataset=trainset, batch_size=BATCH_SIZE, shuffle=True)
testloader = DataLoader(dataset=testset, batch_size=BATCH_SIZE, shuffle=False)
# Train and test repeatedly.
for i in range(EPOCHS):
encoder_model.train() # Set model in training mode.
decoder_model.train() # Set model in training mode.
train_loss = 0.0
train_correct = 0
train_batches = 0
train_elems = 0
for src_inputs, dest_inputs, dest_targets in trainloader:
# Move data to GPU.
src_inputs, dest_inputs, dest_targets = src_inputs.to(
device), dest_inputs.to(device), dest_targets.to(device)
# Zero the parameter gradients.
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Forward pass.
encoder_state = encoder_model(src_inputs)
decoder_model.set_state(encoder_state)
outputs = decoder_model(dest_inputs)
loss = loss_function(outputs.view(-1, MAX_WORDS), dest_targets.view(-1))
# Accumulate metrics.
_, indices = torch.max(outputs.data, 2)
train_correct += (indices == dest_targets).sum().item()
train_elems += indices.numel()
train_batches += 1
train_loss += loss.item()
# Backward pass and update.
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
train_loss = train_loss / train_batches
train_acc = train_correct / train_elems
# Evaluate the model on the test dataset.
encoder_model.eval() # Set model in inference mode.
decoder_model.eval() # Set model in inference mode.
test_loss = 0.0
test_correct = 0
test_batches = 0
test_elems = 0
for src_inputs, dest_inputs, dest_targets in testloader:
# Move data to GPU.
src_inputs, dest_inputs, dest_targets = src_inputs.to(
device), dest_inputs.to(device), dest_targets.to(device)
encoder_state = encoder_model(src_inputs)
decoder_model.set_state(encoder_state)
outputs = decoder_model(dest_inputs)
loss = loss_function(outputs.view(-1, MAX_WORDS), dest_targets.view(-1))
_, indices = torch.max(outputs, 2)
test_correct += (indices == dest_targets).sum().item()
test_elems += indices.numel()
test_batches += 1
test_loss += loss.item()
test_loss = test_loss / test_batches
test_acc = test_correct / test_elems
print(f'Epoch {i+1}/{EPOCHS} loss: {train_loss:.4f} - acc: {train_acc:0.4f} - val_loss: {test_loss:.4f} - val_acc: {test_acc:0.4f}')
# Loop through samples to see result
for (test_input, test_target) in zip(sample_input_data,
sample_target_data):
# Run a single sentence through encoder model.
x = np.reshape(test_input, (1, -1))
inputs = torch.from_numpy(x)
inputs = inputs.to(device)
last_states = encoder_model(inputs)
# Provide resulting state and START_INDEX as input
# to decoder model.
decoder_model.set_state(last_states)
prev_word_index = START_INDEX
produced_string = ''
pred_seq = []
for j in range(MAX_LENGTH):
x = np.reshape(np.array(prev_word_index), (1, 1))
# Predict next word and capture internal state.
inputs = torch.from_numpy(x)
inputs = inputs.to(device)
outputs = decoder_model(inputs)
preds = outputs.cpu().detach().numpy()[0][0]
state = decoder_model.get_state()
decoder_model.set_state(state)
# Find the most probable word.
prev_word_index = preds.argmax()
pred_seq.append(prev_word_index)
if prev_word_index == STOP_INDEX:
break
tokens_to_words(src_tokenizer, test_input)
tokens_to_words(dest_tokenizer, test_target)
tokens_to_words(dest_tokenizer, pred_seq)
print('\n\n')
```
#### File: DeepLearning/stand_alone/c1e2_perceptron_learning_plot.py
```python
import matplotlib.pyplot as plt
import random
# Define variables needed for plotting.
color_list = ['r-', 'm-', 'y-', 'c-', 'b-', 'g-']
color_index = 0
def show_learning(w):
global color_index
print('w0 =', '%5.2f' % w[0], ', w1 =', '%5.2f' % w[1],
', w2 =', '%5.2f' % w[2])
if color_index == 0:
plt.plot([1.0], [1.0], 'b_', markersize=12)
plt.plot([-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
'r+', markersize=12)
plt.axis([-2, 2, -2, 2])
plt.xlabel('x1')
plt.ylabel('x2')
x = [-2.0, 2.0]
if abs(w[2]) < 1e-5:
y = [-w[1]/(1e-5)*(-2.0)+(-w[0]/(1e-5)),
-w[1]/(1e-5)*(2.0)+(-w[0]/(1e-5))]
else:
y = [-w[1]/w[2]*(-2.0)+(-w[0]/w[2]),
-w[1]/w[2]*(2.0)+(-w[0]/w[2])]
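    # The plotted line is the decision boundary w0 + w1*x1 + w2*x2 = 0, rearranged
    # as x2 = -(w1/w2)*x1 - w0/w2 and evaluated at x1 = -2 and x1 = 2.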
plt.plot(x, y, color_list[color_index])
if color_index < (len(color_list) - 1):
color_index += 1
# Define variables needed to control training process.
random.seed(7) # To make repeatable
LEARNING_RATE = 0.1
index_list = [0, 1, 2, 3] # Used to randomize order
# Define training examples.
x_train = [(1.0, -1.0, -1.0), (1.0, -1.0, 1.0),
(1.0, 1.0, -1.0), (1.0, 1.0, 1.0)] # Inputs
y_train = [1.0, 1.0, 1.0, -1.0] # Output (ground truth)
# Define perceptron weights.
w = [0.2, -0.6, 0.25] # Initialize to some "random" numbers
# Print initial weights.
show_learning(w)
# First element in vector x must be 1.
# Length of w and x must be n+1 for neuron with n inputs.
def compute_output(w, x):
z = 0.0
for i in range(len(w)):
z += x[i] * w[i] # Compute sum of weighted inputs
if z < 0: # Apply sign function
return -1
else:
return 1
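# Worked example (illustrative): with the initial weights w = [0.2, -0.6, 0.25] and
# input x = (1.0, -1.0, -1.0), z = 0.2*1.0 + (-0.6)*(-1.0) + 0.25*(-1.0) = 0.55,
# so compute_output returns +1, which matches the ground truth for that example.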
# Perceptron training loop.
all_correct = False
while not all_correct:
all_correct = True
random.shuffle(index_list) # Randomize order
for i in index_list:
x = x_train[i]
y = y_train[i]
p_out = compute_output(w, x) # Perceptron function
if y != p_out: # Update weights when wrong
for j in range(0, len(w)):
w[j] += (y * LEARNING_RATE * x[j])
all_correct = False
show_learning(w) # Show updated weights
plt.show()
``` |
{
"source": "jpgacrama/Deep-Learning-with-TensorFlow-2-and-Keras",
"score": 3
} |
#### File: Deep-Learning-with-TensorFlow-2-and-Keras/Chapter 6/DCGAN.py
```python
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import os
import numpy as np
import shutil
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# In[]:
FOLDER = 'DCGAN_images'
# In[2]:
class DCGAN():
def __init__(self, rows, cols, channels, z = 10):
# Input shape
self.img_rows = rows
self.img_cols = cols
self.channels = channels
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = z
optimizer = Adam(0.0003, 0.5)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_generator(self):
model = Sequential()
model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
model.add(Reshape((7, 7, 128)))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_discriminator(self):
model = Sequential()
model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size=256, save_interval=50):
# Load the dataset
(X_train, _), (_, _) = mnist.load_data()
# Rescale -1 to 1
X_train = X_train / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# Prepare list for storing the historical values
d_loss_real_hist, d_loss_fake_hist, g_loss_hist, d_accuracy_real_hist, d_accuracy_fake_hist = list(), list(), list(), list(), list()
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample noise and generate a batch of new images
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Train the discriminator (real classified as ones and generated as zeros)
d_loss_real, d_accuracy_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake, d_accuracy_fake = self.discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
# Train the generator (wants discriminator to mistake images as real)
g_loss = self.combined.train_on_batch(noise, valid)
# Append losses for plotting later
d_loss_real_hist.append(d_loss_real)
d_loss_fake_hist.append(d_loss_fake)
g_loss_hist.append(g_loss)
d_accuracy_real_hist.append(d_accuracy_real)
d_accuracy_fake_hist.append(d_accuracy_fake)
# Plot the progress
print ("%d [D loss Real: %f, D loss Fake: %f] [G loss: %f] [Acc Real: %.2f%%, Acc Fake: %.2f%%]"
% (epoch, d_loss_real, d_loss_fake, g_loss, 100 * d_accuracy_real, 100 * d_accuracy_fake))
# If at save interval => save generated image samples
if epoch % save_interval == 0:
self.save_imgs(epoch)
self.plot_history(d_loss_real_hist, d_loss_fake_hist, g_loss_hist, d_accuracy_real_hist, d_accuracy_fake_hist)
def save_imgs(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("DCGAN_images/dcgan_mnist_%d.png" % epoch)
plt.close()
# create a line plot of loss for the gan and save to file
def plot_history(self, d_hist_real, d_hist_fake, g_hist, d_acc_real, d_acc_fake):
# plot loss
plt.subplot(2, 1, 1)
plt.plot(d_hist_real, label='d-real')
plt.plot(d_hist_fake, label='d-fake')
plt.plot(g_hist, label='gen')
plt.legend()
# plot discriminator accuracy
plt.subplot(2, 1, 2)
plt.plot(d_acc_real, label='acc-real')
plt.plot(d_acc_fake, label='acc-fake')
plt.legend()
# save plot to file
plt.savefig(f'{FOLDER}/plot_line_plot_loss.png')
plt.close()
# In[3]:
# make folder for results
shutil.rmtree(f'{FOLDER}', ignore_errors=True)
os.makedirs(f'{FOLDER}', exist_ok=True)
dcgan = DCGAN(28,28,1,100)
dcgan.train(epochs=400, batch_size=256, save_interval=50)
```
#### File: Deep-Learning-with-TensorFlow-2-and-Keras/Chapter 6/GAN_with_Adam_defaults.py
```python
import os
from os import makedirs
from numpy import expand_dims
from numpy import zeros
from numpy import ones
from numpy.random import randn
from numpy.random import randint
from keras.datasets.mnist import load_data
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from tensorflow.keras.initializers import RandomNormal
from matplotlib import pyplot
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# define the standalone discriminator model
def define_discriminator(in_shape=(28,28,1)):
# weight initialization
init = RandomNormal(stddev=0.02)
# define model
model = Sequential()
# downsample to 14x14
model.add(Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init, input_shape=in_shape))
model.add(LeakyReLU(alpha=0.2))
# downsample to 7x7
model.add(Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# define the standalone generator model
def define_generator(latent_dim):
# weight initialization
init = RandomNormal(stddev=0.02)
# define model
model = Sequential()
# foundation for 7x7 image
n_nodes = 128 * 7 * 7
model.add(Dense(n_nodes, kernel_initializer=init, input_dim=latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(Reshape((7, 7, 128)))
# upsample to 14x14
model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
model.add(LeakyReLU(alpha=0.2))
# upsample to 28x28
model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
model.add(LeakyReLU(alpha=0.2))
# output 28x28x1
model.add(Conv2D(1, (7,7), activation='tanh', padding='same', kernel_initializer=init))
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(generator, discriminator):
# make weights in the discriminator not trainable
discriminator.trainable = False
# connect them
model = Sequential()
# add generator
model.add(generator)
# add the discriminator
model.add(discriminator)
# compile model
model.compile(loss='binary_crossentropy', optimizer='adam')
return model
# load mnist images
def load_real_samples():
# load dataset
(trainX, trainy), (_, _) = load_data()
# expand to 3d, e.g. add channels
X = expand_dims(trainX, axis=-1)
# select all of the examples for a given class
selected_ix = trainy == 8
X = X[selected_ix]
# convert from ints to floats
X = X.astype('float32')
# scale from [0,255] to [-1,1]
X = (X - 127.5) / 127.5
return X
# select real samples
def generate_real_samples(dataset, n_samples):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# select images
X = dataset[ix]
# generate class labels
y = ones((n_samples, 1))
return X, y
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
# generate points in the latent space
x_input = randn(latent_dim * n_samples)
# reshape into a batch of inputs for the network
x_input = x_input.reshape(n_samples, latent_dim)
return x_input
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(generator, latent_dim, n_samples):
# generate points in latent space
x_input = generate_latent_points(latent_dim, n_samples)
# predict outputs
X = generator.predict(x_input)
# create class labels
y = zeros((n_samples, 1))
return X, y
# generate samples and save as a plot and save the model
def summarize_performance(step, g_model, latent_dim, n_samples=100):
# prepare fake examples
X, _ = generate_fake_samples(g_model, latent_dim, n_samples)
# scale from [-1,1] to [0,1]
X = (X + 1) / 2.0
# plot images
for i in range(10 * 10):
# define subplot
pyplot.subplot(10, 10, 1 + i)
# turn off axis
pyplot.axis('off')
# plot raw pixel data
pyplot.imshow(X[i, :, :, 0], cmap='gray_r')
# save plot to file
pyplot.savefig('results_opt/generated_plot_%03d.png' % (step+1))
pyplot.close()
# save the generator model
g_model.save('results_opt/model_%03d.h5' % (step+1))
# create a line plot of loss for the gan and save to file
def plot_history(d1_hist, d2_hist, g_hist, a1_hist, a2_hist):
# plot loss
pyplot.subplot(2, 1, 1)
pyplot.plot(d1_hist, label='d-real')
pyplot.plot(d2_hist, label='d-fake')
pyplot.plot(g_hist, label='gen')
pyplot.legend()
# plot discriminator accuracy
pyplot.subplot(2, 1, 2)
pyplot.plot(a1_hist, label='acc-real')
pyplot.plot(a2_hist, label='acc-fake')
pyplot.legend()
# save plot to file
pyplot.savefig('results_opt/plot_line_plot_loss.png')
pyplot.close()
# train the generator and discriminator
def train(g_model, d_model, gan_model, dataset, latent_dim, n_epochs=10, n_batch=128):
# calculate the number of batches per epoch
bat_per_epo = int(dataset.shape[0] / n_batch)
# calculate the total iterations based on batch and epoch
n_steps = bat_per_epo * n_epochs
# calculate the number of samples in half a batch
half_batch = int(n_batch / 2)
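    # Each iteration updates the discriminator on half_batch real plus half_batch
    # fake images (n_batch total), then updates the generator on n_batch latent points.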
# prepare lists for storing stats each iteration
d1_hist, d2_hist, g_hist, a1_hist, a2_hist = list(), list(), list(), list(), list()
# manually enumerate epochs
for i in range(n_steps):
# get randomly selected 'real' samples
X_real, y_real = generate_real_samples(dataset, half_batch)
# update discriminator model weights
d_loss1, d_acc1 = d_model.train_on_batch(X_real, y_real)
# generate 'fake' examples
X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
# update discriminator model weights
d_loss2, d_acc2 = d_model.train_on_batch(X_fake, y_fake)
# prepare points in latent space as input for the generator
X_gan = generate_latent_points(latent_dim, n_batch)
# create inverted labels for the fake samples
y_gan = ones((n_batch, 1))
# update the generator via the discriminator's error
g_loss = gan_model.train_on_batch(X_gan, y_gan)
# summarize loss on this batch
print('>%d, d1=%.3f, d2=%.3f g=%.3f, a1=%d, a2=%d' %
(i+1, d_loss1, d_loss2, g_loss, int(100*d_acc1), int(100*d_acc2)))
# record history
d1_hist.append(d_loss1)
d2_hist.append(d_loss2)
g_hist.append(g_loss)
a1_hist.append(d_acc1)
a2_hist.append(d_acc2)
# evaluate the model performance every 'epoch'
if (i+1) % bat_per_epo == 0:
summarize_performance(i, g_model, latent_dim)
plot_history(d1_hist, d2_hist, g_hist, a1_hist, a2_hist)
# make folder for results
makedirs('results_opt', exist_ok=True)
# size of the latent space
latent_dim = 50
# create the discriminator
discriminator = define_discriminator()
# create the generator
generator = define_generator(latent_dim)
# create the gan
gan_model = define_gan(generator, discriminator)
# load image data
dataset = load_real_samples()
print(dataset.shape)
# train model
train(generator, discriminator, gan_model, dataset, latent_dim)
``` |
{
"source": "jpgacrama/Mastering-Concurrency-in-Python",
"score": 3
} |
#### File: Mastering-Concurrency-in-Python/Chapter04/example1.py
```python
from os import system, name
n_files = 254
files = []
# define our clear function
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
# method 1
# for i in range(n_files):
# files.append(open('output1/sample%i.txt' % i, 'w'))
# method 2
'''for i in range(n_files):
f = open('output1/sample%i.txt' % i, 'w')
files.append(f)
f.close()'''
# method 3
if __name__ == '__main__':
clear()
for i in range(n_files):
with open(f'output1/sample{i}.txt', 'w') as f:
files.append(f)
```
#### File: Mastering-Concurrency-in-Python/Chapter05/example1.py
```python
import requests
from os import system, name
# define our clear function
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
if __name__ == '__main__':
clear()
url = 'http://www.google.com'
res = requests.get(url)
print(res.status_code)
print(res.headers)
with open('google.html', 'w') as f:
f.write(res.text)
print('Done.')
```
#### File: Mastering-Concurrency-in-Python/Chapter08/example1.py
```python
import cv2
import os
# define our clear function
def clear():
# for windows
if os.name == 'nt':
_ = os.system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = os.system('clear')
if __name__ == '__main__':
clear()
cwd = os.getcwd()
if os.name == 'nt':
        im = cv2.imread(os.path.join(cwd, r'Chapter08\input\ship.jpg'))
else:
im = cv2.imread(os.path.join(cwd, 'Chapter08/input/ship.jpg'))
cv2.imshow('Test', im)
cv2.waitKey(0) # press any key to move forward here
print(im)
print('Type:', type(im))
print('Shape:', im.shape)
print('Top-left pixel:', im[0, 0])
print('Done.')
```
#### File: Mastering-Concurrency-in-Python/Chapter19/example3.py
```python
from datetime import datetime
import time
import os
from apscheduler.schedulers.background import BackgroundScheduler
def task():
print(f'From process {os.getpid()}: The time is {datetime.now()}')
print(f'Starting job inside {os.getpid()}')
time.sleep(4)
print(f'Ending job inside {os.getpid()}')
if __name__ == '__main__':
scheduler = BackgroundScheduler()
scheduler.add_executor('processpool')
scheduler.add_job(task, 'interval', seconds=3, max_instances=3)
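    # Because task() sleeps for 4 seconds but is scheduled every 3 seconds, runs
    # overlap; max_instances=3 allows up to three copies to execute concurrently.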
scheduler.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
scheduler.shutdown()
``` |
{
"source": "JPGarCar/HORS",
"score": 3
} |
#### File: HORS/Extra_python/assignment.py
```python
from helpers import *
from delegate import *
from teacher import *
# flask for web service
from flask import Flask, redirect, render_template, request, url_for, session, flash, send_file
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# assigns variable 'db' to the SQL database immuns.db
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///immuns.db"
app.config["SQLALCHEMY_ECHO"] = True
db = SQLAlchemy(app)
class Assignment(db.Model):
__tablename__ = "generalList"
id = db.Column(db.Integer, primary_key=True)
committee = db.Column(db.Text)
country = db.Column(db.Text)
country_id = db.Column(db.Integer)
#delegate_name = db.Column(db.Text)
#delegate_school = db.Column(db.Text)
typeOfCom = db.Column(db.Text)
room = db.Column(db.Text)
important = db.Column(db.Text)
delegate = db.relationship("Delegate", backref="assignment", uselist=False)
def __init__(self, committee, country, country_id, typeOfCom, room, important):
self.committee = committee
self.country = country
self.country_id = country_id
# self.delegate_name = delegate_name
# self.delegate_school = delegate_school
self.typeOfCom = typeOfCom
self.room = room
self.important = important
### randomCountry (Number String String Teacher -> Void)
### Randomly assigns assignments to current user's table as specified
def randomCountry(number, typeOfCom, Important, teacher):
# used to have different iterations of the random function to be more random
ranNum = 1
numAssign = number
# iterates until all the assignments pending are assigned
while numAssign > 0:
# number of countries as specified in generalList
maxNumInDBNow = maxTypeInGen(typeOfCom, Important)
# assigns a random number to "codeID", -1 because list starts at 0
for i in range(0,ranNum):
codeID = randint(1, maxNumInDBNow) - 1
# all assignments of type of committee and importance on generalList
        assignments = Assignment.query.filter(Assignment.delegate == None, Assignment.typeOfCom == typeOfCom, Assignment.important == Important).all()
        # assignment from assignments in index "codeID"
assignment = assignments[codeID]
# assignment assigned to current user and its table updated
#if not assignment["room"] == "":
delegate = Delegate(" ", assignment, teacher)
db.session.add(delegate)
db.session.commit()
#db.execute("INSERT INTO :tableName (committee, country, delegateName, room, important) VALUES(:committee, :country, :delNam, :room, :imp)",
#tableName=session["currentUser"], committee=assignment["committee"], country=assignment["country"], delNam="", room=assignment["room"], imp=assignment["Important"])
# else:
# delegate = Delegate(teacher.dataBaseName, assignment.committee, assignment.country, "", "", assignment.important)
# db.session.add(delegate)
# db.session.commit()
#db.execute("INSERT INTO :tableName (committee, country, delegateName, room, important) VALUES(:committee, :country, :delNam, :room, :imp)",
#tableName=session["currentUser"], committee=assignment["committee"], country=assignment["country"], delNam="", room="", imp=assignment["Important"])
# updates the generalList
# assignment.delegate_name = "taken"
# assignment.delegate_school = teacher.school
# db.session.commit()
#db.execute("UPDATE generalList SET delegate_name='taken', delegate_school =:school WHERE committee=:com AND country=:con",
#school=session["currentUser"], com=assignment["committee"] ,con=assignment["country"])
        # reduces number of pending assignments by one
numAssign = numAssign - 1
        # increase the number of iterations of random
ranNum = ranNum + 1
```
#### File: HORS/Extra_python/delegate.py
```python
from helpers import *
from assignment import *
from teacher import *
# flask for web service
from flask import Flask, redirect, render_template, request, url_for, session, flash, send_file
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# assigns variable 'db' to the SQL database immuns.db
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///immuns.db"
app.config["SQLALCHEMY_ECHO"] = True
db = SQLAlchemy(app)
class Delegate(db.Model):
__tablename__ = "delegates"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
assignment_id = db.Column(db.Integer, db.ForeignKey("assignment.id"))
teacher_id = db.Column(db.Integer, db.ForeignKey("teacher.id"))
#committee = db.Column(db.Text)
#country = db.Column(db.Text)
#room = db.Column(db.Text)
#important = db.Column(db.Text)
def __init__(self, name, assignment, teacher):
self.name = name
self.assignment_id = assignment
self.teacher_id = teacher
```
#### File: Extra_python/Scripts/changeManual.py
```python
from cs50 import SQL
from IDArange import idArange
db = SQL("sqlite:///immuns.db")
def changeMan(dataBase, user):
for pair in changeList:
db.execute("UPDATE :dataBase SET delegate_name = '' WHERE committee=:com AND country=:con", dataBase=dataBase, com=pair[0], con=pair[1])
db.execute("UPDATE generalList SET delegate_name = '' WHERE committee=:com AND country=:con", com=pair[0], con=pair[1])
db.execute("DELETE FROM :user WHERE committee=:com AND country=:con", user=user, com=pair[0], con=pair[1])
idArange(user)
changeList = [("European Council MS", "Austria"), ("World Health Organization MS", "Mexico"), ("Organization of American States MS", "Brazil"),
("Security Council MS", "United Kingdom"), ("European Council MS", "Croatia"), ("European Council MS", "United Kingdom"),
("World Health Organization MS", "Saudi Arabia"), ("ECOSOC MS", "Sweden"), ("ECOSOC MS", "Finland"), ("World Health Organization MS", "Afghanistan"),
("ECOSOC MS", "Japan"), ("Security Council MS", "Syria")]
changeMan("msen", "<NAME>")
```
#### File: Extra_python/Scripts/IDArange.py
```python
from cs50 import SQL
db = SQL("sqlite:///immuns.db")
def idArange(user):
countries = db.execute("SELECT * FROM :user", user=user)
x = 1
for country in countries:
db.execute("UPDATE :user SET id=:idx WHERE id=:firstID", user=user, idx=x, firstID=country["id"])
x = x + 1
#idArange("msen")
#idArange("mssp")
#idArange("hsen")
#idArange("hssp")
#idArange("generalList")
#idArange("<NAME>_ASFM")
```
#### File: Extra_python/Scripts/manualRegister.py
```python
from cs50 import SQL
db = SQL("sqlite:///immuns.db")
global currentUser
def randomCountry(number, dataBase, school):
stem = db.execute("SELECT * FROM :dataBase WHERE id = :ids", dataBase=dataBase, ids=number)
for stoop in stem:
if stoop["delegate_name"] == "":
db.execute("INSERT INTO :tableName (committee, country, delegateName) VALUES(:committee, :country, :delNam)",
tableName=currentUser, committee=stoop["committee"], country=stoop["country"], delNam="")
db.execute("UPDATE :dataBase SET delegate_name = 'taken' WHERE id=:id", dataBase=dataBase, id=number)
db.execute("UPDATE generalList SET delegate_name = 'taken' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE :dataBase SET delegate_school = :school WHERE id=:id", dataBase=dataBase, school=school, id=number)
db.execute("UPDATE generalList SET delegate_school = :school WHERE committee=:com AND country=:con", school=school, com=stoop["committee"] ,con=stoop["country"])
currentUser = "DianaRubioColegioAmericanoSaltillo"
numberList = [148,156,157]
subList = "hsen"
for number in numberList:
randomCountry(number, subList, currentUser)
```
#### File: JPGarCar/HORS/models.py
```python
from flask import Flask, redirect, render_template, request, url_for, session, flash, send_file
from flask_sqlalchemy import SQLAlchemy
import helpers
from typeOfCommittee import TypeOfCom
from Important import Important
from advanced import Advanced
from grades import Grades
from application import db
from passlib.apps import custom_app_context as pwd_context
class Teacher(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
email = db.Column(db.Text)
school = db.Column(db.Text)
password = db.Column(db.Text)
confirmationCode = db.Column(db.Text)
delegates = db.relationship('Delegate', backref='teacher', lazy=True)
    ### changePassword() (String -> void)
    ### change the teacher's password, hashed and all
    def changePassword(self, newPassword):
        self.password = pwd_context.hash(newPassword)
### canAddDelegate() ( -> bool)
### return true if teacher has space for more delegates, else false
def canAddDelegate(self):
if self.getNumOfStudents() < self.getNumOfMaxStudents():
return True
else:
return False
### getNumOfStudents (String -> Number)
### return number of students in teacher
def getNumOfStudents(self):
num = 0
# grabs all assignments of user
assignments = self.delegates
# iterates over all the assignments in the table adding one to the variable num
for assignment in assignments:
num = num + 1
return num
### returnUserPageOld (Void -> templateRendered)
### returns the rendered template user_oldTeacherPage.html with corresponding data from current user's table
def returnUserPageOld(self):
delegates = self.delegates
grades = helpers.getGrades()
return render_template("user_oldTeacherPage.html", delegates=delegates, grades=grades)
    ### getTeacherSession (Void -> String)
### produces unique user id from its name and school without any special characters
def getTeacherSession(self):
return helpers.replaceSpecial(self.name) + helpers.replaceSpecial(self.school)
### recheck the number of students available
def getNumOfMaxStudents(self):
return helpers.getSpecial(self.confirmationCode)
def __init__(self, name, email, password, school, code):
self.name = name
self.email = email
self.password = password
self.school = school
self.confirmationCode = code
# Flask-Login stuff
        self.is_authenticated = False
self.is_active = True
self.is_anonymous = False
###### Flask-Login stuff #######
def get_id(self):
return chr(Teacher.query.filter(Teacher.name == self.name, Teacher.email == self.email).first().id)
class Assignment(db.Model):
id = db.Column(db.Integer, primary_key=True)
country = db.Column(db.Text)
country_id = db.Column(db.Integer)
important = db.Column(db.Text)
delegate = db.relationship('Delegate', backref='assignment', uselist=False)
committee_id = db.Column(db.Integer, db.ForeignKey("committee.id"), nullable=False)
def __init__(self, committeeID, country, country_id, important : Important):
self.country = country
self.country_id = country_id
self.important = important
self.committee_id = committeeID
class Delegate(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
grade = db.Column(db.Text)
assignment_id = db.Column(db.Integer, db.ForeignKey('assignment.id'), nullable=False)
teacher_id = db.Column(db.Integer, db.ForeignKey('teacher.id'), nullable=False)
def __init__(self, name, assignment, teacher, grade : Grades):
self.name = name
self.assignment_id = assignment
self.teacher_id = teacher
self.grade = grade
class Committee(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
typeOfCom = db.Column(db.Text)
room = db.Column(db.Text)
advanced = db.Column(db.Text)
assignments = db.relationship("Assignment", backref="committee")
def __init__(self, name, typeOfCom : TypeOfCom, room, advanced):
self.name = name
self.typeOfCom = typeOfCom
self.room = room
self.advanced = advanced
self.assignments
### return number of assignments in this committee
def numOfAssignments(self):
num = 0
for assignment in self.assignments:
if assignment.important == Important.NO.value:
num = num + 1
return num
### return number of important assignments in this committee
def numOfImportantAssignments(self):
num = 0
for assignment in self.assignments:
if assignment.important == Important.YES.value:
num = num + 1
return num
### return number of assignments with a delegate in this committee
def numOfDelegates(self):
num = 0
for assignment in self.assignments:
if assignment.important == Important.NO.value and assignment.delegate is not None:
num = num + 1
return num
### return number of important assignments with a delegate in this committee
def numOfImportantDelegates(self):
num = 0
for assignment in self.assignments:
if assignment.important == Important.YES.value and assignment.delegate is not None:
num = num + 1
return num
### return true if there are more assignments available than delegates in the committee by half
def codeYellow(self, isImportant):
print("inside code yellow")
if isImportant:
if self.numOfImportantAssignments() - self.numOfImportantDelegates() >= self.numOfImportantAssignments() / 2:
return True
else:
return False
else:
if self.numOfAssignments() - self.numOfDelegates() >= self.numOfAssignments() / 2:
print("inside true for code yellow")
return True
else:
return False
    ### return true if there are more assignments available than delegates in the committee by two thirds
def codeRed(self, isImportant):
print("inside code red")
if isImportant:
if self.numOfImportantAssignments() - self.numOfImportantDelegates() >= self.numOfImportantAssignments() * 0.6:
return True
else:
return False
else:
if self.numOfAssignments() - self.numOfDelegates() >= self.numOfAssignments() * 0.6:
print("inside true for code red")
return True
else:
return False
``` |
{
"source": "jpgard/bostonScrape",
"score": 3
} |
#### File: jpgard/bostonScrape/main_2016.py
```python
import csv
import errno
import itertools
import os
import time
from bs4 import BeautifulSoup
import click
import dataset
import funcy as fy
import requests
HTML_DB_FILENAME = 'scrape_test.db'
DATASET = dataset.connect('sqlite:///' + HTML_DB_FILENAME)
TABLE = DATASET['raw_html']
def mkdir_p(path):
"""
Makes directories. Taken from: http://stackoverflow.com/a/600612
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
@click.group()
def cli():
"""Pile of commands to scrape the boston marathon results."""
pass
def extract_entrants(html):
"""Generator yielding entrant info dicts given an HTML file."""
soup = BeautifulSoup(html, 'lxml')
trs = soup.select('.tablegrid_list_item > .tablegrid_table > tbody > tr')
# Two rows per entrant, so we group them in pairs, omitting the extra end
# row.
user_rows = fy.ipartition(2, trs)
for tr_header, tr_data in user_rows:
header_strings = [td.get_text(strip=True)
for td in tr_header.find_all('td', recursive=False)]
assert len(header_strings) == 9
data_strings = [td.get_text(strip=True) for td in tr_data.select('td')[0].select('td')]
assert len(data_strings) == 15
yield {
'bib_number': header_strings[0],
'name': header_strings[1],
'age': header_strings[2],
'gender': header_strings[3],
'city': header_strings[4],
'state': header_strings[5],
'county': header_strings[6],
'origin': header_strings[7],
#new items added by JG
'5k': data_strings[0],
'10k': data_strings[1],
'15k': data_strings[2],
'20k': data_strings[3],
'half': data_strings[4],
'25k': data_strings[5],
'30k': data_strings[6],
'35k': data_strings[7],
'40k': data_strings[8],
'pace' : data_strings[9],
'projected_time' : data_strings[10],
'official_time' : data_strings[11],
'overall_place' : data_strings[12],
'gender_place' : data_strings[13],
'division_place' : data_strings[14]
}
@cli.command()
@click.argument('output_csv', type=click.File('wb'))
def output_csv(output_csv):
"""Write a csv listing of all entrants."""
entrants = fy.cat(extract_entrants(row['page_html']) for row in TABLE.all())
# We could technically use the first entry's keys, but I like this column order.
keys = [
'bib_number',
'name',
'age',
'gender',
'city',
'state',
'county',
'origin',
#new items added by JG
'5k',
'10k',
'15k',
'20k',
'half',
'25k',
'30k',
'35k',
'40k',
'pace',
'projected_time',
'official_time',
'overall_place',
'gender_place',
'division_place'
]
writer = csv.DictWriter(output_csv, keys)
writer.writeheader()
for row in entrants:
writer.writerow({k.encode('utf8'):v.encode('utf8') for k,v in row.items()})
click.echo('Wrote %d entrants.' % len(entrants))
@cli.command()
def output_html():
"""Write all pages in the database into HTML files."""
mkdir_p('output')
for row in TABLE.all():
#TODO:COUNTRY
filename = 'output/country_%s_state_%s_page_%s.html' % (row['country_id'], row['state_id'], row['page_number'])
click.echo('Writing ' + filename)
        with open(filename, 'w') as f:
f.write(row['page_html'])
def scrape_state(country_id=0, state_id=0):
"""
Generator yielding pages of HTML for a particular state.
Returns tuples of (page_number, html_text).
"""
# Fuckton of random shit in here, but whatever, don't fuck with whatever the
# server is doing if it works.
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8',
'Cache-Control': 'max-age=0',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'http://registration.baa.org',
'Referer': 'http://registration.baa.org/2016/cf/Public/iframe_ResultsSearch.cfm?mode=results',
}
#TODO: COUNTRY
params = {
'mode': 'results',
'criteria': '',
'StoredProcParamsOn': 'yes',
'VarGenderID': 0,
'VarBibNumber': '',
'VarLastName': '',
'VarFirstName': '',
'VarStateID': state_id,
'VarCountryOfResID': country_id,
'VarCountryOfCtzID': 0,
'VarReportingSegID': 1,
'VarAwardsDivID': 0,
'VarQualClassID': 0,
'VarCity': '',
'VarTargetCount': 1000,
'records': 25,
'headerexists': 'Yes',
'bordersize': 0,
'bordercolor': '#ffffff',
'rowcolorone': '#FFCC33',
'rowcolortwo': '#FFCC33',
'headercolor': '#ffffff',
'headerfontface': 'Verdana,Arial,Helvetica,sans-serif',
'headerfontcolor': '#004080',
'headerfontsize': '12px',
'fontface': 'Verdana,Arial,Helvetica,sans-serif',
'fontcolor': '#000099',
'fontsize': '10px',
'linkfield': 'FormattedSortName',
'linkurl': 'OpenDetailsWindow',
'linkparams': 'RaceAppID',
'queryname': 'SearchResults',
'tablefields': 'FullBibNumber,FormattedSortName,AgeOnRaceDay,GenderCode,'
'City,StateAbbrev,CountryOfResAbbrev,CountryOfCtzAbbrev,'
'DisabilityGroup',
}
for page_number, start in enumerate(itertools.count(1, 25)):
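        # itertools.count(1, 25) yields start = 1, 26, 51, ..., paging through the
        # results 25 records at a time via the form's 'start' parameter.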
# Don't hammer the server. Give it a sec between requests.
time.sleep(1.0)
#TODO: country
click.echo('Requesting country %d state %d - page %d' % (country_id, state_id, page_number))
response = requests.post(
'http://registration.baa.org/2016/cf/Public/iframe_ResultsSearch.cfm',
headers=headers,
params=params,
data={'start': start, 'next': 'Next 25 Records'},
)
response.raise_for_status()
# Only yield if there actually are results. Just found this random
# tr_header thing in the HTML of the pages that have results, but not
# empty results pages.
if 'tr_header' in response.text:
yield page_number, response.text
else:
assert 'Next 25 Records' not in response.text
click.echo(' No results found.')
break
# No more pages!
if 'Next 25 Records' not in response.text:
break
@cli.command()
def scrape():
"""Pull down HTML from the server into dataset."""
# Bullshit, I know right? But no, go look at the search page.
state_ids = range(2, 78)
country_ids = range(0,203)
for country_id in country_ids:
if country_id in (36, 193): #if US or canada, need to scrape by state, otherwise, can just scrape by country
for state_id in state_ids:
for page_number, page_html in scrape_state(country_id, state_id): #TODO: scrape_state needs country id and state id as parameters
                    TABLE.upsert(dict(
                        country_id=country_id,
                        state_id=state_id,
                        page_number=page_number,
                        page_html=page_html,
                    ), ['country_id', 'state_id', 'page_number'])
else:
for page_number, page_html in scrape_state(country_id, state_id=0): #TODO: scrape_state needs country id and state id as parameters
                TABLE.upsert(dict(
                    country_id=country_id,
                    state_id=0,
                    page_number=page_number,
                    page_html=page_html,
                ), ['country_id', 'state_id', 'page_number'])
if __name__ == '__main__':
cli()
``` |
{
"source": "jpgard/federated",
"score": 2
} |
#### File: feded/preprocessing/larc.py
```python
import pandas as pd
SID_COLNAME = "STDNT_ID"
TERM_COLNAME = "TERM_CD"
SNAPSHT_COLNAME = "SNPSHT_RPT_DT"
def make_prev_term_gpa_column(df):
return df
```
#### File: feded/training/model.py
```python
import tensorflow as tf
class ModelConfig:
def __init__(self, learning_rate: float, loss):
"""
A set of configurations to represent a unique model.
:param learning_rate: learning rate.
:param loss: callable to be used as the loss function for the model.
"""
self.learning_rate = learning_rate
self.loss = loss
# create a simple keras model
def create_compiled_keras_model(input_shape, model_config: ModelConfig):
"""Create and compile a simple fully-connected Keras model."""
model = tf.keras.models.Sequential([
# preprocessing_layer,
tf.keras.layers.BatchNormalization(center=True, scale=True,
input_shape=input_shape),
tf.keras.layers.Dense(16, activation='relu', kernel_initializer='zeros'),
tf.keras.layers.Dense(8, activation='relu', kernel_initializer='zeros'),
tf.keras.layers.Dense(1, activation='sigmoid', kernel_initializer='zeros')
])
# compile the model
model.compile(
loss=model_config.loss(),
optimizer=tf.keras.optimizers.SGD(learning_rate=model_config.learning_rate),
metrics=[
tf.keras.metrics.BinaryCrossentropy(),
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()
])
return model
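# Illustrative usage (assumed feature count; not part of the original module):
#   config = ModelConfig(learning_rate=0.01, loss=tf.keras.losses.BinaryCrossentropy)
#   model = create_compiled_keras_model(input_shape=(20,), model_config=config)
# ModelConfig.loss is expected to be a callable class (not an instance), since the
# model compiles with model_config.loss().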
``` |
{
"source": "jpgard/mooc-peer-grading",
"score": 3
} |
#### File: docker/feature_extraction/sql_utils.py
```python
import os
import subprocess
import shutil
import pandas as pd
DATABASE_NAME = "course"
def execute_mysql_query_into_csv(query, file, database_name=DATABASE_NAME, delimiter=","):
"""
Execute a mysql query into a file.
:param query: valid mySQL query as string.
:param file: csv filename to write to.
:param database_name: name of database to use.
:param delimiter: type of delimiter to use in file.
:return: None
"""
formatted_query = """{} INTO OUTFILE '{}' FIELDS TERMINATED BY '{}';""".format(query, file, delimiter)
command = '''mysql -u root -proot {} -e"{}"'''.format(database_name, formatted_query)
subprocess.call(command, shell=True)
return
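# Illustrative call (hypothetical output path): because of MySQL's secure-file-priv
# setting, the OUTFILE path typically has to live under the MySQL data directory, e.g.
#   execute_mysql_query_into_csv("SELECT * FROM hg_assessment_metadata",
#                                "/var/lib/mysql/course/hg_assessment_metadata.csv")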
def load_mysql_dump(dumpfile, database_name=DATABASE_NAME):
"""
Load a mySQL data dump into DATABASE_NAME.
:param file: path to mysql database dump
:return:
"""
command = '''mysql -u root -proot {} < {}'''.format(database_name, dumpfile)
subprocess.call(command, shell=True)
return
def initialize_database(database_name=DATABASE_NAME):
"""
Start mySQL service and initialize mySQL database with database_name.
:param database_name: name of database.
:return: None
"""
# start mysql server
subprocess.call("service mysql start", shell=True)
# create database
subprocess.call('''mysql -u root -proot -e "CREATE DATABASE {}"'''.format(database_name), shell=True)
return
def extract_coursera_sql_data(course, session):
"""
    Initialize the mySQL database, load the hash-mapping dump, and execute queries to deposit csv files of data into the /output directory.
    :param course: course name.
    :param session: session.
:return:
"""
# paths for reading and writing results
course_session_dir = os.path.join("/input", course, session)
mysql_default_output_dir = "/var/lib/mysql/{}/".format(
DATABASE_NAME) # this is the only location mysql can write to
hash_mapping_sql_dump = \
[x for x in os.listdir(course_session_dir) if "anonymized_general" in x and session in x][
0] # contains users table
initialize_database()
load_mysql_dump(os.path.join(course_session_dir, hash_mapping_sql_dump))
# execute forum comment query and send to csv
for tablename in ["hg_assessment_metadata", "hg_assessment_submission_metadata",
"hg_assessment_overall_evaluation_metadata", "hg_assessment_evaluation_metadata",
"hg_assessment_calibration_gradings", "hg_assessment_peer_grading_metadata",
"hg_assessment_peer_grading_set_metadata", "hg_assessment_self_grading_set_metadata",
"hg_assessment_training_metadata", "hg_assessment_training_set_metadata"
]:
query = """SELECT * FROM {}""".format(tablename)
outfile = tablename + ".csv"
outfile_temp_fp = os.path.join(mysql_default_output_dir, outfile)
outfile_fp = os.path.join("/output", outfile)
execute_mysql_query_into_csv(query, file=outfile_temp_fp)
shutil.move(outfile_temp_fp, outfile_fp)
return
``` |
{
"source": "jpgard/morf",
"score": 2
} |
#### File: morf/utils/caching.py
```python
import os
import subprocess
import shutil
from urllib.parse import urlparse
import logging
from morf.utils.docker import load_docker_image
from morf.utils.log import set_logger_handlers, execute_and_log_output
from morf.utils.s3interface import sync_s3_bucket_cache
module_logger = logging.getLogger(__name__)
def make_course_session_cache_dir_fp(job_config, bucket, data_dir, course, session):
fp = os.path.join(job_config.cache_dir, bucket, data_dir, course, session)
return fp
def update_raw_data_cache(job_config):
"""
Update the raw data cache using the parameters in job_config; if job_config contains multiple raw data buckets, cache all of them.
:param job_config: MorfJobConfig object.
:return:
"""
# cache each bucket in a named directory within job_cache_dir
for raw_data_bucket in job_config.raw_data_buckets:
sync_s3_bucket_cache(job_config, raw_data_bucket)
return
def update_proc_data_cache(job_config):
"""
Update the processed data cache using the parameters in job_config. Assumes job_config contains only a single proc_data_bucket.
:param job_config: MorfJobConfig object.
:return:
"""
proc_data_bucket = getattr(job_config, "proc_data_bucket", None)
sync_s3_bucket_cache(job_config, proc_data_bucket)
return
def fetch_from_cache(job_config, cache_file_path, dest_dir):
"""
Fetch a file from the cache for job_config into dest_dir, if it exists.
:param job_config:
:param cache_file_path: string, relative path to file in cache (this is identical to the directory path in s3; e.g. "/bucket/path/to/somefile.csv"
:param dest_dir: absolute path of directory to fetch file into (will be created if not exists)
:return: path to fetched file (string); return None if cache is not used.
"""
logger = set_logger_handlers(module_logger, job_config)
logger.info("fetching file {} from cache".format(cache_file_path))
    abs_cache_file_path = os.path.join(getattr(job_config, "cache_dir", ""), cache_file_path)
    if hasattr(job_config, "cache_dir") and os.path.exists(abs_cache_file_path):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_fp = shutil.copy(abs_cache_file_path, dest_dir)
else:
logger.warning("file {} does not exist in cache".format(abs_cache_file_path))
dest_fp = None
return dest_fp
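# Illustrative call (hypothetical paths): fetch_from_cache(job_config,
# "some-bucket/morf-data/course/session/file.csv", "/tmp/extract") copies the cached
# file into /tmp/extract and returns its new path, or returns None (after logging a
# warning) when the file is not in the cache.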
def docker_cloud_login(job_config):
"""
Log into docker cloud using creds in job_config.
:param job_config: MorfJobConfig object.
:return: None
"""
cmd = "docker login --username={} --password={}".format(job_config.docker_cloud_username, job_config.docker_cloud_password)
logger = set_logger_handlers(module_logger, job_config)
execute_and_log_output(cmd, logger)
return
def docker_cloud_push(job_config, image_uuid):
"""
Push image to Docker Cloud repo in job_config; tagging the image with its morf_id.
:param job_config: MorfJobConfig object
:param image_uuid: Docker image uuid
:return: None
"""
logger = set_logger_handlers(module_logger, job_config)
docker_cloud_repo_and_tag_path = "{}:{}".format(job_config.docker_cloud_repo, job_config.morf_id)
# tag the docker image using the morf_id
tag_cmd = "docker tag {} {}".format(image_uuid, docker_cloud_repo_and_tag_path)
execute_and_log_output(tag_cmd, logger)
# push the image to docker cloud
push_cmd = "docker push {}".format(docker_cloud_repo_and_tag_path)
execute_and_log_output(push_cmd, logger)
return docker_cloud_repo_and_tag_path
def cache_to_docker_hub(job_config, dir, image_name):
"""
Push image to MORF repo in Docker Hub.
:param job_config: MorfJobConfig object.
:return: None
"""
logger = set_logger_handlers(module_logger, job_config)
image_uuid = load_docker_image(dir, job_config, logger, image_name)
docker_cloud_login(job_config)
docker_cloud_repo_and_tag_path = docker_cloud_push(job_config, image_uuid)
return docker_cloud_repo_and_tag_path
```
#### File: morf/tests/test_s3_utils.py
```python
import pytest
from morf.utils import get_bucket_from_url, get_key_from_url
def test_get_bucket_from_url():
assert get_bucket_from_url("s3://my-bucket/some/file.txt") == "my-bucket"
assert get_bucket_from_url("s3://anotherbucket/some/file.txt") == "anotherbucket"
def test_get_key_from_url():
assert get_key_from_url("s3://my-bucket/some/file.txt") == "some/file.txt"
with pytest.raises(AttributeError):
get_key_from_url("s3://my-bucket/") # tests case of path without a key
```
#### File: morf/utils/config.py
```python
import boto3
import configparser
import fileinput
import json
import logging
import multiprocessing
import os
import re
from morf.utils import get_bucket_from_url, get_key_from_url
from morf.utils.security import generate_md5
def get_config_properties(config_file="config.properties", sections_to_fetch = None):
"""
Returns the list of properties as a dict of key/value pairs in the file config.properties.
:param config_file: filename (string).
    :param sections_to_fetch: names of sections to fetch properties from (if specified); all sections are returned by default (iterable).
:return: A flat (no sections) Python dictionary of properties.
"""
cf = configparser.ConfigParser()
try:
cf.read(config_file)
except Exception as e:
print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
properties = {}
for section in cf.sections():
# only include args section if requested
if (not sections_to_fetch or (section in sections_to_fetch)):
for item in cf.items(section):
properties[item[0]] = item[1]
return properties
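# Illustrative example (hypothetical file contents): a config.properties such as
#   [client]
#   job_id = test_job
#   [aws]
#   aws_access_key_id = ABC123
# yields get_config_properties() == {"job_id": "test_job", "aws_access_key_id": "ABC123"},
# i.e. section headers are flattened away.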
def combine_config_files(*args, outfile="config.properties"):
"""
Combine multiple config files into single config file located at outfile.
:param args: names of config files to combine.
:param outfile: pathname to write to.
:return: None
"""
with open(outfile, "w") as fout, fileinput.input(args) as fin:
for line in fin:
fout.write(line)
return
def update_config_fields_in_section(section, config_file="config.properties", **kwargs):
"""
Overwrite (or create, if not exists) fields in section of config_file with values provided according to kwargs.
:param section: section header within config file which contains the field to be modified.
:param kwargs: named parameters, with values, to overwrite.
:param config_file: path to config properties; should be valid ConfigParser file
:return:
"""
cf = configparser.ConfigParser()
try:
cf.read(config_file)
except Exception as e:
print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
cf_new = configparser.ConfigParser()
    # copy existing sections without shadowing the 'section' argument
    for existing_section in cf.sections():
        for item in cf.items(existing_section):
            try:
                cf_new[existing_section][item[0]] = item[1]
            except KeyError: # section doesn't exist yet
                cf_new[existing_section] = {}
                cf_new[existing_section][item[0]] = item[1]
    for key, value in kwargs.items():
        try:
            cf_new[section][key] = value
        except KeyError as e:
            print(
                "[ERROR] error updating config file: {}; possibly attempted to update a section that does not exist".format(
                    e))
try:
os.remove(config_file)
with open(config_file, "w") as cfwrite:
cf_new.write(cfwrite)
except Exception as e:
print("[ERROR] error updating config file: {}".format(e))
return
def fetch_data_buckets_from_config(config_file="config.properties", data_section="data",
required_bucket_dir_name='morf-data/'):
"""
Fetch the buckets from data_section of config_file; warn if key does not exactle match directory_name.
:param config_file: path to config file.
:param data_section: section of config file with key-value pairs representing institution names and s3 paths.
:param required_bucket_dir_name: directory or path that should match ALL values in data_section; if not, throws warning.
:return: list of buckets to iterate over; no directories are returned because these should be uniform across all of the buckets.
"""
cf = configparser.ConfigParser()
cf.read(config_file)
buckets = []
for item in cf.items(data_section):
item_url = item[1]
bucket = get_bucket_from_url(item_url)
dir = get_key_from_url(item_url)
if dir != required_bucket_dir_name:
msg = "[ERROR]: specified path {} does not match required directory name {}; change name of directories to be consistent or specify the correct directory to check for.".format(
item_url, required_bucket_dir_name)
print(msg)
raise
else:
buckets.append(bucket)
assert len(buckets) >= 1
return tuple(buckets)
class MorfJobConfig:
"""
Job-level configurations; these should remain consistent across entire workflow of a job.
"""
def __init__(self, config_file):
self.type = "morf" # todo: delete this
self.mode = None
self.status = "START"
properties = get_config_properties(config_file)
self.client_args = get_config_properties(config_file, sections_to_fetch="args")
# add properties to class as attributes
for prop in properties.items():
setattr(self, prop[0], prop[1])
# if client_args are specified, add these to job_id to ensure unique
if self.client_args:
self.generate_job_id()
# fetch raw data buckets as list
self.raw_data_buckets = fetch_data_buckets_from_config()
self.generate_morf_id(config_file)
# if maximum number of cores is not specified, set to one less than half of current machine's cores; otherwise cast to int
self.setcores()
def generate_job_id(self):
"""
Generate and set a unique job_id by appending client-supplied arg names and values.
This makes submitting multiple jobs by simply altering the 'args' field much easier for users.
:return: None
"""
new_job_id = self.job_id
for arg_name, arg_value in sorted(self.client_args.items()):
name = re.sub("[./]", "", arg_name)
value = re.sub("[./]", "", arg_value)
new_job_id += '_'.join([name, value])
setattr(self, "job_id", new_job_id)
return
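    # Illustrative example (hypothetical args): with job_id "myjob" and client args
    # {"n_estimators": "100"}, generate_job_id() sets job_id to "myjobn_estimators_100"
    # ("." and "/" are stripped from names and values before joining).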
def generate_morf_id(self, config_file):
"""
Generate a unique MORF identifier via hashing of the config file.
:param config_file:
:return:
"""
self.morf_id = generate_md5(config_file)
def check_configurations(self):
# todo: check that all arguments are valid/acceptable
pass
def update_status(self, status):
# todo: check whether status is valid by comparing with allowed values
self.status = status
def update_email_to(self, email_to):
# todo: check if email is valid
self.email_to = email_to
def update_mode(self, mode):
# todo: check whether mode is valid by comparing with allowed values
self.mode = mode
def initialize_s3(self):
# create s3 connection object for communicating with s3
s3obj = boto3.client("s3",
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
return s3obj
def setcores(self):
if not hasattr(self, "max_num_cores"):
n_cores = multiprocessing.cpu_count()
self.max_num_cores = max(n_cores//2 - 1, 1)
else:
n_cores = int(self.max_num_cores)
self.max_num_cores = n_cores
return
```
#### File: morf/mwe-prule/morf-prule.py
```python
import argparse
import subprocess
import os
import tarfile
import gzip
import shutil
import re
from feature_extraction.sql_utils import extract_grade_sql_data
def unarchive_file(src, dest):
"""
Untar or un-gzip a file from src into dest. Supports file extensions: .zip, .tgz, .gz. Taken from MORF API.
:param src: path to source file to unarchive (string).
:param dest: directory to unarchive result into (string).
:return: None
"""
if src.endswith(".zip") or src.endswith(".tgz"):
tar = tarfile.open(src)
tar.extractall(dest)
tar.close()
outpath = os.path.join(dest, os.path.basename(src))
elif src.endswith(".gz"):
with gzip.open(src, "rb") as f_in:
destfile = os.path.basename(src)[:-3] # source file without '.gz' extension
destpath = os.path.join(dest, destfile)
with open(destpath, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(src)
outpath = destpath
else:
raise NotImplementedError("Passed in a file with an extension not supported by unarchive_file: {}".format(src))
return outpath
def clean_filename(src):
"""
Rename file, removing any non-alphanumeric characters.
:param src: file to rename.
:return: None
"""
src_dir, src_file = os.path.split(src)
    clean_src_file = re.sub(r'[()\s]', '', src_file)
clean_src_path = os.path.join(src_dir, clean_src_file)
try:
os.rename(src, clean_src_path)
except Exception as e:
print("[ERROR] error renaming file: {}".format(e))
return clean_src_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="execute feature extraction, training, or testing.")
parser.add_argument("-c", "--course", required=True, help="an s3 pointer to a course")
parser.add_argument("-r", "--session", required=True, help="3-digit course run number")
parser.add_argument("-m", "--mode", required=True, help="mode to run image in; {extract, train, test}")
args = parser.parse_args()
if args.mode == "extract":
# this block expects individual session-level data mounted by extract_session() and outputs one CSV file per session in /output
# unzip files and clean names
session_input_dir = os.path.join("/input", args.course, args.session)
# fetch/create path names for various input and output files
clickstream = [x for x in os.listdir(session_input_dir) if x.endswith("clickstream_export.gz")][0]
clickstream_fp = os.path.join(session_input_dir, clickstream)
clickstream_fp = unarchive_file(clickstream_fp, session_input_dir)
clickstream_fp = clean_filename(clickstream_fp)
outfile = "{}-{}-extract.csv".format(args.course, args.session)
output_fp = os.path.join("/output", outfile)
# run grade extraction script
grade_file = extract_grade_sql_data(args.course, args.session)
grade_fp = os.path.join(session_input_dir, grade_file)
# compile the java code; ideally this should happen in dockerfile, not here!
cmd = "javac -cp MORF1.4/jars/*: -d /MORF1.4/bin/ /MORF1.4/src/Extract.java"
subprocess.call(cmd, shell=True)
# run feature extraction
os.chdir("/MORF1.4/bin/")
cmd = "java -cp /MORF1.4/jars/*: Extract {} {} {}".format(clickstream_fp, grade_fp, output_fp)
subprocess.call(cmd, shell=True)
``` |
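The `.gz` branch of `unarchive_file` above streams the compressed payload into a new file and then removes the archive. Here is a self-contained sketch of that gzip pattern on a throwaway file (the file name is invented for the demo):
```python
import gzip
import os
import shutil
import tempfile

# Build a throwaway .gz file to decompress.
workdir = tempfile.mkdtemp()
src = os.path.join(workdir, "clickstream_export.gz")
with gzip.open(src, "wb") as f:
    f.write(b"example,clickstream,rows\n")

# Same pattern as the .gz branch of unarchive_file():
# stream-decompress into a file without the .gz suffix, then drop the archive.
dest = src[:-3]
with gzip.open(src, "rb") as f_in, open(dest, "wb") as f_out:
    shutil.copyfileobj(f_in, f_out)
os.remove(src)

print(open(dest, "rb").read())
```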
{
"source": "jpgard/pytorch-CycleGAN-and-pix2pix",
"score": 2
} |
#### File: pytorch-CycleGAN-and-pix2pix/models/networks_3d.py
```python
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from models.networks import Identity, init_net
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
"""Return a 3D normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm3d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm3d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = lambda x: Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a 3D generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from <NAME>'s neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
raise NotImplementedError
# net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
raise NotImplementedError
# net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
raise NotImplementedError
# net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator3d(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3, as used in [basic] (PatchGAN)).
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator3d(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator3d(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
raise NotImplementedError
# net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class UnetGenerator3d(nn.Module):
"""Create a 3D Unet-based generator"""
    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm3d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator3d, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock3d(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock3d(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock3d(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock3d(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock3d(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock3d(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock3d(nn.Module):
"""Defines the 3D Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm3d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock3d) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
            use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock3d, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm3d
else:
use_bias = norm_layer == nn.InstanceNorm3d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv3d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose3d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose3d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose3d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator3d(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm3d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator3d, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm3d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm3d
else:
use_bias = norm_layer != nn.BatchNorm3d
kw = 4
padw = 1
sequence = [nn.Conv3d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv3d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv3d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv3d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
```
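A quick smoke test of the building blocks above, assuming PyTorch is installed and that this module is importable as `models.networks_3d` from the repository root. `num_downs=6` is chosen so that a 64³ volume reaches a 1³ bottleneck, and the channel width is kept small to limit memory; the shapes in the comments follow from the stride-2 convolutions.
```python
import torch

from models.networks_3d import (UnetGenerator3d, NLayerDiscriminator3d,
                                get_norm_layer)

norm_layer = get_norm_layer('instance')

# 6 downsamplings: 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1 at the bottleneck.
netG = UnetGenerator3d(input_nc=1, output_nc=1, num_downs=6, ngf=8,
                       norm_layer=norm_layer)
# PatchGAN over the concatenated (input, output) pair, as in pix2pix.
netD = NLayerDiscriminator3d(input_nc=2, ndf=8, n_layers=3,
                             norm_layer=norm_layer)

real_A = torch.randn(1, 1, 64, 64, 64)   # one single-channel 64^3 volume
with torch.no_grad():
    fake_B = netG(real_A)
    pred = netD(torch.cat([real_A, fake_B], dim=1))

print(fake_B.shape)  # torch.Size([1, 1, 64, 64, 64])
print(pred.shape)    # a 3D patch-level prediction map
```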
#### File: pytorch-CycleGAN-and-pix2pix/models/vox2vox_model.py
```python
from models.pix2pix_model import Pix2PixModel
from models.base_model import BaseModel
import models.networks_3d as networks_3d
import models.networks as networks
import torch
class Vox2VoxModel(BaseModel):
""" This class implements the vox2vox model, for learning a mapping from input volumes to output volumes given paired data.
This is designed to mostly follow the default architecture of the pix2pix model, with appropriate modifications
for the 3D case.
The model training requires '--dataset_mode aligned_volume' dataset.
    By default, it uses a '--netG unet_256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode vanilla' GAN loss (the cross-entropy objective used in the original GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
see also models.pix2pix_model.py
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned_volume')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_l', type=float, default=100.0, help='lambda weight for L1 or L2 loss')
parser.add_argument('--loss_norm', type=str, default="L1", help='type of loss (L1 or L2)')
return parser
def __init__(self, opt):
"""Initialize the vox2vox class. Modified from models.pix2pix_model .
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1L2', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks_3d.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
self.netD = networks_3d.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
if opt.loss_norm == "L1":
self.criterionL1L2 = torch.nn.L1Loss()
elif opt.loss_norm == "L2":
self.criterionL1L2 = torch.nn.MSELoss()
else:
raise NotImplementedError("specify either an L1 or L2 as the loss_norm")
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
# todo
raise NotImplementedError
def optimize_parameters(self):
# todo
raise NotImplementedError
def forward(self):
# todo
raise NotImplementedError
``` |
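The docstring above states the training objective as GAN loss plus `lambda_L1 * ||G(A)-B||_1`, with the reconstruction norm selected by `--loss_norm`. Below is a minimal sketch of just that reconstruction term on random stand-in tensors, assuming PyTorch is installed; `reconstruction_loss` is an illustrative helper, not part of the model class.
```python
import torch


def reconstruction_loss(fake_B, real_B, loss_norm="L1", lambda_l=100.0):
    # Same selection logic as Vox2VoxModel.__init__: L1 vs. L2 (MSE).
    if loss_norm == "L1":
        criterion = torch.nn.L1Loss()
    elif loss_norm == "L2":
        criterion = torch.nn.MSELoss()
    else:
        raise NotImplementedError("specify either L1 or L2 as the loss_norm")
    return criterion(fake_B, real_B) * lambda_l


fake_B = torch.randn(1, 1, 8, 8, 8)  # generator output (random stand-in)
real_B = torch.randn(1, 1, 8, 8, 8)  # ground-truth volume (random stand-in)
print(reconstruction_loss(fake_B, real_B, "L1").item())
print(reconstruction_loss(fake_B, real_B, "L2").item())
```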
{
"source": "JPGarzonE/curso-de-python",
"score": 4
} |
#### File: curso-de-python/estructuras_de_datos/repite_letra_con_tuplas.py
```python
def first_not_repeating_char_my_method(char_sequence):
sequence_tuple = tuple(char_sequence)
not_repeating_char = '_*'
for idx in range( len(sequence_tuple) ):
if sequence_tuple.count( sequence_tuple[idx] ) == 1:
not_repeating_char = sequence_tuple[idx]
break
return not_repeating_char
# Method written by the instructor
def first_not_repeating_char_teachers_method(char_sequence):
seen_letters = {}
for idx, letter in enumerate(char_sequence):
if letter not in seen_letters:
seen_letters[letter] = (idx, 1)
else:
seen_letters[letter] = (seen_letters[letter][0], seen_letters[letter][1] + 1)
final_letters = []
    for key, value in seen_letters.items():
if value[1] == 1:
final_letters.append( (key, value[0]) )
not_repeated_letters = sorted(final_letters, key=lambda value: value[1])
if not_repeated_letters:
return not_repeated_letters[0][0]
else:
return '_'
if __name__ == '__main__':
char_sequence = str(input('Escribe una secuencia de caracteres: '))
result = first_not_repeating_char_my_method(char_sequence)
if result == '_*':
print('Todos los caracteres se repiten.')
else:
print('El primer caracter no repetido es: {}'.format(result))
```
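A quick check of the first function above on two sample strings; the module name in the import is a guess based on the file name.
```python
from repite_letra_con_tuplas import first_not_repeating_char_my_method

print(first_not_repeating_char_my_method("aabbcdd"))  # 'c'
print(first_not_repeating_char_my_method("aabb"))     # '_*' (every character repeats)
```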
#### File: curso-de-python/proyecto_directorio_contactos/contact.py
```python
class Contact:
def __init__(self, name, phone, email):
        self._name = name
        self._phone = phone
        self._email = email
```
#### File: curso-de-python/recursividad/busqueda_binaria.py
```python
import math
def binarySearch(numbers, numberToSearch, idxBottom, idxTop):
if idxBottom > idxTop:
return False
idxMiddle = math.floor( (idxTop + idxBottom) / 2 )
if numbers[idxMiddle] == numberToSearch:
return True
elif numbers[idxMiddle] > numberToSearch:
return binarySearch( numbers, numberToSearch, idxBottom, idxMiddle - 1 )
else:
return binarySearch( numbers, numberToSearch, idxMiddle + 1, idxTop )
def main():
numbers = [2,4,8,13,14,15,19,20,22,23,24,28]
numberToSearch = int( input("Ingresa un número: ") )
result = binarySearch( numbers, numberToSearch, 0, len( numbers ) - 1 )
if result is True:
print('El numero si está en la lista')
else:
print('El numero NO se encuentra')
if __name__ == '__main__':
main()
```
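As a cross-check of the recursive search above, the same membership question can be answered with the standard-library `bisect` module. The sketch below is self-contained and only mirrors the results `binarySearch` is expected to return.
```python
import bisect


def contains_sorted(numbers, value):
    # bisect_left finds the insertion point; the value is present only if
    # that position already holds an equal element.
    idx = bisect.bisect_left(numbers, value)
    return idx < len(numbers) and numbers[idx] == value


numbers = [2, 4, 8, 13, 14, 15, 19, 20, 22, 23, 24, 28]
print(contains_sorted(numbers, 15))  # True, like binarySearch(numbers, 15, 0, len(numbers) - 1)
print(contains_sorted(numbers, 16))  # False
```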
#### File: curso-de-python/recursividad/factorial.py
```python
def main():
print("C A L C U L A D O R A D E F A C T O R I A L")
numero = int( input("¿Cuál es tu numero? ") )
resultado = factorial(numero)
print("El factorial de {} es {}".format(numero, resultado))
def factorial(numero):
    if numero <= 1:
return 1
else:
return numero * factorial(numero-1)
if __name__ == "__main__":
main()
``` |
{
"source": "jpgill86/analysis",
"score": 2
} |
#### File: notebooks/modules/r_stats.py
```python
from rpy2.robjects.packages import importr
from rpy2.robjects import numpy2ri
numpy2ri.activate()
stats = importr('stats')
rrcov = importr('rrcov')
effsize = importr('effsize')
def t_test(*args, **kwargs):
'''
Utilizes R's implementation of Student's t-test
For arguments, see
https://stat.ethz.ch/R-manual/R-patched/library/stats/html/t.test.html
'''
result = stats.t_test(*args, **kwargs)
return {
't': result.rx2('statistic')[0],
'df': result.rx2('parameter')[0],
'p': result.rx2('p.value')[0],
}
def wilcox_test(*args, **kwargs):
'''
Utilizes R's implementation of the Wilcoxon signed-rank test
For arguments, see
https://stat.ethz.ch/R-manual/R-patched/library/stats/html/wilcox.test.html
'''
result = stats.wilcox_test(*args, **kwargs)
return {
'W': result.rx2('statistic')[0],
'p': result.rx2('p.value')[0],
}
def T2_test(*args, **kwargs):
'''
Utilizes the R package rrcov's implementation of Hotelling's T-squared test
For arguments, see
https://rdrr.io/cran/rrcov/man/T2.test.html
'''
result = rrcov.T2_test(*args, **kwargs)
return {
'T2': result.rx2('statistic')[0],
'F': result.rx2('statistic')[1],
'df_num': result.rx2('parameter')[0],
'df_den': result.rx2('parameter')[1],
'p': result.rx2('p.value')[0],
}
def shapiro_test(*args, **kwargs):
'''
Utilizes R's implementation of the Shapiro-Wilk normality test
For arguments, see
https://stat.ethz.ch/R-manual/R-patched/library/stats/html/shapiro.test.html
'''
result = stats.shapiro_test(*args, **kwargs)
return {
'W': result.rx2('statistic')[0],
'p': result.rx2('p.value')[0],
}
def cohen_d(*args, **kwargs):
'''
Utilizes the R package effsize's implementation of Cohen's d and Hedges's g effect size
For arguments, see
https://rdrr.io/cran/effsize/man/cohen.d.html
'''
result = effsize.cohen_d(*args, **kwargs)
return {
'method': result.rx2('method')[0],
'estimate': result.rx2('estimate')[0],
}
``` |
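These wrappers return plain dictionaries, so a typical call looks like the sketch below. It assumes R, rpy2, and the `rrcov`/`effsize` packages are installed, and that the file above is importable as `r_stats` (a guess based on its path); keyword arguments are passed straight through to the underlying R functions.
```python
import numpy as np

from r_stats import t_test, wilcox_test

rng = np.random.default_rng(0)
x = rng.normal(loc=0.0, scale=1.0, size=20)
y = rng.normal(loc=0.5, scale=1.0, size=20)

# Keyword arguments are forwarded to R's t.test / wilcox.test.
print(t_test(x, y, paired=True))       # {'t': ..., 'df': ..., 'p': ...}
print(wilcox_test(x, y, paired=True))  # {'W': ..., 'p': ...}
```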
{
"source": "jpgill86/elephant",
"score": 3
} |
#### File: elephant/elephant/spike_train_synchrony.py
```python
from __future__ import division, print_function, unicode_literals
from collections import namedtuple
import neo
import numpy as np
import quantities as pq
from elephant.utils import is_time_quantity
SpikeContrastTrace = namedtuple("SpikeContrastTrace", (
"contrast", "active_spiketrains", "synchrony"))
def _get_theta_and_n_per_bin(spiketrains, t_start, t_stop, bin_size):
"""
    Calculates theta (the number of spikes per bin) and the number of active
    spike trains per bin.
"""
bin_step = bin_size / 2
edges = np.arange(t_start, t_stop + bin_step, bin_step)
# Calculate histogram for every spike train
histogram = np.vstack([
_binning_half_overlap(st, edges=edges)
for st in spiketrains
])
# Amount of spikes per bin
theta = histogram.sum(axis=0)
# Amount of active spike trains per bin
n_active_per_bin = np.count_nonzero(histogram, axis=0)
return theta, n_active_per_bin
def _binning_half_overlap(spiketrain, edges):
"""
    As described in [1], overlapping the bins produces a better result.
"""
histogram, bin_edges = np.histogram(spiketrain, bins=edges)
histogram = histogram[:-1] + histogram[1:]
return histogram
def spike_contrast(spiketrains, t_start=None, t_stop=None,
min_bin=10 * pq.ms, bin_shrink_factor=0.9,
return_trace=False):
"""
Calculates the synchrony of spike trains, according to
:cite:`synchrony-Ciba18_136`. The spike trains can have different lengths.
Original implementation by: <NAME> [<EMAIL>]
Parameters
----------
spiketrains : list of neo.SpikeTrain
A list of input spike trains to calculate the synchrony from.
t_start : pq.Quantity, optional
The beginning of the spike train. If None, it's taken as the minimum
value of `t_start`s of the input spike trains.
Default: None
t_stop : pq.Quantity, optional
The end of the spike train. If None, it's taken as the maximum value
of `t_stop` of the input spike trains.
Default: None
min_bin : pq.Quantity, optional
Sets the minimum value for the `bin_min` that is calculated by the
algorithm and defines the smallest bin size to compute the histogram
of the input `spiketrains`.
        Default: 10 ms
bin_shrink_factor : float, optional
A multiplier to shrink the bin size on each iteration. The value must
be in range `(0, 1)`.
Default: 0.9
return_trace : bool, optional
If set to True, returns a history of spike-contrast synchrony, computed
        for a range of different bin sizes, along with the maximum value of
the synchrony.
Default: False
Returns
-------
synchrony : float
Returns the synchrony of the input spike trains.
spike_contrast_trace : namedtuple
If `return_trace` is set to True, a `SpikeContrastTrace` namedtuple is
returned with the following attributes:
`.contrast` - the average sum of differences of the number of spikes
        in subsequent bins;
`.active_spiketrains` - the average number of spikes per bin,
weighted by the number of spike trains containing at least one spike
inside the bin;
`.synchrony` - the product of `contrast` and `active_spiketrains`.
Raises
------
ValueError
If `bin_shrink_factor` is not in (0, 1) range.
        If the input consists of a single spike train.
        If all input spike trains contain no more than 1 spike.
    TypeError
        If the input is not a list of `neo.SpikeTrain` objects.
If `t_start`, `t_stop`, or `min_bin` are not time quantities.
Examples
--------
>>> import quantities as pq
>>> from elephant.spike_train_generation import homogeneous_poisson_process
>>> from elephant.spike_train_synchrony import spike_contrast
>>> spiketrain_1 = homogeneous_poisson_process(rate=20*pq.Hz,
... t_stop=1000*pq.ms)
>>> spiketrain_2 = homogeneous_poisson_process(rate=20*pq.Hz,
... t_stop=1000*pq.ms)
>>> spike_contrast([spiketrain_1, spiketrain_2])
0.4192546583850932
"""
if not 0. < bin_shrink_factor < 1.:
raise ValueError("'bin_shrink_factor' ({}) must be in range (0, 1)."
.format(bin_shrink_factor))
if not len(spiketrains) > 1:
raise ValueError("Spike contrast measure requires more than 1 input "
"spiketrain.")
if not all(isinstance(st, neo.SpikeTrain) for st in spiketrains):
raise TypeError("Input spike trains must be a list of neo.SpikeTrain.")
if not is_time_quantity(t_start, allow_none=True) \
or not is_time_quantity(t_stop, allow_none=True):
raise TypeError("'t_start' and 't_stop' must be time quantities.")
if not is_time_quantity(min_bin):
raise TypeError("'min_bin' must be a time quantity.")
if t_start is None:
t_start = min(st.t_start for st in spiketrains)
if t_stop is None:
t_stop = max(st.t_stop for st in spiketrains)
# convert everything to seconds
spiketrains = [st.simplified.magnitude for st in spiketrains]
t_start = t_start.simplified.item()
t_stop = t_stop.simplified.item()
min_bin = min_bin.simplified.item()
spiketrains = [times[(times >= t_start) & (times <= t_stop)]
for times in spiketrains]
n_spiketrains = len(spiketrains)
n_spikes_total = sum(map(len, spiketrains))
duration = t_stop - t_start
bin_max = duration / 2
try:
isi_min = min(np.diff(st).min() for st in spiketrains if len(st) > 1)
    except ValueError:
raise ValueError("All input spiketrains contain no more than 1 spike.")
bin_min = max(isi_min / 2, min_bin)
contrast_list = []
active_spiketrains = []
synchrony_curve = []
# Set new time boundaries
t_start = t_start - isi_min
t_stop = t_stop + isi_min
bin_size = bin_max
while bin_size >= bin_min:
# Calculate Theta and n
theta_k, n_k = _get_theta_and_n_per_bin(spiketrains,
t_start=t_start,
t_stop=t_stop,
bin_size=bin_size)
# calculate synchrony_curve = contrast * active_st
active_st = (np.sum(n_k * theta_k) / np.sum(theta_k) - 1) / (
n_spiketrains - 1)
contrast = np.sum(np.abs(np.diff(theta_k))) / (2 * n_spikes_total)
# Contrast: sum(|derivation|) / (2*#Spikes)
synchrony = contrast * active_st
contrast_list.append(contrast)
active_spiketrains.append(active_st)
synchrony_curve.append(synchrony)
# New bin size
bin_size *= bin_shrink_factor
# Sync value is maximum of the cost function C
synchrony = max(synchrony_curve)
if return_trace:
spike_contrast_trace = SpikeContrastTrace(
contrast=contrast_list,
active_spiketrains=active_spiketrains,
synchrony=synchrony_curve
)
return synchrony, spike_contrast_trace
return synchrony
```
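Building on the docstring example above, the trace returned with `return_trace=True` exposes the synchrony value computed for every bin size the algorithm visits. A short sketch, assuming `elephant`, `neo`, and `quantities` are installed:
```python
import numpy as np
import quantities as pq

from elephant.spike_train_generation import homogeneous_poisson_process
from elephant.spike_train_synchrony import spike_contrast

np.random.seed(0)  # reproducible spike trains
spiketrains = [homogeneous_poisson_process(rate=20 * pq.Hz, t_stop=1000 * pq.ms)
               for _ in range(5)]

synchrony, trace = spike_contrast(spiketrains, return_trace=True)
print(synchrony)                    # the maximum of the synchrony curve
print(len(trace.synchrony))         # one synchrony value per bin size tried
print(max(trace.synchrony) == synchrony)
```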
#### File: elephant/test/test_spike_train_correlation.py
```python
import sys
import unittest
import neo
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal, assert_array_almost_equal
import elephant.conversion as conv
import elephant.spike_train_correlation as sc
from elephant.spike_train_generation import homogeneous_poisson_process,\
homogeneous_gamma_process
import math
python_version_major = sys.version_info.major
class CovarianceTestCase(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_0 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_1 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
# Build spike trains
self.st_0 = neo.SpikeTrain(
self.test_array_1d_0, units='ms', t_stop=50.)
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
def test_covariance_binned(self):
'''
Test covariance between two binned spike trains.
'''
# Calculate clipped and unclipped
res_clipped = sc.covariance(
self.binned_st, binary=True, fast=False)
res_unclipped = sc.covariance(
self.binned_st, binary=False, fast=False)
# Check dimensions
self.assertEqual(len(res_clipped), 2)
self.assertEqual(len(res_unclipped), 2)
# Check result unclipped against result calculated from scratch for
# the off-diagonal element
mat = self.binned_st.to_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / (len(mat[0]) - 1)
        # Check result unclipped against result calculated by numpy.cov
target_numpy = np.cov(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
# Check result clipped against result calculated from scratch for
        # the off-diagonal element
mat = self.binned_st.to_bool_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / (len(mat[0]) - 1)
        # Check result clipped against result calculated by numpy.cov
target_numpy = np.cov(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
def test_covariance_binned_same_spiketrains(self):
'''
Test if the covariation between two identical binned spike
trains evaluates to the expected 2x2 matrix.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.covariance(binned_st, fast=False)
# Check dimensions
self.assertEqual(len(result), 2)
# Check result
assert_array_equal(result[0][0], result[1][1])
def test_covariance_binned_short_input(self):
'''
Test if input list of only one binned spike train yields correct result
that matches numpy.cov (covariance with itself)
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.covariance(binned_st, binary=True, fast=False)
        # Check the result against the covariance calculated by numpy.cov
mat = binned_st.to_bool_array()
target = np.cov(mat)
# Check result and dimensionality of result
self.assertEqual(result.ndim, target.ndim)
assert_array_almost_equal(result, target)
assert_array_almost_equal(target,
sc.covariance(binned_st, binary=True,
fast=True))
def test_covariance_fast_mode(self):
np.random.seed(27)
st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
binned_st = conv.BinnedSpikeTrain(st, n_bins=10)
assert_array_almost_equal(sc.covariance(binned_st, fast=False),
sc.covariance(binned_st, fast=True))
class CorrCoefTestCase(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_0 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_1 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
self.test_array_1d_2 = []
# Build spike trains
self.st_0 = neo.SpikeTrain(
self.test_array_1d_0, units='ms', t_stop=50.)
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
self.st_2 = neo.SpikeTrain(
self.test_array_1d_2, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
def test_corrcoef_binned(self):
'''
Test the correlation coefficient between two binned spike trains.
'''
# Calculate clipped and unclipped
res_clipped = sc.correlation_coefficient(
self.binned_st, binary=True)
res_unclipped = sc.correlation_coefficient(
self.binned_st, binary=False)
# Check dimensions
self.assertEqual(len(res_clipped), 2)
self.assertEqual(len(res_unclipped), 2)
# Check result unclipped against result calculated from scratch for
# the off-diagonal element
mat = self.binned_st.to_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
np.sqrt(
np.dot(mat[0] - mean_0, mat[0] - mean_0) *
np.dot(mat[1] - mean_1, mat[1] - mean_1))
# Check result unclipped against result calculated by numpy.corrcoef
target_numpy = np.corrcoef(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
# Check result clipped against result calculated from scratch for
        # the off-diagonal element
mat = self.binned_st.to_bool_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
np.sqrt(
np.dot(mat[0] - mean_0, mat[0] - mean_0) *
np.dot(mat[1] - mean_1, mat[1] - mean_1))
        # Check result clipped against result calculated by numpy.corrcoef
target_numpy = np.corrcoef(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
def test_corrcoef_binned_same_spiketrains(self):
'''
Test if the correlation coefficient between two identical binned spike
trains evaluates to a 2x2 matrix of ones.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.correlation_coefficient(binned_st, fast=False)
target = np.ones((2, 2))
# Check dimensions
self.assertEqual(len(result), 2)
# Check result
assert_array_almost_equal(result, target)
assert_array_almost_equal(
result, sc.correlation_coefficient(
binned_st, fast=True))
def test_corrcoef_binned_short_input(self):
'''
Test if input list of one binned spike train yields 1.0.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
result = sc.correlation_coefficient(binned_st, fast=False)
target = np.array(1.)
# Check result and dimensionality of result
self.assertEqual(result.ndim, 0)
assert_array_almost_equal(result, target)
assert_array_almost_equal(
result, sc.correlation_coefficient(
binned_st, fast=True))
@unittest.skipUnless(python_version_major == 3, "assertWarns requires 3.2")
def test_empty_spike_train(self):
'''
Test whether a warning is yielded in case of empty spike train.
Also check correctness of the output array.
'''
# st_2 is empty
binned_12 = conv.BinnedSpikeTrain([self.st_1, self.st_2],
bin_size=1 * pq.ms)
with self.assertWarns(UserWarning):
result = sc.correlation_coefficient(binned_12, fast=False)
# test for NaNs in the output array
target = np.zeros((2, 2)) * np.NaN
target[0, 0] = 1.0
assert_array_almost_equal(result, target)
def test_corrcoef_fast_mode(self):
np.random.seed(27)
st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s)
binned_st = conv.BinnedSpikeTrain(st, n_bins=10)
assert_array_almost_equal(
sc.correlation_coefficient(
binned_st, fast=False), sc.correlation_coefficient(
binned_st, fast=True))
class CrossCorrelationHistogramTest(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_1 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_2 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
# Build spike trains
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
self.st_2 = neo.SpikeTrain(
self.test_array_1d_2, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st1 = conv.BinnedSpikeTrain(
[self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
self.binned_st2 = conv.BinnedSpikeTrain(
[self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
self.binned_sts = conv.BinnedSpikeTrain(
[self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
# Binned sts to check errors raising
self.st_check_bin_size = conv.BinnedSpikeTrain(
[self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=5 * pq.ms)
self.st_check_t_start = conv.BinnedSpikeTrain(
[self.st_1], t_start=1 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
self.st_check_t_stop = conv.BinnedSpikeTrain(
[self.st_1], t_start=0 * pq.ms, t_stop=40. * pq.ms,
bin_size=1 * pq.ms)
self.st_check_dimension = conv.BinnedSpikeTrain(
[self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
bin_size=1 * pq.ms)
def test_cross_correlation_histogram(self):
'''
Test generic result of a cross-correlation histogram between two binned
spike trains.
'''
# Calculate CCH using Elephant (normal and binary version) with
# mode equal to 'full' (whole spike trains are correlated)
cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
binary=True)
cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full', binary=False)
cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
binary=True, method='memory')
cch_unclipped_mem, bin_ids_unclipped_mem = \
sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
binary=False, method='memory')
# Check consistency two methods
assert_array_equal(
np.squeeze(cch_clipped.magnitude), np.squeeze(
cch_clipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_clipped.times), np.squeeze(
cch_clipped_mem.times))
assert_array_equal(
np.squeeze(cch_unclipped.magnitude), np.squeeze(
cch_unclipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_unclipped.times), np.squeeze(
cch_unclipped_mem.times))
assert_array_almost_equal(bin_ids_clipped, bin_ids_clipped_mem)
assert_array_almost_equal(bin_ids_unclipped, bin_ids_unclipped_mem)
# Check normal correlation Note: Use numpy correlate to verify result.
# Note: numpy conventions for input array 1 and input array 2 are
# swapped compared to Elephant!
mat1 = self.binned_st1.to_array()[0]
mat2 = self.binned_st2.to_array()[0]
target_numpy = np.correlate(mat2, mat1, mode='full')
assert_array_equal(
target_numpy, np.squeeze(cch_unclipped.magnitude))
# Check cross correlation function for several displacements tau
# Note: Use Elephant corrcoeff to verify result
tau = [-25.0, 0.0, 13.0] # in ms
for t in tau:
# adjust t_start, t_stop to shift by tau
t0 = np.min([self.st_1.t_start + t * pq.ms, self.st_2.t_start])
t1 = np.max([self.st_1.t_stop + t * pq.ms, self.st_2.t_stop])
st1 = neo.SpikeTrain(self.st_1.magnitude + t, units='ms',
t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
binned_sts = conv.BinnedSpikeTrain([st1, st2],
bin_size=1 * pq.ms,
t_start=t0 * pq.ms,
t_stop=t1 * pq.ms)
            # calculate corrcoef
corrcoef = sc.correlation_coefficient(binned_sts)[1, 0]
# expand t_stop to have two spike trains with same length as st1,
# st2
st1 = neo.SpikeTrain(self.st_1.magnitude, units='ms',
t_start=self.st_1.t_start,
t_stop=self.st_1.t_stop + np.abs(t) * pq.ms)
st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
t_start=self.st_2.t_start,
t_stop=self.st_2.t_stop + np.abs(t) * pq.ms)
binned_st1 = conv.BinnedSpikeTrain(
st1, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
bin_size=1 * pq.ms)
binned_st2 = conv.BinnedSpikeTrain(
st2, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
bin_size=1 * pq.ms)
# calculate CCHcoef and take value at t=tau
CCHcoef, _ = sc.cch(binned_st1, binned_st2,
cross_correlation_coefficient=True)
left_edge = - binned_st1.n_bins + 1
tau_bin = int(t / float(binned_st1.bin_size.magnitude))
assert_array_almost_equal(
corrcoef, CCHcoef[tau_bin - left_edge].magnitude)
# Check correlation using binary spike trains
mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
target_numpy = np.correlate(mat2, mat1, mode='full')
assert_array_equal(
target_numpy, np.squeeze(cch_clipped.magnitude))
# Check the time axis and bin IDs of the resulting AnalogSignal
assert_array_almost_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_unclipped.times)
assert_array_almost_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_clipped.times)
# Calculate CCH using Elephant (normal and binary version) with
# mode equal to 'valid' (only completely overlapping intervals of the
# spike trains are correlated)
cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=True)
cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=False)
cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=True, method='memory')
cch_unclipped_mem, bin_ids_unclipped_mem = \
sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='valid',
binary=False, method='memory')
# Check consistency two methods
assert_array_equal(
np.squeeze(cch_clipped.magnitude), np.squeeze(
cch_clipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_clipped.times), np.squeeze(
cch_clipped_mem.times))
assert_array_equal(
np.squeeze(cch_unclipped.magnitude), np.squeeze(
cch_unclipped_mem.magnitude))
assert_array_equal(
np.squeeze(cch_unclipped.times), np.squeeze(
cch_unclipped_mem.times))
assert_array_equal(bin_ids_clipped, bin_ids_clipped_mem)
assert_array_equal(bin_ids_unclipped, bin_ids_unclipped_mem)
# Check normal correlation Note: Use numpy correlate to verify result.
# Note: numpy conventions for input array 1 and input array 2 are
# swapped compared to Elephant!
mat1 = self.binned_st1.to_array()[0]
mat2 = self.binned_st2.to_array()[0]
target_numpy = np.correlate(mat2, mat1, mode='valid')
assert_array_equal(
target_numpy, np.squeeze(cch_unclipped.magnitude))
# Check correlation using binary spike trains
mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
target_numpy = np.correlate(mat2, mat1, mode='valid')
assert_array_equal(
target_numpy, np.squeeze(cch_clipped.magnitude))
# Check the time axis and bin IDs of the resulting AnalogSignal
assert_array_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_unclipped.times)
assert_array_equal(
(bin_ids_clipped - 0.5) * self.binned_st1.bin_size,
cch_clipped.times)
# Check for wrong window parameter setting
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window='dsaij')
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window='dsaij', method='memory')
def test_raising_error_wrong_inputs(self):
        '''Check that an exception is thrown if the two spike trains do not
        fulfill the requirements of the function'''
# Check the bin_sizes are the same
self.assertRaises(
ValueError,
sc.cross_correlation_histogram, self.binned_st1,
self.st_check_bin_size)
# Check input are one dimensional
self.assertRaises(
ValueError, sc.cross_correlation_histogram,
self.st_check_dimension, self.binned_st2)
self.assertRaises(
ValueError, sc.cross_correlation_histogram,
self.binned_st2, self.st_check_dimension)
def test_window(self):
'''Test if the window parameter is correctly interpreted.'''
cch_win, bin_ids = sc.cch(
self.binned_st1, self.binned_st2, window=[-30, 30])
cch_win_mem, bin_ids_mem = sc.cch(
self.binned_st1, self.binned_st2, window=[-30, 30],
method='memory')
self.assertEqual(len(bin_ids), cch_win.shape[0])
assert_array_equal(bin_ids, np.arange(-30, 31, 1))
assert_array_equal(
(bin_ids - 0.5) * self.binned_st1.bin_size, cch_win.times)
assert_array_equal(bin_ids_mem, np.arange(-30, 31, 1))
assert_array_equal(
(bin_ids_mem - 0.5) * self.binned_st1.bin_size, cch_win.times)
assert_array_equal(cch_win, cch_win_mem)
cch_unclipped, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full', binary=False)
assert_array_equal(cch_win, cch_unclipped[19:80])
_, bin_ids = sc.cch(
self.binned_st1, self.binned_st2, window=[20, 30])
_, bin_ids_mem = sc.cch(
self.binned_st1, self.binned_st2, window=[20, 30], method='memory')
assert_array_equal(bin_ids, np.arange(20, 31, 1))
assert_array_equal(bin_ids_mem, np.arange(20, 31, 1))
_, bin_ids = sc.cch(
self.binned_st1, self.binned_st2, window=[-30, -20])
_, bin_ids_mem = sc.cch(
self.binned_st1, self.binned_st2, window=[-30, -20],
method='memory')
assert_array_equal(bin_ids, np.arange(-30, -19, 1))
assert_array_equal(bin_ids_mem, np.arange(-30, -19, 1))
# Check for wrong assignments to the window parameter
# Test for window longer than the total length of the spike trains
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window=[-60, 50])
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window=[-50, 60])
# Test for no integer or wrong string in input
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window=[-25.5, 25.5])
self.assertRaises(
ValueError, sc.cross_correlation_histogram, self.binned_st1,
self.binned_st2, window='test')
def test_border_correction(self):
'''Test if the border correction for bins at the edges is correctly
performed'''
# check that nothing changes for valid lags
cch_valid, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
border_correction=True, binary=False, kernel=None)
valid_lags = sc._CrossCorrHist.get_valid_lags(self.binned_st1,
self.binned_st2)
left_edge, right_edge = valid_lags[(0, -1), ]
cch_builder = sc._CrossCorrHist(self.binned_st1, self.binned_st2,
window=(left_edge, right_edge))
cch_valid = cch_builder.correlate_speed(cch_mode='valid')
cch_corrected = cch_builder.border_correction(cch_valid)
np.testing.assert_array_equal(cch_valid, cch_corrected)
# test the border correction for lags without full overlap
cch_full, lags_full = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full')
cch_full_corrected, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, window='full',
border_correction=True)
n_bins_outside_window = np.min(np.abs(
np.subtract.outer(lags_full, valid_lags)), axis=1)
min_n_bins = min(self.binned_st1.n_bins, self.binned_st2.n_bins)
border_correction = (cch_full_corrected / cch_full).magnitude.flatten()
# exclude NaNs caused by zeros in the cch
mask = np.logical_not(np.isnan(border_correction))
np.testing.assert_array_almost_equal(
border_correction[mask],
(float(min_n_bins)
/ (min_n_bins - n_bins_outside_window))[mask])
def test_kernel(self):
        '''Test if the smoothing kernel is correctly defined, and whether it is
applied properly.'''
smoothed_cch, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, kernel=np.ones(3))
smoothed_cch_mem, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, kernel=np.ones(3),
method='memory')
cch, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, kernel=None)
cch_mem, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, kernel=None, method='memory')
self.assertNotEqual(smoothed_cch.all, cch.all)
self.assertNotEqual(smoothed_cch_mem.all, cch_mem.all)
self.assertRaises(
ValueError, sc.cch, self.binned_st1, self.binned_st2,
kernel=np.ones(100))
self.assertRaises(
ValueError, sc.cch, self.binned_st1, self.binned_st2,
kernel=np.ones(100), method='memory')
def test_exist_alias(self):
'''
Test if alias cch still exists.
'''
self.assertEqual(sc.cross_correlation_histogram, sc.cch)
def test_annotations(self):
cch, _ = sc.cross_correlation_histogram(
self.binned_st1, self.binned_st2, kernel=np.ones(3))
target_dict = dict(window='full', border_correction=False,
binary=False, kernel=True,
normalization='counts')
self.assertIn('cch_parameters', cch.annotations)
self.assertEqual(cch.annotations['cch_parameters'], target_dict)
@unittest.skipUnless(python_version_major == 3, "subTest requires 3.4")
class CrossCorrelationHistDifferentTStartTStopTest(unittest.TestCase):
def _run_sub_tests(self, st1, st2, lags_true):
for window in ('valid', 'full'):
for method in ('speed', 'memory'):
with self.subTest(window=window, method=method):
bin_size = 1 * pq.s
st1_binned = conv.BinnedSpikeTrain(st1, bin_size=bin_size)
st2_binned = conv.BinnedSpikeTrain(st2, bin_size=bin_size)
left, right = lags_true[window][(0, -1), ]
cch_window, lags_window = sc.cross_correlation_histogram(
st1_binned, st2_binned, window=(left, right),
method=method,
)
cch, lags = sc.cross_correlation_histogram(
st1_binned, st2_binned, window=window)
# target cross correlation
cch_target = np.correlate(st1_binned.to_array()[0],
st2_binned.to_array()[0],
mode=window)
self.assertEqual(len(lags_window), cch_window.shape[0])
assert_array_almost_equal(cch.magnitude,
cch_window.magnitude)
# the output is reversed since we cross-correlate
# st2 with st1 rather than st1 with st2 (numpy behavior)
assert_array_almost_equal(np.ravel(cch.magnitude),
cch_target[::-1])
assert_array_equal(lags, lags_true[window])
assert_array_equal(lags, lags_window)
def test_cross_correlation_histogram_valid_full_overlap(self):
# ex. 1 in the source code
st1 = neo.SpikeTrain([3.5, 4.5, 7.5] * pq.s, t_start=3 * pq.s,
t_stop=8 * pq.s)
st2 = neo.SpikeTrain([1.5, 2.5, 4.5, 8.5, 9.5, 10.5]
* pq.s, t_start=1 * pq.s, t_stop=13 * pq.s)
lags_true = {
'valid': np.arange(-2, 6, dtype=np.int32),
'full': np.arange(-6, 10, dtype=np.int32)
}
self._run_sub_tests(st1, st2, lags_true)
def test_cross_correlation_histogram_valid_partial_overlap(self):
# ex. 2 in the source code
st1 = neo.SpikeTrain([2.5, 3.5, 4.5, 6.5] * pq.s, t_start=1 * pq.s,
t_stop=7 * pq.s)
st2 = neo.SpikeTrain([3.5, 5.5, 6.5, 7.5, 8.5] *
pq.s, t_start=2 * pq.s, t_stop=9 * pq.s)
lags_true = {
'valid': np.arange(1, 3, dtype=np.int32),
'full': np.arange(-4, 8, dtype=np.int32)
}
self._run_sub_tests(st1, st2, lags_true)
def test_cross_correlation_histogram_valid_no_overlap(self):
st1 = neo.SpikeTrain([2.5, 3.5, 4.5, 6.5] * pq.s, t_start=1 * pq.s,
t_stop=7 * pq.s)
st2 = neo.SpikeTrain([3.5, 5.5, 6.5, 7.5, 8.5] * pq.s + 6 * pq.s,
t_start=8 * pq.s, t_stop=15 * pq.s)
lags_true = {
'valid': np.arange(7, 9, dtype=np.int32),
'full': np.arange(2, 14, dtype=np.int32)
}
self._run_sub_tests(st1, st2, lags_true)
def test_invalid_time_shift(self):
# time shift of 0.4 s is not multiple of bin_size=1 s
st1 = neo.SpikeTrain([2.5, 3.5] * pq.s, t_start=1 * pq.s,
t_stop=7 * pq.s)
st2 = neo.SpikeTrain([3.5, 5.5] * pq.s, t_start=1.4 * pq.s,
t_stop=7.4 * pq.s)
bin_size = 1 * pq.s
st1_binned = conv.BinnedSpikeTrain(st1, bin_size=bin_size)
st2_binned = conv.BinnedSpikeTrain(st2, bin_size=bin_size)
self.assertRaises(ValueError, sc.cross_correlation_histogram,
st1_binned, st2_binned)
class SpikeTimeTilingCoefficientTestCase(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_1 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_2 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
# Build spike trains
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
self.st_2 = neo.SpikeTrain(
self.test_array_1d_2, units='ms', t_stop=50.)
def test_sttc(self):
# test for result
target = 0.495860165593
self.assertAlmostEqual(target, sc.sttc(self.st_1, self.st_2,
0.005 * pq.s))
# test for same result with dt given in ms
self.assertAlmostEqual(target, sc.sttc(self.st_1, self.st_2,
5.0 * pq.ms))
# test no spiketrains
self.assertTrue(np.isnan(sc.sttc([], [])))
# test one spiketrain
self.assertTrue(np.isnan(sc.sttc(self.st_1, [])))
# test for one spike in a spiketrain
st1 = neo.SpikeTrain([1], units='ms', t_stop=1.)
st2 = neo.SpikeTrain([5], units='ms', t_stop=10.)
self.assertEqual(sc.sttc(st1, st2), 1.0)
self.assertTrue(bool(sc.sttc(st1, st2, 0.1 * pq.ms) < 0))
# test for high value of dt
self.assertEqual(sc.sttc(self.st_1, self.st_2, dt=5 * pq.s), 1.0)
# test for TA = PB = 1 but TB /= PA /= 1 and vice versa
st3 = neo.SpikeTrain([1, 5, 9], units='ms', t_stop=10.)
target2 = 1. / 3.
self.assertAlmostEqual(target2, sc.sttc(st3, st2,
0.003 * pq.s))
self.assertAlmostEqual(target2, sc.sttc(st2, st3,
0.003 * pq.s))
def test_exist_alias(self):
        # Test if alias sttc still exists.
self.assertEqual(sc.spike_time_tiling_coefficient, sc.sttc)
class SpikeTrainTimescaleTestCase(unittest.TestCase):
def test_timescale_calculation(self):
'''
Test the timescale generation using an alpha-shaped ISI distribution,
see [1, eq. 1.68]. This is equivalent to a homogeneous gamma process
with alpha=2 and beta=2*nu where nu is the rate.
For this process, the autocorrelation function is given by a sum of a
delta peak and a (negative) exponential, see [1, eq. 1.69].
        The exponential decays with tau_corr = 1 / (4*nu), which fixes the
        timescale.
[1] <NAME>. (2009). A brief introduction to some simple stochastic
processes. Stochastic Methods in Neuroscience, 1.
'''
nu = 25 / pq.s
T = 15 * pq.min
bin_size = 1 * pq.ms
timescale = 1 / (4 * nu)
np.random.seed(35)
for _ in range(10):
spikes = homogeneous_gamma_process(2, 2 * nu, 0 * pq.ms, T)
spikes_bin = conv.BinnedSpikeTrain(spikes, bin_size)
timescale_i = sc.spike_train_timescale(spikes_bin, 10 * timescale)
assert_array_almost_equal(timescale, timescale_i, decimal=3)
def test_timescale_errors(self):
spikes = neo.SpikeTrain([1, 5, 7, 8] * pq.ms, t_stop=10 * pq.ms)
binsize = 1 * pq.ms
spikes_bin = conv.BinnedSpikeTrain(spikes, binsize)
# Tau max with no units
tau_max = 1
self.assertRaises(ValueError,
sc.spike_train_timescale, spikes_bin, tau_max)
# Tau max that is not a multiple of the binsize
tau_max = 1.1 * pq.ms
self.assertRaises(ValueError,
sc.spike_train_timescale, spikes_bin, tau_max)
@unittest.skipUnless(python_version_major == 3,
"assertWarns requires python 3.2")
def test_timescale_nan(self):
st0 = neo.SpikeTrain([] * pq.ms, t_stop=10 * pq.ms)
st1 = neo.SpikeTrain([1] * pq.ms, t_stop=10 * pq.ms)
st2 = neo.SpikeTrain([1, 5] * pq.ms, t_stop=10 * pq.ms)
st3 = neo.SpikeTrain([1, 5, 6] * pq.ms, t_stop=10 * pq.ms)
st4 = neo.SpikeTrain([1, 5, 6, 9] * pq.ms, t_stop=10 * pq.ms)
binsize = 1 * pq.ms
tau_max = 1 * pq.ms
for st in [st0, st1]:
bst = conv.BinnedSpikeTrain(st, binsize)
with self.assertWarns(UserWarning):
timescale = sc.spike_train_timescale(bst, tau_max)
self.assertTrue(math.isnan(timescale))
for st in [st2, st3, st4]:
bst = conv.BinnedSpikeTrain(st, binsize)
timescale = sc.spike_train_timescale(bst, tau_max)
self.assertFalse(math.isnan(timescale))
if __name__ == '__main__':
unittest.main()
```
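Outside the test harness, the functions exercised above are typically driven as in the sketch below, assuming `elephant`, `neo`, and `quantities` are installed; the spike times are arbitrary.
```python
import neo
import quantities as pq
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import (correlation_coefficient,
                                              cross_correlation_histogram)

st1 = neo.SpikeTrain([1.3, 7.56, 15.87, 28.23, 30.9, 34.2] * pq.ms, t_stop=50 * pq.ms)
st2 = neo.SpikeTrain([1.02, 2.71, 18.82, 28.46, 28.79, 43.6] * pq.ms, t_stop=50 * pq.ms)

# Pairwise correlation coefficient on a common binning.
binned_pair = BinnedSpikeTrain([st1, st2], bin_size=1 * pq.ms)
print(correlation_coefficient(binned_pair))

# The cross-correlation histogram takes one binned train per argument.
cch, lags = cross_correlation_histogram(
    BinnedSpikeTrain(st1, bin_size=1 * pq.ms, t_start=0 * pq.ms, t_stop=50 * pq.ms),
    BinnedSpikeTrain(st2, bin_size=1 * pq.ms, t_start=0 * pq.ms, t_stop=50 * pq.ms),
    window=[-10, 10])
print(cch.magnitude.ravel())
print(lags)
```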
#### File: elephant/test/test_statistics.py
```python
from __future__ import division
import itertools
import math
import sys
import unittest
import neo
import numpy as np
import quantities as pq
import scipy.integrate as spint
from numpy.testing import assert_array_almost_equal, assert_array_equal, \
assert_array_less
import elephant.kernels as kernels
from elephant import statistics
from elephant.spike_train_generation import homogeneous_poisson_process
if sys.version_info.major == 2:
import unittest2 as unittest
class isi_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
[0.02, 0.71, 1.82, 8.46],
[0.03, 0.14, 0.15, 0.92]])
self.targ_array_2d_0 = np.array([[-0.28, 0.15, 0.95, 7.23],
[0.01, -0.57, -1.67, -7.54]])
self.targ_array_2d_1 = np.array([[0.26, 0.31, 0.36],
[0.69, 1.11, 6.64],
[0.11, 0.01, 0.77]])
self.targ_array_2d_default = self.targ_array_2d_1
self.test_array_1d = self.test_array_2d[0, :]
self.targ_array_1d = self.targ_array_2d_1[0, :]
def test_isi_with_spiketrain(self):
st = neo.SpikeTrain(
self.test_array_1d, units='ms', t_stop=10.0, t_start=0.29)
target = pq.Quantity(self.targ_array_1d, 'ms')
res = statistics.isi(st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_quantities_1d(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(self.targ_array_1d, 'ms')
res = statistics.isi(st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_1d(self):
st = self.test_array_1d
target = self.targ_array_1d
res = statistics.isi(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_default(self):
st = self.test_array_2d
target = self.targ_array_2d_default
res = statistics.isi(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_0(self):
st = self.test_array_2d
target = self.targ_array_2d_0
res = statistics.isi(st, axis=0)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_1(self):
st = self.test_array_2d
target = self.targ_array_2d_1
res = statistics.isi(st, axis=1)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_unsorted_array(self):
np.random.seed(0)
array = np.random.rand(100)
with self.assertWarns(UserWarning):
isi = statistics.isi(array)
class isi_cv_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_regular = np.arange(1, 6)
def test_cv_isi_regular_spiketrain_is_zero(self):
st = neo.SpikeTrain(self.test_array_regular, units='ms', t_stop=10.0)
targ = 0.0
res = statistics.cv(statistics.isi(st))
self.assertEqual(res, targ)
def test_cv_isi_regular_array_is_zero(self):
st = self.test_array_regular
targ = 0.0
res = statistics.cv(statistics.isi(st))
self.assertEqual(res, targ)
class mean_firing_rate_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_3d = np.ones([5, 7, 13])
self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
[0.02, 0.71, 1.82, 8.46],
[0.03, 0.14, 0.15, 0.92]])
self.targ_array_2d_0 = np.array([3, 3, 3, 3])
self.targ_array_2d_1 = np.array([4, 4, 4])
self.targ_array_2d_None = 12
self.targ_array_2d_default = self.targ_array_2d_None
self.max_array_2d_0 = np.array([0.3, 0.71, 1.82, 8.46])
self.max_array_2d_1 = np.array([1.23, 8.46, 0.92])
self.max_array_2d_None = 8.46
self.max_array_2d_default = self.max_array_2d_None
self.test_array_1d = self.test_array_2d[0, :]
self.targ_array_1d = self.targ_array_2d_1[0]
self.max_array_1d = self.max_array_2d_1[0]
def test_invalid_input_spiketrain(self):
# empty spiketrain
self.assertRaises(ValueError, statistics.mean_firing_rate, [])
for st_invalid in (None, 0.1):
self.assertRaises(TypeError, statistics.mean_firing_rate,
st_invalid)
def test_mean_firing_rate_with_spiketrain(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
target = pq.Quantity(self.targ_array_1d / 10., '1/ms')
res = statistics.mean_firing_rate(st)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_typical_use_case(self):
np.random.seed(92)
st = homogeneous_poisson_process(rate=100 * pq.Hz, t_stop=100 * pq.s)
rate1 = statistics.mean_firing_rate(st)
rate2 = statistics.mean_firing_rate(st, t_start=st.t_start,
t_stop=st.t_stop)
self.assertEqual(rate1.units, rate2.units)
self.assertAlmostEqual(rate1.item(), rate2.item())
def test_mean_firing_rate_with_spiketrain_set_ends(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
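        # illustrative check of the expected value below: two spikes
        # (0.56 ms and 0.87 ms) fall within [0.4, 0.9] ms, so the expected
        # rate is 2 spikes / 0.5 ms = 4 / ms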
target = pq.Quantity(2 / 0.5, '1/ms')
res = statistics.mean_firing_rate(st, t_start=0.4 * pq.ms,
t_stop=0.9 * pq.ms)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_quantities_1d(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(self.targ_array_1d / self.max_array_1d, '1/ms')
res = statistics.mean_firing_rate(st)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_quantities_1d_set_ends(self):
st = pq.Quantity(self.test_array_1d, units='ms')
# t_stop is not a Quantity
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=400 * pq.us, t_stop=1.)
# t_start is not a Quantity
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=0.4, t_stop=1. * pq.ms)
def test_mean_firing_rate_with_plain_array_1d(self):
st = self.test_array_1d
target = self.targ_array_1d / self.max_array_1d
res = statistics.mean_firing_rate(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_1d_set_ends(self):
st = self.test_array_1d
target = self.targ_array_1d / (1.23 - 0.3)
res = statistics.mean_firing_rate(st, t_start=0.3, t_stop=1.23)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_default(self):
st = self.test_array_2d
target = self.targ_array_2d_default / self.max_array_2d_default
res = statistics.mean_firing_rate(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_0(self):
st = self.test_array_2d
target = self.targ_array_2d_0 / self.max_array_2d_0
res = statistics.mean_firing_rate(st, axis=0)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_1(self):
st = self.test_array_2d
target = self.targ_array_2d_1 / self.max_array_2d_1
res = statistics.mean_firing_rate(st, axis=1)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_None(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, None) / 5.
res = statistics.mean_firing_rate(st, axis=None, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_0(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 0) / 5.
res = statistics.mean_firing_rate(st, axis=0, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_1(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 1) / 5.
res = statistics.mean_firing_rate(st, axis=1, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_2(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 2) / 5.
res = statistics.mean_firing_rate(st, axis=2, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_1_set_ends(self):
st = self.test_array_2d
target = np.array([4, 1, 3]) / (1.23 - 0.14)
res = statistics.mean_firing_rate(st, axis=1, t_start=0.14,
t_stop=1.23)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_None(self):
st = self.test_array_2d
target = self.targ_array_2d_None / self.max_array_2d_None
res = statistics.mean_firing_rate(st, axis=None)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_and_units_start_stop_typeerror(
self):
st = self.test_array_2d
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'))
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=10.)
self.assertRaises(TypeError, statistics.mean_firing_rate, st,
t_start=0.,
t_stop=pq.Quantity(10, 'ms'))
class FanoFactorTestCase(unittest.TestCase):
def setUp(self):
np.random.seed(100)
num_st = 300
self.test_spiketrains = []
self.test_array = []
self.test_quantity = []
self.test_list = []
self.sp_counts = np.zeros(num_st)
for i in range(num_st):
r = np.random.rand(np.random.randint(20) + 1)
st = neo.core.SpikeTrain(r * pq.ms,
t_start=0.0 * pq.ms,
t_stop=20.0 * pq.ms)
self.test_spiketrains.append(st)
self.test_array.append(r)
self.test_quantity.append(r * pq.ms)
self.test_list.append(list(r))
# for cross-validation
self.sp_counts[i] = len(st)
def test_fanofactor_spiketrains(self):
# Test with list of spiketrains
self.assertEqual(
np.var(self.sp_counts) / np.mean(self.sp_counts),
statistics.fanofactor(self.test_spiketrains))
# One spiketrain in list
st = self.test_spiketrains[0]
self.assertEqual(statistics.fanofactor([st]), 0.0)
def test_fanofactor_empty(self):
# Test with empty list
self.assertTrue(np.isnan(statistics.fanofactor([])))
self.assertTrue(np.isnan(statistics.fanofactor([[]])))
# Test with empty quantity
self.assertTrue(np.isnan(statistics.fanofactor([] * pq.ms)))
# Empty spiketrain
st = neo.core.SpikeTrain([] * pq.ms, t_start=0 * pq.ms,
t_stop=1.5 * pq.ms)
self.assertTrue(np.isnan(statistics.fanofactor(st)))
def test_fanofactor_spiketrains_same(self):
# Test with same spiketrains in list
sts = [self.test_spiketrains[0]] * 3
self.assertEqual(statistics.fanofactor(sts), 0.0)
def test_fanofactor_array(self):
self.assertEqual(statistics.fanofactor(self.test_array),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_array_same(self):
lst = [self.test_array[0]] * 3
self.assertEqual(statistics.fanofactor(lst), 0.0)
def test_fanofactor_quantity(self):
self.assertEqual(statistics.fanofactor(self.test_quantity),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_quantity_same(self):
lst = [self.test_quantity[0]] * 3
self.assertEqual(statistics.fanofactor(lst), 0.0)
def test_fanofactor_list(self):
self.assertEqual(statistics.fanofactor(self.test_list),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_list_same(self):
lst = [self.test_list[0]] * 3
self.assertEqual(statistics.fanofactor(lst), 0.0)
def test_fanofactor_different_durations(self):
st1 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4 * pq.s)
st2 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4.5 * pq.s)
self.assertWarns(UserWarning, statistics.fanofactor, (st1, st2))
def test_fanofactor_wrong_type(self):
# warn_tolerance is not a quantity
st1 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4 * pq.s)
self.assertRaises(TypeError, statistics.fanofactor, [st1],
warn_tolerance=1e-4)
class LVTestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 0.971826029994
def test_lv_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(statistics.lv(seq), self.target, decimal=9)
def test_lv_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(statistics.lv(seq), self.target, decimal=9)
def test_lv_with_list(self):
seq = self.test_seq
assert_array_almost_equal(statistics.lv(seq), self.target, decimal=9)
def test_lv_raise_error(self):
seq = self.test_seq
self.assertRaises(ValueError, statistics.lv, [])
self.assertRaises(ValueError, statistics.lv, 1)
self.assertRaises(ValueError, statistics.lv, np.array([seq, seq]))
def test_2short_spike_train(self):
seq = [1]
with self.assertWarns(UserWarning):
"""
Catches UserWarning: Input size is too small. Please provide
an input with more than 1 entry.
"""
self.assertTrue(math.isnan(statistics.lv(seq, with_nan=True)))
class LVRTestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 2.1845363464753134
def test_lvr_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(statistics.lvr(seq), self.target, decimal=9)
def test_lvr_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(statistics.lvr(seq), self.target, decimal=9)
def test_lvr_with_list(self):
seq = self.test_seq
assert_array_almost_equal(statistics.lvr(seq), self.target, decimal=9)
def test_lvr_raise_error(self):
seq = self.test_seq
self.assertRaises(ValueError, statistics.lvr, [])
self.assertRaises(ValueError, statistics.lvr, 1)
self.assertRaises(ValueError, statistics.lvr, np.array([seq, seq]))
self.assertRaises(ValueError, statistics.lvr, seq, -1 * pq.ms)
def test_lvr_refractoriness_kwarg(self):
seq = np.array(self.test_seq)
with self.assertWarns(UserWarning):
assert_array_almost_equal(statistics.lvr(seq, R=5),
self.target, decimal=9)
def test_2short_spike_train(self):
seq = [1]
with self.assertWarns(UserWarning):
"""
Catches UserWarning: Input size is too small. Please provide
an input with more than 1 entry.
"""
self.assertTrue(math.isnan(statistics.lvr(seq, with_nan=True)))
class CV2TestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 1.0022235296529176
def test_cv2_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
def test_cv2_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
def test_cv2_with_list(self):
seq = self.test_seq
assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
def test_cv2_raise_error(self):
seq = self.test_seq
self.assertRaises(ValueError, statistics.cv2, [])
self.assertRaises(ValueError, statistics.cv2, 1)
self.assertRaises(ValueError, statistics.cv2, np.array([seq, seq]))
class InstantaneousRateTest(unittest.TestCase):
def setUp(self):
# create a poisson spike train:
self.st_tr = (0, 20.0) # seconds
self.st_dur = self.st_tr[1] - self.st_tr[0] # seconds
self.st_margin = 5.0 # seconds
self.st_rate = 10.0 # Hertz
np.random.seed(19)
duration_effective = self.st_dur - 2 * self.st_margin
st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
spike_train = sorted(
np.random.rand(st_num_spikes) *
duration_effective +
self.st_margin)
# convert spike train into neo objects
self.spike_train = neo.SpikeTrain(spike_train * pq.s,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s)
# generation of a multiply used specific kernel
self.kernel = kernels.TriangularKernel(sigma=0.03 * pq.s)
def test_instantaneous_rate_and_warnings(self):
st = self.spike_train
sampling_period = 0.01 * pq.s
with self.assertWarns(UserWarning):
# Catches warning: The width of the kernel was adjusted to a
# minimally allowed width.
inst_rate = statistics.instantaneous_rate(
st, sampling_period, self.kernel, cutoff=0)
self.assertIsInstance(inst_rate, neo.core.AnalogSignal)
self.assertEqual(
inst_rate.sampling_period.simplified, sampling_period.simplified)
self.assertEqual(inst_rate.simplified.units, pq.Hz)
self.assertEqual(inst_rate.t_stop.simplified, st.t_stop.simplified)
self.assertEqual(inst_rate.t_start.simplified, st.t_start.simplified)
def test_error_instantaneous_rate(self):
self.assertRaises(
TypeError, statistics.instantaneous_rate,
spiketrains=[1, 2, 3] * pq.s,
sampling_period=0.01 * pq.ms, kernel=self.kernel)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=[1, 2, 3],
sampling_period=0.01 * pq.ms, kernel=self.kernel)
st = self.spike_train
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01, kernel=self.kernel)
self.assertRaises(
ValueError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=-0.01 * pq.ms, kernel=self.kernel)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel='NONE')
self.assertRaises(TypeError, statistics.instantaneous_rate,
self.spike_train,
sampling_period=0.01 * pq.s, kernel='wrong_string',
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s,
trim=False)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel,
cutoff=20 * pq.ms)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel, t_start=2)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel,
t_stop=20 * pq.mV)
self.assertRaises(
TypeError, statistics.instantaneous_rate, spiketrains=st,
sampling_period=0.01 * pq.ms, kernel=self.kernel, trim=1)
# cannot estimate a kernel for a list of spiketrains
self.assertRaises(ValueError, statistics.instantaneous_rate,
spiketrains=[st, st], sampling_period=10 * pq.ms,
kernel='auto')
def test_rate_estimation_consistency(self):
"""
        Test whether the integral of the rate estimation curve is (almost)
        equal to the number of spikes in the spike train.
"""
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
kernels_available = [kern_cls(sigma=0.5 * pq.s, invert=False)
for kern_cls in kernel_types]
kernels_available.append('auto')
kernel_resolution = 0.01 * pq.s
for kernel in kernels_available:
for center_kernel in (False, True):
rate_estimate = statistics.instantaneous_rate(
self.spike_train,
sampling_period=kernel_resolution,
kernel=kernel,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s,
trim=False,
center_kernel=center_kernel)
num_spikes = len(self.spike_train)
auc = spint.cumtrapz(
y=rate_estimate.magnitude.squeeze(),
x=rate_estimate.times.simplified.magnitude)[-1]
self.assertAlmostEqual(num_spikes, auc,
delta=0.01 * num_spikes)
def test_not_center_kernel(self):
# issue 107
t_spike = 1 * pq.s
st = neo.SpikeTrain([t_spike], t_start=0 * pq.s, t_stop=2 * pq.s,
units=pq.s)
kernel = kernels.AlphaKernel(200 * pq.ms)
fs = 0.1 * pq.ms
rate = statistics.instantaneous_rate(st,
sampling_period=fs,
kernel=kernel,
center_kernel=False)
rate_nonzero_index = np.nonzero(rate > 1e-6)[0]
# where the mass is concentrated
rate_mass = rate.times.rescale(t_spike.units)[rate_nonzero_index]
all_after_response_onset = (rate_mass >= t_spike).all()
self.assertTrue(all_after_response_onset)
def test_regression_288(self):
np.random.seed(9)
sampling_period = 200 * pq.ms
spiketrain = homogeneous_poisson_process(10 * pq.Hz,
t_start=0 * pq.s,
t_stop=10 * pq.s)
kernel = kernels.AlphaKernel(sigma=5 * pq.ms, invert=True)
# check that instantaneous_rate "works" for kernels with small sigma
# without triggering an incomprehensible error
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=sampling_period,
kernel=kernel)
self.assertEqual(
len(rate), (spiketrain.t_stop / sampling_period).simplified.item())
def test_small_kernel_sigma(self):
# Test that the instantaneous rate is overestimated when
# kernel.sigma << sampling_period and center_kernel is True.
        # The setup matches the one from issue 288.
np.random.seed(9)
sampling_period = 200 * pq.ms
sigma = 5 * pq.ms
rate_expected = 10 * pq.Hz
spiketrain = homogeneous_poisson_process(rate_expected,
t_start=0 * pq.s,
t_stop=10 * pq.s)
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
for kern_cls, invert in itertools.product(kernel_types, (False, True)):
kernel = kern_cls(sigma=sigma, invert=invert)
with self.subTest(kernel=kernel):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel, center_kernel=True)
self.assertGreater(rate.mean(), rate_expected)
def test_spikes_on_edges(self):
        # this test demonstrates that trimming (convolve 'valid' mode)
        # removes the edge spikes, underestimating the true firing rate, and
        # thus cannot reconstruct the number of spikes in a spiketrain
        # (see test_rate_estimation_consistency)
cutoff = 5
sampling_period = 0.01 * pq.s
t_spikes = np.array([-cutoff, cutoff]) * pq.s
spiketrain = neo.SpikeTrain(t_spikes, t_start=t_spikes[0],
t_stop=t_spikes[-1])
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
kernels_available = [kern_cls(sigma=1 * pq.s, invert=False)
for kern_cls in kernel_types]
for kernel in kernels_available:
for center_kernel in (False, True):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel,
cutoff=cutoff, trim=True,
center_kernel=center_kernel)
assert_array_almost_equal(rate.magnitude, 0, decimal=3)
def test_trim_as_convolve_mode(self):
cutoff = 5
sampling_period = 0.01 * pq.s
t_spikes = np.linspace(-cutoff, cutoff, num=(2 * cutoff + 1)) * pq.s
spiketrain = neo.SpikeTrain(t_spikes, t_start=t_spikes[0],
t_stop=t_spikes[-1])
kernel = kernels.RectangularKernel(sigma=1 * pq.s)
assert cutoff > kernel.min_cutoff, "Choose larger cutoff"
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.SymmetricKernel) and
kern_cls is not kernels.SymmetricKernel)
kernels_symmetric = [kern_cls(sigma=1 * pq.s, invert=False)
for kern_cls in kernel_types]
for kernel in kernels_symmetric:
for trim in (False, True):
rate_centered = statistics.instantaneous_rate(
spiketrain, sampling_period=sampling_period,
kernel=kernel, cutoff=cutoff, trim=trim)
rate_convolve = statistics.instantaneous_rate(
spiketrain, sampling_period=sampling_period,
kernel=kernel, cutoff=cutoff, trim=trim,
center_kernel=False)
assert_array_almost_equal(rate_centered, rate_convolve)
def test_instantaneous_rate_spiketrainlist(self):
np.random.seed(19)
duration_effective = self.st_dur - 2 * self.st_margin
st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
spike_train2 = sorted(
np.random.rand(st_num_spikes) *
duration_effective +
self.st_margin)
spike_train2 = neo.SpikeTrain(spike_train2 * pq.s,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s)
st_rate_1 = statistics.instantaneous_rate(self.spike_train,
sampling_period=0.01 * pq.s,
kernel=self.kernel)
st_rate_2 = statistics.instantaneous_rate(spike_train2,
sampling_period=0.01 * pq.s,
kernel=self.kernel)
combined_rate = statistics.instantaneous_rate(
[self.spike_train, spike_train2],
sampling_period=0.01 * pq.s,
kernel=self.kernel)
rate_concat = np.c_[st_rate_1, st_rate_2]
# 'time_vector.dtype' in instantaneous_rate() is changed from float64
# to float32 which results in 3e-6 abs difference
assert_array_almost_equal(combined_rate.magnitude,
rate_concat.magnitude, decimal=5)
# Regression test for #144
def test_instantaneous_rate_regression_144(self):
        # The following spike train contains spikes that are so close to each
        # other that the optimal kernel cannot be determined. Therefore, the
        # function should raise a ValueError.
st = neo.SpikeTrain([2.12, 2.13, 2.15] * pq.s, t_stop=10 * pq.s)
self.assertRaises(ValueError, statistics.instantaneous_rate, st,
1 * pq.ms)
# Regression test for #245
def test_instantaneous_rate_regression_245(self):
# This test makes sure that the correct kernel width is chosen when
# selecting 'auto' as kernel
spiketrain = neo.SpikeTrain(
range(1, 30) * pq.ms, t_start=0 * pq.ms, t_stop=30 * pq.ms)
        # This is the correct procedure to obtain the kernel: first,
        # optimal_kernel_bandwidth() retrieves the bandwidth of an optimal
        # Gaussian kernel in terms of its standard deviation sigma; this value
        # is then used directly to construct the GaussianKernel
kernel_width_sigma = statistics.optimal_kernel_bandwidth(
spiketrain.magnitude, times=None, bootstrap=False)['optw']
kernel = kernels.GaussianKernel(kernel_width_sigma * spiketrain.units)
result_target = statistics.instantaneous_rate(
spiketrain, 10 * pq.ms, kernel=kernel)
        # Here, we check whether the 'auto' argument leads to the same result.
        # In the regression, it was incorrectly assumed that the sskernel()
        # function returns the actual bandwidth of the kernel, which is defined
        # as approximately bandwidth = sigma * 5.5 = sigma * (2 * 2.75):
        # the factor 2.0 connects the kernel width with its half width, and
        # the factor 2.75 connects the half width of a Gaussian distribution
        # (containing roughly 99% of its probability mass) with its standard
        # deviation.
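        # A worked instance of that arithmetic (illustrative numbers only):
        # for sigma = 10 ms, the half width is ~ 2.75 * 10 ms = 27.5 ms and
        # the full width is ~ 2 * 27.5 ms = 55 ms, i.e. bandwidth ~ sigma * 5.5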
result_automatic = statistics.instantaneous_rate(
spiketrain, 10 * pq.ms, kernel='auto')
assert_array_almost_equal(result_target, result_automatic)
def test_instantaneous_rate_grows_with_sampling_period(self):
np.random.seed(0)
rate_expected = 10 * pq.Hz
spiketrain = homogeneous_poisson_process(rate=rate_expected,
t_stop=10 * pq.s)
kernel = kernels.GaussianKernel(sigma=100 * pq.ms)
rates_mean = []
for sampling_period in np.linspace(1, 1000, num=10) * pq.ms:
with self.subTest(sampling_period=sampling_period):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel)
rates_mean.append(rate.mean())
# rate means are greater or equal the expected rate
assert_array_less(rate_expected, rates_mean)
# check sorted
self.assertTrue(np.all(rates_mean[:-1] < rates_mean[1:]))
# Regression test for #360
def test_centered_at_origin(self):
# Skip RectangularKernel because it doesn't have a strong peak.
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.SymmetricKernel) and
kern_cls not in (kernels.SymmetricKernel,
kernels.RectangularKernel))
kernels_symmetric = [kern_cls(sigma=50 * pq.ms, invert=False)
for kern_cls in kernel_types]
# first part: a symmetric spiketrain with a symmetric kernel
spiketrain = neo.SpikeTrain(np.array([-0.0001, 0, 0.0001]) * pq.s,
t_start=-1,
t_stop=1)
for kernel in kernels_symmetric:
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=20 * pq.ms,
kernel=kernel)
# the peak time must be centered at origin
self.assertEqual(rate.times[np.argmax(rate)], 0)
# second part: a single spike at t=0
periods = [2 ** c for c in range(-3, 6)]
for period in periods:
with self.subTest(period=period):
spiketrain = neo.SpikeTrain(np.array([0]) * pq.s,
t_start=-period * 10 * pq.ms,
t_stop=period * 10 * pq.ms)
for kernel in kernels_symmetric:
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=period * pq.ms,
kernel=kernel)
self.assertEqual(rate.times[np.argmax(rate)], 0)
def test_annotations(self):
spiketrain = neo.SpikeTrain([1, 2], t_stop=2 * pq.s, units=pq.s)
kernel = kernels.AlphaKernel(sigma=100 * pq.ms)
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=10 * pq.ms,
kernel=kernel)
kernel_annotation = dict(type=type(kernel).__name__,
sigma=str(kernel.sigma),
invert=kernel.invert)
self.assertIn('kernel', rate.annotations)
self.assertEqual(rate.annotations['kernel'], kernel_annotation)
class TimeHistogramTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [self.spiketrain_a, self.spiketrain_b]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_time_histogram(self):
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_binary(self):
targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
binary=True)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_tstart_tstop(self):
# Start, stop short range
targ = np.array([2, 1])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
t_start=5 * pq.s,
t_stop=7 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
# Test without t_stop
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains,
bin_size=1 * pq.s,
t_start=0 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
# Test without t_start
histogram = statistics.time_histogram(self.spiketrains,
bin_size=1 * pq.s,
t_stop=10 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_output(self):
# Normalization mean
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
output='mean')
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
# Normalization rate
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
output='rate')
assert_array_equal(histogram.view(pq.Quantity),
targ.reshape(targ.size, 1) * 1 / pq.s)
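        # as exercised above: output='mean' divides the counts by the number
        # of spiketrains (here 2), and output='rate' further divides by the
        # bin width (here 1 s)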
# Normalization unspecified, raises error
self.assertRaises(ValueError, statistics.time_histogram,
self.spiketrains,
bin_size=pq.s, output=' ')
def test_annotations(self):
np.random.seed(1)
spiketrains = [homogeneous_poisson_process(
rate=10 * pq.Hz, t_stop=10 * pq.s) for _ in range(10)]
for output in ("counts", "mean", "rate"):
histogram = statistics.time_histogram(spiketrains,
bin_size=3 * pq.ms,
output=output)
self.assertIn('normalization', histogram.annotations)
self.assertEqual(histogram.annotations['normalization'], output)
class ComplexityPdfTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_c = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [
self.spiketrain_a, self.spiketrain_b, self.spiketrain_c]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_complexity_pdf(self):
targ = np.array([0.92, 0.01, 0.01, 0.06])
complexity = statistics.complexity_pdf(self.spiketrains,
bin_size=0.1 * pq.s)
assert_array_equal(targ, complexity.magnitude[:, 0])
self.assertEqual(1, complexity.magnitude[:, 0].sum())
self.assertEqual(len(self.spiketrains) + 1, len(complexity))
self.assertIsInstance(complexity, neo.AnalogSignal)
self.assertEqual(complexity.units, 1 * pq.dimensionless)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpgill86/ephyviewer",
"score": 2
} |
#### File: ephyviewer/datasource/spikeinterfacesources.py
```python
from .sourcebase import BaseDataSource
import sys
import logging
import numpy as np
try:
from distutils.version import LooseVersion as V
import spikeinterface
    if V(spikeinterface.__version__) >= V('0.90.1'):
HAVE_SI = True
else:
HAVE_SI = False
except ImportError:
HAVE_SI = False
from .signals import BaseAnalogSignalSource
from .spikes import BaseSpikeSource
class SpikeInterfaceRecordingSource(BaseAnalogSignalSource):
def __init__(self, recording, segment_index=0):
BaseAnalogSignalSource.__init__(self)
self.recording = recording
self.segment_index = segment_index
self._nb_channel = self.recording.get_num_channels()
self.sample_rate = self.recording.get_sampling_frequency()
@property
def nb_channel(self):
return self._nb_channel
def get_channel_name(self, chan=0):
return str(self.recording.channel_ids[chan])
@property
def t_start(self):
return 0.
@property
def t_stop(self):
return self.get_length() / self.sample_rate
def get_length(self):
return self.recording.get_num_samples(segment_index=self.segment_index)
def get_shape(self):
return (self.get_length(),self.nb_channel)
def get_chunk(self, i_start=None, i_stop=None):
traces = self.recording.get_traces(segment_index=self.segment_index, start_frame=i_start, end_frame=i_stop)
return traces
def time_to_index(self, t):
return int(t * self.sample_rate)
def index_to_time(self, ind):
return float(ind / self.sample_rate)
class SpikeInterfaceSortingSource(BaseSpikeSource):
def __init__(self, sorting, segment_index=0):
BaseSpikeSource.__init__(self)
self.sorting = sorting
self.segment_index = segment_index
        # TODO: derive t_stop from the sorting (e.g. the recording duration) instead of hard-coding it
self._t_stop = 10.
@property
def nb_channel(self):
return len(self.sorting.unit_ids)
def get_channel_name(self, chan=0):
return str(self.sorting.unit_ids[chan])
@property
def t_start(self):
return 0.
@property
def t_stop(self):
return self._t_stop
def get_chunk(self, chan=0, i_start=None, i_stop=None):
unit_id = self.sorting.unit_ids[chan]
spike_frames = self.sorting.get_unit_spike_train(unit_id,
segment_index=self.segment_index, start_frame=i_start, end_frame=i_stop)
        # get_unit_spike_train() already restricts the spikes to [i_start, i_stop),
        # so no additional slicing by frame index is needed here
spike_times = spike_frames / self.sorting.get_sampling_frequency()
return spike_times
def get_chunk_by_time(self, chan=0, t_start=None, t_stop=None):
spike_times = self.get_chunk(chan=chan)
i1 = np.searchsorted(spike_times, t_start, side='left')
i2 = np.searchsorted(spike_times, t_stop, side='left')
sl = slice(i1, i2+1)
return spike_times[sl]
```
#### File: ephyviewer/tests/test_spikeinterfaceviewer.py
```python
import ephyviewer
import numpy as np
import os
def test_spikeinterface_viewer(interactive=False):
import spikeinterface as si
from spikeinterface.core.testing_tools import generate_recording, generate_sorting
recording = generate_recording()
sig_source = ephyviewer.SpikeInterfaceRecordingSource(recording=recording)
sorting = generate_sorting()
spike_source = ephyviewer.SpikeInterfaceSortingSource(sorting=sorting)
app = ephyviewer.mkQApp()
win = ephyviewer.MainViewer(debug=True, show_auto_scale=True)
view = ephyviewer.TraceViewer(source=sig_source, name='signals')
win.add_view(view)
view = ephyviewer.SpikeTrainViewer(source=spike_source, name='spikes')
win.add_view(view)
if interactive:
win.show()
app.exec_()
else:
# close thread properly
win.close()
if __name__=='__main__':
test_spikeinterface_viewer(interactive=True)
``` |
{
"source": "jpgill86/ModularSlug",
"score": 3
} |
#### File: ModularSlug/ModularSlug/aplysia.py
```python
import numpy as np
from .neuralmodels import GenericNeuralModel
from .musclemodels import GenericMuscleModel
class Aplysia:
def __init__(self, n_steps, neural_model, muscle_model):
self._neural_model = None
self._muscle_model = None
self.n_steps = n_steps
self.neural_model = neural_model
self.muscle_model = muscle_model
@property
def neural_model(self):
# defining this property is required for creating its custom setter
return self._neural_model
@neural_model.setter
def neural_model(self, obj):
# ensure neural_model can only be set to an instance of a subclass of GenericNeuralModel or to None
if not isinstance(obj, (GenericNeuralModel, type(None))):
raise TypeError('tried to set neural_model to an incompatible '
f'object type: {obj.__class__.__name__}')
# if there is an old neural_model, first unset its parent
if self._neural_model is not None:
self._neural_model.parent = None
self._neural_model = obj
self._neural_model.parent = self
@property
def muscle_model(self):
# defining this property is required for creating its custom setter
return self._muscle_model
@muscle_model.setter
def muscle_model(self, obj):
# ensure muscle_model can only be set to an instance of a subclass of GenericMuscleModel or to None
if not isinstance(obj, (GenericMuscleModel, type(None))):
raise TypeError('tried to set muscle_model to an incompatible '
f'object type: {obj.__class__.__name__}')
# if there is an old muscle_model, first unset its parent
if self._muscle_model is not None:
self._muscle_model.parent = None
self._muscle_model = obj
self._muscle_model.parent = self
@property
def neural_outputs(self):
'''The current values of the neural outputs'''
return self.neural_model.neural_outputs
@property
def muscle_outputs(self):
'''The current values of the muscle outputs'''
return self.muscle_model.muscle_outputs
def run(self):
for i in range(self.n_steps):
self.neural_model.step()
self.muscle_model.step()
def summarize(self):
print('-- Neural Model --')
print('params:', self.neural_model.params)
print('x:', self.neural_model.x)
print()
print('-- Muscle Model --')
print('params:', self.muscle_model.params)
print('x:', self.muscle_model.x)
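# A minimal usage sketch (not part of the original module); MyNeuralModel and
# MyMuscleModel are hypothetical subclasses of GenericNeuralModel and
# GenericMuscleModel:
#
#     slug = Aplysia(n_steps=100,
#                    neural_model=MyNeuralModel(),
#                    muscle_model=MyMuscleModel())
#     slug.run()        # steps both models n_steps times
#     slug.summarize()  # prints params and state of each model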
``` |
{
"source": "jpgill86/neurotic",
"score": 3
} |
#### File: neurotic/datasets/ftpauth.py
```python
import ftplib
import urllib
from urllib.request import FTPHandler, HTTPPasswordMgr
from urllib.parse import splitport, splituser, unquote
import logging
logger = logging.getLogger(__name__)
class FTPBasicAuthHandler(FTPHandler):
"""
This subclass of :class:`urllib.request.FTPHandler` implements basic
authentication management for FTP connections. Like
:class:`urllib.request.HTTPBasicAuthHandler`, this handler for FTP connections
has a password manager that it checks for login credentials before
connecting to a server.
This subclass also ensures that file size is included in the response
header, which can fail for some FTP servers if the original
:class:`FTPHandler <urllib.request.FTPHandler>` is used.
This handler can be installed globally in a Python session so that calls
to :func:`urllib.request.urlopen('ftp://...') <urllib.request.urlopen>`
will use it automatically:
>>> handler = FTPBasicAuthHandler()
>>> handler.add_password(None, uri, user, passwd) # realm must be None
>>> opener = urllib.request.build_opener(handler)
>>> urllib.request.install_opener(opener)
"""
def __init__(self, password_mgr=None):
"""
Initialize a new FTPBasicAuthHandler.
"""
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
return super().__init__()
def ftp_open(self, req):
"""
When ftp requests are made using this handler, this function gets
called at some point, and it in turn calls the ``connect_ftp`` method.
In this subclass's reimplementation of ``connect_ftp``, the FQDN of
the request's host is needed for looking up login credentials in the
password manager. However, by the time ``connect_ftp`` is called, that
information has been stripped away, and the host argument passed to
``connect_ftp`` contains only the host's IP address instead of the
FQDN. This reimplementation of ``ftp_open``, which is little more than
a copy-and-paste from the superclass's implementation, captures the
original host FQDN before it is replaced with the IP address and saves
it for later use.
This reimplementation also ensures that the file size appears in the
response header by querying for it directly. For some FTP servers the
original implementation should handle this (``retrlen`` should contain
the file size). However, for others this can fail silently due to the
server response not matching an anticipated regular expression.
"""
import sys
import email
import socket
from urllib.error import URLError
from urllib.parse import splitattr, splitpasswd, splitvalue
from urllib.response import addinfourl
####################################################
# COPIED FROM FTPHandler.ftp_open (PYTHON 3.6.6) #
# WITH JUST A FEW ADDITIONS #
####################################################
import ftplib
import mimetypes
host = req.host
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
############################################
# DIFFERENT FROM FTPHandler.ftp_open
# save the host FQDN for later
self.last_req_host = host
############################################
try:
host = socket.gethostbyname(host)
except OSError as msg:
raise URLError(msg)
path, attrs = splitattr(req.selector)
dirs = path.split('/')
dirs = list(map(unquote, dirs))
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
############################################
# DIFFERENT FROM FTPHandler.ftp_open
size = fw.ftp.size(file)
############################################
fp, retrlen = fw.retrfile(file, type)
headers = ""
mtype = mimetypes.guess_type(req.full_url)[0]
if mtype:
headers += "Content-type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-length: %d\n" % retrlen
############################################
# DIFFERENT FROM FTPHandler.ftp_open
elif size is not None and size >= 0:
headers += "Content-length: %d\n" % size
############################################
headers = email.message_from_string(headers)
return addinfourl(fp, headers, req.full_url)
except ftplib.all_errors as exp:
exc = URLError('ftp error: %r' % exp)
raise exc.with_traceback(sys.exc_info()[2])
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
"""
Unless authentication credentials are provided in the request URL
(``ftp://user:passwd@host/path``), this method will be called with
empty user and passwd arguments. In that case, this reimplementation of
``connect_ftp`` checks the password manager for credentials matching
the ``last_req_host`` (the host argument will be an IP address instead
of the FQDN and is thereby useless if the password manager is keyed by
FQDN).
"""
if not user and not passwd:
user, passwd = self.passwd.find_user_password(None, self.last_req_host)
return super().connect_ftp(user, passwd, host, port, dirs, timeout)
def setup_ftpauth():
"""
Install :class:`neurotic.datasets.ftpauth.FTPBasicAuthHandler` as the
global default FTP handler.
Note that :func:`urllib.request.install_opener` used here will remove all
other non-default handlers installed in a different opener, such as an
:class:`urllib.request.HTTPBasicAuthHandler`.
"""
handler = FTPBasicAuthHandler()
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)
return handler
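# A minimal usage sketch (assumes a hypothetical FTP server and credentials):
#
#     handler = setup_ftpauth()  # installs the handler globally
#     handler.add_password(None, 'ftp.example.org', 'user', 'passwd')  # realm must be None
#     with urllib.request.urlopen('ftp://ftp.example.org/path/file.dat') as f:
#         data = f.read()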
```
#### File: neurotic/datasets/metadata.py
```python
import os
import urllib
import yaml
from packaging.specifiers import SpecifierSet
from packaging import version
from .. import __version__
from ..datasets.download import download
import logging
logger = logging.getLogger(__name__)
class MetadataSelector():
"""
A class for managing metadata.
A metadata file can be specified at initialization, in which case it is
read immediately. The file contents are stored as a dictionary in
:attr:`all_metadata`.
>>> metadata = MetadataSelector(file='metadata.yml')
>>> print(metadata.all_metadata)
File contents can be reloaded after they have been changed, or after
changing ``file``, using the :meth:`load` method.
>>> metadata = MetadataSelector()
>>> metadata.file = 'metadata.yml'
>>> metadata.load()
A particular metadata set contained within the file can be selected at
initialization with ``initial_selection`` or later using the :meth:`select`
method. After making a selection, the selected metadata set is accessible
at :meth:`metadata.selected_metadata <selected_metadata>`, e.g.
>>> metadata = MetadataSelector(file='metadata.yml')
>>> metadata.select('Data Set 5')
>>> print(metadata.selected_metadata['data_file'])
A compact indexing method is implemented that allows the selected metadata
set to be accessed directly, e.g.
>>> print(metadata['data_file'])
This allows the MetadataSelector to be passed to functions expecting a
simple dictionary corresponding to a single metadata set, and the selected
metadata set will be used automatically.
Files associated with the selected metadata set can be downloaded
individually or all together, e.g.
>>> metadata.download('video_file')
or
>>> metadata.download_all_data_files()
The absolute path to a local file or the full URL to a remote file
associated with the selected metadata set can be resolved with the
:meth:`abs_path` and :meth:`abs_url` methods, e.g.
>>> print(metadata.abs_path('data_file'))
>>> print(metadata.abs_url('data_file'))
"""
def __init__(self, file=None, local_data_root=None, remote_data_root=None, initial_selection=None):
"""
Initialize a new MetadataSelector.
"""
self.file = file
self.local_data_root = local_data_root
self.remote_data_root = remote_data_root
self.all_metadata = None #: A dictionary containing the entire file contents, set by :meth:`load`.
self._selection = None
if self.file is not None:
self.load()
if initial_selection is not None:
self.select(initial_selection)
def load(self):
"""
Read the metadata file.
"""
self.all_metadata = _load_metadata(self.file, self.local_data_root, self.remote_data_root)
if self._selection not in self.all_metadata:
self._selection = None
def select(self, selection):
"""
Select a metadata set.
"""
if self.all_metadata is None:
logger.error('Load metadata before selecting')
elif selection not in self.all_metadata:
raise ValueError('{} was not found in {}'.format(selection, self.file))
else:
self._selection = selection
@property
def keys(self):
"""
The available metadata keys.
"""
if self.all_metadata is None:
return None
else:
return list(self.all_metadata.keys())
@property
def selected_metadata(self):
"""
The access point for the selected metadata set.
"""
if self._selection is None:
return None
else:
return self.all_metadata[self._selection]
def abs_path(self, file):
"""
Convert the relative path of ``file`` to an absolute path using
``data_dir``.
"""
return _abs_path(self.selected_metadata, file)
def abs_url(self, file):
"""
Convert the relative path of ``file`` to a full URL using
``remote_data_dir``.
"""
return _abs_url(self.selected_metadata, file)
def download(self, file, **kwargs):
"""
Download a file associated with the selected metadata set.
See :func:`neurotic.datasets.download.download` for possible keyword
arguments.
"""
_download_file(self.selected_metadata, file, **kwargs)
def download_all_data_files(self, **kwargs):
"""
Download all files associated with the selected metadata set.
See :func:`neurotic.datasets.download.download` for possible keyword
arguments.
"""
_download_all_data_files(self.selected_metadata, **kwargs)
def __iter__(self, *args):
if self.selected_metadata is None:
logger.error('No metadata set is selected. Use the select() method first.')
else:
return self.selected_metadata.__iter__(*args)
def __getitem__(self, *args):
if self.selected_metadata is None:
logger.error('No metadata set is selected. Use the select() method first.')
else:
return self.selected_metadata.__getitem__(*args)
def __setitem__(self, *args):
if self.selected_metadata is None:
logger.error('No metadata set is selected. Use the select() method first.')
else:
return self.selected_metadata.__setitem__(*args)
def __delitem__(self, *args):
if self.selected_metadata is None:
logger.error('No metadata set is selected. Use the select() method first.')
else:
return self.selected_metadata.__delitem__(*args)
def get(self, *args):
if self.selected_metadata is None:
logger.error('No metadata set is selected. Use the select() method first.')
else:
return self.selected_metadata.get(*args)
def setdefault(self, *args):
if self.selected_metadata is None:
logger.error('No metadata set is selected. Use the select() method first.')
else:
return self.selected_metadata.setdefault(*args)
def _load_metadata(file = 'metadata.yml', local_data_root = None, remote_data_root = None):
"""
Read metadata stored in a YAML file about available collections of data,
assign defaults to missing parameters, and resolve absolute paths for local
data stores and full URLs for remote data stores.
``local_data_root`` must be an absolute or relative path on the local
system, or None. If it is a relative path, it is relative to the current
working directory. If it is None, its value defaults to the directory
containing ``file``.
``remote_data_root`` must be a full URL or None. If it is None, ``file``
will be checked for a fallback value. "remote_data_root" may be provided in
the YAML file under the reserved keyword "neurotic_config". Any non-None
value passed to this function will override the value provided in the file.
If both are unspecified, it is assumed that no remote data store exists.
The "data_dir" property is optional for every data set in ``file`` and
specifies the directory on the local system containing the data files.
"data_dir" may be an absolute path or a relative path with respect to
``local_data_root``. If it is a relative path, it will be converted to an
absolute path.
The "remote_data_dir" property is optional for every data set in ``file``
and specifies the directory on a remote server containing the data files.
"remote_data_dir" may be a full URL or a relative path with respect to
``remote_data_root``. If it is a relative path, it will be converted to a
full URL.
File paths (e.g., "data_file", "video_file") are assumed to be relative to
both "data_dir" and "remote_data_dir" (i.e., the local and remote data
stores mirror one another) and can be resolved with ``_abs_path`` or
``_abs_url``.
"""
assert file is not None, 'metadata file must be specified'
assert os.path.exists(file), 'metadata file "{}" cannot be found'.format(file)
# local_data_root defaults to the directory containing file
if local_data_root is None:
local_data_root = os.path.dirname(file)
# load metadata from file
with open(file) as f:
md = yaml.safe_load(f)
# remove special entry "neurotic_config" from the dict if it exists
config = md.pop('neurotic_config', None)
if isinstance(config, dict):
# process global settings
neurotic_version = config.get('neurotic_version', None)
remote_data_root_from_file = config.get('remote_data_root', None)
else:
# use defaults for all global settings
neurotic_version = None
remote_data_root_from_file = None
# check neurotic version requirements
if neurotic_version is not None:
version_spec = SpecifierSet(str(neurotic_version), prereleases=True)
if version.parse(__version__) not in version_spec:
logger.warning('the installed version of neurotic '
f'({__version__}) does not meet version '
'requirements specified in the metadata file: '
f'{version_spec}')
# use remote_data_root passed to function preferentially
if remote_data_root is not None:
if not _is_url(remote_data_root):
raise ValueError('"remote_data_root" passed to function is not a full URL: "{}"'.format(remote_data_root))
else:
# use the value passed to the function
pass
elif remote_data_root_from_file is not None:
if not _is_url(remote_data_root_from_file):
raise ValueError('"remote_data_root" provided in file is not a full URL: "{}"'.format(remote_data_root_from_file))
else:
# use the value provided in the file
remote_data_root = remote_data_root_from_file
else:
# both potential sources of remote_data_root are None
pass
# iterate over all data sets
for key in md:
assert type(md[key]) is dict, 'File "{}" may be formatted incorrectly, especially beginning with entry "{}"'.format(file, key)
# fill in missing metadata with default values
defaults = _defaults_for_key(key)
for k in defaults:
md[key].setdefault(k, defaults[k])
# determine the absolute path of the local data directory
if md[key]['data_dir'] is not None:
# data_dir is either an absolute path already or is specified
# relative to local_data_root
if os.path.isabs(md[key]['data_dir']):
dir = md[key]['data_dir']
else:
dir = os.path.abspath(os.path.join(local_data_root, md[key]['data_dir']))
else:
# data_dir is a required property
raise ValueError('"data_dir" missing for "{}"'.format(key))
md[key]['data_dir'] = os.path.normpath(dir)
# determine the full URL to the remote data directory
if md[key]['remote_data_dir'] is not None:
# remote_data_dir is either a full URL already or is specified
# relative to remote_data_root
if _is_url(md[key]['remote_data_dir']):
url = md[key]['remote_data_dir']
elif _is_url(remote_data_root):
url = '/'.join([remote_data_root, md[key]['remote_data_dir']])
else:
url = None
else:
# there is no remote data store
url = None
md[key]['remote_data_dir'] = url
return md
def _defaults_for_key(key):
"""
Default values for metadata.
"""
defaults = {
# store the key with the metadata
'key': key,
# description of data set
'description': None,
# the path of the directory containing the data on the local system
# - this may be an absolute or relative path, but not None since data
# must be located locally
# - if it is a relative path, it will be interpreted by _load_metadata
# as relative to local_data_root and will be converted to an absolute
# path
'data_dir': '.',
# the path of the directory containing the data on a remote server
# - this may be a full URL or a relative path, or None if there exists
# no remote data store
# - if it is a relative path, it will be interpreted by _load_metadata
# as relative to remote_data_root and will be converted to a full URL
'remote_data_dir': None,
# the ephys data file
# - path relative to data_dir and remote_data_dir
'data_file': None,
# the name of a Neo IO class
# - this parameter is optional and exists for overriding the IO class
# determined automatically from the data file's extension
'io_class': None,
# arguments for the Neo IO class
# - e.g. for AsciiSignalIO, {'delimiter': ',', 'sampling_rate': 1000, 'units': 'mV'}
'io_args': None,
# a real-world start time for the data_file, which overrides the value
# that may be stored in the data_file
# - e.g. '2020-01-01 13:14:15'
'rec_datetime': None,
# digital filters to apply before analysis and plotting
# 0 <= highpass <= lowpass < sample_rate/2
# - e.g. [{'channel': 'Channel A', 'highpass': 0, 'lowpass': 50}, ...]
'filters': None,
# the annotations file
# - path relative to data_dir and remote_data_dir
'annotations_file': None,
# the epoch encoder file
# - path relative to data_dir and remote_data_dir
'epoch_encoder_file': None,
# list of labels for epoch encoder
'epoch_encoder_possible_labels': [],
# list of dicts giving name, channel, units, amplitude window, epoch window, color for each unit
# - e.g. [{'name': 'Unit X', 'channel': 'Channel A', 'units': 'uV', 'amplitude': [75, 150], 'epoch': 'Type 1', 'color': 'ff0000'}, ...]
'amplitude_discriminators': None,
# list of dicts giving name of a spiketrain, start and stop firing rate
# thresholds in Hz for each burst
# - 'spiketrain' is required and used to find the appropriate spike
# train by name, whereas 'name' is option and is used to name the
# Epoch generated by load_dataset, defaults to the spiketrain's name
# with ' burst' appended
# - e.g. [{'spiketrain': 'Unit X', 'name': 'Unit X burst', 'thresholds': [10, 8]}, ...]
'burst_detectors': None,
# the output file of a tridesclous spike sorting analysis
# - path relative to data_dir and remote_data_dir
'tridesclous_file': None,
# dict mapping spike ids to lists of channel indices
# - e.g. {0: ['Channel A'], 1: ['Channel A'], ...} to indicate clusters 0 and 1 are both on channel A
# - e.g. {0: ['Channel A', 'Channel B'], ...} to indicate cluster 0 is on both channels A and B
'tridesclous_channels': None,
# list of lists of spike ids specifying how to merge clusters
# - e.g. [[0, 1, 2], [3, 4]] to merge clusters 1 and 2 into 0, merge 4 into 3, and discard all others
# - e.g. [[0], [1], [2], [3], [4]] to keep clusters 0-4 as they are and discard all others
'tridesclous_merge': None,
# list of dicts giving name of a spiketrain, name of a kernel to be
# convolved with the spiketrain, and the sigma parameter of the kernel
# in seconds
# - e.g. [{'name': 'Unit X', 'kernel': 'CausalAlphaKernel', 'sigma': 0.5}, ...]
'firing_rates': None,
# the video file
# - path relative to data_dir and remote_data_dir
'video_file': None,
# the video time offset in seconds
'video_offset': None,
# list of ordered pairs specifying times and durations that the ephys
# data collection was paused while the video continued recording
# - e.g. [[60, 10], [120, 10], [240, 10]] for three 10-second pauses
# occurring at times 1:00, 2:00, 3:00 according to the daq, which
# would correspond to times 1:00, 2:10, 3:20 according to the video
'video_jumps': None,
# a factor to multiply the video frame rate by to correct for async
# error that accumulates over time at a constant rate
# - a value less than 1 will decrease the frame rate and shift video
# events to later times
# - a value greater than 1 will increase the frame rate and shift video
# events to earlier times
# - a good estimate can be obtained by taking the amount of time
# between two events in the video and dividing by the amount of time
# between the same two events in the data
'video_rate_correction': None,
# list the channels in the order they should be plotted
# - e.g. [{'channel': 'Channel A', 'ylabel': 'My channel', 'ylim': [-120, 120], 'units': 'uV', 'color': 'ff0000'}, ...]
'plots': None,
# amount of time in seconds to plot initially
't_width': 40,
# proportion of the plot range, between 0 and 1, to the left of the
# current time (in the "past"), indicated by the position of the
# vertical line
'past_fraction': 0.3,
# factor to subtract from each signal before rectification when
# calculating rectified area under the curve (RAUC)
# - can be None, 'mean', or 'median'
'rauc_baseline': None,
# width of bins in seconds used for calculating rectified area under
# the curve (RAUC) for signals
'rauc_bin_duration': None,
}
return defaults
def _abs_path(metadata, file):
"""
Convert the relative path of file to an absolute path using data_dir
"""
if metadata.get(file, None) is None:
return None
else:
return os.path.normpath(os.path.join(metadata.get('data_dir', '.'), metadata[file]))
def _abs_url(metadata, file):
"""
Convert the relative path of file to a full URL using remote_data_dir
"""
if metadata.get(file, None) is None or metadata.get('remote_data_dir', None) is None:
return None
else:
file_path = metadata[file].replace(os.sep, '/')
url = '/'.join([metadata['remote_data_dir'], file_path])
# url = urllib.parse.unquote(url)
# url = urllib.parse.quote(url, safe='/:')
return url
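# Illustrative sketch (hypothetical values, not from the original source): given
#   metadata = {'data_dir': '/home/user/data',
#               'remote_data_dir': 'https://example.org/data',
#               'video_file': 'experiment1/session.mp4'}
# these helpers would resolve roughly to
#   _abs_path(metadata, 'video_file')  ->  '/home/user/data/experiment1/session.mp4'
#   _abs_url(metadata, 'video_file')   ->  'https://example.org/data/experiment1/session.mp4'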
def _is_url(url):
"""
Returns True only if the parameter begins with the form <scheme>://<netloc>
"""
try:
result = urllib.parse.urlparse(url)
return all([result.scheme, result.netloc])
except Exception:
return False
def _download_file(metadata, file, **kwargs):
"""
Download a file.
See :func:`neurotic.datasets.download.download` for possible keyword
arguments.
"""
if not _is_url(metadata.get('remote_data_dir', None)):
logger.error('metadata[remote_data_dir] is not a full URL')
return
if metadata.get(file, None):
# create directories if necessary
if not os.path.exists(os.path.dirname(_abs_path(metadata, file))):
os.makedirs(os.path.dirname(_abs_path(metadata, file)))
# download the file only if it does not already exist
download(_abs_url(metadata, file), _abs_path(metadata, file), **kwargs)
def _download_all_data_files(metadata, **kwargs):
"""
Download all files associated with metadata.
See :func:`neurotic.datasets.download.download` for possible keyword
arguments.
"""
if not _is_url(metadata.get('remote_data_dir', None)):
logger.error('metadata[remote_data_dir] is not a full URL')
return
for file in [k for k in metadata if k.endswith('_file')]:
_download_file(metadata, file, **kwargs)
logger.info('Downloads complete')
def _selector_labels(all_metadata):
"""
"""
# indicate presence of local data files with symbols
has_local_data = {}
for key, metadata in all_metadata.items():
filenames = [k for k in metadata if k.endswith('_file') and metadata[k] is not None]
files_exist = [os.path.exists(_abs_path(metadata, file)) for file in filenames]
if all(files_exist):
has_local_data[key] = '◆'
elif any(files_exist):
has_local_data[key] = '⬖'
else:
has_local_data[key] = '◇'
# indicate lack of video_offset with an exclamation point unless there is
# no video_file
has_video_offset = {}
for key, metadata in all_metadata.items():
if metadata.get('video_offset', None) is None and metadata.get('video_file', None) is not None:
has_video_offset[key] = '!'
else:
has_video_offset[key] = ' '
# create display text for the selector from keys and descriptions
longest_key_length = max([len(k) for k in all_metadata.keys()])
labels = [
has_local_data[k] +
has_video_offset[k] +
' ' +
k.ljust(longest_key_length + 4) +
str(all_metadata[k]['description']
if all_metadata[k]['description'] else '')
for k in all_metadata.keys()]
return labels
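# Illustrative sketch (hypothetical entry, not from the original source): for
#   all_metadata = {'exp1': {'description': 'First experiment', ...}}
# with all local files present and no video configured, the resulting label
# would look roughly like
#   '◆  exp1    First experiment'
# where the first symbol encodes local file availability and the second column
# flags a missing video_offset with '!'.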
```
#### File: neurotic/gui/config.py
```python
import re
import numpy as np
import pandas as pd
import quantities as pq
import neo
import ephyviewer
from ..datasets.metadata import _abs_path
from ..gui.epochencoder import NeuroticWritableEpochSource
import logging
logger = logging.getLogger(__name__)
# raise the threshold for PyAV messages printed to the console from
# warning to critical
logging.getLogger('libav').setLevel(logging.CRITICAL)
pq.mN = pq.UnitQuantity('millinewton', pq.N/1e3, symbol = 'mN'); # define millinewton
available_themes = ['light', 'dark', 'original', 'printer-friendly']
available_ui_scales = ['tiny', 'small', 'medium', 'large', 'huge']
class EphyviewerConfigurator():
"""
A class for launching ephyviewer for a dataset with configurable viewers.
At initialization, invalid viewers are automatically disabled (e.g., the
video viewer is disabled if ``video_file`` is not given in ``metadata``).
Viewers can be hidden or shown before launch using the built-in methods.
Valid viewer names are:
* ``traces``
* ``traces_rauc``
* ``freqs``
* ``spike_trains``
* ``traces_rates``
* ``epochs``
* ``epoch_encoder``
* ``video``
* ``event_list``
* ``data_frame``
:meth:`launch_ephyviewer` is provided for starting a new Qt app and
launching the ephyviewer main window all at once.
:meth:`create_ephyviewer_window` generates just the ephyviewer window
and should be used if there is already a Qt app running.
"""
def __init__(self, metadata, blk, lazy = False):
"""
Initialize a new EphyviewerConfigurator.
"""
self.metadata = metadata
self.blk = blk
self.lazy = lazy
self.viewer_settings = {
'traces': {'show': True, 'disabled': False, 'reason': ''},
'traces_rauc': {'show': False, 'disabled': False, 'reason': ''},
'freqs': {'show': False, 'disabled': True, 'reason': 'Disabled because feature is experimental and computationally expensive'},
'spike_trains': {'show': True, 'disabled': False, 'reason': ''},
'traces_rates': {'show': True, 'disabled': False, 'reason': ''},
'epochs': {'show': True, 'disabled': False, 'reason': ''},
'epoch_encoder': {'show': True, 'disabled': False, 'reason': ''},
'video': {'show': True, 'disabled': False, 'reason': ''},
'event_list': {'show': True, 'disabled': False, 'reason': ''},
'data_frame': {'show': False, 'disabled': False, 'reason': ''},
}
self.themes = {}
self.themes['original'] = None # special keyword to use ephyviewer's defaults
self.themes['light'] = {
'cmap': 'Dark2', # dark traces
'background_color': '#F0F0F0', # light gray
'vline_color': '#000000AA', # transparent black
'label_fill_color': '#DDDDDDDD', # transparent light gray
}
self.themes['dark'] = {
'cmap': 'Accent', # light traces
'background_color': 'k', # black
'vline_color': '#FFFFFFAA', # transparent white
'label_fill_color': '#222222DD', # transparent dark gray
}
self.themes['printer-friendly'] = {
'cmap': 'Dark2', # dark traces
'background_color': '#FFFFFF', # white
'vline_color': '#000000AA', # transparent black
'label_fill_color': '#DDDDDDDD', # transparent light gray
}
# hide and disable viewers for which inputs are missing
if not self.blk.segments[0].analogsignals:
self.viewer_settings['traces']['show'] = False
self.viewer_settings['traces']['disabled'] = True
self.viewer_settings['traces']['reason'] = 'Cannot enable because there are no signals'
if not [sig.annotations['rauc_sig'] for sig in blk.segments[0].analogsignals if 'rauc_sig' in sig.annotations]:
self.viewer_settings['traces_rauc']['show'] = False
self.viewer_settings['traces_rauc']['disabled'] = True
self.viewer_settings['traces_rauc']['reason'] = 'Cannot enable because there are no RAUC signals'
if not self.blk.segments[0].spiketrains:
self.viewer_settings['spike_trains']['show'] = False
self.viewer_settings['spike_trains']['disabled'] = True
self.viewer_settings['spike_trains']['reason'] = 'Cannot enable because there are no spike trains'
if not [st.annotations['firing_rate_sig'] for st in blk.segments[0].spiketrains if 'firing_rate_sig' in st.annotations]:
self.viewer_settings['traces_rates']['show'] = False
self.viewer_settings['traces_rates']['disabled'] = True
self.viewer_settings['traces_rates']['reason'] = 'Cannot enable because there are no firing rate signals'
if not [ep for ep in self.blk.segments[0].epochs if ep.size > 0 and '(from epoch encoder file)' not in ep.labels]:
self.viewer_settings['epochs']['show'] = False
self.viewer_settings['epochs']['disabled'] = True
self.viewer_settings['epochs']['reason'] = 'Cannot enable because there are no read-only epochs'
self.viewer_settings['data_frame']['show'] = False
self.viewer_settings['data_frame']['disabled'] = True
self.viewer_settings['data_frame']['reason'] = 'Cannot enable because there are no read-only epochs'
if not [ev for ev in self.blk.segments[0].events if ev.size > 0]:
self.viewer_settings['event_list']['show'] = False
self.viewer_settings['event_list']['disabled'] = True
self.viewer_settings['event_list']['reason'] = 'Cannot enable because there are no read-only epochs or events'
if not self.metadata.get('epoch_encoder_file', None):
self.viewer_settings['epoch_encoder']['show'] = False
self.viewer_settings['epoch_encoder']['disabled'] = True
self.viewer_settings['epoch_encoder']['reason'] = 'Cannot enable because epoch_encoder_file is not set'
if not ephyviewer.HAVE_AV:
self.viewer_settings['video']['show'] = False
self.viewer_settings['video']['disabled'] = True
self.viewer_settings['video']['reason'] = 'Cannot enable because PyAV is not installed'
if not self.metadata.get('video_file', None):
self.viewer_settings['video']['show'] = False
self.viewer_settings['video']['disabled'] = True
self.viewer_settings['video']['reason'] = 'Cannot enable because video_file is not set'
if not ephyviewer.HAVE_AV and self.metadata.get('video_file', None):
logger.warning('Ignoring video_file because PyAV is not installed')
# warn about potential video sync problems
if metadata.get('video_file', None) is not None and metadata.get('video_offset', None) is None:
logger.warning('Your video will likely be out of sync with your '
'data because video_offset is unspecified! '
'Consider adding it to your metadata.')
if metadata.get('video_file', None) is not None and metadata.get('video_jumps', None) is None:
approx_video_jumps = _estimate_video_jump_times(blk)
if approx_video_jumps:
approx_video_jumps_recommendation = ' video_jumps:\n' + \
'\n'.join([f' - [{t}, {dur}]' for t, dur in approx_video_jumps])
logger.warning('It seems that AxoGraph was paused at least '
'once during data acquisition, but video_jumps '
'is unspecified. This will cause your video '
'and data to get out of sync. Consider adding '
'the following to your metadata:'
f'\n{approx_video_jumps_recommendation}\n'
'Each ordered pair specifies the timing of a '
'pause and approximately how long the pause '
'lasted in seconds. The pause durations are '
'only rough estimates +/- a second! You should '
'refine them by inspecting the video to make '
'sure your sync is accurate!')
def is_enabled(self, name):
"""
Return whether the viewer ``name`` is enabled.
"""
if name in self.viewer_settings:
return not self.viewer_settings[name]['disabled']
else:
return False
def enable(self, name):
"""
Enable the viewer ``name``.
"""
if name in self.viewer_settings:
self.viewer_settings[name]['disabled'] = False
def disable(self, name):
"""
Disable the viewer ``name``.
"""
if name in self.viewer_settings:
self.viewer_settings[name]['disabled'] = True
def is_shown(self, name):
"""
Return whether the viewer ``name`` is shown.
"""
if name in self.viewer_settings:
return self.viewer_settings[name]['show']
else:
return False
def show(self, name):
"""
Show the viewer ``name``.
"""
if name in self.viewer_settings:
if not self.viewer_settings[name]['disabled']:
self.viewer_settings[name]['show'] = True
else:
logger.warning(self.viewer_settings[name]['reason'])
else:
logger.error(f'"{name}" is not a viewer in viewer_settings')
def hide(self, name):
"""
Hide the viewer ``name``.
"""
if name in self.viewer_settings:
self.viewer_settings[name]['show'] = False
else:
logger.error(f'"{name}" is not a viewer in viewer_settings')
def show_all(self):
"""
Show all viewers.
"""
for name in self.viewer_settings:
if not self.viewer_settings[name]['disabled']:
self.show(name)
def hide_all(self):
"""
Hide all viewers.
"""
for name in self.viewer_settings:
self.hide(name)
def launch_ephyviewer(self, theme='light', ui_scale='medium', support_increased_line_width=False, show_datetime=False, datetime_format='%Y-%m-%d %H:%M:%S'):
"""
Start a Qt app and create an ephyviewer window.
"""
app = ephyviewer.mkQApp()
win = self.create_ephyviewer_window(theme=theme, ui_scale=ui_scale, support_increased_line_width=support_increased_line_width, show_datetime=show_datetime, datetime_format=datetime_format)
win.show()
app.exec_()
def create_ephyviewer_window(self, theme='light', ui_scale='medium', support_increased_line_width=False, show_datetime=False, datetime_format='%Y-%m-%d %H:%M:%S'):
"""
Load data into each ephyviewer viewer and return the main window.
"""
########################################################################
# DATA SOURCES
seg = self.blk.segments[0]
sigs = seg.analogsignals
sources = {'signal': [], 'epoch': [], 'event': [], 'spike': []}
sources['epoch'].append(ephyviewer.NeoEpochSource(seg.epochs))
sources['event'].append(ephyviewer.NeoEventSource(seg.events))
sources['spike'].append(ephyviewer.NeoSpikeTrainSource(seg.spiketrains))
# filter epoch encoder data out of read-only epoch and event lists
# so they are not presented multiple times, and remove empty channels
sources['epoch'][0].all = [ep for ep in sources['epoch'][0].all if len(ep['time']) > 0 and '(from epoch encoder file)' not in ep['label']]
sources['event'][0].all = [ev for ev in sources['event'][0].all if len(ev['time']) > 0 and '(from epoch encoder file)' not in ev['label']]
########################################################################
# WINDOW
# optionally display the real-world date and time
if show_datetime and self.blk.rec_datetime is not None:
show_label_datetime = True
datetime0 = self.blk.rec_datetime
else:
show_label_datetime = False
datetime0 = None
# create a window that will be populated with viewers
win = ephyviewer.MainViewer(
# settings_name='test2', # remember settings (e.g. xsize) between sessions
show_auto_scale = True,
global_xsize_zoom = True,
play_interval = 0.1, # refresh period in seconds
show_label_datetime = show_label_datetime,
datetime0 = datetime0,
datetime_format = datetime_format,
)
win.setWindowTitle(self.metadata.get('key', 'neurotic'))
win.setWindowIcon(ephyviewer.QT.QIcon(':/neurotic-logo-150.png'))
# delete on close so that memory and file resources are released
win.setAttribute(ephyviewer.QT.WA_DeleteOnClose, True)
# determine ui_scale parameters
default_font_size = ephyviewer.QT.QFont().pointSize()
ui_scales = {
'tiny': {'app_font_size': default_font_size-4, 'channel_label_size': default_font_size-4, 'scatter_size': 4},
'small': {'app_font_size': default_font_size-2, 'channel_label_size': default_font_size-2, 'scatter_size': 6},
'medium': {'app_font_size': default_font_size, 'channel_label_size': default_font_size, 'scatter_size': 8},
'large': {'app_font_size': default_font_size+4, 'channel_label_size': default_font_size+4, 'scatter_size': 10},
'huge': {'app_font_size': default_font_size+8, 'channel_label_size': default_font_size+8, 'scatter_size': 12},
}
# set the font size for most text
font = win.font()
font.setPointSize(ui_scales[ui_scale]['app_font_size'])
win.setFont(font)
########################################################################
# COLORS
# colors for signals given explicitly in plots, used for raw signals
# and RAUC
sig_colors = {}
if self.metadata.get('plots', None) is not None:
sig_colors = {p['channel']: p['color'] for p in self.metadata['plots'] if 'color' in p}
# colors for units given explicitly in amplitude_discriminators, used
# for scatter markers, spike trains, and burst epochs
unit_colors = {}
if self.metadata.get('amplitude_discriminators', None) is not None:
unit_colors = {d['name']: d['color'] for d in self.metadata['amplitude_discriminators'] if 'color' in d}
########################################################################
# TRACES WITH SCATTER PLOTS
_set_defaults_for_plots(self.metadata, self.blk)
if self.is_shown('traces') and self.metadata['plots']:
lazy_load_signals = False
if self.lazy:
# check whether blk contains a rawio, which would have been put
# there by _read_data_file if lazy=True and if Neo has a RawIO
# that supports the file format
if hasattr(self.blk, 'rawio') and isinstance(self.blk.rawio, neo.rawio.baserawio.BaseRawIO):
io = self.blk.rawio
if io.support_lazy:
lazy_load_signals = True
if lazy_load_signals:
# Intan-specific tricks
if isinstance(io, neo.io.IntanIO):
# dirty trick for getting ungrouped channels into a single source
io.header['signal_channels']['group_id'] = 0
# prepare to append custom channel names stored in data file to ylabels
custom_channel_names = {c['native_channel_name']: c['custom_channel_name'] for c in io._ordered_channels}
channel_indexes = [p['index'] for p in self.metadata['plots']]
sources['signal'].append(ephyviewer.AnalogSignalFromNeoRawIOSource(io, channel_indexes))
# modify loaded channel names to use ylabels
for i, p in enumerate(self.metadata['plots']):
ylabel = p['ylabel']
# Intan-specific tricks
if isinstance(io, neo.io.IntanIO):
# append custom channel names stored in data file to ylabels
if custom_channel_names[p['channel']] != ylabel:
ylabel += ' ({})'.format(custom_channel_names[p['channel']])
sources['signal'][-1].channels['name'][i] = ylabel
# TODO support scatter from tridesclous_file
else: # lazy==False or io.support_lazy==False
# even if lazy==True, signals do not need to be loaded now
# because load_dataset will have already taken care of that and
# saved them in blk when it detected that Neo did not support
# lazy loading for the given file reader
# prepare scatter plot parameters
plotNameToIndex = {p['channel']:i for i, p in enumerate(self.metadata['plots'])}
all_times = sigs[0].times.rescale('s').magnitude # assuming all AnalogSignals have the same sampling rate and start time
spike_indices = {}
spike_channels = {}
for st in seg.spiketrains:
if 'channels' in st.annotations:
c = []
for channel in st.annotations['channels']:
index = plotNameToIndex.get(channel, None)
if index is None:
logger.warning('Spike train {} will not be plotted on channel {} because that channel isn\'t being plotted'.format(st.name, channel))
else:
c.append(index)
if c:
spike_channels[st.name] = c
spike_indices[st.name] = np.where(np.isin(all_times, st.times.magnitude))[0]
sources['signal'].append(ephyviewer.AnalogSignalSourceWithScatter(
signals = np.concatenate([sigs[p['index']].magnitude for p in self.metadata['plots']], axis = 1),
sample_rate = sigs[0].sampling_rate.rescale('Hz'), # assuming all AnalogSignals have the same sampling rate
t_start = sigs[0].t_start.rescale('s'), # assuming all AnalogSignals start at the same time
channel_names = [p['ylabel'] for p in self.metadata['plots']],
scatter_indexes = spike_indices,
scatter_channels = spike_channels,
))
# instead of passing colors into AnalogSignalSourceWithScatter
# constructor with scatter_colors, first let the constructor
# choose reasonable default colors (done above), and only then
# override colors for units that have been explicitly set in
# amplitude_discriminators (done here)
sources['signal'][-1].scatter_colors.update(unit_colors)
# useOpenGL=True eliminates the extremely poor performance associated
# with TraceViewer's line_width > 1.0, but it also degrades overall
# performance somewhat and is reportedly unstable
if support_increased_line_width:
useOpenGL = True
line_width = 2.0
else:
useOpenGL = None
line_width = 1.0
trace_view = ephyviewer.TraceViewer(source = sources['signal'][0], name = 'Signals', useOpenGL = useOpenGL)
win.add_view(trace_view)
trace_view.params['xratio'] = self.metadata.get('past_fraction', 0.3)
trace_view.params['auto_scale_factor'] = 0.02
trace_view.params['scatter_size'] = ui_scales[ui_scale]['scatter_size']
trace_view.params['line_width'] = line_width
trace_view.params['label_size'] = ui_scales[ui_scale]['channel_label_size']
trace_view.params['display_labels'] = True
trace_view.params['antialias'] = True
# set the theme
if theme != 'original':
trace_view.params['background_color'] = self.themes[theme]['background_color']
trace_view.params['vline_color'] = self.themes[theme]['vline_color']
trace_view.params['label_fill_color'] = self.themes[theme]['label_fill_color']
trace_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap'])
trace_view.params_controller.on_automatic_color()
# set explicitly assigned signal colors
for name, color in sig_colors.items():
try:
index = [p['channel'] for p in self.metadata['plots']].index(name)
trace_view.by_channel_params['ch{}'.format(index), 'color'] = color
except ValueError:
# sig name may not have been found in the trace list
pass
# adjust plot range, scaling, and positioning
trace_view.params['ylim_max'] = 0.5
trace_view.params['ylim_min'] = -trace_view.source.nb_channel + 0.5
trace_view.params['scale_mode'] = 'by_channel'
for i, p in enumerate(self.metadata['plots']):
sig_units = sigs[p['index']].units
units_ratio = (pq.Quantity(1, p['units'])/pq.Quantity(1, sig_units)).simplified
assert units_ratio.dimensionality.string == 'dimensionless', f"Channel \"{p['channel']}\" has units {sig_units} and cannot be converted to {p['units']}"
ylim_span = np.ptp(p['ylim'] * units_ratio.magnitude)
ylim_center = np.mean(p['ylim'] * units_ratio.magnitude)
trace_view.by_channel_params['ch{}'.format(i), 'gain'] = 1/ylim_span # rescale [ymin,ymax] across a unit
trace_view.by_channel_params['ch{}'.format(i), 'offset'] = -i - ylim_center/ylim_span # center [ymin,ymax] within the unit
########################################################################
# TRACES OF RAUC
if self.is_shown('traces_rauc'):
rauc_sigs = [sig.annotations['rauc_sig'] for sig in sigs if 'rauc_sig' in sig.annotations]
if rauc_sigs:
sig_rauc_source = ephyviewer.InMemoryAnalogSignalSource(
signals = np.concatenate([rauc_sigs[p['index']].as_array() for p in self.metadata['plots']], axis = 1),
sample_rate = rauc_sigs[0].sampling_rate.rescale('Hz'), # assuming all AnalogSignals have the same sampling rate
t_start = rauc_sigs[0].t_start.rescale('s'), # assuming all AnalogSignals start at the same time
channel_names = [p['ylabel'] + ' RAUC' for p in self.metadata['plots']],
)
sources['signal_rauc'] = [sig_rauc_source]
trace_rauc_view = ephyviewer.TraceViewer(source = sources['signal_rauc'][0], name = 'Integrated signals (RAUC)')
if 'Signals' in win.viewers:
win.add_view(trace_rauc_view, tabify_with = 'Signals')
else:
win.add_view(trace_rauc_view)
trace_rauc_view.params['xratio'] = self.metadata.get('past_fraction', 0.3)
trace_rauc_view.params['line_width'] = line_width
trace_rauc_view.params['label_size'] = ui_scales[ui_scale]['channel_label_size']
trace_rauc_view.params['display_labels'] = True
trace_rauc_view.params['display_offset'] = True
trace_rauc_view.params['antialias'] = True
# set the theme
if theme != 'original':
trace_rauc_view.params['background_color'] = self.themes[theme]['background_color']
trace_rauc_view.params['vline_color'] = self.themes[theme]['vline_color']
trace_rauc_view.params['label_fill_color'] = self.themes[theme]['label_fill_color']
trace_rauc_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap'])
trace_rauc_view.params_controller.on_automatic_color()
# set explicitly assigned signal colors
for name, color in sig_colors.items():
try:
index = [p['channel'] for p in self.metadata['plots']].index(name)
trace_rauc_view.by_channel_params['ch{}'.format(index), 'color'] = color
except ValueError:
# sig name may not have been found in the rauc trace list
pass
# adjust plot range
trace_rauc_view.params['ylim_max'] = 0.5
trace_rauc_view.params['ylim_min'] = -trace_rauc_view.source.nb_channel + 0.5
trace_rauc_view.params['scale_mode'] = 'by_channel'
for i, p in enumerate(self.metadata['plots']):
ylim_span = np.median(rauc_sigs[p['index']].magnitude) * 10
ylim_center = ylim_span / 2
trace_rauc_view.by_channel_params['ch{}'.format(i), 'gain'] = 1/ylim_span # rescale [ymin,ymax] across a unit
trace_rauc_view.by_channel_params['ch{}'.format(i), 'offset'] = -i - ylim_center/ylim_span # center [ymin,ymax] within the unit
########################################################################
# FREQUENCY (EXPERIMENTAL AND COMPUTATIONALLY EXPENSIVE!)
if self.is_shown('freqs'):
freq_view = ephyviewer.TimeFreqViewer(source = trace_view.source, name = 'Time-Frequency')
freq_view.params['xratio'] = self.metadata.get('past_fraction', 0.3)
freq_view.params['scale_mode'] = 'by_channel'
freq_view.params['nb_column'] = 1
freq_view.params['colormap'] = 'gray'
freq_view.params.param('timefreq')['deltafreq'] = 100
freq_view.params.param('timefreq')['f_start'] = 1
freq_view.params.param('timefreq')['f_stop'] = 1500
freq_view.by_channel_params['ch0', 'visible'] = False
freq_view.by_channel_params['ch1', 'visible'] = True
freq_view.by_channel_params['ch2', 'visible'] = True
freq_view.by_channel_params['ch3', 'visible'] = True
freq_view.by_channel_params['ch4', 'visible'] = False
# freq_view.params.param('timefreq')['normalisation'] = 1.5
freq_view.by_channel_params['ch1', 'clim'] = 3
freq_view.by_channel_params['ch2', 'clim'] = 5
freq_view.by_channel_params['ch3', 'clim'] = 10
if 'Signals' in win.viewers:
win.add_view(freq_view, tabify_with = 'Signals')
elif 'Integrated signals (RAUC)' in win.viewers:
win.add_view(freq_view, tabify_with = 'Integrated signals (RAUC)')
else:
win.add_view(freq_view)
########################################################################
# SPIKE TRAINS
if self.is_shown('spike_trains') and sources['spike'][0].nb_channel > 0:
spike_train_view = ephyviewer.SpikeTrainViewer(source = sources['spike'][0], name = 'Spike trains')
win.add_view(spike_train_view)
# set the theme
if theme != 'original':
spike_train_view.params['background_color'] = self.themes[theme]['background_color']
spike_train_view.params['vline_color'] = self.themes[theme]['vline_color']
spike_train_view.params['label_fill_color'] = self.themes[theme]['label_fill_color']
spike_train_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap'])
spike_train_view.params_controller.on_automatic_color()
# set explicitly assigned unit colors
for name, color in unit_colors.items():
try:
index = [st.name for st in seg.spiketrains].index(name)
spike_train_view.by_channel_params['ch{}'.format(index), 'color'] = color
except ValueError:
# unit name may not have been found in the spike train list
pass
spike_train_view.params['xratio'] = self.metadata.get('past_fraction', 0.3)
spike_train_view.params['label_size'] = ui_scales[ui_scale]['channel_label_size']
########################################################################
# TRACES OF FIRING RATES
if self.is_shown('traces_rates'):
firing_rate_sigs = [st.annotations['firing_rate_sig'] for st in seg.spiketrains if 'firing_rate_sig' in st.annotations]
if firing_rate_sigs:
sig_rates_source = ephyviewer.InMemoryAnalogSignalSource(
signals = np.concatenate([sig.as_array() for sig in firing_rate_sigs], axis = 1),
sample_rate = firing_rate_sigs[0].sampling_rate.rescale('Hz'), # assuming all AnalogSignals have the same sampling rate
t_start = firing_rate_sigs[0].t_start.rescale('s'), # assuming all AnalogSignals start at the same time
channel_names = [sig.name for sig in firing_rate_sigs],
)
sources['signal_rates'] = [sig_rates_source]
trace_rates_view = ephyviewer.TraceViewer(source = sources['signal_rates'][0], name = 'Firing rates')
if 'Spike trains' in win.viewers:
win.add_view(trace_rates_view, tabify_with = 'Spike trains')
else:
win.add_view(trace_rates_view)
trace_rates_view.params['xratio'] = self.metadata.get('past_fraction', 0.3)
trace_rates_view.params['line_width'] = line_width
trace_rates_view.params['label_size'] = ui_scales[ui_scale]['channel_label_size']
trace_rates_view.params['display_labels'] = True
trace_rates_view.params['display_offset'] = True
trace_rates_view.params['antialias'] = True
# set the theme
if theme != 'original':
trace_rates_view.params['background_color'] = self.themes[theme]['background_color']
trace_rates_view.params['vline_color'] = self.themes[theme]['vline_color']
trace_rates_view.params['label_fill_color'] = self.themes[theme]['label_fill_color']
trace_rates_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap'])
trace_rates_view.params_controller.on_automatic_color()
# set explicitly assigned firing rate sig colors
for name, color in unit_colors.items():
try:
index = [sig.name for sig in firing_rate_sigs].index(name)
trace_rates_view.by_channel_params['ch{}'.format(index), 'color'] = color
except ValueError:
# unit name may not have been found in the firing rate sig list
pass
# adjust plot range
trace_rates_view.params['ylim_max'] = 0.5
trace_rates_view.params['ylim_min'] = -trace_rates_view.source.nb_channel + 0.5
trace_rates_view.params['scale_mode'] = 'by_channel'
for i, sig in enumerate(firing_rate_sigs):
ylim_span = 10
ylim_center = ylim_span / 2
trace_rates_view.by_channel_params['ch{}'.format(i), 'gain'] = 1/ylim_span # rescale [ymin,ymax] across a unit
trace_rates_view.by_channel_params['ch{}'.format(i), 'offset'] = -i - ylim_center/ylim_span # center [ymin,ymax] within the unit
########################################################################
# EPOCHS
if self.is_shown('epochs') and sources['epoch'][0].nb_channel > 0:
epoch_view = ephyviewer.EpochViewer(source = sources['epoch'][0], name = 'Epochs')
win.add_view(epoch_view)
# set the theme
if theme != 'original':
epoch_view.params['background_color'] = self.themes[theme]['background_color']
epoch_view.params['vline_color'] = self.themes[theme]['vline_color']
epoch_view.params['label_fill_color'] = self.themes[theme]['label_fill_color']
epoch_view.params_controller.combo_cmap.setCurrentText(self.themes[theme]['cmap'])
epoch_view.params_controller.on_automatic_color()
# set explicitly assigned unit colors
for name, color in unit_colors.items():
try:
index = [ep['name'] for ep in sources['epoch'][0].all].index(name + ' burst')
epoch_view.by_channel_params['ch{}'.format(index), 'color'] = color
except ValueError:
# unit burst name may not have been found in the epoch list
pass
epoch_view.params['xratio'] = self.metadata.get('past_fraction', 0.3)
epoch_view.params['label_size'] = ui_scales[ui_scale]['channel_label_size']
########################################################################
# EPOCH ENCODER
if self.is_shown('epoch_encoder') and self.metadata.get('epoch_encoder_file', None) is not None:
possible_labels = self.metadata.get('epoch_encoder_possible_labels', [])
# append labels found in the epoch encoder file but not in the
# epoch_encoder_possible_labels list, preserving the original
# ordering of epoch_encoder_possible_labels
labels_from_file = [ep.name for ep in seg.epochs if len(ep.times) > 0 and '(from epoch encoder file)' in ep.labels]
for label in labels_from_file:
if label not in possible_labels:
possible_labels.append(label)
if not possible_labels:
# an empty epoch encoder file and an empty list of possible
# labels were provided
logger.warning('Ignoring epoch_encoder_file because epoch_encoder_possible_labels was unspecified')
else:
writable_epoch_source = NeuroticWritableEpochSource(
filename = _abs_path(self.metadata, 'epoch_encoder_file'),
possible_labels = possible_labels,
)
epoch_encoder = ephyviewer.EpochEncoder(source = writable_epoch_source, name = 'Epoch encoder')
epoch_encoder.params['exclusive_mode'] = False
win.add_view(epoch_encoder)
# set the theme
if theme != 'original':
epoch_encoder.params['background_color'] = self.themes[theme]['background_color']
epoch_encoder.params['vline_color'] = self.themes[theme]['vline_color']
epoch_encoder.params['label_fill_color'] = self.themes[theme]['label_fill_color']
# TODO add support for combo_cmap
epoch_encoder.params['xratio'] = self.metadata.get('past_fraction', 0.3)
epoch_encoder.params['label_size'] = ui_scales[ui_scale]['channel_label_size']
########################################################################
# VIDEO
if self.is_shown('video') and self.metadata.get('video_file', None) is not None:
video_source = ephyviewer.MultiVideoFileSource(video_filenames = [_abs_path(self.metadata, 'video_file')])
# some video files are loaded with an incorrect start time, so
# reset video start to zero
video_source.t_stops[0] -= video_source.t_starts[0]
video_source.t_starts[0] = 0
# apply the video_offset
if self.metadata.get('video_offset', None) is not None:
video_source.t_starts[0] += self.metadata['video_offset']
video_source.t_stops[0] += self.metadata['video_offset']
# correct for videos that report frame rates that are too fast or
# too slow compared to the clock on the data acquisition system
if self.metadata.get('video_rate_correction', None) is not None:
video_source.rates[0] *= self.metadata['video_rate_correction']
if self.metadata.get('video_jumps', None) is not None:
# create an unmodified video_times vector with evenly spaced times
video_times = np.arange(video_source.nb_frames[0])/video_source.rates[0] + video_source.t_starts[0]
# insert repeating times at pause_start to fill pause_duration
# so that that section of the video is skipped over
for pause_start, pause_duration in self.metadata['video_jumps']:
pause_start_index = np.searchsorted(video_times, pause_start)
pause_fill = video_times[pause_start_index] * np.ones(int(np.round(pause_duration*video_source.rates[0])))
video_times = np.insert(video_times, pause_start_index, pause_fill)
video_times = video_times[:video_source.nb_frames[0]]
# add the modified video_times to the video_source
video_source.video_times = [video_times]
video_source.t_starts[0] = min(video_times)
video_source.t_stops[0] = max(video_times)
# update the source-level times from the modified file-level times
video_source._t_start = max(min(video_source.t_starts), 0)
video_source._t_stop = max(video_source.t_stops)
video_view = ephyviewer.VideoViewer(source = video_source, name = 'Video')
if theme != 'original':
video_view.graphiclayout.setBackground(self.themes[theme]['background_color'])
win.add_view(video_view, location = 'bottom', orientation = 'horizontal')
########################################################################
# EVENTS
if self.is_shown('event_list') and sources['event'][0].nb_channel > 0:
event_list = ephyviewer.EventList(source = sources['event'][0], name = 'Events')
if 'Video' in win.viewers:
win.add_view(event_list, split_with = 'Video')
else:
win.add_view(event_list, location = 'bottom', orientation = 'horizontal')
########################################################################
# DATAFRAME
annotations_dataframe = _neo_epoch_to_dataframe(seg.epochs, exclude_epoch_encoder_epochs=True)
if self.is_shown('data_frame') and len(annotations_dataframe) > 0:
data_frame_view = ephyviewer.DataFrameView(source = annotations_dataframe, name = 'Table')
if 'Events' in win.viewers:
win.add_view(data_frame_view, tabify_with = 'Events')
elif 'Video' in win.viewers:
win.add_view(data_frame_view, split_with = 'Video')
else:
win.add_view(data_frame_view, location = 'bottom', orientation = 'horizontal')
########################################################################
# FINAL TOUCHES
# select first tabs
for widget in win.children():
if isinstance(widget, ephyviewer.PyQt5.QtWidgets.QTabBar):
widget.setCurrentIndex(0)
# set amount of time shown initially
win.set_xsize(self.metadata.get('t_width', 40)) # seconds
return win
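# Illustrative usage sketch (assumes `metadata` and `blk` were produced elsewhere,
# e.g. by a dataset loader; they are not defined in this module):
#
#   configurator = EphyviewerConfigurator(metadata, blk, lazy=False)
#   configurator.hide('epoch_encoder')   # optionally hide a viewer before launch
#   configurator.show('traces_rauc')     # or show one that is hidden by default
#   configurator.launch_ephyviewer(theme='dark', ui_scale='large')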
def _set_defaults_for_plots(metadata, blk):
"""
Set defaults for plot channels, units, ylim, and ylabel if these
parameters are missing from ``metadata``.
"""
sigs = blk.segments[0].analogsignals
signalNameToIndex = {sig.name:i for i, sig in enumerate(sigs)}
if metadata.get('plots', None) is None:
metadata['plots'] = [{'channel': sig.name} for sig in sigs if _default_keep_signal(sig)]
plots = []
for plot in metadata['plots']:
index = signalNameToIndex.get(plot['channel'], None)
if index is None:
logger.warning('Removing plot with channel name "{}" because channel was not found in blk!'.format(plot['channel']))
else:
plot['index'] = index
plot.setdefault('units', _default_units(sigs[index]))
plot.setdefault('ylim', _default_ylim(sigs[index]))
plot.setdefault('ylabel', sigs[index].name)
plots.append(plot)
metadata['plots'] = plots
return metadata['plots']
def _default_keep_signal(sig):
"""
If ``plots`` is not specified in ``metadata``, this function determines
which channels are plotted by default.
"""
return (not sig.name.startswith('Analog Input #')) and (sig.name != 'Clock')
def _default_units(sig):
"""
If ``plots`` is missing ``units`` in ``metadata``, this function determines
default units.
"""
mapping = {
'V': 'uV', # convert voltages to microvolts
'N': 'mN', # convert forces to millinewtons
}
mapping = {pq.Quantity(1, k).dimensionality.simplified: v for k, v in mapping.items()}
return mapping.get(sig.units.dimensionality.simplified, sig.units)
def _default_ylim(sig):
"""
If ``plots`` is missing ``ylim`` in ``metadata``, this function determines
default plot ranges.
"""
mapping = {
'V': [-120, 120], # plot range for voltages
'N': [ -10, 300], # plot range for forces
}
mapping = {pq.Quantity(1, k).dimensionality.simplified: v for k, v in mapping.items()}
return mapping.get(sig.units.dimensionality.simplified, [-1, 1])
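# Illustrative sketch: for a hypothetical AnalogSignal recorded in volts,
# _default_units would return 'uV' and _default_ylim would return [-120, 120];
# a signal whose units these mappings do not recognize falls back to its own
# units and a [-1, 1] plot range.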
def _neo_epoch_to_dataframe(neo_epochs, exclude_epoch_encoder_epochs=False):
"""
Convert a list of Neo Epochs into a dataframe.
"""
dtypes = {
'Start (s)': float,
'End (s)': float,
'Duration (s)': float,
'Type': str,
'Label': str,
}
columns = list(dtypes.keys())
df = pd.DataFrame(columns=columns)
for ep in neo_epochs:
if len(ep.times) > 0 and (not exclude_epoch_encoder_epochs or '(from epoch encoder file)' not in ep.labels):
data = np.array([ep.times, ep.times+ep.durations, ep.durations, [ep.name]*len(ep), ep.labels]).T
df = pd.concat([df, pd.DataFrame(data, columns=columns)], ignore_index=True)
return df.astype(dtype=dtypes).sort_values(['Start (s)', 'End (s)', 'Type', 'Label']).reset_index(drop=True)
def _estimate_video_jump_times(blk):
"""
Estimate how much time to skip in video playback if AxoGraph was temporarily
paused during data acquisition while the video continued to record. Returns
a list of ordered pairs suitable for the video_jumps metadata parameter. The
returned stop times are exact, but pause durations have only whole-second
precision and should be manually refined by inspecting the video before
using.
"""
if 'notes' not in blk.annotations:
return None
else:
# obtain approximate start and stop times according to AxoGraph notes
note_start_times = np.array([0], dtype=int)
note_stop_times = np.array([], dtype=int)
for note_line in blk.annotations['notes'].split('\n'):
m = re.match(r'\d\d\d: Start at (\d*) s', note_line)
if m:
note_start_times = np.append(note_start_times, int(m.group(1)))
m = re.match(r'\d\d\d: Stop at (\d*) s', note_line)
if m:
note_stop_times = np.append(note_stop_times, int(m.group(1)))
# calculate approximate pause durations
pause_durations = note_start_times[1:]-note_stop_times[:-1]
# obtain exact stop times (AxoGraph time, not video time)
event_stop_times = np.array([], dtype=float)
ev = next((ev for ev in blk.segments[0].events if ev.name == 'AxoGraph Tags'), None)
if ev is not None:
for time, label in zip(ev.times, ev.labels):
if label == 'Stop':
event_stop_times = np.append(event_stop_times, time.magnitude)
# pair stop times with pause durations
video_jumps = []
for t, dur in zip(event_stop_times[:-1], pause_durations):
video_jumps.append([t, dur])
return video_jumps
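# Illustrative sketch (hypothetical numbers): if AxoGraph was paused twice, the
# returned value might look like [[60.0, 10], [182.5, 3]], i.e. pairs of the
# exact stop time in seconds and the approximate pause duration in whole
# seconds, ready to be pasted into the video_jumps metadata parameter.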
``` |
{
"source": "jpgill86/tdms2axg",
"score": 3
} |
#### File: tdms2axg/tdms2axg/scripts.py
```python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
import quantities as pq
import nptdms
import axographio
def tdms2axg(filename, force=False, verbose=True):
"""
Convert a TDMS file to an AxoGraph (AXGX) file
"""
if not os.path.isfile(filename):
raise ValueError('error: file not found: ' + filename)
if filename.split('.')[-1] != 'tdms':
raise ValueError('error: file does not appear to be a TDMS file (does not end in ".tdms"): ' + filename)
output_filename = '.'.join(filename.split('.')[:-1]) + '.axgx'
if os.path.isfile(output_filename) and not force:
raise OSError('error: output file exists, use force flag to overwrite: ' + output_filename)
# read the TDMS file
tdms_file = nptdms.TdmsFile.read(filename)
group = tdms_file.groups()[0] # use only first group
channels = group.channels()
if verbose:
print()
print('Properties of "' + filename + '":')
print()
for name, value in tdms_file.properties.items():
print(str(name) + ': ' + str(value))
print()
# collect the data for writing to AxoGraph format
names = ['Time (s)']
columns = [axographio.aslinearsequence(channels[0].time_track())] # assume time is same for all columns
for c in channels:
# try to determine channel units
unit_string = None
if 'unit_string' in c.properties:
u = c.properties['unit_string']
try:
# test whether the unit is recognizable
q = pq.Quantity(1, u)
unit_string = u
except LookupError:
try:
# try assuming its a simple compound unit (e.g., Nm = N*m)
u = '*'.join(u)
q = pq.Quantity(1, u)
unit_string = u
except LookupError:
# unit string cannot be interpreted
pass
if unit_string:
name = c.name + ' (' + unit_string + ')'
else:
name = c.name
names += [name]
columns += [c[:]]
if verbose:
print('Channel names:', names[1:])
print()
# write the AxoGraph file
if verbose:
print('Writing contents to AxoGraph file "' + output_filename + '"...')
f = axographio.file_contents(names, columns)
f.write(output_filename)
if verbose:
print('Done!')
return output_filename
def parse_args(argv):
description = """
A simple script for converting LabVIEW TDMS files to AxoGraph files.
The AxoGraph (AXGX) file is saved with the same name and in the same
directory as the TDMS file. By default, an existing AxoGraph file will not
be overwritten; use --force to overwrite it.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file',
help='the path to a TDMS file')
parser.add_argument('-f', '--force', action='store_true', dest='force',
help='overwrite the output file if it already exists')
parser.add_argument('-q', '--quiet', action='store_false', dest='verbose',
help='run silently')
args = parser.parse_args(argv[1:])
return args
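# Illustrative command-line usage (the file name is hypothetical, and it is
# assumed the package installs a console script that calls main()):
#   tdms2axg recording.tdms          # writes recording.axgx next to the input
#   tdms2axg --force recording.tdms  # overwrite an existing recording.axgx
#   tdms2axg --quiet recording.tdms  # run silently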
def main():
args = parse_args(sys.argv)
try:
tdms2axg(args.file, args.force, args.verbose)
except Exception as e:
# skip the traceback when run from the command line
print(e)
``` |
{
"source": "jpgil/logdelay",
"score": 3
} |
#### File: notebooks/2-structures/helpers.py
```python
import networkx as nx
import scipy
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import write_dot
from networkx.drawing.nx_agraph import to_agraph
from IPython.display import Image, display
import pygraphviz as pgv
def graph(G, color="#cccccc", filename="/tmp/simple.png"):
for u, v in G.edges:
if "weight" in G[u][v]:
G[u][v]["label"] = G[u][v]["weight"]
G.graph['graph']={'rankdir':'TD'}
G.graph['node']={'shape':'circle'}
G.graph['edge']={'arrowsize':'1.0'}  # default edge attributes (to_agraph reads the 'edge' key)
A = to_agraph(G)
A.layout('dot')
A.draw(filename)
display(Image(filename))
# Stolen from https://stackoverflow.com/questions/12836385/how-can-i-interleave-or-create-unique-permutations-of-two-strings-without-recur/12837695
# More doc at http://www.cs.utsa.edu/~wagner/knuth/fasc2b.pdf
class Interleave():
def __init__(self, A, B):
self.A = A
self.B = B
self.results = list(self.__interleave())
# from https://stackoverflow.com/a/104436/1561176
def __all_perms(self, elements):
if len(elements) <=1:
yield elements
else:
for perm in self.__all_perms(elements[1:]):
for i in range(len(elements)):
#nb elements[0:1] works in both string and list contexts
yield perm[:i] + elements[0:1] + perm[i:]
def __sequences(self):
return list( sorted( set(
["".join(x) for x in self.__all_perms(['a'] * len(self.A) + ['b'] * len(self.B))] ) ) )
def __interleave(self):
for sequence in self.__sequences():
result = ""
a = 0
b = 0
for item in sequence:
if item == 'a':
result+=self.A[a]
a+=1
else:
result+=self.B[b]
b+=1
yield result
def __str__(self):
return str(self.results)
def __repr__(self):
return repr(self.results)
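# Quick illustration of Interleave, derived from the logic above:
#   Interleave('ab', 'C').results  ->  ['abC', 'aCb', 'Cab']
# i.e. every merge of the two strings that preserves the internal order of each.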
# # Function to calculate the cliques and GRAPH in one single line.
# def nice_graph( weighted_paths ):
# def append_path(G, path, weight):
# previous = path[0]
# for node in path[1:]:
# edges.append( (previous, node, {"weight":weight} ) )
# previous = node
# G.add_edges_from(edges)
# # weighted_paths = clique_discovery.infer_paths_from_traces( T )
# print(weighted_paths)
# found_paths = {}
# edges = []
# G = nx.DiGraph()
# # Check serial: only disjoint paths of the same length
# for w in sorted(weighted_paths, reverse=True):
# for path_w in weighted_paths[w]:
# if w not in found_paths.keys():
# found_paths[w] = []
# if all( [ not set(path_w).intersection(set(z)) for z in found_paths[w] ] ):
# append_path( G, path_w, w )
# weights = { (str(u), str(v)): G[u][v]['weight'] for u,v in G.edges() }
# pos = nx.spring_layout(G)
# plt.rcParams['figure.figsize'] = [14, 6]
# plt.subplot(111)
# nx.draw_networkx (G, pos, width=1, node_color="#cccccc", with_labels=True, connectionstyle='arc3, rad=0.5' )
# nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=weights)
# plt.show()
def created_auxiliary_grap( weighted_paths ):
def append_path(G, path, weight):
edges = []
previous = path[0]
for node in path[1:]:
edges.append( (previous, node, {"weight":weight} ) )
previous = node
G.add_edges_from(edges)
G = nx.DiGraph()
# Check serial: only disjoint paths of the same length
for w in sorted(weighted_paths, reverse=True):
for path_w in weighted_paths[w]:
append_path( G, path_w, w )
return G
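# Illustrative sketch of the expected input format (hypothetical values):
#   weighted_paths = {3: [['a', 'b', 'c']], 2: [['a', 'd']]}
#   G = created_auxiliary_grap(weighted_paths)
# G would then contain the edges a->b and b->c with weight 3, and a->d with weight 2.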
# Function to calculate the cliques and GRAPH in one single line.
def nice_graph( weighted_paths, with_weigths=True ):
print(weighted_paths)
G = created_auxiliary_grap( weighted_paths )
try:
G.remove_node("_START_")
G.remove_node("_END_")
except:
pass
weights = { (str(u), str(v)): round(G[u][v]['weight'], 2) for u,v in G.edges() }
pos = nx.spring_layout(G)
pos = nx.circular_layout(G)
plt.rcParams['figure.figsize'] = [8, 4]
plt.subplot(111)
nx.draw_networkx (G, pos, width=1, node_color="#cccccc", with_labels=True, connectionstyle='arc3, rad=0.05' )
if with_weigths:
nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=weights)
plt.show()
def untangled_graph(G, with_weigths=True):
weights = { (str(u), str(v)): G[u][v]['weight'] for u,v in G.edges() }
pos = nx.spring_layout(G)
pos = nx.circular_layout(G)
plt.rcParams['figure.figsize'] = [10, 4]
plt.subplot(111)
nx.draw_networkx (G, pos, width=1, node_color="#cccccc", with_labels=True, connectionstyle='arc3, rad=0.05' )
if with_weigths:
nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=weights)
plt.show()
``` |
{
"source": "jpgil/procdelay",
"score": 3
} |
#### File: procdelay/src/CaseManagerBase.py
```python
import doctest
import logging
class CaseManagerBase:
""" Base case for case manager
This class splits logs in two dimensions:
1) Detects parallel executions of cases
2) Detects start / stop events for cases.
Basic example:
>>> CaseManagerBase.newCase( basedOn="sample event" )
('ID', <__main__.CaseManagerBase instance at ...
"""
def __init__(self):
self.history = []
self.uniques = {}
self.myName = str(self.__class__).split(".")[-1]
# The very first event is special. If you use the same marker for start/end, then the second occurrence is the one that must stop this case.
self.isFirstEvent = True
self._processingEngines = self.processingEngines()
self.log = logging.getLogger( self.myName )
# Validate processing engines?
@staticmethod
def isStartEvent(event):
raise NotImplementedError
@staticmethod
def newCase(basedOn):
"""Factory method.
It returns also an ID to be used as case identifier.
:returns: (string, object) -- caseID and case instance
"""
return "ID", CaseManagerBase()
def isEndEvent(self, event):
if self.isFirstEvent:
return False
else:
return self.conditionForEndEvent(event)
def isValidEvent(self, event):
""" Override if you need to filter events based on some criteria. True by default
"""
return True
def conditionForEndEvent(self, event):
""" Condition based on event that must trigger the end of execution of the case.
You can also reuse the start condition here; the very first event of a case is handled separately, so it will not immediately end the case.
:returns: bool
"""
raise NotImplementedError
def processingEngines(self):
""" List instances of case processing engines.
They must be inherited from CaseProcessingEngineBase and being initialized here.
"""
return []
# Some magic here....
def appendActivity(self, ts, activity):
self.history.append( (ts, activity) )
if activity not in self.uniques.keys():
self.uniques[activity] = 1
else:
self.uniques[activity] = self.uniques[activity] + 1
self.isFirstEvent = False
for engine in self._processingEngines:
if engine.isEnabled:
engine.streamProcess(ts, activity)
def getTotalEvents(self):
return len(self.history)
def getUniqueActivities(self):
return self.uniques.keys()
def finish(self):
# Do whatever you need to do here
# For example... count the delays and send to PairsDB
for engine in self._processingEngines:
if engine.isEnabled:
engine.postProcess()
def stats(self):
return {
'TOTAL_EVENTS': self.getTotalEvents() ,
'UNIQUE_ACTIVITIES': self.getUniqueActivities(),
'TRACE': ["C%s" % event[1] for event in self.history]
}
def worldStats(self):
stats = self.stats()
gentle = stats["TRACE"][:50]
if len(gentle) == len(stats["TRACE"]):
gentleTrace = " ".join(gentle)
else:
gentleTrace = " ".join(gentle) + " ...(%s more)" % ( len(stats["TRACE"])-len(gentle) )
returnTxt = "EVENTS=%s UNIQUES=%s " % (stats["TOTAL_EVENTS"], len(stats["UNIQUE_ACTIVITIES"]) )
returnTxt = returnTxt + " ".join ([eng.worldStats() for eng in self._processingEngines if eng.isEnabled] )
# returnTxt = returnTxt + " TRACE=%s" % gentleTrace
return returnTxt
# Move elsewhere
class CaseProcessingEngineBase:
def __init__(self, parent=None, enabled=True):
self.isEnabled = enabled
self.parent = parent
self.myName = str(self.__class__).split(".")[-1]
self.log = logging.getLogger( "%s.%s" % (self.parent.myName, self.myName) )
def streamProcess(self, ts, activity):
return
def postProcess(self):
return
def worldStats(self):
"""
Single line of stats to be append to case worldStats
"""
return "%s: No stats given" % self.myName
# Move elsewhere
class CasesPool:
def __init__(self, cases_list):
self.pool = {}
self.cases_list = cases_list
self.myName = str(self.__class__).split(".")[-1]
self.log = logging.getLogger( self.myName )
def removeEndedCases(self, event, ts, activity):
what2kill = []
for idx, case in self.pool.iteritems():
if case.isEndEvent(event):
what2kill.append(idx)
# Kill now.
for idx in what2kill:
case = self.pool[idx]
# Add last event just in case.
self.appendToCase(event, ts, activity, case )
case.finish()
self.log.info("%s[%s] %s" % (case.myName, idx, case.worldStats()))
del(self.pool[idx])
self.log.debug("case[%s] killed at %s because %s" % (idx, event[0], event[1]))
def appendToCase(self, event, ts, activity, case):
if case.isValidEvent(event):
case.appendActivity( ts, activity )
def spawnNewCases(self, event):
for ThisCase in self.cases_list:
if ThisCase.isStartEvent(event):
caseId, caseInstance = ThisCase.newCase(basedOn=event)
# Is case already active? Then fail
if caseId in self.pool.keys():
caseInstance.log.info("case[%s] %s" % (caseId, self.pool[caseId].worldStats()))
raise ValueError("[%s] Error in your logic, buddy. There is already an active case. Check above and belowe offending event:\n%s" % (caseId, event) )
self.pool[caseId] = caseInstance
# logging.info("case[%s] created" % caseId)
caseInstance.log.debug("[%s] created at %s because %s" % (caseId, event[0], event[1]))
def appendActivity(self, event, ts, activity):
for idx, case in self.pool.iteritems():
if case.isValidEvent(event):
case.appendActivity( ts, activity )
def removeAfterLastEvent(self):
for idx in self.pool.keys():
self.pool[idx].finish()
self.log.info("%s[%s] %s" % (self.pool[idx].myName, idx, self.pool[idx].worldStats()))
self.log.info("case[%s] KILL because no more events are available" % idx)
del(self.pool[idx])
if __name__ == '__main__':
"""If this file is called directly it will execute a self test
"""
doctest.testmod( optionflags = doctest.ELLIPSIS )
``` |
{
"source": "jpgimenez/cbpi-CascadeControl",
"score": 3
} |
#### File: jpgimenez/cbpi-CascadeControl/__init__.py
```python
import time
from modules import cbpi
from modules.core.controller import KettleController
from modules.core.props import Property
# Property descriptions
kp_description = "The proportional term, also known as kp, is the action of PID in response to each unit of error. kp dictates the aggressiveness of action. \nThe units of kp are output / process variable (e.g. % / °C)"
ki_description = "The integral term, also known as ki, is the action of the PID in response to cumulative error in the system. ki is used primarily to reduce steady state error, but also factors into aggressivness of action. \nThe units of ki are output/(process variable • time) (e.g. % / (°C • seconds))"
kd_description = "The derivative term, also known as kd, is the action of the PID in response to the rate of change of the error. kd is used primarily to reduce overshoot. \nThe units are of output / process variable / time (e.g. % /°C / seconds)"
update_interval_description = "This is the length of time in seconds between recalculation of actor output with the PID algorithm."
notification_timeout_description = "Notification duration in milliseconds"
action_description = "Positive action results in the Actor being ON when current value of control variable is BELOW it's set point (e.g. heating). Negative action results in an Actor being OFF when the current value of the control variable is ABOVE it's setpoint (e.g. cooling)."
maxset_description = "The maximum temperature that the outer loop can set as the target for the inner loop"
maxoutput_description = "The maximum PWM output %"
@cbpi.controller
class CascadePID(KettleController):
a_inner_sensor = Property.Sensor(label="Inner loop sensor")
b_inner_kp = Property.Number("Inner loop proportional term", True, 5.0, description=kp_description)
c_inner_ki = Property.Number("Inner loop integral term", True, 0.25, description=ki_description)
d_inner_kd = Property.Number("Inner loop derivative term", True, 0.0, description=kd_description)
e_inner_integrator_initial = Property.Number("Inner loop integrator initial value", True, 0.0)
if cbpi.get_config_parameter("unit", "C") == "C":
f_maxset = Property.Number("Max inner loop target (°C)", True, 75, description=maxset_description)
else:
f_maxset = Property.Number("Max inner loop target (°F)", True, 168, description=maxset_description)
g_maxoutput = Property.Number("Max inner loop output (%)", True, 100, description=maxoutput_description)
h_outer_kp = Property.Number("Outer loop proportional term", True, 5.0, description=kp_description)
i_outer_ki = Property.Number("Outer loop integral term", True, 2.0, description=ki_description)
j_outer_kd = Property.Number("Outer loop derivative term", True, 1.0, description=kd_description)
k_outer_integrator_initial = Property.Number("Outer loop integrator initial value", True, 0.0)
l_update_interval = Property.Number("Update interval (s)", True, 2.5, description=update_interval_description)
m_notification_timeout = Property.Number("Notification duration (ms)", True, 5000, description=notification_timeout_description)
def stop(self):
self.actor_power(0.0)
self.heater_off()
super(KettleController, self).stop()
def run(self):
if not isinstance(self.a_inner_sensor, str):
self.notify("PID Error", "An inner sensor must be selected", timeout=None, type="danger")
raise UserWarning("PID - An inner sensor must be selected")
# Get inner sensor as an integer
inner_sensor = int(self.a_inner_sensor)
# Ensure all numerical properties are floats
inner_kp = float(self.b_inner_kp)
inner_ki = float(self.c_inner_ki)
inner_kd = float(self.d_inner_kd)
inner_integrator_initial = float(self.e_inner_integrator_initial)
maxset = float(self.f_maxset)
maxoutput = min(float(self.g_maxoutput), 100.0)
outer_kp = float(self.h_outer_kp)
outer_ki = float(self.i_outer_ki)
outer_kd = float(self.j_outer_kd)
outer_integrator_initial = float(self.k_outer_integrator_initial)
update_interval = float(self.l_update_interval)
notification_timeout = float(self.m_notification_timeout)
# Error check
if update_interval <= 0.0:
self.notify("PID Error", "Update interval must be positive", timeout=None, type="danger")
raise ValueError("PID - Update interval must be positive")
elif notification_timeout <= 0.0:
cbpi.notify("PID Error", "Notification timeout must be positive", timeout=None, type="danger")
raise ValueError("PID - Notification timeout must be positive")
elif maxoutput < 5.0:
cbpi.notify("PID Error", "Notification timeout must be positive", timeout=None, type="danger")
raise ValueError("PID - Max output must be at least 5%")
else:
self.heater_on(0.0)
# Initialize PID cascade
if cbpi.get_config_parameter("unit", "C") == "C":
outer_pid = PID(outer_kp, outer_ki, outer_kd, 0.0, maxset, 1.0, outer_integrator_initial)
else:
outer_pid = PID(outer_kp, outer_ki, outer_kd, 32, maxset, 1.8, outer_integrator_initial)
inner_pid = PID(inner_kp, inner_ki, inner_kd, 0.0, maxoutput, 1.0, inner_integrator_initial)
while self.is_running():
waketime = time.time() + update_interval
# Get the target temperature
outer_target_value = self.get_target_temp()
# Calculate inner target value from outer PID
outer_current_value = self.get_temp()
inner_target_value = round(outer_pid.update(outer_current_value, outer_target_value), 2)
# Calculate inner output from inner PID
inner_current_value = float(cbpi.cache.get("sensors")[inner_sensor].instance.last_value)
inner_output = round(inner_pid.update(inner_current_value, inner_target_value), 2)
# Update the heater power
self.actor_power(inner_output)
# Print loop details
cbpi.app.logger.info("[%s] Outer loop PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, outer_target_value, outer_current_value, inner_target_value, round(outer_pid.integrator, 2)))
cbpi.app.logger.info("[%s] Inner loop PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, inner_target_value, inner_current_value, inner_output, round(inner_pid.integrator, 2)))
print(("[%s] Outer loop PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, outer_target_value, outer_current_value, inner_target_value, round(outer_pid.integrator, 2))))
print(("[%s] Inner loop PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, inner_target_value, inner_current_value, inner_output, round(inner_pid.integrator, 2))))
# Sleep until update required again
if waketime <= time.time() + 0.25:
self.notify("PID Error", "Update interval is too short", timeout=notification_timeout, type="warning")
cbpi.app.logger.info("PID - Update interval is too short")
print("PID - Update interval is too short")
else:
self.sleep(waketime - time.time())
@cbpi.controller
class AdvancedPID(KettleController):
a_kp = Property.Number("Proportional term", True, 10.0, description=kp_description)
b_ki = Property.Number("Integral term", True, 2.0, description=ki_description)
c_kd = Property.Number("Derivative term", True, 1.0, description=kd_description)
d_maxoutput = Property.Number("Max output (%)", True, 100, description=maxoutput_description)
e_integrator_initial = Property.Number("Integrator initial value", True, 0.0)
f_update_interval = Property.Number("Update interval (s)", True, 2.5, description=update_interval_description)
g_notification_timeout = Property.Number("Notification duration (ms)", True, 5000, description=notification_timeout_description)
def stop(self):
self.actor_power(0.0)
self.heater_off()
super(KettleController, self).stop()
def run(self):
kp = float(self.a_kp)
ki = float(self.b_ki)
kd = float(self.c_kd)
maxoutput = min(float(self.d_maxoutput), 100.0)
integrator_initial = float(self.e_integrator_initial)
update_interval = float(self.f_update_interval)
notification_timeout = float(self.g_notification_timeout)
# Error check
if update_interval <= 0.0:
self.notify("PID Error", "Update interval must be positive", timeout=None, type="danger")
raise ValueError("PID - Update interval must be positive")
elif notification_timeout <= 0.0:
cbpi.notify("PID Error", "Notification timeout must be positive", timeout=None, type="danger")
raise ValueError("PID - Notification timeout must be positive")
elif maxoutput < 5.0:
cbpi.notify("PID Error", "Notification timeout must be positive", timeout=None, type="danger")
raise ValueError("PID - Max output must be at least 5%")
else:
self.heater_on(0.0)
# Initialize PID
SinglePID = PID(kp, ki, kd, 0.0, maxoutput, 1.0, integrator_initial)
while self.is_running():
waketime = time.time() + update_interval
# Get the target temperature
target_value = self.get_target_temp()
# Calculate output from PID
current_value = self.get_temp()
output = round(SinglePID.update(current_value, target_value), 2)
# Update the heater power
self.actor_power(output)
# Log details
cbpi.app.logger.info("[%s] PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, target_value, current_value, output, round(SinglePID.integrator, 2)))
print(("[%s] PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, target_value, current_value, output, round(SinglePID.integrator, 2))))
# Sleep until update required again
if waketime <= time.time() + 0.25:
self.notify("PID Error", "Update interval is too short", timeout=notification_timeout, type="warning")
cbpi.app.logger.info("PID - Update interval is too short")
print("PID - Update interval is too short")
else:
self.sleep(waketime - time.time())
@cbpi.controller
class CascadeHysteresis(KettleController):
aa_kp = Property.Number("Proportional term", True, 10.0, description=kp_description)
ab_ki = Property.Number("Integral term", True, 2.0, description=ki_description)
ac_kd = Property.Number("Derivative term", True, 1.0, description=kd_description)
ad_integrator_initial = Property.Number("Integrator initial value", True, 0.0)
if cbpi.get_config_parameter("unit", "C") == "C":
ae_maxset = Property.Number("Max hysteresis target (°C)", True, 75, description=maxset_description)
else:
ae_maxset = Property.Number("Max hysteresis target (°F)", True, 168, description=maxset_description)
ba_inner_sensor = Property.Sensor(label="Inner (hysteresis) sensor")
bb_action = Property.Select(label="Hysteresis Action Type", options=["Positive", "Negative"], description=action_description)
bc_on_min = Property.Number("Hysteresis Minimum Time On (s)", True, 45)
bd_on_max = Property.Number("Hysteresis Maximum Time On (s)", True, 1800)
be_off_min = Property.Number("Hysteresis Minimum Time Off (s)", True, 90)
c_update_interval = Property.Number("Update interval (s)", True, 2.5, description=update_interval_description)
d_notification_timeout = Property.Number("Notification duration (ms)", True, 5000, description=notification_timeout_description)
def stop(self):
self.heater_off()
super(KettleController, self).stop()
def run(self):
# Get inner sensor as an integer
inner_sensor = int(self.ba_inner_sensor)
# Outer PID settings
kp = float(self.aa_kp)
ki = float(self.ab_ki)
kd = float(self.ac_kd)
integrator_initial = float(self.ad_integrator_initial)
maxset = float(self.ae_maxset)
# Inner hysteresis settings
positive = self.bb_action == "Positive"
on_min = float(self.bc_on_min)
on_max = float(self.bd_on_max)
off_min = float(self.be_off_min)
# General settings
update_interval = float(self.c_update_interval)
notification_timeout = float(self.d_notification_timeout)
# Error check
if on_min <= 0.0:
self.notify("Hysteresis Error", "Minimum 'on time' must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Minimum 'on time' must be positive")
if on_max <= 0.0:
self.notify("Hysteresis Error", "Maximum 'on time' must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Maximum 'on time' must be positive")
if on_min >= on_max:
self.notify("Hysteresis Error", "Maximum 'on time' must be greater than the minimum 'on time'", timeout=None, type="danger")
raise ValueError("Hysteresis - Maximum 'on time' must be greater than the minimum 'on time'")
if off_min <= 0.0:
self.notify("Hysteresis Error", "Minimum 'off time' must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Minimum 'off time' must be positive")
if update_interval <= 0.0:
self.notify("Hysteresis Error", "Update interval must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Update interval must be positive")
elif notification_timeout <= 0.0:
cbpi.notify("Hysteresis Error", "Notification timeout must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Notification timeout must be positive")
else:
# Initialize outer PID
if cbpi.get_config_parameter("unit", "C") == "C":
outer_pid = PID(kp, ki, kd, 0.0, maxset, 1.0, integrator_initial)
else:
outer_pid = PID(kp, ki, kd, 32, maxset, 1.8, integrator_initial)
# Initialize hysteresis
inner_hysteresis = Hysteresis(positive, on_min, on_max, off_min)
while self.is_running():
waketime = time.time() + update_interval
# Get the target temperature
outer_target_value = self.get_target_temp()
# Calculate inner target value from outer PID
outer_current_value = self.get_temp()
inner_target_value = round(outer_pid.update(outer_current_value, outer_target_value), 2)
inner_current_value = float(cbpi.cache.get("sensors")[inner_sensor].instance.last_value)
# Update the hysteresis controller
if inner_hysteresis.update(inner_current_value, inner_target_value):
self.heater_on(100)
cbpi.app.logger.info("[%s] Inner hysteresis actor stays ON" % (waketime))
print(("[%s] Innner hysteresis actor stays ON" % (waketime)))
else:
self.heater_off()
cbpi.app.logger.info("[%s] Inner hysteresis actor stays OFF" % (waketime))
print(("[%s] Innner hysteresis actor stays OFF" % (waketime)))
# Print loop details
cbpi.app.logger.info("[%s] Outer loop PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, outer_target_value, outer_current_value, inner_target_value, round(outer_pid.integrator, 2)))
print(("[%s] Outer loop PID target/actual/output/integrator: %s/%s/%s/%s" % (waketime, outer_target_value, outer_current_value, inner_target_value, round(outer_pid.integrator, 2))))
# Sleep until update required again
if waketime <= time.time() + 0.25:
self.notify("Hysteresis Error", "Update interval is too short", timeout=notification_timeout, type="warning")
cbpi.app.logger.info("Hysteresis - Update interval is too short")
print("Hysteresis - Update interval is too short")
else:
self.sleep(waketime - time.time())
@cbpi.controller
class AdvancedHysteresis(KettleController):
a_action = Property.Select(label="Hysteresis Action Type", options=["Positive", "Negative"], description=action_description)
b_on_min = Property.Number("Hysteresis Minimum Time On (s)", True, 45)
c_on_max = Property.Number("Hysteresis Maximum Time On (s)", True, 1800)
d_off_min = Property.Number("Hysteresis Minimum Time Off (s)", True, 90)
e_update_interval = Property.Number("Update interval (s)", True, 2.5, description=update_interval_description)
f_notification_timeout = Property.Number("Notification duration (ms)", True, 5000, description=notification_timeout_description)
def stop(self):
self.heater_off()
super(KettleController, self).stop()
def run(self):
positive = self.a_action == "Positive"
on_min = float(self.b_on_min)
on_max = float(self.c_on_max)
off_min = float(self.d_off_min)
update_interval = float(self.e_update_interval)
notification_timeout = float(self.f_notification_timeout)
# Error check
if on_min <= 0.0:
self.notify("Hysteresis Error", "Minimum 'on time' must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Minimum 'on time' must be positive")
if on_max <= 0.0:
self.notify("Hysteresis Error", "Maximum 'on time' must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Maximum 'on time' must be positive")
if on_min >= on_max:
self.notify("Hysteresis Error", "Maximum 'on time' must be greater than the minimum 'on time'", timeout=None, type="danger")
raise ValueError("Hysteresis - Maximum 'on time' must be greater than the minimum 'on time'")
if off_min <= 0.0:
self.notify("Hysteresis Error", "Minimum 'off time' must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Minimum 'off time' must be positive")
if update_interval <= 0.0:
self.notify("Hysteresis Error", "Update interval must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Update interval must be positive")
elif notification_timeout <= 0.0:
cbpi.notify("Hysteresis Error", "Notification timeout must be positive", timeout=None, type="danger")
raise ValueError("Hysteresis - Notification timeout must be positive")
else:
# Initialize hysteresis
hysteresis_on = Hysteresis(positive, on_min, on_max, off_min)
while self.is_running():
waketime = time.time() + update_interval
# Get the target temperature
current_value = self.get_temp()
target_value = self.get_target_temp()
if hysteresis_on.update(current_value, target_value):
self.heater_on(100)
cbpi.app.logger.info("[%s] Hysteresis actor stays ON" % (waketime))
print(("[%s] Hysteresis actor stays ON" % (waketime)))
else:
self.heater_off()
cbpi.app.logger.info("[%s] Hysteresis actor stays OFF" % (waketime))
print(("[%s] Hysteresis actor stays OFF" % (waketime)))
# Sleep until update required again
if waketime <= time.time() + 0.25:
self.notify("Hysteresis Error", "Update interval is too short", timeout=notification_timeout, type="warning")
cbpi.app.logger.info("Hysteresis - Update interval is too short")
print("Hysteresis - Update interval is too short")
else:
self.sleep(waketime - time.time())
class PID(object):
def __init__(self, kp, ki, kd, output_min, output_max, integrator_error_max, integrator_initial):
self.kp = kp
self.ki = ki
self.kd = kd
self.output_min = output_min
self.output_max = output_max
# Set integrator maximum in relation to ki and output range
# such that the maximum integrator alone could result in no
# more than 100% of the output. This can help limit excessive
# integrator wind up.
if ki == 0.0:
self.integrator_max = 0.0
else:
self.integrator_max = abs((output_max-output_min)/ki)
# Setting an error maximum for the integrator is an additional
# measure to prevent excessive integrator windup
self.integrator_error_max = abs(integrator_error_max)
self.last_time = 0.0
self.last_error = 0.0
# Quietly ensure the initial integrator does not exceed
# the magnitude of the integrator maximum
if abs(integrator_initial) > abs(self.integrator_max):
self.integrator = self.integrator_max if integrator_initial >= 0 else -self.integrator_max
else:
self.integrator = integrator_initial
def update(self, current, target):
# Initialization iteration
if self.last_time == 0.0:
self.last_time = time.time()
current_error = target - current
# Update last_error
self.last_error = current_error
# Return output
return max(min(self.kp * current_error, self.output_max), self.output_min)
# Regular iteration
else:
# Calculate duration of iteration
current_time = time.time()
iteration_time = current_time - self.last_time
self.last_time = current_time
# Calculate error
current_error = target - current
# Calculate error for use with the integrator, with respect to specified error limits
integrator_error = max(min(current_error, self.integrator_error_max), -self.integrator_error_max)
# Update the integrator with respect to total integrator limits
self.integrator = max(min(self.integrator + (integrator_error * iteration_time), self.integrator_max), -self.integrator_max)
# Calculate error derivative
derivative = (current_error - self.last_error)/iteration_time
# Calculate output components
p_action = self.kp * current_error
i_action = self.ki * self.integrator
d_action = self.kd * derivative
# Update last_error
self.last_error = current_error
# Return output
return max(min(p_action + i_action + d_action, self.output_max), self.output_min)
class Hysteresis(object):
def __init__(self, positive, on_min, on_max, off_min):
# If positive is true, output will be ON when the control variable is
# BELOW the lowerbound (i.e. heating if controlling temperature)
#
# If positive is false, output will be ON when the control variable is
# ABOVE the setpoint (i.e. cooling if controlling temperature)
self.positive = positive
# The minimum and maximum time in seconds that an output should remain ON
# This setting is intended to protect against scenarios which may lead to
# excessive cycling of a compressor, etc. This overrides the buffer.
self.on_min = on_min
self.on_max = on_max
# The minimum time in seconds that an output should remain OFF
# This setting is intended to protect against scenarios which may lead to
# excessive cycling of a compressor, etc.
self.off_min = off_min
# To implement min/max on/off times, keep track of time of last change
# in the output
self.last_change = time.time()
# Record intended state
self.on = False
def update(self, current, target):
interval = time.time() - self.last_change
if (self.positive & (current <= target)) | (not(self.positive) & (current >= target)):
if self.on:
if interval > self.on_max:
# Current ON time has exceeded ON time maximum
# Turn OFF, and update time of last change
self.last_change = time.time()
self.on = False
else:
# Leave ON
self.on = True
else:
if interval < self.off_min:
# Prevent turning ON due to OFF time minimum
self.on = False
else:
# OK to turn ON
# Turn ON, and update time of last change
self.last_change = time.time()
self.on = True
elif (self.positive & (current > target)) | (not(self.positive) & (current < target)):
if self.on:
if interval < self.on_min:
# Current ON time has NOT exceeded minimum, so leave ON
self.on = True
else:
# OK to turn OFF
# Turn OFF, and update time of last change
self.last_change = time.time()
self.on = False
else:
# Leave OFF
self.on = False
return self.on
``` |
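The `PID` class in the controller above guards against integrator wind-up twice: the per-step error fed to the integrator is clamped to `integrator_error_max`, and the accumulated integrator is capped so that `ki * integrator` alone can never exceed the output span. The sketch below restates that update rule in a condensed, standalone form against a toy first-order plant; the gains, the 0-100% output range and the fake kettle model are illustrative only and are not taken from the plugin.

```python
import time

class MiniPID:
    """Condensed restatement of the windup-limited PID update above."""
    def __init__(self, kp, ki, kd, out_min, out_max, err_max):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.out_min, self.out_max = out_min, out_max
        # Cap the integrator so the integral term alone stays inside the output span.
        self.int_max = abs((out_max - out_min) / ki) if ki else 0.0
        self.err_max = abs(err_max)
        self.integrator = 0.0
        self.last_time = None
        self.last_error = 0.0

    def update(self, current, target):
        now = time.time()
        error = target - current
        if self.last_time is None:  # first call: proportional-only output
            self.last_time, self.last_error = now, error
            return max(min(self.kp * error, self.out_max), self.out_min)
        dt = now - self.last_time
        self.last_time = now
        bounded = max(min(error, self.err_max), -self.err_max)
        self.integrator = max(min(self.integrator + bounded * dt, self.int_max), -self.int_max)
        derivative = (error - self.last_error) / dt
        self.last_error = error
        output = self.kp * error + self.ki * self.integrator + self.kd * derivative
        return max(min(output, self.out_max), self.out_min)

# Drive a crude first-order "kettle" toward 65 degrees with 0-100% heater power.
pid = MiniPID(kp=5.0, ki=0.25, kd=0.0, out_min=0.0, out_max=100.0, err_max=1.0)
temp = 20.0
for _ in range(10):
    power = pid.update(temp, 65.0)
    temp += 0.05 * power - 0.1  # heat added minus ambient losses
    time.sleep(0.01)
print("simulated temperature after 10 updates:", round(temp, 1))
```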
{
"source": "JPGlaser/Tycho",
"score": 2
} |
#### File: JPGlaser/Tycho/gen_RandEncounters.py
```python
import sys, os, math
import numpy as np
import time as tp
from optparse import OptionParser
import glob
# Importing Pickle (cPickle is merged into pickle in Python 3)
import pickle
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
from tycho import util
# Import the Amuse Gravity & Close-Encounter Packages
from amuse.community.smalln.interface import SmallN
from amuse.community.kepler.interface import Kepler
# Import the Tycho Packages
from tycho import create, util, read, write, stellar_systems
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def gen_scatteringIC(encounter_db, doMultipleClusters=False):
global rootDir
global cluster_name
max_number_of_rotations = 100
if doMultipleClusters:
output_ICDirectory = rootDir+'/'+cluster_name+'/Scatter_IC/'
else:
output_ICDirectory = rootDir+'/Scatter_IC/'
if not os.path.exists(output_ICDirectory): os.mkdir(output_ICDirectory)
# Set up the Kepler Workers for Subroutines Now
converter = nbody_system.nbody_to_si(1 | units.MSun, 100 |units.AU)
kepler_workers = [Kepler(unit_converter = converter, redirection = 'none'),
Kepler(unit_converter = converter, redirection = 'none')]
for kw in kepler_workers:
kw.initialize_code()
# Loop Through the Star_IDs
for star_ID in list(encounter_db.keys()):
output_KeyDirectory = output_ICDirectory+str(star_ID)
if not os.path.exists(output_KeyDirectory): os.mkdir(output_KeyDirectory)
encounter_ID = 0
for encounter in encounter_db[star_ID]:
# Set Up Subdirectory for this Specific Encounter
output_EncPrefix = output_KeyDirectory+"/Enc-"+str(encounter_ID)
# Set up Encounter Key for this Specific Encounter for this Specific Star
rotation_ID = 0
while rotation_ID <= max_number_of_rotations:
# Set Up Output Directory for this Specific Iteration
output_HDF5File = output_EncPrefix+"_Rot-"+str(rotation_ID)+'.hdf5'
next_outFile = output_EncPrefix+"_Rot-"+str(rotation_ID+1)+'.hdf5'
if os.path.exists(output_HDF5File):
if rotation_ID == 99:
rotation_ID += 1
continue
elif os.path.exists(next_outFile):
rotation_ID += 1
continue
# Remove Jupiter and Add Desired Planetary System
enc_bodies = replace_planetary_system(encounter.copy(), kepler_workers=kepler_workers)
write_set_to_file(enc_bodies, output_HDF5File, 'hdf5', version='2.0', close_file=True)
printID = str(star_ID)+"-"+str(encounter_ID)+"-"+str(rotation_ID)
print(util.timestamp(), "Finished Generating Random Encounter ID:", printID, "...")
rotation_ID += 1
encounter_ID += 1
# Stop the Kepler Workers
for kw in kepler_workers:
kw.stop()
def replace_planetary_system(bodies, kepler_workers=None, base_planet_ID=50000, converter=None):
# Set up the Converter if not provided
if kepler_workers is None and converter is None:
converter = nbody_system.nbody_to_si(bodies.mass.sum(), 2 * np.max(bodies.radius.number) | bodies.radius.unit)
# Get the Hierarchical Systems from the Particle Set
enc_systems = stellar_systems.get_heirarchical_systems_from_set(bodies, kepler_workers=kepler_workers, converter=converter)
sys_with_planets = []
# Remove Any Tracer Planets in the Encounter and Record the Keys that Need a New System
for sys_key in list(enc_systems.keys()):
for particle in enc_systems[sys_key]:
if particle.id >= base_planet_ID:
enc_systems[sys_key].remove_particle(particle)
sys_with_planets.append(sys_key)
# Allows for Planets to be Added to Single Stars
for sys_key in list(enc_systems.keys()):
if (len(enc_systems[sys_key]) == 1) and (sys_key not in sys_with_planets):
sys_with_planets.append(sys_key)
# Add in a New Planetary System
for sys_key in sys_with_planets:
planets = create.planetary_systems_v2(enc_systems[sys_key], 1, Jupiter=True, \
Earth=True, Neptune=True, \
kepler_worker=kepler_workers[0])
enc_systems[sys_key].add_particles(planets)
new_bodies = Particles()
for sys_key in enc_systems:
new_bodies.add_particles(enc_systems[sys_key])
return new_bodies
# ------------------------------------- #
# Main Production Script #
# ------------------------------------- #
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-d", "--rootdirectory", dest="rootDir", default=None, type="str",
help="Enter the full directory of the Root Folder. Defaults to your CWD unless -M is on.")
parser.add_option("-M", "--doMultipleClusters", dest="doMultipleClusters", action="store_true",
help="Flag to turn on for running the script over a series of multiple clusters.")
(options, args) = parser.parse_args()
if options.doMultipleClusters:
if options.rootDir != None:
rootDir = options.rootDir+'/*'
else:
print(util.timestamp(), "Please provide the path to your root directory which contains all cluster folders!", cluster_name,"...")
else:
if options.rootDir != None:
rootDir = options.rootDir
else:
rootDir = os.getcwd()
# Bring Root Directory Path Inline with os.cwd()
if rootDir.endswith("/"):
rootDir = rootDir[:-1]
orig_stdout = sys.stdout
log_file = open(rootDir+"/rand_encounters.log","w")
sys.stdout = log_file
paths_of_enc_files = glob.glob(rootDir+'/*_encounters_cut.pkl')
print(paths_of_enc_files)
cluster_names = [path.split("/")[-2] for path in paths_of_enc_files]
print(cluster_names)
for i, path in enumerate(paths_of_enc_files):
# Read in Encounter Directory
encounter_db = pickle.load(open(path, "rb"))
cluster_name = cluster_names[i]
# Report Start of Generating IC for Cut Encounter Directory
sys.stdout.flush()
print(util.timestamp(), "Generating initial conditions for", cluster_name,"...")
sys.stdout.flush()
# Generate IC for Scattering Experiments
gen_scatteringIC(encounter_db, doMultipleClusters=options.doMultipleClusters)
sys.stdout = orig_stdout
log_file.close()
```
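The rotation loop in `gen_scatteringIC` names each output after its rotation index and skips files that already exist, so an interrupted run can be resumed without regenerating finished encounters. A minimal sketch of that resume-friendly pattern, with a temporary directory and a placeholder generator standing in for the real HDF5 writer:

```python
from pathlib import Path
import tempfile

def generate_rotations(out_dir, n_rotations, generate):
    out_dir = Path(out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    for rotation_id in range(n_rotations):
        out_file = out_dir / "Enc-0_Rot-{}.hdf5".format(rotation_id)
        if out_file.exists():
            continue  # produced by a previous run; skip it
        out_file.write_bytes(generate(rotation_id))

with tempfile.TemporaryDirectory() as tmp:
    generate_rotations(tmp, 5, lambda i: b"rotation %d placeholder" % i)
    generate_rotations(tmp, 5, lambda i: b"ignored on resume")  # second pass skips everything
    print(sorted(p.name for p in Path(tmp).iterdir()))
```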
#### File: JPGlaser/Tycho/gen_scatterDB.py
```python
import numpy as np
import random as rp
import os, sys
import scipy as sp
from scipy import optimize
from scipy import special
import pickle
import glob
import copy
# Import the Amuse Base Packages
import amuse
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
from tycho import util, create, read, write, stellar_systems
set_printing_strategy("custom", preferred_units = [units.MSun, units.AU, units.day], precision = 6, prefix = "", separator = "[", suffix = "]")
def replace_planetary_system(bodies, base_planet_ID=50000, converter=None):
enc_systems = stellar_systems.get_heirarchical_systems_from_set(bodies, converter=converter)
sys_with_planets = []
# Remove Any Tracer Planets in the Encounter and Record the Keys that Need a New System
for sys_key in list(enc_systems.keys()):
for particle in enc_systems[sys_key]:
if particle.id >= base_planet_ID:
enc_systems[sys_key].remove_particle(particle)
sys_with_planets.append(sys_key)
# Allows for Planets to be Added to Single Stars
for sys_key in list(enc_systems.keys()):
if (len(enc_systems[sys_key]) == 1) and (sys_key not in sys_with_planets):
sys_with_planets.append(sys_key)
#print sys_with_planets
# Add in a New Planetary System
for sys_key in sys_with_planets:
planets = create.planetary_systems_v2(enc_systems[sys_key], 1, Jupiter=True, Earth=True, Neptune=True)
enc_systems[sys_key].add_particles(planets)
new_bodies = Particles()
for sys_key in enc_systems:
new_bodies.add_particles(enc_systems[sys_key])
return new_bodies
if __name__ == '__main__':
cutEnc_filePath = glob.glob("/home/draco/jglaser/Public/Tycho_Runs/MarkG/*/*_encounters_cut.pkl")
cluster_rootDir = glob.glob("/home/draco/jglaser/Public/Tycho_Runs/MarkG/*/")
cluster_names = [x.split("/")[-2] for x in cluster_rootDir]
cutEnc_db = {}
for i, filepath in enumerate(cutEnc_filePath):
opened_file = open(filepath, 'rb')
cutEnc_db[cluster_names[i]] = pickle.load(opened_file)
opened_file.close()
print(list(cutEnc_db.keys()))
old_cutEnc_db = copy.deepcopy(cutEnc_db)
for clusterName in list(cutEnc_db.keys()):
for primaryStar in list(cutEnc_db[clusterName].keys()):
originalEnc = cutEnc_db[clusterName][primaryStar][0].copy()
newEncList = []
for i in range(100):
newEncList.append(replace_planetary_system(originalEnc))
cutEnc_db[clusterName][primaryStar] = newEncList
output_file = open(os.path.join(os.getcwd(), "full_scatterDB.pkl"), 'wb')
pickle.dump(cutEnc_db, output_file)
output_file.close()
print("Finished!")
``` |
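One note on the final step above: the database has to be opened in binary write mode (`'wb'`) for `pickle.dump`, and read back with `'rb'`. A quick round-trip check of that pattern, using a temporary path rather than the script's working directory:

```python
import os
import pickle
import tempfile

db = {"cluster_A": {"star_1": ["encounter placeholder"]}}
path = os.path.join(tempfile.gettempdir(), "scatterDB_example.pkl")

with open(path, "wb") as fh:  # binary write for pickle.dump
    pickle.dump(db, fh)
with open(path, "rb") as fh:  # binary read for pickle.load
    assert pickle.load(fh) == db
print("round-trip OK:", path)
```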
{
"source": "jpgneves/hojehatransportes",
"score": 2
} |
#### File: hojehatransportes/hat/forms.py
```python
from django.forms import ModelForm, Textarea, HiddenInput
from models import Strike
class SubmitForm(ModelForm):
class Meta:
model = Strike
fields = ('description', 'company', 'start_date', 'all_day', 'end_date', 'region', 'source_link', 'submitter')
widgets = {'description': Textarea(attrs={'cols': 60, 'rows': 10}), 'submitter': HiddenInput()}
def clean(self):
cleaned_data = self.cleaned_data
all_day = cleaned_data.get("all_day")
end_date = cleaned_data.get("end_date")
if not all_day and not end_date: # If it isn't an "all day" event, we must have an end_date
msg = u"Must have an end date or mark as an 'all day' event."
self._errors["all_day"] = self.error_class([msg])
self._errors["end_date"] = self.error_class([msg])
# Clear invalid data
del cleaned_data["all_day"]
del cleaned_data["end_date"]
if all_day:
cleaned_data["end_date"] = cleaned_data["start_date"]
return cleaned_data
```
#### File: hojehatransportes/social_auth_extra/sapo.py
```python
import json
from social.backends.oauth import BaseOAuth1
# Sapo configuration
SAPO_SERVER = 'id.sapo.pt'
SAPO_REQUEST_TOKEN_URL = 'https://%s/oauth/request_token' % SAPO_SERVER
SAPO_ACCESS_TOKEN_URL = 'https://%s/oauth/access_token' % SAPO_SERVER
# Note: oauth/authorize forces the user to authorize every time.
# oauth/authenticate uses their previous selection, barring revocation.
SAPO_AUTHORIZATION_URL = 'http://%s/oauth/authenticate' % SAPO_SERVER
SAPO_CHECK_AUTH = 'https://services.sapo.pt/SSO/GetPublicProfile'
class SapoOAuth(BaseOAuth1):
"""Sapo OAuth authentication backend"""
name = 'sapo'
EXTRA_DATA = [('id', 'id')]
AUTHORIZATION_URL = SAPO_AUTHORIZATION_URL
REQUEST_TOKEN_URL = SAPO_REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = SAPO_ACCESS_TOKEN_URL
SERVER_URL = SAPO_SERVER
SETTINGS_KEY_NAME = 'SAPO_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'SAPO_CONSUMER_SECRET'
def get_user_details(self, response):
"""Return user details from Sapo account"""
return {'username': response['screen_name'],
'email': '', # not supplied
'fullname': response['name'],
'first_name': response['name'],
'last_name': ''}
def user_data(self, access_token):
"""Return user data provided"""
request = self.oauth_request(access_token, SAPO_CHECK_AUTH)
response = self.fetch_response(request)
try:
return json.loads(response)
except json.JSONDecodeError:
return None
# Backend definition
BACKENDS = {
'sapo': SapoOAuth,
}
``` |
{
"source": "jpgrayson/dot-files",
"score": 3
} |
#### File: jpgrayson/dot-files/install.py
```python
from pathlib import Path
import argparse
import hashlib
import os
import shutil
HERE = Path(__file__).parent.absolute()
EXCLUDE = [
HERE / 'install.py',
HERE / 'README.md',
HERE / 'LICENSE.md',
HERE / '.git',
HERE / '.gitmodules',
HERE / '.gitignore',
]
def hash_digest(path):
return hashlib.md5(path.read_bytes()).hexdigest()
def identical(path0, path1):
return hash_digest(path0) == hash_digest(path1)
def link(src, dst):
target = Path(os.path.relpath(src, os.path.dirname(dst)))
os.symlink(target, dst)
print(f'{dst}: linked to {target}')
def install(src_dir, dst_dir):
if not dst_dir.exists():
print(f'{dst_dir}: mkdir')
dst_dir.mkdir(parents=True)
make_links(src_dir, dst_dir)
elif dst_dir.is_symlink():
print(f'{dst_dir}: is symlink, skipping')
elif dst_dir.is_dir():
make_links(src_dir, dst_dir)
else:
print(f'{dst_dir}: exists but is not dir')
def make_links(src_dir, dst_dir):
for src in src_dir.iterdir():
if src in EXCLUDE:
continue
dst = dst_dir / src.relative_to(src_dir)
if src.is_dir():
install(src, dst)
elif not dst.exists():
link(src, dst)
elif dst.samefile(src):
print(f'{dst}: already linked')
elif dst.is_file():
if identical(dst, src):
dst.unlink()
link(src, dst)
else:
print(f'{dst}: differs, not messing')
else:
print(f'{dst}: exists but is not file')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'dst',
metavar='DIR',
nargs='?',
default=Path.home(),
type=Path,
help='Installation destination. Default: %(default)s',
)
args = parser.parse_args()
install(HERE, args.dst)
if __name__ == '__main__':
main()
``` |
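`install.py` always creates relative symlinks: `link()` computes the target with `os.path.relpath` against the link's parent directory, so the linked tree keeps working if the whole checkout is moved. A self-contained sketch of that computation; the paths below are temporary stand-ins, not the repository's real layout:

```python
import os
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    src = Path(tmp) / "dotfiles" / ".vimrc"  # file inside the checkout
    dst = Path(tmp) / "home" / ".vimrc"      # where the symlink should live
    src.parent.mkdir(parents=True)
    dst.parent.mkdir(parents=True)
    src.write_text("set number\n")

    # Same computation as link(): target expressed relative to dst's parent.
    target = Path(os.path.relpath(src, os.path.dirname(dst)))
    os.symlink(target, dst)

    print(os.readlink(dst))   # ../dotfiles/.vimrc
    print(dst.samefile(src))  # True
```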
{
"source": "jpgualdarrama/pokescrape",
"score": 3
} |
#### File: jpgualdarrama/pokescrape/parse.py
```python
import os.path
import urllib.request
# import urllib3
from html.parser import HTMLParser
from pk import *
from pokemon import *
from move import *
from ability import *
nameMap = {}
baseExpYields = [0 for i in range(0, num_pokemon + 1)]
colors = [0 for i in range(0, num_pokemon + 1)]
bodyStyles = [0 for i in range(0, num_pokemon + 1)]
def Init():
# http = urllib3
loadNames()
loadBaseExpYields()
loadColors()
loadBodyStyles()
loadMoves()
loadAbilities()
def loadNames():
source = open('input_files/names.txt')
for line in source:
pieces = line.split('\t')
name = pieces[1].strip().lower()
nameMap[name] = int(pieces[0].strip())
source.close()
def loadBaseExpYields():
source = open('input_files/baseexpyields.txt')
for line in source:
pieces = line.split('\t')
name = pieces[2].strip().lower()
baseExpYields[nameMap[name]] = int(pieces[3].strip())
source.close()
def loadColors():
source = open('input_files/colors.txt')
cur_color = 0
for line in source:
if line.strip().isdigit():
cur_color = int(line.strip())
else:
pieces = line.split('\t')
for piece in pieces:
name = piece.strip().lower()
if name in nameMap:
colors[nameMap[name]] = cur_color
source.close()
def loadBodyStyles():
source = open('input_files/bodystyles.txt')
cur_style = 0
for line in source:
if line.strip().isdigit():
cur_style = int(line.strip())
else:
pieces = line.split('\t')
name = pieces[1].strip().lower()
if name in nameMap:
bodyStyles[nameMap[name]] = cur_style
source.close()
def loadMoves():
source = open('input_files/moves.txt')
for line in source:
pieces = line.split('\t')
move = Move()
move.name = pieces[0]
if pieces[1] == 'NOR':
move.type = 1
elif pieces[1] == 'FIG':
move.type = 2
elif pieces[1] == 'FLY':
move.type = 3
elif pieces[1] == 'POI':
move.type = 4
elif pieces[1] == 'GRO':
move.type = 5
elif pieces[1] == 'ROC':
move.type = 6
elif pieces[1] == 'BUG':
move.type = 7
elif pieces[1] == 'GHO':
move.type = 8
elif pieces[1] == 'STE':
move.type = 9
elif pieces[1] == 'FIR':
move.type = 10
elif pieces[1] == 'WAT':
move.type = 11
elif pieces[1] == 'GRA':
move.type = 12
elif pieces[1] == 'ELE':
move.type = 13
elif pieces[1] == 'PSY':
move.type = 14
elif pieces[1] == 'ICE':
move.type = 15
elif pieces[1] == 'DRA':
move.type = 16
elif pieces[1] == 'DAR':
move.type = 17
elif pieces[1] == 'FAI':
move.type = 18
else:
print('Failed to parse move type \'%s\'' % pieces[1])
move.pp = int(pieces[2])
move.power = 0 if pieces[3] == '-' else int(pieces[3])
move.accuracy = 0 if pieces[4] == '-' else int(pieces[4])
if pieces[5] == 'No Damage.':
move.category = 3
move.damage = 0
else:
if 'Physical Attack' in pieces[5]:
move.category = 1
elif 'Special Attack' in pieces[5]:
move.category = 2
else:
print('Failed to parse category \'%s\'' % pieces[5])
if 'Sp.Atk' in pieces[5]:
if 'Sp.Def' in pieces[5]:
move.damage = 4
elif 'Def' in pieces[5]:
move.damage = 2
else:
print('Failed to parse damage \'%s\'' % pieces[5])
elif 'Atk' in pieces[5]:
if 'Sp.Def' in pieces[5]:
move.damage = 3
elif 'Def' in pieces[5]:
move.damage = 1
else:
print('Failed to parse damage \'%s\'' % pieces[5])
else:
print('Failed to parse damage \'%s\'' % pieces[5])
move.description = pieces[6].strip()
moves_map[move.name.lower()] = len(moves)
moves.append(move)
source.close()
def loadAbilities():
source = open('input_files/abilities.txt', encoding='utf8')
for line in source:
pieces = line.split('\t')
ability = Ability()
ability.name = pieces[0]
ability.description = pieces[1].strip()
abilities_map[ability.name.lower()] = len(abilities)
abilities.append(ability)
source.close()
def GetAndParse(number, force = False):
if not os.path.exists('cache'):
os.makedirs('cache')
suffix = '%03i.shtml' % number
path = 'cache/' + suffix
if force == True or os.path.isfile(path) == False:
url = 'http://www.serebii.net/pokedex-xy/' + suffix
print('Fetching \'%s\' to \'%s\'' % (url, path))
data = urllib.request.urlopen(url)
# data = http.request.urlopen(url)
out = open(path, 'wb')
out.write(data.read().decode('ISO-8859-1').replace('Ã©', '\u00E9').encode('utf-8'))
print('Using newly-fetched file \'%s\'' % path)
else:
print('Using already-fetched file \'%s\'' % path)
source = open(path, 'r', encoding='utf8')
parser = PokemonParser()
parser.feed(source.read())
source.close()
parser.pokemon.color = colors[parser.pokemon.national_dex_number]
parser.pokemon.body_style = bodyStyles[parser.pokemon.national_dex_number]
parser.pokemon.base_exp_yield = baseExpYields[parser.pokemon.national_dex_number]
print('Done parsing \'%s\'' % path)
return parser.pokemon
class PokemonParser(HTMLParser):
def __init__(self):
super().__init__(convert_charrefs=True)
self.pokemon = Pokemon()
self.cur_fooinfo = 0
self.td_cur_level = 0
self.fooinfo_enter_level = -1
self.fooinfo_cur_td = 0
self.cen_enter_level = -1
self.cen_cur_td = 0
self.is_bold = False
self.is_evo = False
self.is_font = False
self.fooinfo_temp = 0
def handle_starttag(self, tag, attrs):
if tag == 'td':
if ('class', 'fooinfo') in attrs:
if self.fooinfo_enter_level != -1:
self.cur_fooinfo += 1
self.fooinfo_cur_td = 0
self.fooinfo_temp = 0
self.fooinfo_enter_level = self.td_cur_level
if ('class', 'cen') in attrs:
if self.cen_enter_level != -1:
self.cen_cur_td = 0
self.cen_enter_level = self.td_cur_level
self.td_cur_level += 1
# Parse types out of links
if tag == 'a':
if self.cen_enter_level != -1:
if self.cur_fooinfo == 5:
ptype = PkType[attrs[0][1][12:-6]]
if self.pokemon.types[0] == 0:
self.pokemon.types = (ptype, 0)
else:
self.pokemon.types = (self.pokemon.types[0], ptype)
self.is_bold = tag == 'b'
self.is_evo = ('class', 'fooevo') in attrs
self.is_font = tag == 'font'
def handle_endtag(self, tag):
if tag == 'td':
self.td_cur_level -= 1
if self.fooinfo_enter_level != -1:
self.fooinfo_cur_td += 1
if self.fooinfo_enter_level == self.td_cur_level:
self.cur_fooinfo += 1
self.fooinfo_enter_level = -1
self.fooinfo_cur_td = 0
self.fooinfo_temp = 0
if self.cen_enter_level != -1:
self.cen_cur_td += 1
if self.cen_enter_level == self.td_cur_level:
self.cen_enter_level = -1
self.cen_cur_td = 0
def handle_data(self, data):
if self.is_evo:
if data == 'Locations':
self.cur_fooinfo = 18
elif data == 'Flavor Text':
self.cur_fooinfo = 50
elif data == 'Generation VI Level Up':
self.cur_fooinfo = 100
elif data == 'TM & HM Attacks':
self.cur_fooinfo = 400
elif data == 'Egg Moves ':
self.cur_fooinfo = 500
elif data == 'Move Tutor Attacks':
self.cur_fooinfo = 600
elif data == 'Omega Ruby/Alpha Sapphire Move Tutor Attacks':
self.cur_fooinfo = 700
elif data == 'Special Moves':
self.cur_fooinfo = 800
elif data == 'Pre-Evolution Only Moves':
self.cur_fooinfo = 900
elif data == 'Transfer Only Moves ':
self.cur_fooinfo = 1000
if self.is_font:
if data == 'X & Y Level Up':
self.cur_fooinfo = 200
elif data == '\u03A9R\u03B1S Level Up':
self.cur_fooinfo = 300
if self.is_bold:
if data == 'Stats':
self.cur_fooinfo = 1100
if self.fooinfo_enter_level != -1:
# 'Name'
if self.cur_fooinfo == 1:
self.pokemon.name = data
# 'No.'
elif self.cur_fooinfo == 3:
if self.fooinfo_cur_td == 1:
self.pokemon.national_dex_number = int(data[1:])
# 'Gender Ratio'
elif self.cur_fooinfo == 4:
if 'is Genderless' in data:
self.pokemon.gender_threshold = PkGender['0:0']
elif self.fooinfo_cur_td == 1:
if data == '0%':
self.pokemon.gender_threshold = PkGender['0:1']
elif data == '12.5%':
self.pokemon.gender_threshold = PkGender['1:7']
elif data == '25%':
self.pokemon.gender_threshold = PkGender['1:3']
elif data == '50%':
self.pokemon.gender_threshold = PkGender['1:1']
elif data == '75%':
self.pokemon.gender_threshold = PkGender['3:1']
elif data == '87.5%':
self.pokemon.gender_threshold = PkGender['7:1']
elif data == '100%':
self.pokemon.gender_threshold = PkGender['1:0']
else:
print('Failed to parse gender ratio \'%s\'' % data)
# 'Classification'
elif self.cur_fooinfo == 5:
self.pokemon.species = data[:-8]
# 'Height'
elif self.cur_fooinfo == 6:
if 'm' in data:
self.pokemon.height = float(data.strip()[:-1])
# 'Weight'
elif self.cur_fooinfo == 7:
if 'kg' in data:
self.pokemon.weight = float(data.strip()[:-2])
# 'Capture Rate'
elif self.cur_fooinfo == 8:
if data != '(XY)' and data != '(\u03A9R\u03B1S)':
self.pokemon.catch_rate = int(data)
# 'Base Egg Steps'
elif self.cur_fooinfo == 9:
if data != '\xa0':
self.pokemon.hatch_counter = int(data.replace(',', '')) // 255
# 'Abilities'
elif self.cur_fooinfo == 10:
if self.is_bold:
if self.fooinfo_temp % 2 == 0:
if data == 'Hidden Ability':
self.fooinfo_temp = 4
else:
data = data.strip().lower()
if self.fooinfo_temp == 0:
self.pokemon.abilities = (abilities_map[data], 0, 0)
elif self.fooinfo_temp == 2:
self.pokemon.abilities = (self.pokemon.abilities[0], abilities_map[data], 0)
elif self.fooinfo_temp == 6:
self.pokemon.abilities = (self.pokemon.abilities[0], self.pokemon.abilities[1], abilities_map[data])
self.fooinfo_temp += 1
# 'Experience Growth'
elif self.cur_fooinfo == 11:
if not 'Points' in data:
if data == 'Slow':
self.pokemon.exp_group = PkExpGroup['slow']
elif data == 'Medium Slow':
self.pokemon.exp_group = PkExpGroup['mediumslow']
elif data == 'Medium Fast':
self.pokemon.exp_group = PkExpGroup['mediumfast']
elif data == 'Fast':
self.pokemon.exp_group = PkExpGroup['fast']
elif data == 'Erratic':
self.pokemon.exp_group = PkExpGroup['erratic']
elif data == 'Fluctuating':
self.pokemon.exp_group = PkExpGroup['fluctuating']
else:
print('Failed to parse experience group \'%s\'' % data)
# 'Base Happiness'
elif self.cur_fooinfo == 12:
self.pokemon.base_friendship = int(data)
# 'Effort Values Earned'
elif self.cur_fooinfo == 13:
n = int(data[:1])
y = self.pokemon.ev_yield
if 'HP' in data:
self.pokemon.ev_yield = (n, y[1], y[2], y[3], y[4], y[5])
elif 'Sp. Attack' in data:
self.pokemon.ev_yield = (y[0], y[1], y[2], n, y[4], y[5])
elif 'Sp. Defense' in data:
self.pokemon.ev_yield = (y[0], y[1], y[2], y[3], n, y[5])
elif 'Attack' in data:
self.pokemon.ev_yield = (y[0], n, y[2], y[3], y[4], y[5])
elif 'Defense' in data:
self.pokemon.ev_yield = (y[0], y[1], n, y[3], y[4], y[5])
elif 'Speed' in data:
self.pokemon.ev_yield = (y[0], y[1], y[2], y[3], y[4], n)
else:
print('Failed to parse EV yield \'%s\'' % data)
# 'Egg Groups'
elif self.cur_fooinfo == 15:
data = data.strip().lower()
if 'cannot breed' in data:
self.pokemon.egg_groups = (PkEggGroup['undiscovered'], 0)
elif data == 'ditto':
if self.pokemon.national_dex_number == 132:
self.pokemon.egg_groups = (PkEggGroup['ditto'], 0)
elif data != '':
if data in PkEggGroup:
group = PkEggGroup[data]
if self.pokemon.egg_groups[0] == 0:
self.pokemon.egg_groups = (group, 0)
elif self.pokemon.egg_groups[0] != group:
self.pokemon.egg_groups = (self.pokemon.egg_groups[0], group)
# 'Flavor Text' (X)
elif self.cur_fooinfo == 50:
self.pokemon.pokedex_x = data
# XXX Compensate for Serebii's double closing tags at the end of pokedex entries
self.td_cur_level += 1
# 'Flavor Text' (Y)
elif self.cur_fooinfo == 51:
self.pokemon.pokedex_y = data
# XXX Compensate for Serebii's double closing tags at the end of pokedex entries
self.td_cur_level += 1
# 'Flavor Text' (OR/AS?)
elif self.cur_fooinfo == 52:
self.pokemon.pokedex_or = data
self.pokemon.pokedex_as = data
# XXX Compensate for Serebii's double closing tags at the end of pokedex entries
self.td_cur_level += 1
# 'Flavor Text' (AS)
elif self.cur_fooinfo == 53:
self.pokemon.pokedex_as = data
# XXX Compensate for Serebii's double closing tags at the end of pokedex entries
self.td_cur_level += 1
# 'Gen VI Level Up'
elif self.cur_fooinfo >= 100 and self.cur_fooinfo < 200:
data = data.strip()
index = (self.cur_fooinfo - 100) // 3
offset = (self.cur_fooinfo - 100) % 3
if offset == 0:
level = 0 if data == '\u2014' else int(data)
self.pokemon.learnset_level_xy.append(level)
self.pokemon.learnset_level_oras.append(level)
elif offset == 1:
self.pokemon.learnset_level_xy.append((self.pokemon.learnset_level_xy.pop(), moves_map[data.lower()]))
self.pokemon.learnset_level_oras.append((self.pokemon.learnset_level_oras.pop(), moves_map[data.lower()]))
elif offset == 2:
self.cur_fooinfo = 100 + offset
# 'X & Y Level Up'
elif self.cur_fooinfo >= 200 and self.cur_fooinfo < 300:
data = data.strip()
index = (self.cur_fooinfo - 200) // 3
offset = (self.cur_fooinfo - 200) % 3
if offset == 0:
level = 0 if data == '\u2014' else int(data)
self.pokemon.learnset_level_xy.append(level)
elif offset == 1:
self.pokemon.learnset_level_xy.append((self.pokemon.learnset_level_xy.pop(), moves_map[data.lower()]))
elif offset == 2:
self.cur_fooinfo = 200 + offset
# 'ORaS Level Up'
elif self.cur_fooinfo >= 300 and self.cur_fooinfo < 400:
data = data.strip()
index = (self.cur_fooinfo - 300) // 3
offset = (self.cur_fooinfo - 300) % 3
if offset == 0:
level = 0 if data == '\u2014' else int(data)
self.pokemon.learnset_level_oras.append(level)
elif offset == 1:
self.pokemon.learnset_level_oras.append((self.pokemon.learnset_level_oras.pop(), moves_map[data.lower()]))
elif offset == 2:
self.cur_fooinfo = 300 + offset
# 'TM & HM Attacks'
elif self.cur_fooinfo >= 400 and self.cur_fooinfo < 500:
data = data.strip()
index = (self.cur_fooinfo - 400) // 3
offset = (self.cur_fooinfo - 400) % 3
if offset == 1:
self.pokemon.learnset_machine.append(moves_map[data.lower()])
elif offset == 2:
self.cur_fooinfo = 400 + offset
# 'Egg Moves'
elif self.cur_fooinfo >= 500 and self.cur_fooinfo < 600:
data = data.strip()
index = (self.cur_fooinfo - 500) // 9
offset = (self.cur_fooinfo - 500) % 9
if offset == 0:
self.pokemon.learnset_egg_move.append(moves_map[data.lower()])
if data != 'Volt Tackle':
self.cur_fooinfo += 7
elif offset == 8:
self.cur_fooinfo = 500 + offset
# 'Move Tutor Attacks'
elif self.cur_fooinfo >= 600 and self.cur_fooinfo < 700:
data = data.strip()
index = (self.cur_fooinfo - 600) // 8
offset = (self.cur_fooinfo - 600) % 8
if offset == 0:
self.pokemon.learnset_tutor.append(moves_map[data.lower()])
elif offset == 7:
self.cur_fooinfo = 600 + offset
# 'Omega Ruby/Alpha Sapphire Move Tutor Attacks'
elif self.cur_fooinfo >= 700 and self.cur_fooinfo < 800:
data = data.strip()
index = (self.cur_fooinfo - 700) // 8
offset = (self.cur_fooinfo - 700) % 8
if offset == 0:
self.pokemon.learnset_tutor.append(moves_map[data.lower()])
elif offset == 7:
self.cur_fooinfo = 700 + offset
# 'Special Moves'
elif self.cur_fooinfo >= 800 and self.cur_fooinfo < 900:
data = data.strip()
index = (self.cur_fooinfo - 800) // 9
offset = (self.cur_fooinfo - 800) % 9
if offset == 0:
self.pokemon.learnset_special.append(moves_map[data.lower()])
elif offset == 8:
self.cur_fooinfo = 800 + offset
# 'Pre-Evolution Only Moves'
elif self.cur_fooinfo >= 900 and self.cur_fooinfo < 1000:
data = data.strip()
index = (self.cur_fooinfo - 900) // 3
offset = (self.cur_fooinfo - 900) % 3
if offset == 0:
self.pokemon.learnset_evolve.append(moves_map[data.lower()])
elif offset == 2:
self.cur_fooinfo = 900 + offset
# 'Transfer Only Moves'
elif self.cur_fooinfo >= 1000 and self.cur_fooinfo < 1100:
data = data.strip()
index = (self.cur_fooinfo - 1000) // 2
offset = (self.cur_fooinfo - 1000) % 2
if offset == 0:
self.pokemon.learnset_transfer.append(moves_map[data.lower()])
elif offset == 1:
self.cur_fooinfo = 1000 + offset
# 'Stats'
elif self.cur_fooinfo >= 1101 and self.cur_fooinfo < 1107:
b = self.pokemon.base_stats
index = self.cur_fooinfo - 1101
n = int(data)
if index == 0:
self.pokemon.base_stats = (n, b[1], b[2], b[3], b[4], b[5])
elif index == 1:
self.pokemon.base_stats = (b[0], n, b[2], b[3], b[4], b[5])
elif index == 2:
self.pokemon.base_stats = (b[0], b[1], n, b[3], b[4], b[5])
elif index == 3:
self.pokemon.base_stats = (b[0], b[1], b[2], n, b[4], b[5])
elif index == 4:
self.pokemon.base_stats = (b[0], b[1], b[2], b[3], n, b[5])
elif index == 5:
self.pokemon.base_stats = (b[0], b[1], b[2], b[3], b[4], n)
```
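`PokemonParser` works without a DOM: it tracks how deeply it is nested inside `<td>` tags and counts cells of class `fooinfo` so that `handle_data` knows which Pokedex field the current text belongs to. The stripped-down parser below shows the same counting pattern on a made-up table row:

```python
from html.parser import HTMLParser

class CellCounter(HTMLParser):
    def __init__(self):
        super().__init__(convert_charrefs=True)
        self.td_depth = 0
        self.cells = []

    def handle_starttag(self, tag, attrs):
        if tag == "td":
            self.td_depth += 1
            if ("class", "fooinfo") in attrs:
                self.cells.append("")  # start collecting a new cell

    def handle_endtag(self, tag):
        if tag == "td":
            self.td_depth -= 1

    def handle_data(self, data):
        if self.td_depth > 0 and self.cells:
            self.cells[-1] += data.strip()

parser = CellCounter()
parser.feed('<tr><td class="fooinfo">Bulbasaur</td><td class="fooinfo">Seed</td></tr>')
print(parser.cells)  # ['Bulbasaur', 'Seed']
```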
#### File: jpgualdarrama/pokescrape/pokemon.py
```python
from pk import *
from move import *
from ability import *
class Pokemon:
def __init__(self):
self.national_dex_number = 0
self.name = 'None'
self.species = 'None'
self.types = (0, 0)
self.abilities = (0, 0, 0)
self.gender_threshold = 0
self.catch_rate = 0
self.egg_groups = (0, 0)
self.hatch_counter = 0
self.height = 0.0
self.weight = 0.0
self.base_exp_yield = 0
self.base_friendship = 0
self.exp_group = 0
self.ev_yield = (0, 0, 0, 0, 0, 0)
self.body_style = 0
self.color = 0
self.base_stats = (0, 0, 0, 0, 0, 0)
self.pokedex_x = 'None'
self.pokedex_y = 'None'
self.pokedex_or = 'None'
self.pokedex_as = 'None'
self.learnset_level_xy = []
self.learnset_level_oras = []
self.learnset_machine = []
self.learnset_egg_move = []
self.learnset_tutor = []
self.learnset_special = []
self.learnset_evolve = []
self.learnset_transfer = []
def __str__(self):
return ('Dex #: %i' % self.national_dex_number +
'\nName: %s' % self.name +
'\nSpecies: %s' % self.species +
'\nTypes: %i/%i (%s/%s)' % (self.types[0], self.types[1], PkIType[self.types[0]], PkIType[self.types[1]]) +
'\nAbilities: %i/%i/%i (%s/%s/%s)' % (self.abilities[0], self.abilities[1], self.abilities[2], abilities[self.abilities[0]].name, abilities[self.abilities[1]].name, abilities[self.abilities[2]].name) +
'\nGender Threshold: %i (%s)' % (self.gender_threshold, PkIGender[self.gender_threshold]) +
'\nCatch Rate: %i' % self.catch_rate +
'\nEgg Groups: %i/%i (%s/%s)' % (self.egg_groups[0], self.egg_groups[1], PkIEggGroup[self.egg_groups[0]], PkIEggGroup[self.egg_groups[1]]) +
'\nHatch Counter: %i' % self.hatch_counter +
'\nHeight: %f' % self.height +
'\nWeight: %f' % self.weight +
'\nBase Exp Yield: %i' % self.base_exp_yield +
'\nBase Friendship: %i' % self.base_friendship +
'\nExp Group: %i (%s)' % (self.exp_group, PkIExpGroup[self.exp_group]) +
'\nEV Yield: %i/%i/%i/%i/%i/%i' % self.ev_yield +
'\nBody Style: %i' % self.body_style +
'\nColor: %i (%s)' % (self.color, PkIColor[self.color]) +
'\nBase Stats: %i/%i/%i/%i/%i/%i' % self.base_stats +
'\nPokedex Entry (X): %s' % self.pokedex_x +
'\nPokedex Entry (Y): %s' % self.pokedex_y +
'\nPokedex Entry (OR): %s' % self.pokedex_or +
'\nPokedex Entry (AS): %s' % self.pokedex_as +
'\nLearnset (XY): %s' % str(self.learnset_level_xy) +
'\nLearnset (ORAS): %s' % str(self.learnset_level_oras) +
'\nLearnset (TM/HM): %s' % str(self.learnset_machine) +
'\nLearnset (Egg Move): %s' % str(self.learnset_egg_move) +
'\nLearnset (Tutor): %s' % str(self.learnset_tutor) +
'\nLearnset (Special): %s' % str(self.learnset_special) +
'\nLearnset (Pre-evo): %s' % str(self.learnset_evolve) +
'\nLearnset (Transfer): %s' % str(self.learnset_transfer))
``` |
{
"source": "jph00/SimpleSQLite",
"score": 2
} |
#### File: SimpleSQLite/test/test_func.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from simplesqlite import (
append_table,
copy_table,
connect_sqlite_memdb,
InvalidAttributeNameError,
InvalidTableNameError,
NullDatabaseConnectionError,
)
from simplesqlite._func import (
validate_attr_name,
validate_table_name,
)
from .fixture import (
TEST_TABLE_NAME,
con_mix,
con_ro,
con_profile,
con_null,
con_empty,
)
class Test_validate_table_name(object):
@pytest.mark.parametrize(["value"], [
["valid_table_name"],
["table_"],
])
def test_normal(self, value):
validate_table_name(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, InvalidTableNameError],
["", InvalidTableNameError],
["table", InvalidTableNameError],
["TABLE", InvalidTableNameError],
["Table", InvalidTableNameError],
["%hoge", InvalidTableNameError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
validate_table_name(value)
class Test_validate_attr_name(object):
@pytest.mark.parametrize(["value"], [
["valid_attr_name"],
["attr_"],
])
def test_normal(self, value):
validate_attr_name(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, InvalidAttributeNameError],
["", InvalidAttributeNameError],
["table", InvalidAttributeNameError],
["TABLE", InvalidAttributeNameError],
["Table", InvalidAttributeNameError],
["%hoge", InvalidAttributeNameError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
validate_attr_name(value)
class Test_append_table(object):
def test_normal(self, con_mix, con_empty):
assert append_table(
src_con=con_mix, dst_con=con_empty, table_name=TEST_TABLE_NAME)
result = con_mix.select(select="*", table_name=TEST_TABLE_NAME)
src_data_matrix = result.fetchall()
result = con_empty.select(select="*", table_name=TEST_TABLE_NAME)
dst_data_matrix = result.fetchall()
assert src_data_matrix == dst_data_matrix
assert append_table(
src_con=con_mix, dst_con=con_empty, table_name=TEST_TABLE_NAME)
result = con_mix.select(select="*", table_name=TEST_TABLE_NAME)
src_data_matrix = result.fetchall()
result = con_empty.select(select="*", table_name=TEST_TABLE_NAME)
dst_data_matrix = result.fetchall()
assert src_data_matrix * 2 == dst_data_matrix
def test_exception_mismatch_schema(self, con_mix, con_profile):
with pytest.raises(ValueError):
append_table(
src_con=con_mix, dst_con=con_profile,
table_name=TEST_TABLE_NAME)
def test_exception_null_connection(self, con_mix, con_null):
with pytest.raises(NullDatabaseConnectionError):
append_table(
src_con=con_mix, dst_con=con_null, table_name=TEST_TABLE_NAME)
def test_exception_permission(self, con_mix, con_ro):
with pytest.raises(IOError):
append_table(
src_con=con_mix, dst_con=con_ro, table_name=TEST_TABLE_NAME)
class Test_copy_table(object):
def test_normal(self, con_mix, con_empty):
assert copy_table(
src_con=con_mix, dst_con=con_empty,
src_table_name=TEST_TABLE_NAME, dst_table_name="dst")
result = con_mix.select(select="*", table_name=TEST_TABLE_NAME)
src_data_matrix = result.fetchall()
result = con_empty.select(select="*", table_name="dst")
dst_data_matrix = result.fetchall()
assert src_data_matrix == dst_data_matrix
assert not copy_table(
src_con=con_mix, dst_con=con_empty,
src_table_name=TEST_TABLE_NAME, dst_table_name="dst",
is_overwrite=False)
assert copy_table(
src_con=con_mix, dst_con=con_empty,
src_table_name=TEST_TABLE_NAME, dst_table_name="dst",
is_overwrite=True)
class Test_connect_sqlite_db_mem(object):
def test_normal(self):
con_mem = connect_sqlite_memdb()
assert con_mem is not None
assert con_mem.database_path == ":memory:"
``` |
{
"source": "jph425/py_template",
"score": 3
} |
#### File: py_template/tests/test_package.py
```python
from package_name.source import hi
import pytest
def test_pkg():
assert True
@pytest.mark.parametrize("tuple1,tuple2", [
("2", "10"),
("1", "-4"),
])
def test_multiple_things(tuple1, tuple2):
assert hi(tuple1, tuple2) is True
@pytest.mark.skip(reason="I haven't implemented this in the module yet.")
def test_doesnt_work():
assert False
@pytest.mark.xfail
def test_div_by_zero():
assert 1/0 == 1
def test_invalid_input():
with pytest.raises(TypeError):
hi("bad input", 3.1415)
def test_something(capture_stdout):
# This shouldn't go to stdout, because we monkeypatch it.
# This lets us test the string sent to stdout.
print("test")
assert capture_stdout["stdout"] == "test\n"
``` |
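`test_something` relies on a `capture_stdout` fixture that is not defined in this file; in the template it would live in `conftest.py`. One plausible way to write it with pytest's `monkeypatch`, shown here as a sketch rather than the template's actual fixture:

```python
# conftest.py (sketch)
import sys
import pytest

@pytest.fixture
def capture_stdout(monkeypatch):
    buffer = {"stdout": "", "write_calls": 0}

    def fake_write(text):
        buffer["stdout"] += text
        buffer["write_calls"] += 1

    # Redirect sys.stdout.write into the buffer for the duration of the test.
    monkeypatch.setattr(sys.stdout, "write", fake_write)
    return buffer
```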
{
"source": "jph425/terminal_tattoo",
"score": 2
} |
#### File: jph425/terminal_tattoo/terminal_tattoo.py
```python
from PIL import Image, ImageDraw, ImageFont
import argparse
import logging
import tempfile
from os import path, get_terminal_size
from sys import exit
import pprint
import re
### SOME REASONABLE DEFAULTS
DEFAULT_FONT = '/System/Library/Fonts/Menlo.ttc'
DEFAULT_SIZE = 45
DEFAULT_POS = 'tR' # top right corner
DEFAULT_FGC = 'fK' # black
DEFAULT_BGC = 'bW' # white
### I USE THESE IN A BUNCH OF PLACES AND I DON'T WANT TO KEEP TYPING IT
POSITION_CODES = ['pT', 'pTL', 'pTR', 'pB', 'pBL', 'pBR', 'pC', 'pL', 'pR']
### EMPIRICAL LINEAR TRANSFORMATION FROM CHARACTER GRID TO PIXELS
RETINA_HCELL_PIXELS = 14
RETINA_VCELL_PIXELS = 28
RETINA_H_OFFSET = 20
RETINA_V_OFFSET = 14
### EXIT CODES
EXIT_INVALID_FG = -1
EXIT_INVALID_BG = -2
EXIT_DOES_NOT_FIT = -3
EXIT_CATASTROPHIC_ERROR = -10
##############################################################################
# logging stuff:
##############################################################################
class ColorizingStreamHandler(logging.StreamHandler):
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
# noinspection PyBroadException
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
# color names to partial ANSI code (there'll be math later to make complete codes)
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# logging levels to (background, foreground, intense)
# NOT multi-platform!
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'black', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', True),
logging.CRITICAL: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params), 'm', message, self.reset))
return message
def output_colorized(self, message):
self.stream.write(message)
# End Class ColorizingStreamHandler
# Initialize logging objects
Logger = logging.getLogger("terminal_tattoo")
formatter = logging.Formatter('%(levelname)-10s: %(message)s')
handler = ColorizingStreamHandler()
handler.setFormatter(formatter)
Logger.addHandler(handler)
pp = pprint.PrettyPrinter(indent=4)
##############################################################################
# main:
##############################################################################
def main():
parser = config_parser()
args = parser.parse_args()
if args.verbose is None:
Logger.setLevel(logging.ERROR)
handler.setLevel(logging.ERROR)
else:
if args.verbose > 0:
Logger.setLevel(logging.WARNING)
handler.setLevel(logging.WARNING)
if args.verbose > 1:
Logger.setLevel(logging.INFO)
handler.setLevel(logging.INFO)
if args.verbose > 2:
Logger.setLevel(logging.DEBUG)
handler.setLevel(logging.DEBUG)
Logger.debug("parsed args:")
pp.pprint(args)
(c, l) = get_terminal_size()
(w, h) = get_terminal_pixel_size(c, l, RETINA_HCELL_PIXELS, RETINA_VCELL_PIXELS, RETINA_H_OFFSET, RETINA_V_OFFSET)
position = check_position(args)
fg_color = check_fg_color(args)
bg_color = check_bg_color(args)
out_path = check_output_file(args)
font = check_font(args)
text = ' '.join(args.text)
size = check_size(args)
alpha = check_alpha(args)
margin = check_margin(args)
render_base_image = create_image(w, h, rgb_to_rgba(html_to_888(bg_color), 255))
render_font = create_font(font, size)
(render_text_width, render_text_height) = get_text_dimensions(render_font, text)
if not fit_check(render_text_width, render_text_height, margin, w, h):
exit(EXIT_DOES_NOT_FIT)
anchor = get_text_anchor_pos(position, render_text_width, render_text_height, render_base_image.size[0], render_base_image.size[1], margin)
render_comp = composite_text(render_base_image, text, render_font, anchor, rgb_to_rgba(html_to_888(fg_color), alpha))
render_comp.save(out_path)
return
##############################################################################
# rendering functions:
##############################################################################
def create_font(font_name, size):
return ImageFont.truetype(font_name, size)
def create_image(w, h, blanking_color):
image = Image.new('RGBA', (w, h), blanking_color)
return image
def composite_text(base_image, text, font_obj, anchor, color_rgba):
text_img = Image.new('RGBA', base_image.size, (255,255,255,0))
drawing = ImageDraw.Draw(text_img)
drawing.text(anchor, text, font=font_obj, fill=color_rgba)
ret = Image.alpha_composite(base_image, text_img)
return ret
##############################################################################
# help, sanitize, re-format, safety:
##############################################################################
def rgb_to_rgba(rgb, a):
return (rgb[0], rgb[1], rgb[2], a)
def fit_check(render_text_width, render_text_height, margin, w, h):
m2 = 2 * margin
if h >= render_text_height + m2 and w >= render_text_width + m2:
return True
if h >= render_text_height + margin and w >= render_text_width + margin:
Logger.warning("this text just barely fits")
return True
return False
def get_terminal_dimensions():
ts = get_terminal_size()
columns = ts.columns
lines = ts.lines
Logger.debug("terminal character cell dimensions measured at ({}, {})".format(columns, lines))
return (columns, lines)
def get_terminal_pixel_size(columns, lines, h_pix, v_pix, h_offset=0, v_offset=0):
height = lines * v_pix + v_offset
width = columns * h_pix + h_offset
Logger.info("terminal dimensions: width: {} height: {}".format(width, height))
return (width, height)
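# Illustrative sizing example (the cell-pixel numbers here are made up; the
# real values come from the RETINA_* constants defined earlier in the script):
# an 80x24 terminal with 14 px wide, 28 px tall cells and zero offsets gives
#   width  = 80 * 14 = 1120 px
#   height = 24 * 28 =  672 px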
def sanitize_html_color(code):
m = re.match(r'[#]?([0-9a-fA-F]{6})', code)
Logger.debug("santized html code {} to {}".format(code, m.group(1)))
return m.group(1)
def validate_html_color(color):
ret = False
pattern = r'[#]?[0-9a-fA-F]{6}'
m = re.search(pattern, color)
if m.group(0) == color:
ret = True
return ret
def html_to_888(html_str):
pat = r'(?P<red>[0-9a-fA-F]{2})(?P<green>[0-9a-fA-F]{2})(?P<blue>[0-9a-fA-F]{2})'
m = re.match(pat, html_str)
r = hex_byte_to_int(m.group('red'))
g = hex_byte_to_int(m.group('green'))
b = hex_byte_to_int(m.group('blue'))
Logger.debug("converted color #{} to RGB888 ({}, {}, {})".format(html_str, r, g, b))
return (r,g,b)
def hex_byte_to_int(hexbyte_string):
return int(hexbyte_string, 16)
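# Quick sanity check for the two helpers above (comment only):
# html_to_888("FF8000") -> (255, 128, 0), and
# rgb_to_rgba((255, 128, 0), 200) -> (255, 128, 0, 200).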
def get_text_dimensions(font_obj, text):
(w, h) = font_obj.getsize(text)
Logger.debug("measured size of text (\'{}\') is ({}, {})".format(text, w, h))
return (w, h)
def get_text_anchor_pos(pos, text_w, text_h, image_w, image_h, margin=0):
"""the text anchor is (by default in ImageFont) the top left corner of the
bounding box. I see no reason to change this. The math is trivial when we
know the desired text location in the image, the image width and height,
and the text width and height."""
anchor_x = 0
anchor_y = 0
(centered_x, centered_y) = center_nested_frames(image_w, image_h, text_w, text_h)
far_x = image_w - (margin + text_w)
far_y = image_h - (margin + text_h)
if pos == 'pTL':
# top left corner: just apply margins
anchor_x = margin
anchor_y = margin
elif pos == 'pT':
# top center: margin in y, centered text in x
anchor_x = centered_x
anchor_y = margin
elif pos == 'pTR':
# top right corner: image_w - (margin + text_w), margin in y
anchor_x = far_x
anchor_y = margin
elif pos == 'pR':
# right center: image_w - (margin + text_w), center in y
anchor_x = far_x
anchor_y = centered_y
elif pos == 'pBR':
# bottom right corner: image_w - (margin + text_w), image_h - (margin + text_h)
anchor_x = far_x
anchor_y = far_y
elif pos == 'pB':
# bottom center: center in x, image_h - (margin + text_h) in y
anchor_x = centered_x
anchor_y = far_y
elif pos == 'pBL':
        # bottom left corner: margin in x, image_h - (margin + text_h) in y
anchor_x = margin
anchor_y = far_y
elif pos == 'pL':
# left center: margin in x, center in y
anchor_x = margin
anchor_y = centered_y
elif pos == 'pC':
# centered: center in x, center in y
anchor_x = centered_x
anchor_y = centered_y
else:
raise RuntimeError("Not sure how we got here, but this isn't a valid position {}".format(pos))
exit(EXIT_CATASTROPHIC_ERROR)
return (anchor_x, anchor_y)
def center_nested_frames(outer_w, outer_h, inner_w, inner_h):
# we can ignore the margin since it's symmetric, however other
# checks are still needed to ensure the margin isn't violated.
w = (outer_w / 2) - (inner_w / 2)
h = (outer_h / 2) - (inner_h / 2)
return (w, h)
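# Worked example (comment only): centering a 200x100 text block in an 800x600
# image gives center_nested_frames(800, 600, 200, 100) == (300.0, 250.0),
# which get_text_anchor_pos then uses as the top-left anchor for 'pC'.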
##############################################################################
# input validation functions:
##############################################################################
def check_position(args):
ret = DEFAULT_POS
position_args = POSITION_CODES
for p in position_args:
if p in args and getattr(args, p) == True:
ret = p
break
Logger.info("position will be {}".format(ret))
return ret
def check_fg_color(args):
ret = DEFAULT_FGC
skip_iter = False
if args.f is not None:
if validate_html_color(args.f):
Logger.debug("the detected bg color is {}".format(args.f))
ret = args.f
else:
Logger.error("invalid bg color format given, a 6-digit hex value is required (HTML format)")
exit(EXIT_INVALID_FG)
color_args = ['f', 'fR', 'fG', 'fB', 'fW', 'fK', 'fC', 'fM', 'fY', 'fg']
color_in_hex = {'fR': 'FF0000', 'fG': '00FF00', 'fB': '0000FF', 'fW': 'FFFFFF', 'fK': '000000', 'fC': '00FFFF', 'fM': 'FF00ff', 'fY': 'FFFF00', 'fg': 'A9A9A9'}
if not skip_iter:
for color in color_args:
if getattr(args, color) == True:
Logger.debug("the detected fg color is: {}".format(color))
ret = color_in_hex[color]
Logger.info("background color will be {}".format(ret))
ret = sanitize_html_color(ret)
return ret
def check_bg_color(args):
ret = DEFAULT_BGC
skip_iter = False
if args.b is not None:
if validate_html_color(args.b):
Logger.debug("the detected bg color is {}".format(args.b))
ret = args.b
else:
Logger.error("invalid bg color format given, a 6-digit hex value is required (HTML format)")
exit(EXIT_INVALID_BG)
color_args = ['bR', 'bG', 'bB', 'bW', 'bK', 'bC', 'bM', 'bY', 'bg']
color_in_hex = {'bR': 'FF0000', 'bG': '00FF00', 'bB': '0000FF', 'bW': 'FFFFFF', 'bK': '000000', 'bC': '00FFFF', 'bM': 'FF00ff', 'bY': 'FFFF00', 'bg': 'A9A9A9'}
if not skip_iter:
for color in color_args:
if getattr(args, color) == True:
Logger.debug("the detected bg color is: {}".format(color))
ret = color_in_hex[color]
Logger.info("background color will be {}".format(ret))
ret = sanitize_html_color(ret)
return ret
def check_alpha(args):
a = args.alpha
if a > 255:
a = 255
Logger.info("clamping alpha to 255")
elif a < 0:
a = 0
Logger.info("clamping alpha to 0 (what are you doing?)")
else:
Logger.info("alpha will be {}".format(a))
return a
def check_margin(args):
ret = args.margin
if ret is None:
return 0
MAX = 500
if ret < 0:
ret = 0
Logger.info("clamping margin to 0")
elif ret > MAX:
ret = MAX
Logger.info("clamping margin to {}} (what are you doing?)".format(MAX))
else:
Logger.info("margin will be {}".format(ret))
return ret
def check_output_file(args):
if not args.out_path:
fallback = tempfile.NamedTemporaryFile(dir='/tmp/')
fallback.close() # heh, gross hack to just get some random path
ret = fallback.name + '.png'
else:
ret = args.out_path
Logger.info("Output file for image is {}".format(ret))
return ret
def check_font(args):
ret = False
if args.font:
if path.exists(args.font):
(_, file_extension) = path.splitext(args.font)
if file_extension == '.ttf' or file_extension == '.ttc':
ret = True
if ret:
ret = args.font
else:
ret = DEFAULT_FONT
Logger.info("font will be {}".format(ret))
return ret
def check_size(args):
ret = DEFAULT_SIZE
if args.s:
ret = args.s
Logger.info("text will be point size {}".format(ret))
return ret
##############################################################################
# argparse:
##############################################################################
def config_parser():
parser = argparse.ArgumentParser(description='Render an image for a watermarked Terminal window.', \
epilog='Defaults to {}, size {}, black text on white, positioned in the top right corner.'.format(path.basename(DEFAULT_FONT), DEFAULT_SIZE))
parser.add_argument('text', type=str, nargs='+', help='Text to use in the watermark')
parser.add_argument('-s', metavar='POINTS', type=int, help='point size of the text')
parser.add_argument('--font', metavar='PATH', help='font to use for the watermark')
parser.add_argument('--verbose', '-v', action='count', help='verbose mode (can be repeated)', default=0)
parser.add_argument('-o', metavar='PATH', dest='out_path', help='output file for the rendered image')
parser.add_argument('--alpha', '-a', type=int, metavar='ALPHA', default=255, help='alpha value of text')
position = parser.add_mutually_exclusive_group()
position.add_argument('--pT', action='store_true', help='top center')
position.add_argument('--pTL', action='store_true', help='top left')
position.add_argument('--pTR', action='store_true', help='top right')
position.add_argument('--pB', action='store_true', help='bottom center')
position.add_argument('--pBL', action='store_true', help='bottom left')
position.add_argument('--pBR', action='store_true', help='bottom right')
position.add_argument('--pC', action='store_true', help='center')
position.add_argument('--pL', action='store_true', help='left center')
position.add_argument('--pR', action='store_true', help='right center')
parser.add_argument('--margin', type=int, metavar='PIXELS', default=25, help='no-text perimeter width')
fgColor = parser.add_mutually_exclusive_group()
fgColor.add_argument('--fR', action='store_true', help='red')
fgColor.add_argument('--fG', action='store_true', help='green')
fgColor.add_argument('--fB', action='store_true', help='blue')
fgColor.add_argument('--fW', action='store_true', help='white')
fgColor.add_argument('--fK', action='store_true', help='black')
fgColor.add_argument('--fC', action='store_true', help='cyan')
fgColor.add_argument('--fM', action='store_true', help='magenta')
fgColor.add_argument('--fY', action='store_true', help='yellow')
fgColor.add_argument('--fg', action='store_true', help='medium gray')
fgColor.add_argument('--f', metavar='RRGGBB', help='arbitrary color in HTML format')
bgColor = parser.add_mutually_exclusive_group()
bgColor.add_argument('--bR', action='store_true', help='red')
bgColor.add_argument('--bG', action='store_true', help='green')
bgColor.add_argument('--bB', action='store_true', help='blue')
bgColor.add_argument('--bW', action='store_true', help='white')
bgColor.add_argument('--bK', action='store_true', help='black')
bgColor.add_argument('--bC', action='store_true', help='cyan')
bgColor.add_argument('--bM', action='store_true', help='magenta')
bgColor.add_argument('--bY', action='store_true', help='yellow')
bgColor.add_argument('--bg', action='store_true', help='medium gray')
bgColor.add_argument('--b', metavar='RRGGBB', help='arbitrary color in HTML format')
return parser
if __name__ == '__main__':
main()
``` |
{
"source": "jph58/runko",
"score": 3
} |
#### File: projects/shocks/shock_RH.py
```python
import numpy as np
import matplotlib.pyplot as plt
import csv
# Import Savitzky–Golay filter and command line input
from scipy.signal import savgol_filter
from parser import parse_input
# Function to count the number of lines in a file - from online
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
# Function to find the nearest value in array compared to a given value. Outputs
# element of nearest value - from online
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
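# Example (comment only): find_nearest([1.0, 3.0, 7.0], 5.0) returns 1,
# because 3.0 and 7.0 are equally close and argmin keeps the first match.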
# Main function
if __name__ == "__main__":
conf, fdir, args = parse_input()
omp = conf.cfl / conf.c_omp # Plasma frequency
qe = (omp ** 2.0 * conf.gamma) / (conf.ppc)
frames = int(conf.Nt / conf.interval) # Number of simulation frames
x_shock = [] # Lists for position of shock and respective lap no.
t_laps = []
CR_upstream_ar = [] #Arrays for finding CR
CR_downstream_ar = []
CR_delete_index = []
##############################################################################
for i in range(15, frames + 1): #First 15 frames unphysical due to shock formation
# Read 1D density values from file
rhox = []
rhoy = []
slap = str(i * conf.interval).rjust(5, "0")
file = "{}/rho1_{}.csv".format(conf.outdir, slap)
with open(file.format(slap), "r") as f:
length = file_len(file)
reader = csv.reader(f)
bob = list(reader)
for l in range(length):
rhox.append(float(bob[l][0]))
rhoy.append(float(bob[l][1]))
f.close()
# Set upstream density to unity
for conv in range(len(rhoy)):
rhoy[conv] = rhoy[conv] / (conf.ppc * qe)
# Apply Savitzky-Golay filter to data
rhoy_f = savgol_filter(rhoy, 51, 3) # 21,2 ... 51,1
#print("Data points = {}".format(len(rhoy_f)))
###########################
# This block of code finds the position of the shock, then identifies an upstream position to measure upstream density. Only use if upstream density <> 1
#ep = 0
#r = 0
#while ep != 1:
# r += 1
# #print("rhox[r] = {} and rhox[0] = {} and rhoy_f[1] = {}".format(rhox[r], rhox[0], rhoy_f[10]))
# rec_a = (rhox[r] - rhox[0])*rhoy_f[10]
# rhoy_i = rhoy_f[:r]
# rhox_i = rhox[:r]
# trap_area = np.trapz(rhoy_i, rhox_i)
# #print("r is {} and i is {}".format(r,i))
# #print("rec_a = {} amd trap_area = {}".format(rec_a, trap_area))
# if (rec_a - trap_area) > 2: #2 for Perpendicular
# #CR_upstream = rhoy_f[r] #Use where Upstream is not 1
# CR_upstream = 1
# #print("CR found at: {}".format(r))
# ep = 1
#CR upstream value nominally 1
CR_upstream = 1
shock_loc = 2*CR_upstream #True midpoint of shock y-axis
xs_index = find_nearest(rhoy_f, shock_loc) # Midpoint and position of shock found
#This block of code finds the average Downstream density per frame
CR_downstream = 0.0
CR_LFS = 20 #20 is good for Perpendicular shocks, 1000 for Parallel
CR_upper_index = xs_index-CR_LFS #Controls how far left of shock program measures Downstream
if (CR_upper_index > 10): #Filter out bad frames
CR_upstream_ar.append(CR_upstream) #This is upstream density, appended only if a downstream density can be found
x_upper_count = 10 #Starting away from reflecting wall
while (x_upper_count < CR_upper_index):
CR_downstream = CR_downstream + rhoy_f[x_upper_count]
x_upper_count = x_upper_count + 1
#print("x_upper_count is {}".format(x_upper_count))
#print("CR_upper_index is {}".format(CR_upper_index))
#print("upper avg is {}".format(CR_downstream))
#print("CR_Upper_index is {}".format(CR_upper_index))
CR_downstream = CR_downstream/(x_upper_count - 10)
CR_downstream_ar.append(CR_downstream)
else:
CR_delete_index.append(i)
CR_upstream_ar.append(0.0)
CR_downstream_ar.append(0.0)
###########################
# Data for lap appended
x_shock.append(rhox[xs_index])
t_laps.append(i * conf.interval)
#print(t_laps[i])
print("Frame {} appended".format(i))
print("Upstream density is: {}".format(CR_upstream))
print("x is: {}".format(rhox[xs_index]))
d = plt.figure()
ax = d.add_subplot(111)
ax.set_xlim(0, 800)
ax.set_ylim(0, 5)
plt.plot(rhox, rhoy_f)
plt.axhline(y=shock_loc, color = 'purple') #Horizontal line halfway up shock
plt.axhline(y=CR_upstream, color = 'yellow') #Measures upstream density
if (CR_upper_index > 0):
plt.axhline(y=CR_downstream, color = 'orange') #Average Downstream density
plt.axvline(x=rhox[xs_index], color = 'green') #Measures x-position of shock
#plt.axvline(x = rhox[r], color = 'red') #Use for debug if CR upsteam <> 1
plt.savefig("{}/sav_{}".format(conf.outdir, i))
plt.close()
###############################################################################
x_shock = x_shock[:150]
t_laps = t_laps[:150]
CR_upstream_ar = CR_upstream_ar[:150]
CR_downstream_ar = CR_downstream_ar[:150]
# Lists converted to numpy arrays
t_laps = np.array(t_laps)
x_shock = np.array(x_shock)
CR_upstream_ar = np.array(CR_upstream_ar)
CR_downstream_ar = np.array(CR_downstream_ar)
# Remove Zero points
zero_points = np.where(x_shock < 20.0)
x_shock = np.delete(x_shock, zero_points[0])
t_laps = np.delete(t_laps, zero_points[0])
CR_upstream_ar = np.delete(CR_upstream_ar, zero_points[0])
CR_downstream_ar = np.delete(CR_downstream_ar, zero_points[0])
zero_points = np.where(CR_upstream_ar == 0.0)
x_shock = np.delete(x_shock, zero_points[0])
t_laps = np.delete(t_laps, zero_points[0])
CR_upstream_ar = np.delete(CR_upstream_ar, zero_points[0])
CR_downstream_ar = np.delete(CR_downstream_ar, zero_points[0])
#Calculate all CR values across simulation
CR_ar = CR_downstream_ar/CR_upstream_ar
# Unit conversion
t_omp = t_laps * conf.cfl/conf.c_omp
x_shock_sd = x_shock #/ conf.c_omp
# Fit straight line to data
line, covmat = np.polyfit(t_omp, x_shock_sd, 1, cov = True)
grad_uncert = np.sqrt(float(covmat[0,0]))
inter_uncert = np.sqrt(float(covmat[1,1]))
print("The gradient is: {} +- {}".format(line[0], grad_uncert))
print("The y-intercept is: {} +- {}".format(line[1], inter_uncert))
#Find CR value by averaging later values
CR_avg = 0.0
start_CR_count = 1 #Minimum 1
stop_CR_count = 50 #Varies depending on how long it takes for CR to stabilise, check CR_RH.png to see if this value needs adjusting
for CR_count in range(start_CR_count, stop_CR_count):
CR_avg = CR_avg + CR_ar[-CR_count]
#print(CR_ar[-CR_count])
CR_avg = CR_avg/(stop_CR_count - start_CR_count)
# Plot data and fit
plt.plot(t_omp, x_shock_sd, ".")
y = []
for t in t_omp:
thing = line[0] * t + line[1]
y.append(thing)
# Output data, Beta-shock and CR value
print("beta_shock: {}".format(line[0]))
plt.plot(t_omp, y, "-")
plt.xlabel("t_omp")
plt.ylabel("x_shock")
plt.savefig("{}/shock_RH.png".format(conf.outdir))
plt.clf()
print("CR: {}".format(CR_avg))
plt.xlabel("t_omp")
plt.ylabel("Compression Ratio")
plt.plot(t_omp, CR_ar, 'bo')
plt.axhline(y = CR_avg, color = 'red')
plt.savefig("{}/CR_RH.png".format(conf.outdir))
```
#### File: runko/tests/test_io.py
```python
from mpi4py import MPI
import unittest
import os
import numpy as np
import pycorgi
import pyrunko
import pytools
import h5py
#from visualize import get_yee
#from visualize import getYee2D
#from combine_files import combine_tiles
#import injector
#from read_mesh import TileInfo
#from read_mesh import get_mesh
#import initialize as init
# combine tiles inside grid together into one array
def combine_tiles(ff, fvar, conf, isp=None ):
arr = np.zeros((conf.Nx*conf.NxMesh,
conf.Ny*conf.NyMesh,
conf.Nz*conf.NzMesh))
f = h5py.File(ff,'r')
for dset in f:
        if isp is not None:
            if f[dset]['ispcs'][()] != isp:
continue
i = f[dset]['i'][()]
j = f[dset]['j'][()]
k = f[dset]['k'][()]
NxMesh = f[dset]['Nx'][()]
NyMesh = f[dset]['Ny'][()]
NzMesh = f[dset]['Nz'][()]
ii = int( i*NxMesh )
jj = int( j*NyMesh )
kk = int( k*NzMesh )
tile = f[dset][fvar][()]
tile = np.reshape(tile, (NzMesh, NyMesh, NxMesh))
for s in range(NzMesh):
for r in range(NyMesh):
for q in range(NxMesh):
arr[ii+q, jj+r, kk+s] = tile[s,r,q]
return arr
class Conf:
Nx = 3
Ny = 3
Nz = 1
oneD = False
twoD = False
threeD = False
NxMesh = 5
NyMesh = 5
NzMesh = 5
xmin = 0.0
xmax = 1.0
ymin = 0.0
ymax = 1.0
zmin = 0.0
zmax = 1.0
me = -1.0
mi = 1.0
qe = 1.0
qi = 1.0
#def __init__(self):
# print("initialized...")
def density_profile(xloc, ispcs, conf):
return conf.ppc
#load tiles into each grid
def loadTiles1D(n, conf):
for i in range(n.get_Nx()):
for j in range(n.get_Ny()):
#if n.get_mpi_grid(i) == n.rank:
c = pyrunko.fields.oneD.Tile(conf.NxMesh, conf.NyMesh, conf.NzMesh)
n.add_tile(c, (i,) )
#load tiles into each grid
def loadTiles2D(n, conf):
for i in range(n.get_Nx()):
for j in range(n.get_Ny()):
#if n.get_mpi_grid(i,j) == n.rank:
c = pyrunko.fields.twoD.Tile(conf.NxMesh, conf.NyMesh, conf.NzMesh)
n.add_tile(c, (i,j) )
# create similar reference array
def fill_ref(grid, conf):
data = np.zeros((conf.Nx*conf.NxMesh, conf.Ny*conf.NyMesh, conf.Nz*conf.NzMesh, 9))
# lets put floats into Yee lattice
val = 1.0
Nx = grid.get_Nx()
Ny = grid.get_Ny()
Nz = grid.get_Nz()
NxM = conf.NxMesh
NyM = conf.NyMesh
NzM = conf.NzMesh
#print("filling ref")
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
#print(i,j,k,q,r,s, "val=",val)
# insert 1/val to get more complex floats (instead of ints only)
data[i*NxM + q, j*NyM + r, k*NzM + s, 0] = 1.0/val + 1.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 1] = 1.0/val + 2.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 2] = 1.0/val + 3.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 3] = 1.0/val + 4.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 4] = 1.0/val + 5.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 5] = 1.0/val + 6.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 6] = 1.0/val + 7.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 7] = 1.0/val + 8.0
data[i*NxM + q, j*NyM + r, k*NzM + s, 8] = 1.0/val + 9.0
val += 1
return data
# fill Yee mesh with values
def fill_yee(grid, data, conf):
Nx = grid.get_Nx()
Ny = grid.get_Ny()
Nz = grid.get_Nz()
NxM = conf.NxMesh
NyM = conf.NyMesh
NzM = conf.NzMesh
# lets put ref array into Yee lattice
#print("filling yee")
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
#if n.get_mpi_grid(i,j) == n.rank:
if True:
c = grid.get_tile(i,j,k)
yee = c.get_yee(0)
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
yee.ex[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 0]
yee.ey[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 1]
yee.ez[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 2]
yee.bx[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 3]
yee.by[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 4]
yee.bz[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 5]
yee.jx[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 6]
yee.jy[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 7]
yee.jz[q,r,s] = data[i*NxM + q, j*NyM + r, k*NzM + s, 8]
# Generic function to fill the velocity mesh
def filler(xloc, uloc, ispcs, conf):
x = xloc[0]
y = xloc[1]
z = xloc[2]
ux = uloc[0]
uy = uloc[1]
uz = uloc[2]
return x + y + z + ux + uy + uz + ispcs
#physical version
mux = 0.0
muy = 0.0
muz = 0.0
delgam = conf.delgam
#electrons
if ispcs == 0:
delgam = conf.delgam * np.abs(conf.mi / conf.me) * conf.temperature_ratio
mux = conf.ub_e
muy = 0.0
muz = 0.0
#positrons/ions/second species
if ispcs == 1:
delgam = conf.delgam
mux = conf.ub_i
muy = 0.0
muz = 0.0
#plasma reaction
omp = conf.cfl*conf.dx
n0 = (omp**2.0)/conf.Nspecies
#velocity perturbation
Lx = conf.Nx*conf.NxMesh*conf.dx
kmode = conf.modes
mux_noise = conf.beta*np.cos(2.0*np.pi*kmode*x/Lx) * (Lx/(2.0*np.pi*kmode))
#Classical Maxwellian
f = n0*(1.0/(2.0*np.pi*delgam))**(0.5)
f *= np.exp(-0.5*((ux - mux - mux_noise)**2.0)/(delgam))
return f
class IO(unittest.TestCase):
def test_write_fields1D(self):
##################################################
# write
conf = Conf()
conf.oneD = True
conf.Nx = 3
conf.Ny = 1
conf.Nz = 1
conf.NxMesh = 2
conf.NyMesh = 1
conf.NzMesh = 1
conf.outdir = "io_test_1D/"
if not os.path.exists( conf.outdir ):
os.makedirs(conf.outdir)
grid = pycorgi.oneD.Grid(conf.Nx, conf.Ny)
grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
loadTiles1D(grid, conf)
ref = fill_ref(grid, conf)
fill_yee(grid, ref, conf)
pyrunko.fields.oneD.write_yee(grid, 0, conf.outdir)
##################################################
# read using analysis tools
arrs = combine_tiles(conf.outdir+"fields-0_0.h5", "ex", conf)
Nx = grid.get_Nx()
Ny = grid.get_Ny()
Nz = grid.get_Nz()
NxM = conf.NxMesh
NyM = conf.NyMesh
NzM = conf.NzMesh
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
self.assertAlmostEqual( arrs[i*NxM + q, j*NyM + r, k*NzM + s],
ref[i*NxM + q, j*NyM + r, k*NzM + s, 0], places=6)
##################################################
# test reading back
node2 = pycorgi.oneD.Grid(conf.Nx, conf.Ny)
node2.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
loadTiles1D(node2, conf)
pyrunko.fields.oneD.read_yee(node2, 0, "io_test_1D")
yee1 = pytools.visualize.get_yee(grid, conf)
yee2 = pytools.visualize.get_yee(node2, conf)
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
for m in ["jx", "jy", "jz", "ex", "ey", "ez", "bx", "by", "bz", "rho"]:
self.assertAlmostEqual(
yee1[m][i*NxM + q],
yee2[m][i*NxM + q],
places=6)
def test_write_fields2D(self):
##################################################
# write
conf = Conf()
conf.twoD = True
conf.Nx = 3
conf.Ny = 4
conf.Nz = 1
conf.NxMesh = 5
conf.NyMesh = 6
conf.NzMesh = 1
conf.outdir = "io_test_2D/"
if not os.path.exists( conf.outdir ):
os.makedirs(conf.outdir)
grid = pycorgi.twoD.Grid(conf.Nx, conf.Ny)
grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
loadTiles2D(grid, conf)
ref = fill_ref(grid, conf)
fill_yee(grid, ref, conf)
pyrunko.fields.twoD.write_yee(grid, 0, conf.outdir)
##################################################
# read using analysis tools
arrs = combine_tiles(conf.outdir+"fields-0_0.h5", "ex", conf)
Nx = grid.get_Nx()
Ny = grid.get_Ny()
Nz = grid.get_Nz()
NxM = conf.NxMesh
NyM = conf.NyMesh
NzM = conf.NzMesh
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
self.assertAlmostEqual( arrs[i*NxM + q, j*NyM + r, k*NzM + s],
ref[i*NxM + q, j*NyM + r, k*NzM + s, 0], places=6)
##################################################
# test reading back
node2 = pycorgi.twoD.Grid(conf.Nx, conf.Ny)
node2.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
loadTiles2D(node2, conf)
pyrunko.fields.twoD.read_yee(node2, 0, "io_test_2D")
yee1 = pytools.visualize.get_yee_2D(grid, conf)
yee2 = pytools.visualize.get_yee_2D(node2, conf)
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
for m in ["jx", "jy", "jz", "ex", "ey", "ez", "bx", "by", "bz", "rho"]:
self.assertAlmostEqual(
yee1[m][i*NxM + q, j*NyM + r],
yee2[m][i*NxM + q, j*NyM + r],
places=6)
# compare two AdaptiveMesh3D objects and assert their equality
def compareMeshes(self, vm, ref):
cells = vm.get_cells(True)
refcells = ref.get_cells(True)
#metainfo
self.assertEqual( vm.length, ref.length )
self.assertEqual( vm.maximum_refinement_level, ref.maximum_refinement_level )
self.assertEqual( vm.top_refinement_level, ref.top_refinement_level )
self.assertEqual( len(cells), len(refcells) )
for cid in cells:
#refinement level
rfl1 = vm.get_refinement_level(cid)
rfl2 = ref.get_refinement_level(cid)
self.assertEqual(rfl1, rfl2)
#indices
[ii1,jj1,kk1] = vm.get_indices(cid)
[ii2,jj2,kk2] =ref.get_indices(cid)
self.assertEqual(ii1, ii2)
self.assertEqual(jj1, jj2)
self.assertEqual(kk1, kk2)
#value
self.assertEqual( vm[ii1,jj1,kk1,rfl1], ref[ii2,jj2,kk2,rfl2] )
#center
xx1,yy1,zz1 = vm.get_center([ii1,jj1,kk1], rfl1)
xx2,yy2,zz2 =ref.get_center([ii2,jj2,kk2], rfl2)
self.assertEqual(xx1, xx2)
self.assertEqual(yy1, yy2)
self.assertEqual(zz1, zz2)
def test_write_Mesh3V(self):
##################################################
# write
conf = Conf()
conf.twoD = True
conf.Nx = 2
conf.Ny = 1
conf.Nz = 1
conf.NxMesh = 3
conf.NyMesh = 1
conf.NzMesh = 1
conf.Nspecies = 2
conf.outdir = "io_test_mesh/"
conf.dx = 1.0
conf.dy = 1.0
conf.dz = 1.0
conf.Nvx = 3
conf.Nvy = 5
conf.Nvz = 8
conf.vxmin = -1.0
conf.vymin = -2.0
conf.vzmin = -3.0
conf.vxmax = 4.0
conf.vymax = 5.0
conf.vzmax = 6.0
conf.refinement_level= 0
conf.clip= True
conf.clipThreshold= 1.0e-5
if not os.path.exists( conf.outdir ):
os.makedirs(conf.outdir)
grid = pycorgi.oneD.Grid(conf.Nx, conf.Ny)
grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
#if n.get_mpi_grid(i) == n.rank:
c = pyrunko.vlv.oneD.Tile(conf.NxMesh, conf.NyMesh, conf.NzMesh)
grid.add_tile(c, (i,) )
pytools.vlv.inject(grid, filler, conf) #inject plasma into vlv mesh
pyrunko.vlv.oneD.write_mesh(grid, 0, conf.outdir)
##################################################
# read using analysis tools
fname = conf.outdir + "meshes-0_0.h5"
f = h5py.File(fname,'r')
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
c = grid.get_tile(i,j,k)
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for ispcs in range(conf.Nspecies):
block = c.get_plasma_species(0, ispcs)
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
tinfo = pytools.vlv.TileInfo()
tinfo.i = i
tinfo.j = j
tinfo.k = k
tinfo.q = q
tinfo.r = r
tinfo.s = s
tinfo.ispcs = ispcs
#now assert
vm = pytools.vlv.get_mesh(f, tinfo)
ref = block[q,r,s]
self.compareMeshes(vm, ref)
##################################################
# read back
node2 = pycorgi.oneD.Grid(conf.Nx, conf.Ny)
node2.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
for i in range(node2.get_Nx()):
for j in range(node2.get_Ny()):
#if n.get_mpi_grid(i) == n.rank:
c = pyrunko.vlv.oneD.Tile(conf.NxMesh, conf.NyMesh, conf.NzMesh)
node2.add_tile(c, (i,) )
pytools.vlv.inject(node2, pytools.vlv.empty_filler, conf, empty=True) #injecting empty meshes
#pyrunko.vlv.oneD.write_mesh(node2, 1, conf.outdir)
pyrunko.vlv.oneD.read_mesh(node2, 0, "io_test_mesh")
#pyrunko.vlv.oneD.write_mesh(node2, 2, conf.outdir)
for i in range(node2.get_Nx()):
for j in range(node2.get_Ny()):
for k in range(node2.get_Nz()):
c1 = grid.get_tile(i,j,k)
c2 = node2.get_tile(i,j,k)
#if n.get_mpi_grid(i,j) == n.rank:
if True:
for ispcs in range(conf.Nspecies):
block1 = c1.get_plasma_species(0, ispcs)
block2 = c2.get_plasma_species(0, ispcs)
for q in range(conf.NxMesh):
for r in range(conf.NyMesh):
for s in range(conf.NzMesh):
tinfo = pytools.vlv.TileInfo()
tinfo.i = i
tinfo.j = j
tinfo.k = k
tinfo.q = q
tinfo.r = r
tinfo.s = s
tinfo.ispcs = ispcs
#now assert
vm1 = block1[q,r,s]
vm2 = block2[q,r,s]
self.compareMeshes(vm1, vm2)
# Complicated restart from file with heterogeneous (boundary) tiles
def skip_test_restart(self):
conf = Conf()
conf.twoD = True
conf.Nx = 5
conf.Ny = 1
conf.Nz = 1
conf.NxMesh = 5
conf.NyMesh = 1
conf.NzMesh = 1
conf.outdir = "io_test_restart/"
if not os.path.exists( conf.outdir ):
os.makedirs(conf.outdir)
grid = pycorgi.oneD.Grid(conf.Nx, conf.Ny)
grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
pytools.vlv.loadTiles(grid, conf)
#load boundaries
for i in [0, grid.get_Nx()-1]:
for j in range(grid.get_Ny()):
if i == 0:
c = plasma.Tile_outflow_L(conf.NxMesh, conf.NyMesh, conf.NzMesh)
#set left (top) wall location
c.fld1 = 1
c.fld2 = 1
else:
c = plasma.Tile_outflow_R(conf.NxMesh, conf.NyMesh, conf.NzMesh)
#set right (bottom) wall location
l = conf.NxMesh-2
iglob, jglob, kglob = globalIndx( (i,j), (l,0,0), conf)
c.fld1 = iglob
c.fld2 = iglob
pytools.vlv.initialize_tile(c, (i,j), grid, conf)
#add it to the grid
grid.add_tile(c, (i,))
def test_write_pic2D(self):
def test_filler(xloc, ispcs, conf):
xx = xloc[0]
yy = xloc[1]
#electrons
if ispcs == 0:
zz = 0.1
#positrons/ions/second species
if ispcs == 1:
zz = 0.2
ux = xx*100.0
uy = yy*1000.0
uz =-xx*yy*1.0
x0 = [xx, yy, zz]
u0 = [ux, uy, uz]
return x0, u0
##################################################
# write
conf = Conf()
conf.twoD = True
conf.Nx = 3
conf.Ny = 4
conf.Nz = 1
conf.NxMesh = 5
conf.NyMesh = 6
conf.NzMesh = 1
conf.outdir = "io_test_2D/"
conf.ppc = 1
conf.Nspecies = 2
conf.Nspecies_test = 0
#tmp non-needed variables
conf.omp = 1
conf.gamma_e = 0.0
conf.me = 1
conf.mi = 1
conf.cfl = 1.0
conf.c_omp = 1.0
if not os.path.exists( conf.outdir ):
os.makedirs(conf.outdir)
grid = pycorgi.twoD.Grid(conf.Nx, conf.Ny)
grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
for i in range(grid.get_Nx()):
for j in range(grid.get_Ny()):
for k in range(grid.get_Nz()):
c = pyrunko.pic.twoD.Tile(conf.NxMesh, conf.NyMesh, conf.NzMesh)
pytools.pic.initialize_tile(c, (i, j, k), grid, conf)
grid.add_tile(c, (i,j))
pytools.pic.inject(grid, test_filler, density_profile, conf)
print("write prtcls")
pyrunko.pic.twoD.write_particles(grid, 0, conf.outdir)
# TODO: read with h5py
# TODO: read with internal read tool
node2 = pycorgi.twoD.Grid(conf.Nx, conf.Ny)
node2.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax)
for i in range(node2.get_Nx()):
for j in range(node2.get_Ny()):
for k in range(node2.get_Nz()):
c = pyrunko.pic.twoD.Tile(conf.NxMesh, conf.NyMesh, conf.NzMesh)
pytools.pic.initialize_tile(c, (i, j,k), node2, conf)
node2.add_tile(c, (i,j))
print("read prtcls")
pyrunko.pic.twoD.read_particles(node2, 0, conf.outdir)
#assert
for i in range(node2.get_Nx()):
for j in range(node2.get_Ny()):
if node2.get_mpi_grid(i,j) == node2.rank():
cid = node2.id(i,j)
c = node2.get_tile(cid) #get cell ptr
c_ref = grid.get_tile(cid) #get cell ptr
for ispcs in range(conf.Nspecies):
container1 = c.get_container(ispcs)
container2 = c_ref.get_container(ispcs)
#TODO: assert content
xxs1 = container1.loc(0)
yys1 = container1.loc(1)
zzs1 = container1.loc(2)
vxs1 = container1.vel(0)
vys1 = container1.vel(1)
vzs1 = container1.vel(2)
wgs1 = container1.wgt()
xxs2 = container2.loc(0)
yys2 = container2.loc(1)
zzs2 = container2.loc(2)
vxs2 = container2.vel(0)
vys2 = container2.vel(1)
vzs2 = container2.vel(2)
wgs2 = container2.wgt()
nprtcls = conf.NxMesh*conf.NyMesh*conf.NzMesh*conf.ppc
self.assertEqual(len(xxs1), len(xxs2))
self.assertEqual(len(yys1), len(yys2))
self.assertEqual(len(zzs1), len(zzs2))
self.assertEqual(len(vxs1), len(vxs2))
self.assertEqual(len(vys1), len(vys2))
self.assertEqual(len(vzs1), len(vzs2))
self.assertEqual(len(wgs1), len(wgs2))
for n in range(nprtcls):
self.assertAlmostEqual(xxs1[n], xxs2[n], places=6)
self.assertAlmostEqual(yys1[n], yys2[n], places=6)
self.assertAlmostEqual(zzs1[n], zzs2[n], places=6)
self.assertAlmostEqual(vxs1[n], vxs2[n], places=6)
self.assertAlmostEqual(vys1[n], vys2[n], places=6)
self.assertAlmostEqual(vzs1[n], vzs2[n], places=6)
self.assertAlmostEqual(wgs1[n], wgs2[n], places=6)
``` |
{
"source": "jphacks/A_2001",
"score": 3
} |
#### File: src/models/task.py
```python
from app.database import db
from sqlalchemy.dialects.mysql import INTEGER
class Task(db.Model):
__tablename__ = "tasks"
id = db.Column(
INTEGER(unsigned=True),
primary_key=True,
autoincrement=True,
)
quest_id = db.Column(
INTEGER(unsigned=True),
db.ForeignKey("quests.id", ondelete="cascade", onupdate="cascade"),
nullable=False,
)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.String(5000))
start = db.Column(db.DateTime)
done = db.Column(db.Boolean, default=False)
created_at = db.Column(
db.DateTime,
server_default=db.func.current_timestamp(),
server_onupdate=db.func.current_timestamp(),
nullable=False,
)
updated_at = db.Column(
db.DateTime,
server_default=db.func.current_timestamp(),
server_onupdate=db.func.current_timestamp(),
nullable=False,
)
subtasks = db.relationship("Subtask", backref="task", cascade="all")
def __init__(self, quest_id, name, description=None):
self.quest_id = quest_id
self.name = name
self.description = description
def to_dict(self):
return dict(
id=self.id,
name=self.name,
description="" if self.description is None else self.description,
done=self.done,
subtasks=[subtask.to_dict() for subtask in self.subtasks],
start=self.start,
)
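    # Shape of the dict returned above, with illustrative values:
    # {"id": 1, "name": "write report", "description": "", "done": False,
    #  "subtasks": [<subtask dicts>], "start": None or a datetime}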
``` |
{
"source": "jphacks/A_2004",
"score": 3
} |
#### File: A_2004/make-dataset/square_size.py
```python
from PIL import Image
from pathlib import Path
import os
#im = Image.open('./data/src')
dir = input("学習データ候補の画像があるディレクトリ名> ")
p = Path("./{}".format(dir))
files = sorted(p.glob("*"))
def expand2square(pil_img, background_color):
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
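# Example (comment only): expand2square(im, (255, 255, 255)) pads a 300x200
# image onto a 300x300 white canvas, pasting the original at y offset
# (300 - 200) // 2 = 50; a square input is returned unchanged.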
for filename in files:
im = Image.open(filename)
im_new = expand2square(im, (255, 255, 255))
im_new.save('./{}/{}.jpg'.format(dir,(str(filename).split('/')[1]).split('.')[0]), quality=95)
```
#### File: deep/DCGAN/networks.py
```python
import torch
from torch import nn
### Build the Generator ###
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.main = nn.Sequential(
nn.ConvTranspose2d(100, 256, 4, 1, 0, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64, 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(32, 3, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, x):
return self.main(x)
### Build the Discriminator ###
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(3, 32, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(32, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 256, 4, 2, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(256, 1, 4, 1, 0, bias=False),
)
def forward(self, x):
return self.main(x).squeeze()
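# Shape check for both networks above (comment only): the Generator maps
# latent noise of shape (N, 100, 1, 1) through 1 -> 4 -> 8 -> 16 -> 32 -> 64,
# producing images of shape (N, 3, 64, 64); the Discriminator walks back down
# 64 -> 32 -> 16 -> 8 -> 4 -> 1 and squeeze() leaves raw logits of shape (N,),
# so a sigmoid/BCE-with-logits criterion would typically be applied outside.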
``` |
{
"source": "jphacks/A_2006",
"score": 3
} |
#### File: jphacks/A_2006/scraping.py
```python
from bs4 import BeautifulSoup
import urllib.request, urllib.error, urllib.parse
def select_img(keyword="TT兄弟"):
    max_page = 3 # number of pages (20 images per page)
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0",
}
# cnt = random.choice([i for i in range(200) if i%20 == 0])
url = 'https://search.yahoo.co.jp/image/search?p={}&fr=top_ga1_sa&ei=UTF-8'.format(urllib.parse.quote(keyword))
req = urllib.request.Request(url=url, headers=headers)
res = urllib.request.urlopen(req)
soup = BeautifulSoup(res, features="lxml")
# div = soup.find_all(class_="sw-ThumbnailGrid")
# print(div)
imgs = soup.find_all('img')
print(imgs)
# img = random.choice(imgs)
# img = img["src"]
# tmp = urllib.request.urlopen(img)
# data = tmp.read()
# return data
if __name__ == "__main__":
select_img("犬")
```
#### File: A_2006/server/calc.py
```python
from flask import Flask, request, jsonify, make_response
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from werkzeug.utils import secure_filename
from PIL import Image
import json
import io
import base64
# sanitize uploaded file extensions
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg','gif'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
#TODO: determine the dog's condition from the image
coefficient_dic = {'adult':1.8, # adult dog
                   'sterilized':1.6, # spayed/neutered
                   'obesity':1.4, # slightly obese
                   'increasing':1.3, # gaining weight
                   'pregnancy':2.4, # pregnancy
                   'lactation':6, # lactation
                   'growth_period':3, # growth period
                   'old': 1.4 # senior dog
                   }
# convert base64 into something Pillow can handle
def img_parser(img):
    img = base64.b64decode(img)
    img = io.BytesIO(img)
    img = Image.open(img)
    return img
# return the distance (km)
def ideal_walk_dst(cal, weight):
dst = ((cal/weight - 1.25*(weight)**(-1/4))*weight**(-2/5))/1.77
return dst
def calc_rep(weight): # resting energy requirement (REP)
rep=70*(weight)**(2/3)
return rep
def calc_der(rep, state): # ideal daily energy requirement (DER)
der = rep*state
return der*0.3
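# Rough worked example of the three formulas above (illustrative numbers, not
# from the project): for a 10 kg dog with the 'adult' coefficient of 1.8,
# rep = 70 * 10**(2/3) ~= 325, der = 325 * 1.8 * 0.3 ~= 175, and
# ideal_walk_dst(175, 10) ~= 3.8, which main() later halves before returning
# it as the suggested distance.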
def azure_request(img_by):
project_ID = "xxx"
iteration_name = "xxx"
key = "xxx"
endpointurl = "xxx"
prediction_credentials = ApiKeyCredentials(
in_headers={"Prediction-key": key}
)
predictor = CustomVisionPredictionClient(
endpointurl,
prediction_credentials
)
results = predictor.classify_image(
project_ID,
iteration_name,
img_by
)
predict = {}
for prediction in results.predictions:
predict[prediction.tag_name] = prediction.probability
return predict # 予測を辞書で返却
app = Flask(__name__)
@app.route('/route', methods=["POST"])
def main():
# json_data = request.get_json()
# print(json_data)
# weight = float(json_data.get("weight"))
# coefficient = json_data.get("coefficient")
weight = request.get_json()["weight"]
img = request.get_json()['image']
    # (added) =======================================================================
    # throw the image at Azure and fetch the result
img = base64.b64decode(img)
azure_results = azure_request(img)
    # comes back looking like ['normal: 65.00%', 'slender: 56.72%', 'fat: 9.45%']
print("[DEBUG] azure_results: ", azure_results)
# fat指数(適当)
fat = azure_results["slender"]*(-1) + azure_results["fat"]*1 + 2
#=======================================================================
print("[DEBUG] fat指数: ", fat)
    #TODO: handle the POSTed image data
#img = img_parser(json_data.get("img"))
rep = calc_rep(float(weight))
der = calc_der(rep, fat)
dst = ideal_walk_dst(der, float(weight))
print("[DEBUG] distance: "+str(dst))
payload = {
"result":True,
"data":{
"distance": dst/2,
}
}
return jsonify(payload)
if __name__ == "__main__":
app.run(debug=True, host='localhost', port=5678)
```
#### File: A_2006/server/client.py
```python
import requests
import json
def main():
files = { "image_file": open("../data/dog/142520422.jpg", 'rb') }
data = {"weight":50, "coefficient":50}
response = requests.post("http://127.0.0.1:5678/route", files=files, json=json.dumps(data))
results = response.json()
return results
if __name__ == "__main__":
results = main()
print(results)
``` |
{
"source": "jphacks/A_2016",
"score": 2
} |
#### File: domain/repository/container_repository.py
```python
from typing import List
from sqlalchemy.orm import Session
from app.domain import entity
def get_all_containers(db: Session) -> List[entity.Container]:
return db.query(entity.Container).all()
```
#### File: domain/schemas/containers.py
```python
import re
from pydantic import BaseModel, validator
def validate_image(image: str) -> str:
if re.fullmatch(r"https?://[\w!?/+\-_~;.,*&@#$%()'[\]]+", image) is None:
raise ValueError('malformed image url: %s' % image)
return image
def validate_name(name: str) -> str:
if len(name) == 0:
raise ValueError('name is required')
return name
def validate_min(min_v: int, values) -> int:
if min_v > values.get('max'):
raise ValueError('max must be larger than min')
return min_v
def validate_nat(v: int) -> int:
if v < 0:
raise ValueError('must be larger than 0')
return v
class ContainerBase(BaseModel):
id: str
image: str
name: str
max: int
min: int
_validate_image = validator('image', allow_reuse=True)(validate_image)
_validate_name = validator('name', allow_reuse=True)(validate_name)
_validate_max = validator('max', allow_reuse=True)(validate_nat)
_validate_min = validator('min', allow_reuse=True)(validate_min)
class Container(ContainerBase):
class Config:
orm_mode = True
``` |
{
"source": "jphacks/A_2104",
"score": 3
} |
#### File: lambda/applyGoogleCal/lambda_function.py
```python
import os
import json
import datetime
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
from apiclient import discovery
import traceback
class GoogleCalendar(object):
def __init__(self):
self.service_account_id = os.environ["GOOGLE_SERVICE_ACCOUNT_ID"]
def get_credentials(self):
scopes = "https://www.googleapis.com/auth/calendar"
credentials = ServiceAccountCredentials.from_json_keyfile_name(
"google_key.json", scopes=scopes
)
return credentials
def get_schedule(self, calendar_id, time_min, time_max):
try:
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build("calendar", "v3", http=http,cache_discovery=False)
events = (
service.events()
.list(
calendarId=calendar_id,
timeMin=time_min,
timeMax=time_max,
singleEvents=True,
)
.execute()
)
items = events["items"]
return items
except Exception as e:
raise Exception(f"Error:{e}")
def insert_event(self,calendar_id,start,end):
try:
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build("calendar", "v3", http=http,cache_discovery=False)
event_body = {
'summary': '移動',
'start': {'dateTime': f'{start}', 'timeZone': 'Asia/Tokyo'},
'end': {'dateTime': f'{end}', 'timeZone': 'Asia/Tokyo'},
'colorId':'6',
}
res = service.events().insert(calendarId=calendar_id,body=event_body).execute()
except Exception as e:
raise Exception(f"Error:{e}")
return res
def lambda_handler(event, context):
GoogleCal = GoogleCalendar()
email = event["email"]
contents = event["body"]
for content in contents:
start = content["start"]
end = content["end"]
GoogleCal.insert_event(email,start,end)
return {
'statusCode': 200,
'body': json.dumps('Success')
}
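# Illustrative test event for the handler above (key names come from the code;
# every value is made up):
# {
#   "email": "someone@example.com",
#   "body": [
#     {"start": "2021-10-30T09:30:00+09:00", "end": "2021-10-30T09:55:00+09:00"}
#   ]
# }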
```
#### File: lambda/postEvent/lambda_function.py
```python
import os
import json
import urllib
import requests
import datetime
def get_time(origins,destinations,mode):
qs = {"origins":origins,"destinations":destinations,"mode":mode,"key":os.environ["API_KEY"]}
d_qs = urllib.parse.urlencode(qs)
url = os.environ["API_ENDPOINT"] + d_qs
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
res = json.loads(response.text)
return res['rows'][0]['elements'][0]['duration']['value']
def calc_transportation(body):
"""
    Return only the generated travel events.
"""
margin = 5*60 #margin before next event start
ret = []
for i in range(len(body)):
indict = {}
if i==0:
dur = get_time(os.environ["HOME"],body[i]["location"],body[i]["mode"])
else:
dur = get_time(body[i-1]["location"],body[i]["location"],body[i]["mode"])
t_delta = datetime.timedelta(seconds=(dur+margin))
t_delta_margin = datetime.timedelta(seconds=margin)
dt = datetime.datetime.fromisoformat(body[i]["start"])
indict["title"] = "移動"
indict["start"] = (dt - t_delta).isoformat()
indict["end"] = (dt - t_delta_margin).isoformat()
indict["backgroundColor"] = "#FFCC99"
ret.append(indict)
return ret
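# Example of the scheduling arithmetic above (comment only, illustrative
# values): a 25-minute drive (dur = 1500 s) to an event starting at 13:00
# yields a travel block from 12:30 (start - dur - 5 min margin) to 12:55
# (start - margin), coloured with backgroundColor "#FFCC99".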
def lambda_handler(event, context):
events = event["body"]
transport_events = calc_transportation(events)
combined_events = events + transport_events
res = [combined_events,transport_events]
return {
'statusCode': 200,
'body': json.dumps(res)
}
``` |
{
"source": "jphacks/A_2111",
"score": 3
} |
#### File: A_2111/backend/crud.py
```python
from fastapi import HTTPException, status
import os
from uuid import uuid4
from firebase import db
from firebase_admin import firestore
import numpy as np
# Fetch all registered members
async def get_all_members():
docs = db.collection("members").stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
return data
# Fetch a specific member
async def get_member(uuid: str):
docs = db.collection("members").where("uuid", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="このIDは見つかりません")
return data
# Fetch all relations
async def get_all_familiars():
docs = db.collection("familiars").stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
return data
# Fetch the relations of a specific member
async def get_familiar(uuid: str):
docs = db.collection("familiars").where("start", "==", uuid).stream()
docs2 = db.collection("familiars").where("end", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
return data
# Register a member
async def create_member(name: str, size: str, vector: str) -> str:
size_width = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
if size not in size_width:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="1~10のいずれかの整数を半角で入力してください")
uuid = str(uuid4())
doc_ref = db.collection("members").document()
doc_ref.set({
"uuid": uuid,
"name": name,
"size": size,
"vector": vector
})
return uuid
# Register a relation
async def create_familiar(start: str, end: str):
doc_ref = db.collection("familiars").document()
doc_ref.set({
"start": start,
"end": end
})
return True
# Check whether the relation already exists
async def existed_familiar(start: str, end: str):
docs = db.collection("familiars").where("start", "==", start).where("end", "==", end).stream()
docs2 = db.collection("familiars").where("start", "==", end).where("end", "==", start).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) != 0:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="このIDはすでに登録されています")
return True
# Update a member's registration
async def update_member(uuid: str, name: str, size: str):
size_width = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
if size not in size_width:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="1~10のいずれかの整数を半角で入力してください")
docs = db.collection("members").where("uuid", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="あなたのIDが見つかりませんでした")
doc_ref = db.collection("members").document(data[0]["id"])
result = doc_ref.update({"name": name, "size": size})
return result
# Delete a member
async def remove_member(uuid: str):
docs = db.collection("members").where("uuid", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="あなたのIDが見つかりませんでした")
result = db.collection("members").document(data[0]["id"]).delete()
return result
# When a member is deleted, also delete every relation attached to it
async def remove_familiar_related_member(uuid: str):
docs = db.collection("familiars").where("start", "==", uuid).stream()
docs2 = db.collection("familiars").where("end", "==", uuid).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
i = 0
while True:
if i > len(data) - 1:
break
db.collection("familiars").document(data[i]["id"]).delete()
i += 1
return True
# Delete a specific relation
async def remove_familiar(start: str, end: str):
docs = db.collection("familiars").where("start", "==", start).where("end", "==", end).stream()
docs2 = db.collection("familiars").where("start", "==", end).where("end", "==", start).stream()
data = []
for doc in docs:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
for doc in docs2:
post = {"id": doc.id, **doc.to_dict()}
data.append(post)
if len(data) == 0:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="登録しているIDがありません")
result = db.collection("familiars").document(data[0]["id"]).delete()
return result
# Compute cosine similarity
async def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
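# Example (comment only): await cosine_similarity([1, 0], [1, 0]) -> 1.0 and
# await cosine_similarity([1, 0], [0, 1]) -> 0.0; login() below just returns
# the similarity, leaving any match threshold (e.g. > 0.9) to the caller.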
# Compare the supplied vector with the vector registered in the DB
async def login(uuid: str, vector: list):
already_registered_vector = db.collection("members").where("uuid", "==", uuid).stream()
for vec in already_registered_vector:
post = {"id": vec.id, **vec.to_dict()}
cosine_result = await cosine_similarity(vector, post["vector"])
return cosine_result
```
#### File: A_2111/backend/main.py
```python
import json
from fastapi import FastAPI, Form, status
from fastapi.responses import JSONResponse
import uvicorn
import crud
from typing import Optional
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
origins = [
# TODO: add the frontend URL here once it is deployed
"http://localhost:3000",
"http://localhost",
"https://coconomask.web.app",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Root
@app.get("/")
async def root():
return {"message": "this is root"}
# Get all registered members
@app.get("/members")
async def get_members():
members = await crud.get_all_members()
resp = {
"status": "ok",
"count": len(members),
"data": members
}
return resp
# Get a specific member
@app.get("/member")
async def get_member(uuid: str):
member = await crud.get_member(uuid)
resp = {
"status": "ok",
"data": member
}
return resp
# Get all relations
@app.get("/familiars")
async def get_familiars():
familiars = await crud.get_all_familiars()
resp = {
"status": "ok",
"count": len(familiars),
"data": familiars
}
return resp
# Get a specific relation
@app.get("/familiar")
async def get_familiar(uuid: str = Form(...)):
member = await crud.get_familiar(uuid)
if len(member) == 0:
return JSONResponse(content={"status": "error", "message": "このIDは見つかりません"}, status_code=status.HTTP_404_NOT_FOUND)
resp = {
"status": "ok",
"data": member
}
return resp
# Register a member
@app.post("/member")
async def post_member(
name: str = Form(...),
size: str = Form(...),
vector: str = Form(...)
):
vector = list(map(float, vector[1:-1].split(",")))
uuid = await crud.create_member(name, size, vector)
return JSONResponse(content={"status": "ok", "uuid": uuid, "name": name, "size": size, "vector": vector}, status_code=status.HTTP_201_CREATED)
# Register a relation
@app.post("/familiar")
async def post_familiar(
start: str = Form(...),
end: str = Form(...)
):
await crud.existed_familiar(start, end)
await crud.create_familiar(start, end)
return JSONResponse(content={"status": "ok"}, status_code=status.HTTP_201_CREATED)
# Update a member
@app.put("/member")
async def put_member(
uuid: str = Form(...),
name: str = Form(...),
size: str = Form(...)
):
await crud.update_member(uuid, name, size)
return JSONResponse(content={"status": "ok"}, status_code=status.HTTP_201_CREATED)
# Delete a member
@app.delete("/member")
async def delete_member(uuid: str = Form(...)):
await crud.remove_member(uuid)
await crud.remove_familiar_related_member(uuid)
return JSONResponse(content={"status": "ok"}, status_code=status.HTTP_201_CREATED)
# Delete a specific relation
@app.delete("/familiar")
async def delete_familiar(
start: str = Form(...),
end: str = Form(...)
):
await crud.remove_familiar(start, end)
return JSONResponse(content={"status": "ok"}, status_code=status.HTTP_201_CREATED)
# Login endpoint
@app.post("/login")
async def login(
uuid: str = Form(...),
vector: str = Form(...)
):
vector = list(map(float, vector[1:-1].split(",")))
cosine_similarity = await crud.login(uuid, vector)
return JSONResponse(content={"status": "ok", "uuid": uuid, "cosine": cosine_similarity}, status_code=status.HTTP_202_ACCEPTED)
# Start the server
if __name__ == '__main__':
uvicorn.run("main:app", reload=True)
```
#### File: device/ML/runonnx.py
```python
import onnxruntime as ort
from PIL import Image
import numpy as np
def run(url):
x=np.array(Image.open(url),dtype=np.float32).reshape(1,3,160,160)
ort_sess = ort.InferenceSession('model.onnx')
outputs = ort_sess.run(None, {'input': x})
return outputs[0][0]
if __name__ == '__main__':
print(run("sample.jpg"))
``` |
{
"source": "jphacks/A_2112",
"score": 3
} |
#### File: img_proc_server/app/app.py
```python
from flask import *
from sklearn import base
import cv2
import numpy as np
import matplotlib.pyplot as plt
import json
import base64
from main_modules import *
app = Flask(__name__)
@app.route('/', methods = ['POST'])
def post():
base64Image = json.loads(request.data.decode('UTF-8'))["base64Image"]
tmp1 = base64.b64decode(base64Image)
tmp2 = np.frombuffer(tmp1, dtype = np.uint8)
img = cv2.imdecode(tmp2, cv2.IMREAD_COLOR)
img2 = cv2.flip(img, 1)
retval, buffer = cv2.imencode('.jpg', img2)
base64Image2 = base64.b64encode(buffer).decode('UTF-8')
# print(base64Image2)
# plt.imshow(img)
# plt.show()
return make_response(jsonify({'base64Image':base64Image2 }),200)
# return make_response(jsonify({'error':'Does not support POST method'}),404)
@app.route('/', methods = ['GET'])
def get():
print("test")
return make_response(jsonify({'base64Image':"test" }),200)
@app.route('/calib', methods = ['POST'])
def calib():
base64Image = json.loads(request.data.decode('UTF-8'))["base64Image"]
arrow_point = json.loads(request.data.decode('UTF-8'))["arrowPoint"]
marker_points = json.loads(request.data.decode('UTF-8'))["markerPoints"]
crop_points = json.loads(request.data.decode('UTF-8'))["cropPoints"]
manual_marker = json.loads(request.data.decode('UTF-8'))["manualMarker"]
img_org = image_preprocess(base64Image, calib=True)
# arrow_point = [int(arrow_point[0]*h - h_crop[0]), int(arrow_point[1]*w -w_crop[0])]
# marker_points = np.array([[int(marker_point[1]*w - w_crop[0]), int(marker_point[0]*h - h_crop[0])] for marker_point in marker_points])
img = init_calib(img_org, arrow_point, marker_points, crop_points, debug=True, manual_marker=manual_marker)
if img is None:
return make_response(jsonify({'base64Image':base64Image }),200)
else:
# img = cv2.resize(img, img_org.shape[:2])
retval, buffer = cv2.imencode('.jpg', img)
base64Image2 = base64.b64encode(buffer).decode('UTF-8')
return make_response(jsonify({'base64Image':base64Image2 }),200)
@app.route('/arrow', methods = ['POST'])
def arrow():
base64ImagePrev = json.loads(request.data.decode('UTF-8'))["base64ImagePrev"]
base64Image = json.loads(request.data.decode('UTF-8'))["base64Image"]
img_prev, _ = image_preprocess(base64ImagePrev, calib=False)
img, img_org = image_preprocess(base64Image, calib=False)
# cv2.imwrite('./work/test1.png', img)
# cv2.imwrite('./work/test2.png', img_prev)
# img, r, theta, score = detect_arrow(img, img_prev, count)
img, x, y, score = detect_arrow(img, img_prev, debug=False)
# if img is None:
# return make_response(jsonify({'base64Image':base64Image }),200)
# else:
# # img = cv2.resize(img, img_org.shape[:2])
# retval, buffer = cv2.imencode('.jpg', img)
# base64Image2 = base64.b64encode(buffer).decode('UTF-8')
# return make_response(jsonify({'base64Image':base64Image2 }),200)
return make_response(jsonify({'x': x, 'y': y, 'score': score}),200)
@app.route('/trajectory', methods = ['POST'])
def proc_movie():
base64Movie = json.loads(request.data.decode('UTF-8'))["base64"]
dec_movie = base64.b64decode(base64Movie)
movie_path = './work/dec_movie.dat'
# f = open(movie_path, 'wb')
# f.write(dec_movie)
# f.close()
# print(cv2.VideoCapture(movie_path).set(cv2.CAP_PROP_POS_FRAMES, 0))
with open(movie_path, 'wb') as f:
f.write(dec_movie)
cap = cv2.VideoCapture(movie_path)
base64mp4 = detc_traj(cap)
return make_response(jsonify({'base64mp4':base64mp4 }),200)
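# Usage sketch (illustrative; assumes the server below runs on localhost:5000 and that the
# requests library is available -- neither is stated in this file):
# import requests, base64, json
# with open("frame.jpg", "rb") as fp:
#     payload = {"base64Image": base64.b64encode(fp.read()).decode("utf-8")}
# res = requests.post("http://localhost:5000/", data=json.dumps(payload))
# mirrored = base64.b64decode(res.json()["base64Image"])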
if __name__ == "__main__":
app.run(
host='0.0.0.0',
port=5000, debug=True,
# use_reloader=False, threaded=False
)
``` |
{
"source": "jphacks/B_2003",
"score": 3
} |
#### File: lambda/get_number/lambda.py
```python
import sys
import os
import pymysql
import json
#rds settings
DB_USER = os.environ["user"]
DB_PASSWORD = os.environ["password"]
DB_HOST = os.environ["endpoint"]
DB_NAME = os.environ["db"]
# connect to SQL
try:
conn = pymysql.connect(DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB_NAME, connect_timeout=5)
print("OK")
except Exception as e:
print("Fail connecting to RDS mysql instance")
print(e)
sys.exit()
print("Success connecting to RDS mysql instance")
# Main function (execution starts at handler; arguments are taken from event)
def handler(event, context):
print('body=',event['body'])
body = json.loads(event['body'])
facility_name = body['name']
password_token = body['password_token']
tmp = [(facility_name), (password_token)]
facilityID = "none"
name_list = []
item_count = 0
with conn.cursor(pymysql.cursors.DictCursor) as cur:
try:
cur.execute("SELECT facilityID FROM facility WHERE name= %s AND password_token= %s", tmp)
row = cur.fetchone()
if row == None:
print('no Exist in facility')
body = json.dumps({
"result":0,
})
conn.commit()
return {
'isBase64Encoded': False,
'statusCode': 200,
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
print('row[facilityID]=',row['facilityID'])
facilityID = row['facilityID']
cur.execute("SELECT users.name FROM facility_number INNER JOIN users ON users.userID = facility_number.userID WHERE facilityID = %s AND is_leave=0", facilityID)
row = cur.fetchone()
while row is not None:
print('row=',row)
item_count += 1
name_list.append(row['name'])
row = cur.fetchone()
print('name=',name_list)
print( "Get %d items from RDS MySQL table" %(item_count) )
except Exception as e:
print(e)
result = 0
# Commit the database transaction
conn.commit()
body = json.dumps({
"result":1,
"name": name_list
})
return {
'isBase64Encoded': False,
'statusCode': 200,
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
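# Invocation sketch (illustrative; the payload shape follows the fields read above):
# event = {"body": json.dumps({"name": "some facility", "password_token": "token123"})}
# print(handler(event, None))  # roughly -> {'statusCode': 200, 'body': '{"result": 1, "name": [...]}'}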
```
#### File: lambda/list_get/lambda.py
```python
import sys
import os
import pymysql
import json
import datetime
# rds settings
DB_USER = os.environ["user"]
DB_PASSWORD = os.environ["password"]
DB_HOST = os.environ["endpoint"]
DB_NAME = os.environ["db"]
# connect to SQL
try:
conn = pymysql.connect(DB_HOST, user=DB_USER,
passwd=DB_PASSWORD, db=DB_NAME, connect_timeout=5)
print("OK")
except Exception as e:
print("Fail connecting to RDS mysql instance")
print(e)
sys.exit()
print("Success connecting to RDS mysql instance")
def default(o):
if isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
def handler(event, context):
print('body=', event['body'])
body = json.loads(event['body'])
facility_name = body['name']
password = body['password']
starttime = body['starttime']
endtime = body['endtime']
tmp = [(facility_name), (password)]
facilityID = "none"
adaptive_list = []
item_count = 0
with conn.cursor(pymysql.cursors.DictCursor) as cur:
try:
cur.execute(
"SELECT facilityID FROM facility WHERE name= %s AND password= %s", tmp)
row = cur.fetchone()
if row == None:
print('no Exist in facility')
body = json.dumps({
"result": 0,
})
conn.commit()
return {
'isBase64Encoded': False,
'statusCode': 200,
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
print('row[facilityID]=', row['facilityID'])
facilityID = row['facilityID']
cur.execute(
"SELECT users.name, users.email_address, users.affiliation, starttime, endtime FROM facility_number INNER JOIN users ON users.userID = facility_number.userID WHERE facilityID = %s AND %s<=starttime AND endtime<=%s", [
(facilityID), (starttime), (endtime)])
row=cur.fetchone()
while row is not None:
print('row=', row)
item_count += 1
adaptive_list.append(row)
row=cur.fetchone()
print('adaptive_list=', adaptive_list)
print("Get %d items from RDS MySQL table" % (item_count))
except Exception as e:
print(e)
result=0
# Commit the database transaction
conn.commit()
body=json.dumps({
"result": 1,
"list": adaptive_list
},
sort_keys=True,
indent=1,
default=default)
return {
'isBase64Encoded': False,
'statusCode': 200,
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
```
#### File: lambda/recognition/lambda.py
```python
import sys
import os
import pymysql
import json
#add
import boto3
import datetime
import botocore
from botocore.exceptions import ClientError  # used in upload_file's error handling
import logging  # used in upload_file's error handling
import base64
import uuid
#rds settings
DB_USER = os.environ["user"]
DB_PASSWORD = os.environ["password"]
DB_HOST = os.environ["endpoint"]
DB_NAME = os.environ["db"]
try:
conn = pymysql.connect(DB_HOST, user=DB_USER, passwd=DB_PASSWORD, db=DB_NAME, connect_timeout=5)
except Exception as e:
print("Fail connecting to RDS mysql instance")
print(e)
sys.exit()
print("Success connecting to RDS mysql instance")
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
def decode_base64(photo):
return base64.b64decode(photo.encode('utf-8'))
def searchByimg(photo):
bucket = 'recognition'
collectionId = 'Collection'
photo_bytes = decode_base64(photo) # the image to search with; how it is passed in may need rethinking
threshold = 70 # confidence threshold
maxFaces = 1 # maximum number of faces to recognize
client = boto3.client('rekognition')
response = client.search_faces_by_image(CollectionId=collectionId,
Image= {'Bytes':photo_bytes},
FaceMatchThreshold=threshold,
MaxFaces=maxFaces)
faceMatches = response['FaceMatches']
return faceMatches
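# Note (assumption): the Rekognition collection is expected to already contain indexed faces.
# Registration is not shown in this file, but would typically use something like
# client.index_faces(CollectionId=collectionId, Image={'Bytes': photo_bytes},
#                    ExternalImageId=user_id)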
def handler(event, context):
print(event['body'])
body = json.loads(event['body'])
face_photo = body["face_photo"]
facilityID = body["facilityID"]
faceMatches = searchByimg(face_photo)
print(faceMatches)
result = 0
with conn.cursor(pymysql.cursors.DictCursor) as cur:
for match in faceMatches:
print('FaceId:' + match['Face']['FaceId'])
print('Similarity: ' + "{:.2f}".format(match['Similarity']) + "%")
faceID = match['Face']['FaceId']
try:
result = 2
cur.execute("SELECT starttime FROM facility_number WHERE userID=%s AND is_leave=0", faceID)
for row in cur:
# TODO: handle the edge case of leaving the room too soon
dt_now = datetime.datetime.now()
print('exit',dt_now)
cur.execute("UPDATE facility_number SET endtime= %s, is_leave=1 WHERE (is_leave=0 AND userID=%s) AND facilityID= %s",[(dt_now), (faceID),(facilityID)])
print("OK2")
body = json.dumps({
"result":3,
})
conn.commit()
return {
'isBase64Encoded': False,
'statusCode': 200,
'headers':{
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
print('start checking room limit')
cur.execute("SELECT * FROM facility_number WHERE ((is_leave=0 AND facilityID=%s) AND (userID!=%s ))", [(facilityID),(faceID)])
is_leave_num = 0
for row in cur:
print('row=',row)
is_leave_num += 1
print('now staying num=',is_leave_num)
cur.execute("SELECT limit_days FROM facility WHERE facilityID=%s", facilityID)
limit = 0
for row in cur:
limit = row['limit_days']
print('limit=',limit)
if is_leave_num >= limit:
body = json.dumps({
"result":1,
})
print('room over limit')
return { # when the occupancy limit has been exceeded
'isBase64Encoded': False,
'statusCode': 200,
'headers':{
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
print("finish checking room limit")
if result == 2:
dt_now = datetime.datetime.now()
tmp1 = [(faceID), (facilityID), (dt_now), (0)]
print('enter', tmp1)
cur.execute("INSERT INTO facility_number(userID, facilityID, starttime, is_leave) VALUE (%s, %s, %s, %s)",tmp1)
conn.commit()
except Exception as e:
print('Exception ',e)
result = 0
print('finish searching By faceID')
# Commit the database transaction
conn.commit()
body = json.dumps({
"result":result
})
# The return value is a dict (or JSON) with four fields
return {
'isBase64Encoded': False,
'statusCode': 200,
'headers':{
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': body
}
``` |
{
"source": "jphacks/B_2011",
"score": 3
} |
#### File: proctor/websocket/consumers.py
```python
from channels.generic.websocket import AsyncWebsocketConsumer
from sent_messages.serizlizers import MessageSerializer
from channels.db import database_sync_to_async
import json
class ExamineeConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['exam_id']
self.room_group_name = 'chat_{}'.format(self.room_name)
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json
#send message to users from examinees
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'examinee_message',
'message': message
}
)
#save message to database
await self.save_message(text_data)
# Receive message from users
async def user_message(self, event):
message = event['message']
# Send message to WebSocket
print(message)
await self.send(text_data=message)
async def examinee_message(self, event):
pass
@database_sync_to_async
def save_message(self, text):
message = json.loads(text)
serializer = MessageSerializer(data=message)
serializer.is_valid(raise_exception=True)
serializer.save()
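# Routing sketch (illustrative; the project's actual routing.py is not part of this file,
# so the paths and the as_asgi() style are assumptions):
# from django.urls import re_path
# websocket_urlpatterns = [
#     re_path(r"ws/examinee/(?P<exam_id>\w+)/$", ExamineeConsumer.as_asgi()),
#     re_path(r"ws/user/(?P<exam_id>\w+)/$", UserConsumer.as_asgi()),
# ]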
class UserConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['exam_id']
self.room_group_name = 'chat_{}'.format(self.room_name)
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
#send message to users from examinees
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'user_message',
'message': message
}
)
# Receive message from examinees
async def examinee_message(self, event):
message = event['message']
if message['alert']==0:
return 0
# Send message to WebSocket
await self.send(text_data=json.dumps(message))
async def user_message(self, event):
pass
``` |
{
"source": "jphacks/B_2015",
"score": 3
} |
#### File: polls/application/bungou.py
```python
import os
import csv
import random
#from dictionary import make_synonym_dict
def search(param):
#from janome.tokenizer import Tokenizer
#t = Tokenizer()
#print(param2)
print("search:")
print(param)
def flatten(nested_list):
"""2重のリストをフラットにする関数"""
return [e for inner_list in nested_list for e in inner_list]
def search2(param):
param2 = '"' + param + '":'
gokan_sentence_list = []
print(os.getcwd())
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'akutagawa_gokan_dict.tsv', encoding = 'utf-8')as f:
print("開けた!")
for line in f:
#print(line)
#t = 0
if param2 in line:
_,line = line.split(param2)
gokan_sentence_list = line.split('\t')
#t = random.randint(0,len(gokan_sentence_list)-1)
break
print(gokan_sentence_list)
print("aaa")
if gokan_sentence_list!=[]:
return random.choice(gokan_sentence_list)
else:
return ""
gokan_sentence_list = []
gokan_sentence_list.append(search2(param))
#print(gokan_dict)
synonym_dict = {}
#synonym_dict=make_synonym_dict(param)
#print(synonym_dict)
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'akutagawa_synonym_dict.tsv', encoding = 'utf-8')as f:
param2 = '"'+param+'":'
for line in f:
if param2 in line:
_,line = line.split(param2)
synonym_dict[param] = line.split('\t')
for synonym in synonym_dict[param]:
#print(synonym)
gokan_sentence_list.append(search2(synonym))
#if gokan_dict.get(synonym):
#for sentence in gokan_sentence_list:
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'data.csv','a') as f:
for sentence in gokan_sentence_list:
f.write(sentence + '::::::::::')
#writer.writerow(gokan_sentence_list)
# writer.writerow("DONE")"""
# Added below (calling return_text() returns "Hello!!")
def return_text():
#return "Hello!"
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'data.csv') as f:
reader = csv.reader(f, lineterminator='\n,')
datas = []
for row in reader:
# print(row)
datas.append(row)
os.remove(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'data.csv')
return datas
"""
with open(os.getcwd()+'/polls/application/'+'data.csv','a') as f:
reader = csv.reader(f, lineterminator='\n')
datas = []
for row in reader:
datas.append(row)
return datas
"""
"""
For testing
# coding:utf-8
import os
import csv
# Record the data from the HTML form into a csv file
def search(data):
print("dataだよ")
datas = [data]
with open(os.getcwd()+'/polls/application/'+'data.csv','a') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(datas)
"""
```
#### File: polls/application/dictionary.py
```python
import requests
from bs4 import BeautifulSoup
def make_synonym_dict(word):
#word = input()
synonym_dict={word:[]}
url = "https://thesaurus.weblio.jp/content/" + word
#headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'}
r = requests.get(url)
html = r.text
bs = BeautifulSoup(html, 'html.parser')
try:
synonyms_table = bs.find_all("td" ,class_="nwntsR")
#synonyms_table = bs.find_all("div" ,class_="Nwnts")
for synonyms in synonyms_table:
synonyms = synonyms.find_all("li")#class_='crosslink')
#meanings = bs.select_one("#main > div:nth-of-type(13) > div > div.Nwnts > table > tbody > tr:nth-of-type(2) > td.nwntsR > ul > li:nth-of-type(1) > a").text
for synonym in synonyms:
if synonym.find(class_='crosslink')!=None:
synonym = synonym.find(class_='crosslink')
synonym_dict[word] += synonym.contents
#print(synonym_dict)
return synonym_dict
except AttributeError:
meanings = "そのような言葉は見つからなかったよ...。ごめんね。"
print(meanings)
return {}
if __name__ == '__main__':
synonym_dict = make_synonym_dict("ぬこ")
print(synonym_dict)
``` |
{
"source": "jphacks/B_2021",
"score": 3
} |
#### File: 334jst_server/web/models.py
```python
from web import db, ma
from marshmallow_sqlalchemy import ModelSchema
from flask import jsonify
import json
class Sound(db.Model):
__tablename__ = 'sounds'
id = db.Column(db.Integer, primary_key=True)
sound_type = db.Column(db.String(64), index=True)
pitch_name = db.Column(db.String(64), index=True)
start = db.Column(db.Integer, index=True)
length = db.Column(db.Integer, index=True)
room = db.Column(db.String(64), index=True)
made_by = db.Column(db.String(64), index=True)
def registSound(sound):
record = Sound(
sound_type=sound['sound_type'],
pitch_name=sound['pitch_name'],
start=sound['start'],
length=sound['length'],
room=sound['room'],
made_by=sound['made_by']
)
db.session.add(record)
db.session.commit()
return record.id
def removeSound(sound):
found_entry = Sound.query.filter_by(id=sound['id']).first()
db.session.delete(found_entry)
db.session.commit()
return sound
def room_sound_all_remove(sound):
db.session.query(Sound).filter(Sound.room==sound['room']).delete()
db.session.commit()
return sound
def __repr__(self):
return '<Sound id={id} sound_type={sound_type} pitch_name={pitch_name} start={start} length={length} room={room} made_by={made_by}>'.format(id=self.id, sound_type=self.sound_type, pitch_name=self.pitch_name, start=self.start, length=self.length, room=self.room, made_by=self.made_by)
class SoundSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Sound
load_instance = True
class Room(db.Model):
__tablename__ = 'rooms'
name = db.Column(db.String(64), primary_key=True)
bpm = db.Column(db.Integer, index=True)
num_of_bar = db.Column(db.Integer, index=True)
def registRoom(room):
record = Room(
name=room['name'],
bpm=room['bpm'],
num_of_bar = room['num_of_bar']
)
db.session.add(record)
db.session.commit()
return record.name
def removeRoom(room):
found_entry = Room.query.filter_by(name=room['name']).first()
db.session.delete(found_entry)
db.session.commit()
return room
def changeBpm(room):
found_entry = db.session.query(Room).filter(Room.name==room['name']).first()
found_entry.bpm = room['bpm']
db.session.commit()
def changeNumofbar(room):
found_entry = db.session.query(Room).filter(Room.name==room['name']).first()
found_entry.num_of_bar = room['num_of_bar']
db.session.commit()
def __repr__(self):
return '<Room name={name} bpm={bpm} num_of_bar={num_of_bar}>'.format(name=self.name, bpm=self.bpm, num_of_bar=self.num_of_bar)
class RoomSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Room
load_instance = True
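# Usage sketch (illustrative; assumes an active Flask app context and an initialized db):
# new_id = Sound.registSound({"sound_type": "piano", "pitch_name": "C4", "start": 0,
#                             "length": 4, "room": "lobby", "made_by": "alice"})
# data = SoundSchema().dump(Sound.query.get(new_id))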
``` |
{
"source": "jphacks/B_2113",
"score": 3
} |
#### File: seculities/main/resizing.py
```python
import re
import cv2
import argparse
import numpy as np
def only_resize(imgcv):
# Crop the original image so that only the digits remain
name = "yuuka"
h, w, ch = imgcv.shape
# Initial crop bounds (height and width)
h_start = 350
h_end = h-125
w_start = round(w/2)
w_end = w-92
# Crop
imgcv = imgcv[h_start:h_end, w_start:w_end, :ch]
# Current height and width of the crop
height = h_end - h_start
width = w_end - w_start
# Scale by a factor of n
n = 3
width = round(width *n)
height = round(height *n)
imgcv = cv2.resize(imgcv,(width,height))
return imgcv
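# Usage sketch (illustrative; 'input.png' is a placeholder path, not from the source):
# img = cv2.imread("input.png")
# cropped = only_resize(img)
# cv2.imwrite("digits.png", cropped)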
```
#### File: seculities/main/views.py
```python
import os
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from .forms import UploadFileForm
from pathlib import Path
from pdf2image import convert_from_path
from .google_ocr_jap import japanese_array
from .google_ocr_num import number_array
from .creation_excel import cre_ex
# Dictionary of balance-sheet terms and their explanations (user-facing text stays in Japanese)
dic_account = {'流動資産' : '短い期間で現金にすることが可能な資産','現金及び預金' : '企業が所有している現金や、銀行に預けている預金',
'受取手形及び売掛金' : '売上代金の未回収分' ,'有価証券' : '財産的価値のある権利を表彰する証券','棚卸資産' : '企業が販売する目的で一時的に保有している商品・製品・原材料・仕掛品の総称',
'貸倒引当金' : '貸倒損失によるリスクに備え、損失になるかもしれない金額を予想して、あらかじめ計上しておく引当金', '流動資産合計' : '流動資産の合計','固定資産' : '会社が長期間にわたり保有するものや1年を超えて現金化・費用かされる資産',
'有形固定資産' : '実体を持つ固定資産' , '無形固定資産':'実体を持たない固定資産','投資その他の資産':'固定資産のひとつで、有形固定資産、無形固定資産に入らない資産',
'投資有価証券':'満期保有目的の有価証券など','投資その他の資産合計':'固定資産の中で、有形固定資産、無形固定資産に入らない資産','固定資産合計':'固定資産の合計','資産合計':'会社が運用している財産の総額すなわち資産の合計',
'負債の部':'株主・会社以外の外部からの調達資金','流動負債':'原則として1年以内に返済しなくてはならない債務','支払手形及び買掛金':'仕入先との取引に基づいた手形上の債務と仕入先との取引によって発生した営業上の未払金',
'引当金':'将来の支出に備えてあらかじめ準備しておく見積金額','未払法人税等':'納付すべき法人税、住民税および事業税の未払い額','流動負債合計':'流動負債の合計','固定負債':'1年以内に支払い義務が発生しない負債',
'退職給付に係る負債':'連結財務諸表上、退職給付から年金資産の額を控除した貸方残高(積立状況を示す額)を負債として計上したもの','固定負債合計':'固定負債の合計','負債合計':'流動負債と固定負債の合計',
'純資産の部':'資産から負債を差し引いた金額','株主資本':'株主が出資した資本と資本を使って生じた利益のこと','資本金':'事業を円滑に進めるために、株主が会社に出資した金額のこと','資本剩余金':'設立後新たに株式を発行した時など資本取引によって発生する余剰金',
'利益剩余金':'会社の活動によって得た利益のうち、社内に留保している額','自己株式':'株式会社が発行する株式のうち、自社で取得した上で保有している株式のこと','株主資本合計':'株主資本の合計',
'その他の包括利益累計額':'これまでに公表された会計基準等で使用されている純資産の部の「評価・換算差額等」を読み替えたもの','その他有価証券評価差額金':'その他有価証券を毎期末に時価評価した場合の、相手勘定を表す勘定科目',
'為替換算調整勘定':'連結財務諸表を作成する手続で発生する換算差額を調整する勘定科目','その他の包括利益累計額合計':'その他の包括利益累計額の合計','非支配株主持分':'連結子会社の資本のうち連結親会社の持分に属しない部分',
'純資産合計':'純資産の合計','負債純資産合計':'負債と純資産の合計','その他1':'','その他2':'','その他3':'','その他4':'','その他5':'',
}
# Create your views here.
def top(request):
return render(request,'top.html')
UPLOAD_DIR = os.path.dirname(os.path.abspath(__file__)) + '/uploads/' # directory where uploaded files are saved
UPLOADS_DIR = os.path.dirname(os.path.abspath(__file__)) + '/jpg_uploads/'
# Handle the uploaded file
def handle_uploaded_file(f):
path = os.path.join(UPLOAD_DIR, f.name)
with open(path, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
# Path to the PDF file
pdf_path = Path(UPLOAD_DIR) / f.name
# Output directory path
img_path=Path(f"{UPLOADS_DIR}")
# This single call writes the converted JPEG files into the image folder.
convert_from_path(pdf_path, output_folder=img_path,fmt='jpeg',output_file=pdf_path.stem)
# File upload
def upload(request):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
pdf_name = request.FILES['yuuka']
handle_uploaded_file(request.FILES['yuuka'])
return render(request, 'upload.html', {'form': form,'pdf_name':pdf_name})
# File upload completed
def upload_complete(request):
jap_ar,num_ar = japanese_array()
asset = list()
liability = list()
net_asset = list()
flag = 0
asset_num = list()
liability_num = list()
net_asset_num = list()
co = 0
for i in jap_ar:
if(i=='資産の部'):
flag = 1
elif(i=='負債の部'):
flag = 2
elif(i=='純資産の部'):
flag = 3
elif(flag ==1):
asset.append(i)
elif(flag ==2):
liability.append(i)
elif(flag==3):
net_asset.append(i)
all = list()
for i in asset:
all.append(i)
for j in liability:
all.append(j)
for k in net_asset:
all.append(k)
asset_num = list()
liability_num = list()
net_asset_num = list()
num_before = list()
num_current = list()
for j in num_ar:
if(co%2==0):
co += 1
num_before.append(j)
else:
co += 1
num_current.append(j)
co = 0
ad = 0
# This branching could probably be replaced with a dict lookup
for k in all:
if(k=='資産の部'):
continue
elif(k=='負債の部'):
continue
elif(k=='純資産の部'):
continue
elif('流動資産' == k):
ad += 1
asset_num.append(' ')
continue
elif('固定資産' == k):
ad += 1
asset_num.append(' ')
continue
elif('投資その他の資産' ==k):
ad += 1
asset_num.append(' ')
continue
elif('流動負債' == k):
ad += 1
liability_num.append(' ')
continue
elif('固定負債' == k):
ad += 1
liability_num.append(' ')
continue
elif('株主資本' == k):
ad += 1
net_asset_num.append(' ')
continue
elif('その他の包括利益累計額' == k):
ad += 1
net_asset_num.append(' ')
continue
elif(ad<len(asset)):
ad += 1
asset_num.append(num_current[co])
co += 1
continue
elif(len(asset)<=ad and ad <len(liability)+len(asset)):
liability_num.append(num_current[co])
ad += 1
co += 1
continue
elif(len(liability)+len(asset)<=ad):
net_asset_num.append(num_current[co])
ad += 1
co += 1
continue
co = 0
ad = 0
asset_num_before = list()
liability_num_before = list()
net_asset_num_before = list()
for k in all:
if(k=='資産の部'):
continue
elif(k=='負債の部'):
continue
elif(k=='純資産の部'):
continue
elif('流動資産' == k):
ad += 1
asset_num_before.append(' ')
continue
elif('固定資産' == k):
ad += 1
asset_num_before.append(' ')
continue
elif('投資その他の資産' ==k):
ad += 1
asset_num_before.append(' ')
continue
elif('流動負債' == k):
ad += 1
liability_num_before.append(' ')
continue
elif('固定負債' == k):
ad += 1
liability_num_before.append(' ')
continue
elif('株主資本' == k):
ad += 1
net_asset_num_before.append(' ')
continue
elif('その他の包括利益累計額' == k):
ad += 1
net_asset_num_before.append(' ')
continue
elif(ad<len(asset)):
ad += 1
asset_num_before.append(num_before[co])
co += 1
continue
elif(len(asset)<=ad and ad <len(liability)+len(asset)):
liability_num_before.append(num_before[co])
ad += 1
co += 1
continue
elif(len(liability)+len(asset)<=ad):
net_asset_num_before.append(num_before[co])
ad += 1
co += 1
continue
current = list()
ex_temp0 = list()
ex_temp1 = list()
for i in asset_num:
ex_temp0.append(i)
for j in liability_num:
ex_temp0.append(j)
for k in net_asset_num:
ex_temp0.append(k)
for i in asset_num_before:
ex_temp1.append(i)
for j in liability_num_before:
ex_temp1.append(j)
for k in net_asset_num_before:
ex_temp1.append(k)
all_new = list()
for i in asset:
all_new.append(i)
for j in liability:
all_new.append(j)
for k in net_asset:
all_new.append(k)
cre_ex(all_new,ex_temp0,ex_temp1)
dic = {key:val for key,val in zip(asset,asset_num)}
dic_lia = {key:val for key,val in zip(liability,liability_num)}
dic_net = {key:val for key,val in zip(net_asset,net_asset_num)}
# Explanations for each term
phrase_description = list()
for k in all:
val =dic_account[k]
phrase_description.append(val)
dic_phrase_description = {key:val for key,val in zip(all,phrase_description)}
co = 0
# Current ratio
# current ratio = current assets / current liabilities * 100
# only meaningful if the term '流動資産合計' (total current assets) was detected
flow_rate = dic['流動資産合計']/dic_lia['流動負債合計']*100
flow_rate = round(flow_rate)
# Quick ratio
# quick ratio = quick assets / current liabilities * 100
flag = 0
current_term_asset = 0
for i in asset:
if(flag):
break
elif(i=='流動資産'):
continue
elif(i=='棚卸資産' ):
flag = 1
elif(i=='その他1'):
flag =1
else:
current_term_asset+=dic[i]
flag = 0
current_liabilities = 0
for j in liability:
if(flag):
break
elif(j=='流動負債'):
continue
elif(j=='その他3'):
flag = 1
else:
current_liabilities+=dic_lia[j]
current_rate = current_term_asset/current_liabilities*100
current_rate = round(current_rate)
# Fixed ratio, an indicator of long-term financial safety
# fixed ratio = fixed assets / equity * 100
fixed_rate = dic['固定資産合計']/dic_net['純資産合計']*100
fixed_rate = round(fixed_rate)
# Equity ratio
# equity ratio = net assets / total capital * 100
capital_adequacy_ratio = dic_net['純資産合計']/dic['資産合計']*100
capital_adequacy_ratio = round(capital_adequacy_ratio)
return render( request, 'upload_complete.html',context={
'liability' : liability,
'net_asset' : net_asset,
'liability_num_current' : liability_num,
'net_asset_num_current' : net_asset_num,
'dic_current' : dic,
'dic_current_lia' : dic_lia,
'dic_current_net' : dic_net,
'dic_phrase_description' : dic_phrase_description,
'flow_rate' : flow_rate,
'current_rate' : current_rate,
'fixed_rate' : fixed_rate,
'capital_adequacy_ratio' : capital_adequacy_ratio,
})
``` |
{
"source": "jphacks/C_2002",
"score": 3
} |
#### File: server/FunctionTest/ToHiragana.py
```python
import os
import configparser
from goolabs import GoolabsAPI
import json
import operator
# Get the location of this source file
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# Load the config file
config = configparser.ConfigParser()
config.read(APP_ROOT + '/config_local.ini')
# Read the goo Lab API settings from the config file
Goo_API_APPLICATION_ID = config.get("Goo_API", "ApplicationId")
# Configure the goo Lab API client
gooAPI = GoolabsAPI(Goo_API_APPLICATION_ID)
# Sort names in Japanese syllabary (gojūon) order
def sort_name(list):
name_list = []
for name in list:
response = gooAPI.hiragana(sentence=name, output_type="hiragana")
response['before'] = name
# print(response)
name_list.append(response)
name_sort_list = sorted(name_list, key=operator.itemgetter('converted'))
return name_sort_list
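# Each API response is expected to include a 'converted' hiragana reading (used as the sort
# key above); the original surface form is kept under 'before'. Example call (illustrative):
# sort_name(['高尾', '岡崎'])  # -> list of response dicts ordered by reading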
arr = [ '高尾', '大羽', '岡崎', '近藤', '石倉' ]
print(sort_name(arr))
``` |
{
"source": "jphacks/C_2008",
"score": 2
} |
#### File: data/src/global_variables.py
```python
def init():
global tobiiCurrentX, tobiiCurrentY
global participant
global writeScreenCapVideo, useAaronCircles
global wgCurrentX, wgCurrentY
global pctFile
global participantPos, participantDirList
global onlyWritingVideos
# Options
onlyWritingVideos = True # Only process videos where the participant is asked to write into a text field
writeScreenCapVideo = False
useAaronCircles = False
# global_variables for current state of eye tracking
tobiiCurrentX = 0
tobiiCurrentY = 0
wgCurrentX = 0
wgCurrentY = 0
# Which participant are we on?
participantDirList = []
participantPos = -1
participant = []
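# Usage sketch (illustrative): other modules share this state by importing the module and
# calling init() once at startup, e.g.
# import global_variables
# global_variables.init()
# global_variables.participantPos += 1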
```
#### File: data/src/participant.py
```python
import csv
import glob
import math
import os
import json
from decimal import Decimal
import tornado.escape
import global_variables
pctFile = "participant_characteristics.csv"
##################################################################
# Screen properties
#
# WARNING: MAGIC NUMBERS
# PC clientX = 0 screen position
pcDocumentStartX = 0
pcDocumentStartY = 66
pcDocumentEndXNoScrollbar = 1902 # not including scroll bar
pcDocumentEndX = 1919 # not including scroll bar
pcDocumentEndY = 1110 # not including scroll bar
# Laptop clientX = 0 screen position
laptopDocumentStartX = 0
laptopDocumentStartY = 97
laptopDocumentEndXNoScrollbar = 1439 # not including scroll bar
laptopDocumentEndX = 1439 # not including scroll bar
laptopDocumentEndY = 775 # not including scroll bar
# In pixels
chromeDownloadBarHeight = 52
##################################################################
# Classes for data storage
#
class TobiiData:
timestamp = 0
rightEyeValid = False
leftEyeValid = False
rightScreenGazeX = -math.inf
rightScreenGazeY = -math.inf
leftScreenGazeX = -math.inf
leftScreenGazeY = -math.inf
def __init__(self, timestamp, rev, lev, rsgx, rsgy, lsgx, lsgy ):
self.timestamp = timestamp
self.rightEyeValid = rev
self.leftEyeValid = lev
self.rightScreenGazeX = rsgx
self.rightScreenGazeY = rsgy
self.leftScreenGazeX = lsgx
self.leftScreenGazeY = lsgy
def __str__(self):
return "[TobiiData] Timestamp: " + str(self.timestamp) + " RightEyeValid: " + str(self.rightEyeValid) + " REX: " + str(self.rightScreenGazeX) + " REY: " + str(self.rightScreenGazeY)
class ParticipantVideo:
filename = []
startTimestamp = -1
stopTimestamp = -1
frameFilesList = []
frameFilesPos = -1
def __init__(self, filename, startTimestamp):
self.filename = filename
self.startTimestamp = startTimestamp
def __str__(self):
return "[ParticipantVideo] Timestamp: " + str(self.startTimestamp) + " Filename: " + str(self.filename)
class ParticipantData:
directory = ""
videos = []
videosPos = -1
startTimestamp = -1
screenWidthPixels = -1
screenHeightPixels = -1
inputLogFile = ""
wgWindowX = -1
wgWindowY = -1
wgWindowInnerWidth = -1
wgWindowInnerHeight = -1
wgWindowOuterWidth = -1
wgWindowOuterHeight = -1
touchTypist = "" # Equals either 'Yes' or 'No'
pcOrLaptop = "" # Equals either 'Laptop' or 'PC'
tobiiLogFile = ""
tobiiList = []
tobiiListPos = 0
screencapFile = ""
screencap = None
screencapOut = None
screencapStartTime = 0
screencapFrameWidth = 0
screencapFrameHeight = 0
screencapFrameRate = 0
prevMSECIntoVideo = -1
def __init__(self, directory):
self.directory = directory
def __str__(self):
return "[ParticipantData] Directory: " + self.directory + " PC or Laptop: " + str(self.pcOrLaptop)
def loadParticipantData(self):
########################
# Load participant characteristics as technical parts
with open( pctFile ) as f:
readCSV = csv.reader(f, delimiter=',')
for row in readCSV:
if row[0] == self.directory:
self.screenWidthPixels = int(row[4])
self.screenHeightPixels = int(row[5])
self.pcOrLaptop = str(row[3]) # Equals either 'Laptop' or 'PC'
self.touchTypist = str(row[18]) # Equals either 'Yes' or 'No'
if row[9] != '':
self.screencapStartTime = int(row[9]) # 20180316 JT Note: the value in the .csv is currently inaccurate or incomplete
else:
self.screencapStartTime = 0
break
########################
# WebGazer event log
# *dot_test_instructions.webm is the first video file.
webMFile = glob.glob( self.directory + '/' + '*dot_test_instructions.webm' )
# Split the video name into its pieces
try:
f = os.path.split( webMFile[0] )[1]
except IndexError:
raise OSError('Files are not in right location, see https://webgazer.cs.brown.edu/data/ for details'\
+ 'on how to correct this')
# Find the first part of the video filename, which is the timestamp as a string
self.startTimestamp = int(f[0:f.find('_')])
print( self.directory )
self.inputLogFile = self.directory + "/" + str(self.startTimestamp) + ".json"
# Load WebGazer browser window parameters
with open( self.inputLogFile ) as f:
inputLog = json.load( f )
for l in inputLog:
if l.get('windowX') != None:
self.wgWindowX = int(l['windowX'])
self.wgWindowY = int(l['windowY'])
self.wgWindowInnerWidth = int(l['windowInnerWidth'])
self.wgWindowInnerHeight = int(l['windowInnerHeight'])
self.wgWindowOuterWidth = int(l['windowOuterWidth'])
self.wgWindowOuterHeight = int(l['windowOuterHeight'])
break
# Find all video recordings, with start times
self.videos = []
self.videosPos = -1
for l in inputLog:
if l.get("type") == "recording start":
fn = l.get("sessionString")
fn = fn.replace('/', '-') + '.webm'
starttime = l.get("epoch")
pv = ParticipantVideo( fn, starttime )
self.videos.append( pv )
################################
# Filter video names
#
# Oftentimes, we want to only process a subset of the videos per participant.
# We can remove videos from self.videos here to accomplish this.
#
# E.G., if we only wanted video files to do with _writing and the dot tests, then
if global_variables.onlyWritingVideos:
self.videos = [x for x in self.videos if (x.filename.find('_writing') >= 0 or x.filename.find('dot_test.') >= 0 or x.filename.find( 'dot_test_final.' ) >= 0)]
################################
# Read in JSON output from Tobii
self.tobiiLogFile = self.directory + '/' + self.directory + ".txt"
self.tobiiList = []
self.tobiiListPos = 0
# Each line is a JSON object, so let's read the file line by line
with open( self.tobiiLogFile, 'r' ) as f:
for line in f:
l = json.loads(line, parse_float=Decimal)
rsgx = float(l['right_gaze_point_on_display_area'][0])
rsgy = float(l['right_gaze_point_on_display_area'][1])
lsgx = float(l['left_gaze_point_on_display_area'][0])
lsgy = float(l['left_gaze_point_on_display_area'][1])
timestamp = round( l['true_time'] * 1000 )
rpv = l['right_pupil_validity']
lpv = l['left_pupil_validity']
td = TobiiData( timestamp, rpv, lpv, rsgx, rsgy, lsgx, lsgy )
self.tobiiList.append( td )
################################
# Define screen capture file
if self.pcOrLaptop == "Laptop":
self.screencapFile = self.directory + "/" + self.directory + ".mov"
elif self.pcOrLaptop == "PC":
self.screencapFile = self.directory + "/" + self.directory + ".flv"
else:
print( "Not PC nor Laptop! ERROR ERROR" )
global_variables.writeScreenCapVideo = False
return
# If we're going to write out a screen capture video, then load it
if global_variables.writeScreenCapVideo:
openScreenCapOutVideo( self )
loadScreenCapVideo( self )
###########################################################################################################
# Messages to send over WebSockets
#
def sendParticipantInfo( wsh, participant ):
# Tell the connecting socket about the participant
# Screen coordinate data
if participant.pcOrLaptop == "PC":
docStartX = pcDocumentStartX
docStartY = pcDocumentStartY
else:
docStartX = laptopDocumentStartX
docStartY = laptopDocumentStartY
parcel = ({ 'msgID': "0",
'screenWidthPixels': str(participant.screenWidthPixels),
'screenHeightPixels': str(participant.screenHeightPixels),
'docStartX': str(docStartX),
'docStartY': str(docStartY),
'touchTypist': str(participant.touchTypist),
'screencapStartTime': str(participant.screencapStartTime),
'participantScreenCapFile': str(participant.screencapFile),
'participantInputLogFile': str(participant.inputLogFile)})
wsh.write_message( tornado.escape.json_encode( parcel ) )
def newParticipant( wsh ):
global_variables.participantPos = global_variables.participantPos + 1
# Check we're not at the last participant
if global_variables.participantPos >= len(global_variables.participantDirList):
print( "All participants completed." )
exit()
else:
# Load the participant data
global_variables.participant = ParticipantData( global_variables.participantDirList[global_variables.participantPos] )
global_variables.participant.loadParticipantData()
sendParticipantInfo( wsh, global_variables.participant )
``` |
{
"source": "jphacks/C_2013",
"score": 3
} |
#### File: backend/controller/mayu_controller.py
```python
import cv2
import numpy as np
import time
import urllib
from PIL import Image
# from middleware.detect import detection
from middleware.fast_detect import detection
def cv2pil(image):
''' Convert an OpenCV (BGR) image to a PIL image '''
new_image = image.copy()
if new_image.ndim == 2: # grayscale
pass
elif new_image.shape[2] == 3: # color
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
elif new_image.shape[2] == 4: # with alpha channel
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)
new_image = Image.fromarray(new_image)
return new_image
def pil2cv(image):
''' Convert a PIL image to an OpenCV (BGR) image '''
new_image = np.array(image, dtype=np.uint8)
if new_image.ndim == 2: # grayscale
pass
elif new_image.shape[2] == 3: # color
new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
elif new_image.shape[2] == 4: # with alpha channel
new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)
return new_image
def calc_mayu_size(landmarks):
# Eyebrow width
r_mayu_len = landmarks[4][0] - landmarks[0][0]
l_mayu_len = landmarks[9][0] - landmarks[5][0]
ave_mayu_len = (r_mayu_len+l_mayu_len)/2
# Distance between the two eyebrows
mayu_dist = landmarks[5][0] - landmarks[4][0]
return ave_mayu_len, mayu_dist
def synth_mayu_temp(f_img, b_img, pos, isflip=False):
if isflip:
# Left eyebrow
f_img = cv2.flip(f_img, 1) # flip horizontally
x_pos = pos[0] - f_img.shape[1]
else:
# Right eyebrow
x_pos = pos[0]
y_pos = int(pos[1] - f_img.shape[0] * (4 / 5))
layer1 = cv2pil(b_img).convert('RGBA')
layer2 = cv2pil(f_img).convert('RGBA')
c = Image.new('RGBA', layer1.size, (255, 255, 255, 0))
mask = layer2.copy()
layer2.putalpha(80)
c.paste(layer2, (x_pos, y_pos), mask=mask)
result = Image.alpha_composite(layer1, c)
return pil2cv(result)
def resize_mayu(temp_img, mayu_len):
height = temp_img.shape[0]
width = temp_img.shape[1]
ratio = mayu_len / width
temp_img = cv2.resize(temp_img, (int(width*ratio), int(height*ratio)))
return temp_img
def mayu_handler(stream, img_uri):
landmarks, img = detection(stream)
for landmark in landmarks:
# Compute eyebrow size from landmarks [17-27]
mayu_len, mayu_dist = calc_mayu_size(landmark[17:27])
# Paste the eyebrow template image
resp = urllib.request.urlopen(img_uri)
mayu_byte = np.asarray(bytearray(resp.read()), dtype="uint8")
mayu_img = cv2.imdecode(mayu_byte, cv2.IMREAD_UNCHANGED)
# mayu_img = cv2.imread('./template-images/mayu-1.png', cv2.IMREAD_UNCHANGED)
mayu_img = resize_mayu(mayu_img, mayu_len)
img = synth_mayu_temp(mayu_img, img, landmark[22], False) # left eyebrow
img = synth_mayu_temp(mayu_img, img, landmark[21], True) # right eyebrow
result, dst_data = cv2.imencode('.png', img)
# cv2.imwrite("./result/{}.png".format(str(time.time())), img)
return dst_data
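# Usage sketch (illustrative; 'stream' is a base64-encoded photo and img_uri points at a
# transparent PNG eyebrow template -- the URL below is a placeholder):
# png = mayu_handler(stream, "https://example.com/template-images/mayu-1.png")
# with open("result.png", "wb") as fp:
#     fp.write(png.tobytes())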
if __name__ == "__main__":
pass
```
#### File: backend/middleware/detect_dots.py
```python
import cv2
import dlib
import numpy as np
from flask import Flask
import base64
import time
app = Flask(__name__)
app.config.from_json('../config/config.json')
cfg = app.config
# Paths to the OpenCV cascade file and the trained predictor models
CASCADE_PATH = cfg["CASCADEPASS"]
CASCADE = cv2.CascadeClassifier(CASCADE_PATH + cfg["CASCADE"])
LEARNED_MODEL_PATH = cfg["LEARNEDMODELPATH"]
MODEL = {}
MODEL['PREDICTOR'] = dlib.shape_predictor(LEARNED_MODEL_PATH + cfg["DETECTMODEL"])
MODEL['SURROUNDER'] = dlib.shape_predictor(LEARNED_MODEL_PATH + cfg['SURROUNDMODEL'])
def face_position(gray_img):
# Detect face positions; returns a list of (x, y, w, h)
faces = CASCADE.detectMultiScale(gray_img, minSize=(100, 100))
return faces
def facemark(gray_img, model):
# Facial landmark detection
faces_roi = face_position(gray_img)
landmarks = []
for face in faces_roi:
detector = dlib.get_frontal_face_detector()
rects = detector(gray_img, 1)
landmarks = []
for rect in rects:
landmarks.append(
np.array([[p.x, p.y] for p in MODEL[model](gray_img, rect).parts()]))
return landmarks
def detect_dots(stream, model='PREDICTOR'):
img_binary = base64.b64decode(stream)
jpg = np.frombuffer(img_binary, dtype=np.uint8)
img = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale to speed up processing
landmarks = facemark(gray, model) # detect landmarks
b_channel, g_channel, r_channel = cv2.split(img)
alpha_channel = np.zeros(b_channel.shape, dtype=b_channel.dtype)
img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
# Draw the landmarks
for landmark in landmarks:
for points in landmark:
cv2.drawMarker(
img_BGRA, (points[0], points[1]), (21, 255, 12, 100))
result, dst_data = cv2.imencode('.png', img_BGRA)
# cv2.imwrite("./result/{}.png".format(str(time.time())), img_BGRA)
return dst_data
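# Usage sketch (illustrative; 'face.jpg' is a placeholder path):
# with open("face.jpg", "rb") as fp:
#     stream = base64.b64encode(fp.read())
# png = detect_dots(stream, model="PREDICTOR")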
if __name__ == '__main__':
pass
```
#### File: C_2013/backend/personal_color.py
```python
import os
from glob import glob
import numpy as np
from PIL import Image, ImageFile
from pytorch_lightning import seed_everything
import torchvision.transforms as transforms
from io import BytesIO
import base64
import ml_model
ImageFile.LOAD_TRUNCATED_IMAGES = True
transform = transforms.Compose([
transforms.Resize(400),
transforms.CenterCrop(384),
# transforms.RandomRotation([-10, 10]),
# transforms.RandomCrop(384),
# transforms.RandomHorizontalFlip(p=0.5),
# transforms.RandomVerticalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
"""
0:autumn
1:spring
2:summer
3:winter
"""
PRETRAINED = "learned-models/epoch=572.ckpt"
def predict_Personal_Color(data):
predict_model = ml_model.FineTuningModel.load_from_checkpoint(PRETRAINED)
predict_model.eval()
img = Image.open(BytesIO(base64.b64decode(data))).convert('RGB')
# img = Image.open('../../backend/img/input_1.jpg')
img = transform(img)
img = img.unsqueeze(0)
output = predict_model(img).squeeze(0).detach().numpy()
rate = {"autumn": output[0], "spring": output[1], "summer": output[2], "winter": output[3]}
return rate
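# Usage sketch (illustrative): 'data' is a base64-encoded image, e.g.
# with open("face.jpg", "rb") as fp:
#     rate = predict_Personal_Color(base64.b64encode(fp.read()))
# Note that the __main__ call below omits the required 'data' argument as written.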
if __name__ == "__main__":
print(predict_Personal_Color())
```
#### File: landmark_detection/tools/detect.py
```python
import cv2
import dlib
import numpy
import os
class detect:
def __init__(self, cascade_model_path, landmark_model_path):
self.face_cascade = cv2.CascadeClassifier(cascade_model_path)
self.predictor = dlib.shape_predictor(landmark_model_path)
def detect_face_position(self):
faces = self.face_cascade.detectMultiScale(self.gray, minSize=(100, 100))
return faces
def detect_facial_landmarks(self):
faces_roi = self.detect_face_position()
landmarks = []
for face in faces_roi:
x, y, w, h = face
face_img = self.gray[y: y + h, x: x + w];
rects = dlib.rectangle(x, y, x + w, y + h)
landmarks = []
landmarks.append(numpy.array([[p.x, p.y] for p in self.predictor(self.gray, rects).parts()]))
return landmarks
def draw_landmarks_img(self, img_path):
img = cv2.imread(img_path)
self.gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
landmarks = self.detect_facial_landmarks()
for landmark in landmarks:
for i,points in enumerate(landmark):
cv2.putText(img, str(i), (points[0], points[1]),cv2.FONT_HERSHEY_PLAIN, 0.4, (255, 100, 12))
# cv2.drawMarker(img, (points[0], points[1]), (255, 100, 12))
while True:
cv2.imshow("Image", img)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
def draw_landmarks_video(self):
cap = cv2.VideoCapture(0)
while cap.isOpened():
ret, frame = cap.read()
self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
landmarks = self.detect_facial_landmarks()
for landmark in landmarks:
for points in landmark:
cv2.drawMarker(frame, (points[0], points[1]), (21, 255, 12))
cv2.imshow("video frame", frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
```
#### File: ml/parsonalcolor_detection/model.py
```python
from utils import EasyDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from pytorch_lightning.core import LightningModule
import torchvision.models as models
from efficientnet_pytorch import EfficientNet
MODELS = [
"vgg16", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnet101_v2",
'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7',
'efficientnet-b8'
]
class FineTuningModel(LightningModule):
def __init__(self, env):
super().__init__()
self.save_hyperparameters()
if env == {}:
# Fallback because save_hyperparameters() had not been called (so env arrives empty)
from train import env
if type(env) == dict:
env = EasyDict(env)
self.env = env
assert env.base_model in MODELS
if env.base_model == "vgg16":
self.model = models.vgg16(pretrained=True)
self.model = nn.Sequential(
*list(self.model.children())[:-2])
fc_in_features = 512
if env.base_model.startswith("resnet"):
self.model = getattr(models, env.base_model)(pretrained=True)
fc_in_features = self.model.fc.in_features
self.model = nn.Sequential(*list(self.model.children())[:-2])
if env.base_model.startswith("efficientnet"):
self._model = EfficientNet.from_pretrained(
env.base_model, include_top=False)
fc_in_features = self._model._fc.in_features
self.model = self._model.extract_features
self.dropout = nn.Dropout(env.dropout_rate)
self.fc = nn.Linear(fc_in_features, env.num_class)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = self.model(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x)
x = self.softmax(x)
return x
def training_step(self, batch, batch_idx):
# forward pass
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
tensorboard_logs = {'train_loss': loss}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
val_loss = F.cross_entropy(y_hat, y)
labels_hat = torch.argmax(y_hat, dim=1)
n_correct_pred = torch.sum(y == labels_hat).item()
return {'val_loss': val_loss, "n_correct_pred": n_correct_pred, "n_pred": len(x)}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
test_loss = F.cross_entropy(y_hat, y)
labels_hat = torch.argmax(y_hat, dim=1)
n_correct_pred = torch.sum(y == labels_hat).item()
return {'test_loss': test_loss, "n_correct_pred": n_correct_pred, "n_pred": len(x)}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
val_acc = sum([x['n_correct_pred'] for x in outputs]) / \
sum(x['n_pred'] for x in outputs)
tensorboard_logs = {'val_loss': avg_loss, 'val_acc': val_acc}
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
test_acc = sum([x['n_correct_pred'] for x in outputs]
) / sum(x['n_pred'] for x in outputs)
tensorboard_logs = {'test_loss': avg_loss, 'test_acc': test_acc}
return {'test_loss': avg_loss, 'log': tensorboard_logs}
# ---------------------
# TRAINING SETUP
# ---------------------
def configure_optimizers(self):
"""
Return whatever optimizers and learning rate schedulers you want here.
At least one optimizer is required.
"""
optimizer = optim.Adam(
self.parameters(), lr=self.env.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
return [optimizer], [scheduler]
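# Instantiation sketch (illustrative; the exact env fields come from train.py, but the
# attributes read above are base_model, num_class, dropout_rate and learning_rate):
# env = EasyDict({"base_model": "efficientnet-b0", "num_class": 4,
#                 "dropout_rate": 0.3, "learning_rate": 1e-4})
# model = FineTuningModel(env)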
``` |
{
"source": "jphacks/C_2105",
"score": 2
} |
#### File: backend/src/app.py
```python
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
# import db_init
import db_seed
import json
import datetime
import uuid
import random
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/jphacks.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
CORS(app)
db = SQLAlchemy(app)
# Project table definition
class Project(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
explanation = db.Column(db.String)
progress = db.Column(db.Integer)
imgUrl = db.Column(db.String)
targetAmount = db.Column(db.Integer)
# Relations to other tables (foreign keys)
log = db.relationship('Log')
column = db.relationship('Column')
# Initialization
def __init__(self, id=0, title='', explanation='', progress=0, imgUrl='', targetAmount=0):
self.id = id
self.title = title
self.explanation = explanation
self.progress = progress
self.imgUrl = imgUrl
self.targetAmount = targetAmount
# Log table definition
class Log(db.Model):
logId = db.Column(db.Integer, primary_key=True)
id = db.Column(db.Integer, db.ForeignKey('project.id'))
date = db.Column(db.String)
earnedValue = db.Column(db.Integer)
donationType = db.Column(db.String)
def __init__(self, logId=0, id=0, date='', earnedValue=0, donationType=''):
self.logId = logId
self.id = id
self.date = date
self.earnedValue = earnedValue
self.donationType = donationType
# Column (article) table definition
class Column(db.Model):
columnId = db.Column(db.Integer, primary_key=True)
id = db.Column(db.Integer, db.ForeignKey('project.id'))
columnTitle = db.Column(db.String)
body = db.Column(db.String)
date = db.Column(db.String)
imgUrl = db.Column(db.String)
def __init__(self, columnId=0, id=0, columnTitle='', body='', date='', imgUrl=''):
self.columnId = columnId
self.id = id
self.columnTitle = columnTitle
self.body = body
self.date = date
self.imgUrl = imgUrl
# Return the project as a plain dict (the model object itself is hard to work with as-is).
def project_record(p):
return {'id': p.id, 'title': p.title, 'explanation': p.explanation, 'progress': p.progress, 'imgUrl': p.imgUrl, 'targetAmount': p.targetAmount}
# Return a log entry as a plain dict.
def log_record(l):
return {'logId': l.logId, 'id': l.id, 'date': l.date, 'earnedValue': l.earnedValue, 'donationType': l.donationType}
# Return a column (article) as a plain dict.
def column_record(c):
return {'columnId': c.columnId, 'id': c.id, 'columnTitle': c.columnTitle, 'body': c.body, 'date': c.date, 'imgUrl': c.imgUrl}
# Return the list of projects.
@app.route('/project')
def project():
id = request.args.get('id')
if id is not None:
# Return only the project matching id
project = Project.query.get(id)
return json.dumps(project_record(project))
else:
project = Project.query.all()
result = []
for p in project:
result.append(project_record(p))
return json.dumps(result)
# Return the list of logs.
@app.route('/log')
def log():
id = request.args.get('id')
if id is not None:
log = Project.query.get(id).log
result = []
for l in log:
result.append(log_record(l))
return json.dumps(result)
else:
return 'パラメータが正しくありません。'
# Return the list of columns (articles).
@app.route('/column')
def column():
id = request.args.get('id')
if id is not None:
column = Project.query.get(id).column
result = []
for c in column:
result.append(column_record(c))
return json.dumps(result)
else:
return 'パラメータが正しくありません。'
# Make a donation.
@app.route('/collect')
def collect():
logId = int(str(uuid.uuid4().int)[:5])
id = request.args.get('id')
dt_now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
earnedValue = request.args.get('earnedValue')
if id is not None:
# Donate to the project matching id
donationType = 'selected'
else:
# Donate to a randomly chosen project
donationType = 'auto'
project_all = Project.query.all()
id_array = []
for p in project_all:
id_array.append(p.id)
target_index = random.randint(0, len(id_array) - 1)
id = id_array[target_index]
# Record the log entry.
log = Log(logId, id, dt_now, earnedValue, donationType)
db.session.add(log)
# Update the project's progress.
project = Project.query.get(id)
project.progress += int(earnedValue)
# Update the db
db.session.commit()
return json.dumps(project_record(project))
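# Request sketch (illustrative; port 8080 matches app.run below):
# GET http://localhost:8080/collect?id=1&earnedValue=100  -> donate 100 to project 1
# GET http://localhost:8080/collect?earnedValue=100       -> donate 100 to a random project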
# Create a project.
@app.route('/create_project')
def create_project():
id = request.args.get('id')
title = request.args.get('title')
explanation = request.args.get('explanation')
progress = request.args.get('progress')
imgUrl = request.args.get('imgUrl')
targetAmount = request.args.get('targetAmount')
p = Project(id, title, explanation, progress, imgUrl, targetAmount)
db.session.add(p)
db.session.commit()
return json.dumps(project_record(p))
# Create a column (article).
@app.route('/create_column')
def create_column():
columnId = int(str(uuid.uuid4().int)[:5])
id = request.args.get('id')
columnTitle = request.args.get('columnTitle')
body = request.args.get('body')
dt_now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
imgUrl = request.args.get('imgUrl')
c = Column(columnId, id, columnTitle, body, dt_now, imgUrl)
db.session.add(c)
db.session.commit()
return json.dumps(column_record(c))
if __name__ == '__main__':
# Reset the database
db.drop_all()
# Create the tables
db.create_all()
# Seed dummy data. NOTE: in production, remove this and create projects via the API instead.
# db_init.initialize()
db_seed.seed()
app.run(host='0.0.0.0', port=8080)
``` |
{
"source": "jphacks/C_2111",
"score": 3
} |
#### File: wtfml/cross_validation/fold_generator.py
```python
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import scipy as sp
from sklearn import model_selection
class FoldGenerator:
"""
Class that splits a pd.DataFrame into num_splits folds.
"""
def __init__(
self,
targets: Union[pd.DataFrame, pd.Series],
num_splits: int = 5,
shuffle: bool = True,
task: str = "binary_classification",
random_state: Optional[int] = None,
):
self.task = task
self.targets = targets
if isinstance(self.targets, pd.DataFrame) or isinstance(
self.targets, pd.Series
):
self.targets = self.targets.values
if len(self.targets.shape) == 1:
self.targets = self.targets.reshape((-1, 1))
self.num_splits = num_splits
if self.task == "binary_classification":
self.folds = model_selection.StratifiedKFold(
n_splits=self.num_splits, shuffle=shuffle, random_state=random_state
)
elif self.task == "multiclass_classification":
self.folds = model_selection.StratifiedKFold(
n_splits=self.num_splits, shuffle=shuffle, random_state=random_state
)
elif self.task == "multilabel_classification":
self.folds = model_selection.KFold(
n_splits=self.num_splits, shuffle=shuffle, random_state=random_state
)
elif self.task == "single_col_regression":
self.folds = model_selection.KFold(
n_splits=self.num_splits, shuffle=shuffle, random_state=random_state
)
elif self.task == "multi_col_regression":
self.folds = model_selection.KFold(
n_splits=self.num_splits, shuffle=shuffle, random_state=random_state
)
else:
raise Exception("Task not understood")
self.splits = dict()
for fold_, (trn, val) in enumerate(
self.folds.split(self.targets, self.targets)
):
self.splits[fold_] = dict()
self.splits[fold_]["train_idx"] = trn
self.splits[fold_]["valid_idx"] = val
def get_fold(
self, data: Union[pd.DataFrame, pd.Series], fold: int
) -> Tuple[
dict,
dict,
Union[pd.DataFrame, pd.Series],
Union[pd.DataFrame, pd.Series],
np.ndarray,
np.ndarray,
]:
if fold >= self.num_splits or fold < 0:
raise Exception("Invalid fold number specified")
if isinstance(data, pd.DataFrame):
if self.targets.shape[1] == 1 and self.task != "binary_classification":
return (
self.splits[fold]["train_idx"],
self.splits[fold]["valid_idx"],
data.loc[self.splits[fold]["train_idx"], :],
data.loc[self.splits[fold]["valid_idx"], :],
self.targets[self.splits[fold]["train_idx"]].ravel(),
self.targets[self.splits[fold]["valid_idx"]].ravel(),
)
else:
return (
self.splits[fold]["train_idx"],
self.splits[fold]["valid_idx"],
data.loc[self.splits[fold]["train_idx"], :],
data.loc[self.splits[fold]["valid_idx"], :],
self.targets[self.splits[fold]["train_idx"], :],
self.targets[self.splits[fold]["valid_idx"], :],
)
elif isinstance(data, sp.sparse.coo.coo_matrix) or isinstance(
data, sp.sparse.csc.csc_matrix
):
if self.targets.shape[1] == 1 and self.task != "binary_classification":
return (
self.splits[fold]["train_idx"],
self.splits[fold]["valid_idx"],
data[self.splits[fold]["train_idx"]],
data[self.splits[fold]["valid_idx"]],
self.targets[self.splits[fold]["train_idx"]].ravel(),
self.targets[self.splits[fold]["valid_idx"]].ravel(),
)
else:
return (
self.splits[fold]["train_idx"],
self.splits[fold]["valid_idx"],
data[self.splits[fold]["train_idx"]],
data[self.splits[fold]["valid_idx"]],
self.targets[self.splits[fold]["train_idx"], :],
self.targets[self.splits[fold]["valid_idx"], :],
)
else:
if self.targets.shape[1] == 1 and self.task != "binary_classification":
return (
self.splits[fold]["train_idx"],
self.splits[fold]["valid_idx"],
data[self.splits[fold]["train_idx"], :],
data[self.splits[fold]["valid_idx"], :],
self.targets[self.splits[fold]["train_idx"]].ravel(),
self.targets[self.splits[fold]["valid_idx"]].ravel(),
)
else:
return (
self.splits[fold]["train_idx"],
self.splits[fold]["valid_idx"],
data[self.splits[fold]["train_idx"], :],
data[self.splits[fold]["valid_idx"], :],
self.targets[self.splits[fold]["train_idx"], :],
self.targets[self.splits[fold]["valid_idx"], :],
)
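# Usage sketch (illustrative; 'df' and 'label' are placeholder names):
# fg = FoldGenerator(targets=df["label"], num_splits=5, task="binary_classification",
#                    shuffle=True, random_state=42)
# trn_idx, val_idx, x_trn, x_val, y_trn, y_val = fg.get_fold(df, fold=0)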
```
#### File: engine/nlp/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import transformers
class BERTBaseClassifier(nn.Module):
def __init__(
self,
num_classes: int = 4,
pretrain_model_name: str = "cl-tohoku/bert-base-japanese-whole-word-masking",
):
super().__init__()
self.bert = transformers.BertModel.from_pretrained(pretrain_model_name)
self.bert_drop = nn.Dropout(0.3)
self.out = nn.Linear(768, num_classes)
def forward(self, ids, mask, token_type_ids):
output = self.bert(ids, attention_mask=mask, token_type_ids=token_type_ids)
bo = self.bert_drop(output.pooler_output)
output = self.out(bo)
return output
def get_features(self, ids, mask, token_type_ids):
output = self.bert(ids, attention_mask=mask, token_type_ids=token_type_ids)
return output.pooler_output
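# Usage sketch (illustrative; pairing the default weights with BertJapaneseTokenizer is an
# assumption, not stated in this file):
# tok = transformers.BertJapaneseTokenizer.from_pretrained(
#     "cl-tohoku/bert-base-japanese-whole-word-masking")
# enc = tok("今日は晴れです", return_tensors="pt")
# logits = BERTBaseClassifier(num_classes=4)(enc["input_ids"], enc["attention_mask"], enc["token_type_ids"])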
class DistilBERTBaseClassifier(nn.Module):
def __init__(
self,
num_classes: int = 4,
pretrain_model_name: str = "bandainamco-mirai/distilbert-base-japanese",
):
super().__init__()
        # Use the constructor argument rather than a hard-coded model name.
        self.backborn = transformers.AutoModel.from_pretrained(pretrain_model_name)
        # transformers.DistilBertTokenizer.from_pretrained(pretrain_model_name)
self.bert_drop = nn.Dropout(0.3)
self.out = nn.Linear(768, num_classes)
def forward(self, ids, mask): #, token_type_ids
output = self.backborn(ids, attention_mask=mask) #, token_type_ids=token_type_ids)
hidden_state = output[0]
pooler = hidden_state[:, 0]
bo = self.bert_drop(pooler)
output = self.out(bo)
return output
def get_features(self, ids, mask): #, token_type_ids
output = self.backborn(ids, attention_mask=mask) #, token_type_ids=token_type_ids)
hidden_state = output[0]
pooler = hidden_state[:, 0]
return pooler
```
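For context, the classifiers above expect the token ids, attention mask and (for BERT) token type ids that a matching tokenizer produces. A hedged usage sketch, assuming the class above is importable and the tokenizer mirrors the default `pretrain_model_name`:

```python
# Sketch: feeding tokenizer output into BERTBaseClassifier (assumes the class above is in scope).
import torch
import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained(
    "cl-tohoku/bert-base-japanese-whole-word-masking"
)
model = BERTBaseClassifier(num_classes=4)
model.eval()

enc = tokenizer("今日はいい天気です", return_tensors="pt")
with torch.no_grad():
    logits = model(enc["input_ids"], enc["attention_mask"], enc["token_type_ids"])
print(logits.shape)  # torch.Size([1, 4])
```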
#### File: C_2111/django_app/models.py
```python
from django.db import models
from django.db.models.fields import DateTimeField
from django.contrib.auth import get_user_model
import uuid
class Questionnaire(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
date = models.DateTimeField(auto_now_add=True, null=True, help_text='作成日')
title = models.CharField(max_length=100)
author = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE,
)
def __str__(self):
return self.title
class DailyReport(models.Model):
questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
text = models.TextField('日報', max_length=200, blank=False)
date = models.DateTimeField(auto_now_add=True, null=True, help_text='作成日')
score = models.FloatField(blank=True, null=True)
author = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE,
)
def __str__(self):
return self.text
```
#### File: C_2111/django_app/views.py
```python
from django.shortcuts import get_object_or_404, redirect, render
from django_app.pytorch_utils import OnnxPredictor
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django_app.forms import UploadForm, DailyReportForm, CreateQuestionnaireForm
from django_app.models import DailyReport, Questionnaire
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from django.urls import reverse
import csv
import io
import pandas as pd
from goo_lab.goo_apis import Goo
from graph.word_cloud import create_wordcloud_base64
from graph.psn_freq_graph import create_psn_freq_graph_base64
def index(request):
return render(request, 'index.html')
def load_model(model_path: str = "./onnx_model/epoch=9-valid_loss=0.1356-valid_acc=0.9745_quant.onnx"):
predictor = OnnxPredictor(model_path=model_path, device="cpu")
return predictor
@login_required
def create(request):
params = {'message': '', 'form': None}
if request.method == 'POST':
form = CreateQuestionnaireForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return HttpResponseRedirect(reverse('django_app:index'))
else:
params['message'] = '再入力してください'
params['form'] = form
else:
params['form'] = CreateQuestionnaireForm()
return render(request, 'create.html', params)
@login_required
def questionnaires(request):
params = {"questionnaires": Questionnaire.objects.filter(
author=request.user)}
return render(request, 'questionnaires.html', params)
model = load_model()
@login_required
def new(request, questionnaire_id):
questionnaire = get_object_or_404(Questionnaire, pk=questionnaire_id)
params = {'message': '', 'form': None, 'questionnaire': questionnaire}
if request.method == 'POST':
form = DailyReportForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
score = model.predict(form["text"].value())
post.score = score[0][0][1]
post.questionnaire = questionnaire
post.author = request.user
post.save()
return HttpResponseRedirect(reverse('django_app:index'))
else:
params['message'] = '再入力してください'
params['form'] = form
else:
params['form'] = DailyReportForm()
return render(request, 'user/new.html', params)
@login_required
def info(request, questionnaire_id):
questionnaire = get_object_or_404(Questionnaire, pk=questionnaire_id)
daily_reports = DailyReport.objects.all().filter(questionnaire=questionnaire)
texts = []
for dr in list(daily_reports):
texts.append(dr.text)
wc_base64Img = create_wordcloud_base64(texts)
psn_freq_base64Img = create_psn_freq_graph_base64(texts)
return render(request, 'info.html', {'questionnaire': questionnaire,
'daily_reports': daily_reports,
'exists_wordcloud': bool(wc_base64Img is not None),
'wc_base64Img': wc_base64Img,
'exists_psn_freq': bool(psn_freq_base64Img is not None),
'psn_freq_base64Img': psn_freq_base64Img, })
```
#### File: C_2111/graph/create_graph.py
```python
import sys
sys.path.append("./")
from goo_lab.goo_apis import Goo
import pandas as pd
from collections import Counter, defaultdict
from wordcloud import WordCloud
df = pd.read_csv("./user_data/user_data_sampled.csv").sample(50).reindex()
app_id = "e8be92a5e7fbf6a4b60bb8ff34cbdbf551e65a626b32090fe095864a7f2565e3"
print("固有表現抽出")
g = Goo(app_id=app_id, request_id="record001")
# for text in df["tweet"]:
# result = g.entity(sentence=text, class_filter="PSN|ORG").json()
# print(result["ne_list"])
print("キーワード")
# for title, text in zip(df["username"],df["tweet"]):
# result = g.keyword(title=title,
# body = text).json()
# for i in result["keywords"]:
# print(list(i.keys())[0])
def counter(texts_list: list):
words_count = defaultdict(int)
words = []
for text in texts_list:
result = g.keyword(title="title", body = text).json()
for i in result["keywords"]:
words_count[list(i.keys())[0]]+=1
words.append(list(i.keys())[0])
return words_count, words
words_count, words = counter(df["tweet"])
text = ' '.join(words)
fpath = "/usr/share/fonts/truetype/hanazono/HanaMinA.ttf"
wordcloud = WordCloud(background_color="white",font_path=fpath,width=900, height=500).generate(text)
wordcloud.to_file("./test/wordcloud_sample.png")
```
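Since `counter()` already builds a word-to-count dict, the word cloud could also be rendered directly from those frequencies instead of joining the words into one string; `generate_from_frequencies` is part of the wordcloud package. A small hedged sketch (the counts and paths below are placeholders):

```python
# Sketch: rendering a word cloud straight from a frequency dict (placeholder data and paths).
from wordcloud import WordCloud

words_count = {"温泉": 12, "ラーメン": 8, "カメラ": 5}        # stand-in for counter()'s output
fpath = "/usr/share/fonts/truetype/hanazono/HanaMinA.ttf"     # same font as above, adjust as needed

wc = WordCloud(background_color="white", font_path=fpath, width=900, height=500)
wc.generate_from_frequencies(words_count)
wc.to_file("./test/wordcloud_from_freq.png")
```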
#### File: jphacks/C_2111/main.py
```python
from flask import Flask, request, jsonify
import sys
import json
sys.path.append("./")
from app.pytorch_utils import OnnxPredictor
from goo_lab.goo_apis import Goo
def load_model(model_path:str = "./onnx_model/epoch=9-valid_loss=0.6320-valid_acc=1.0000_quant.onnx"):
predictor = OnnxPredictor(model_path=model_path, device="cpu")
return predictor
model = load_model()
res = model.predict("今日も自殺")[0][0]
print(list(res))
data=json.dumps({"result":[float(i) for i in list(res)]}).encode()
print(data)
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def predict():
    # Score the text sent in the request body with the globally loaded model
    # (the original reloaded the model and scored a hard-coded string on every call).
    body = request.get_json()
    res = model.predict(body["text"])[0][0]
    return json.dumps({"result": [float(i) for i in list(res)]}).encode()
@app.route('/goo', methods=["POST"])
def textpair():
    # Flask view functions cannot take arbitrary positional arguments,
    # so the two texts are read from the POSTed JSON body instead.
    # NOTE: Goo.textpair is called on the class here, as in the original;
    # it may need a Goo instance (see goo_lab usage in create_graph.py).
    body = request.get_json()
    return Goo.textpair(body["text1"], body["text2"])
``` |
{
"source": "jphacks/C_2113",
"score": 3
} |
#### File: C_2113/tts/ttsthread.py
```python
import base64
import numpy as np
import urllib.request
import json
import subprocess as sp
from playsound import playsound
import queue
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../common'))
from interface_struct import SpeakingData
# copy from https://qiita.com/to_obara/items/d8d5c92c2ea85a197e2d
def get_token() -> str:
"""
    Obtain an access token for Google Text-To-Speech by reading the output of
    `gcloud auth application-default print-access-token` (assumes gcloud is
    already authenticated and set up).
"""
res = sp.run('gcloud auth application-default print-access-token',
shell=True,
stdout=sp.PIPE,
stderr=sp.PIPE,
encoding='utf-8')
print("[[TTS]]", f"error=[{res.stderr}]")
return res.stdout.strip()
def makeRequestDict(txt: str) -> dict:
"""
Google Text-To-Speechへリクエストのための情報を生成する
SSMLには未対応
Args:
txt(in): 音声合成するテキスト
Returns:
音声合成するために必要な情報をdictで返却する
"""
dat = {
"audioConfig": {
"audioEncoding": "LINEAR16",
"pitch": 0,
"speakingRate": 1
},
"voice": {
"languageCode": "ja-JP",
"name": "ja-JP-Standard-B"
}
}
dat["input"] = {"text": txt}
return dat
def output_mp3(dat: dict, ofile: str) -> int:
"""
Google Text-To-Speechへリクエストした結果を元に音声データにしてファイルに書き込む
Args:
dat(in): リクエストした結果得られたJSON文字列をdictにしたもの
ofile(in): 音声データを書き出すファイル名
"""
b64str = dat["audioContent"]
binary = base64.b64decode(b64str)
uint8_dat = np.frombuffer(binary, dtype=np.uint8)
with open(ofile, "wb") as f:
f.write(uint8_dat)
return len(uint8_dat) / 2 / 24 # playing time in ms. 24,000 Hz with 2 bytes
def gtts(txt: str, ofile: str) -> int:
# Returns playing time in ms.
dat = makeRequestDict(txt)
req_data = json.dumps(dat).encode()
url = 'https://texttospeech.googleapis.com/v1/text:synthesize'
token = get_token()
req_header = {
'Authorization': f"Bearer {token}",
'Content-Type': 'application/json; charset=utf-8',
}
req = urllib.request.Request(url,
data=req_data,
method='POST',
headers=req_header)
try:
with urllib.request.urlopen(req) as response:
dat = response.read()
body = json.loads(dat)
ret = output_mp3(body, ofile)
print("[[TTS]]", "done..")
return ret
except urllib.error.URLError as e:
print("[[TTS]]", "error happen...")
print("[[TTS]]", e.reason)
print("[[TTS]]", e)
return -1
def tts_and_speak(txt: str, id: int, output_queue: queue.Queue = None) -> None:
ofile = "tts" + str(id) + ".mp3"
playtime = gtts(txt, ofile)
if (output_queue is not None):
output_queue.put(SpeakingData(txt=txt, sec=playtime*1e-3))
playsound(ofile)
def main(txt_queue: queue.Queue, output_queue: queue.Queue, debug: bool = False) -> None:
print("[[TTS]]", f"debug={debug}")
i = 0
while True:
try:
txt = txt_queue.get(timeout=500.0)
print("[[TTS]]", "get:", txt)
if debug:
output_queue.put(SpeakingData(txt=txt, sec=0.02*len(txt)))
print("[[TTS]]", "speak:", txt)
else:
tts_and_speak(txt, i, output_queue)
i += 1
        except queue.Empty:
            print("[[TTS]]", "get text timeout.")
            continue
        except Exception as e:
            print("[[TTS]]", "error while speaking:", e)
            continue
``` |
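The playing-time estimate in `output_mp3()` assumes the LINEAR16 payload is 16-bit mono at 24,000 Hz, i.e. milliseconds = bytes / 2 / 24. A tiny standalone sketch of that arithmetic (the sample rate is the code's assumption, not verified here):

```python
# Sketch: duration of a LINEAR16 buffer, assuming 16-bit mono at 24,000 Hz as in output_mp3().
def linear16_duration_ms(payload: bytes, sample_rate: int = 24000) -> float:
    samples = len(payload) // 2              # 2 bytes per 16-bit sample
    return samples * 1000.0 / sample_rate

one_second = bytes(2 * 24000)                # 24,000 zero-valued samples
print(linear16_duration_ms(one_second))      # 1000.0
```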
{
"source": "jphacks/C_2117",
"score": 2
} |
#### File: server/trip_diary/models.py
```python
from django.db import models
class User(models.Model):
user_name = models.CharField(max_length=30)
def __str__(self):
return self.user_name
#Tripモデル(id, trip_name, trip_start, trip_end)
class Trip(models.Model):
trip_name = models.CharField(max_length=30)
trip_start = models.DateTimeField(verbose_name='The start of trip', null=True, blank=True)
trip_end = models.DateTimeField(verbose_name='The end of trip', null=True, blank=True)
trip_menber = models.ManyToManyField(User)
def __str__(self):
return self.trip_name
#Categoryモデル(id, name)
class Category(models.Model):
category_name = models.CharField(max_length=30)
def __str__(self):
return self.category_name
#VisitHistryモデル(id, trip_id, places_visited_id, date_of_visit)
class VisitHistry(models.Model):
trip_id = models.ForeignKey(Trip,on_delete=models.CASCADE)
place_visited = models.CharField(max_length=30)
visit_start = models.DateTimeField('The start of visit')
def __str__(self):
return self.place_visited
#Imageモデル(id, trip_id, date_of_shooting, file_name, placesVisited_id)
#primary_keyは未指定のため自動で生成される
class Image(models.Model):
trip_id = models.ForeignKey(Trip,on_delete=models.CASCADE)
date_of_shooting = models.DateTimeField(verbose_name='date of shooting', null=True, blank=True)
file_name = models.CharField(max_length=100)
location = models.ForeignKey(VisitHistry,on_delete=models.CASCADE)
user_id = models.ForeignKey(User,on_delete=models.CASCADE)
importance = models.IntegerField()
category_name = models.ManyToManyField(Category)
def __str__(self):
return self.file_name
```
#### File: server/trip_diary/views.py
```python
from django.http import HttpResponse
import django_filters
from rest_framework import viewsets, filters
from django.http import FileResponse
from .models import Image,Trip,VisitHistry
from .serializer import ImageSerializer, TripSerializer, VisitHistrySerializer
#ユーザがいった旅のリスト->旅を選ぶ->旅にアクセス
#旅の訪れた場所ごとに時系列で(上位n個のハイライト写真を送る)->場所を選ぶ
#場所ごとの写真を送る
#旅の名前, 日付, 参加したユーザ
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.order_by('date_of_shooting')
serializer_class = ImageSerializer
filter_fields = ('user_id', 'trip_id','category_name')
class ImageVeiwSet_highlight(viewsets.ModelViewSet):
queryset = Image.objects.order_by('-importance')[:4]
serializer_class = ImageSerializer
class TripViewSet(viewsets.ModelViewSet):
queryset = Trip.objects.all()
serializer_class = TripSerializer
filter_fields = ('trip_menber','trip_name')
class VisitHistryViewSet(viewsets.ModelViewSet):
queryset = VisitHistry.objects.order_by('visit_start')
serializer_class = VisitHistrySerializer
filter_fields = ('trip_id','place_visited')
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
``` |
{
"source": "jphacks/C_2118",
"score": 3
} |
#### File: api/sentiment/analysis.py
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdapterType
model = AutoModelForSequenceClassification.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
tokenizer = AutoTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
model.load_adapter("./api/sentiment/adapter/sst-2/")
def predict(sentence):
token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentence))
input_tensor = torch.tensor([token_ids])
outputs = model(input_tensor, adapter_names=["sst-2"])
result = torch.argmax(outputs[0]).item()
return result
```
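Note that `tokenizer.tokenize` plus `convert_tokens_to_ids` skips the [CLS]/[SEP] special tokens, while calling the tokenizer directly adds them and builds the tensor in one step. A hedged sketch of the difference (illustration only; whether the adapter head expects special tokens depends on how it was trained):

```python
# Sketch: manual id conversion vs. tokenizer __call__ (same model name as above).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")

sentence = "この映画は本当に面白かった"
manual_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentence))
encoded = tokenizer(sentence, return_tensors="pt")

print(manual_ids[:3])                          # no special tokens
print(encoded["input_ids"][0][:3].tolist())    # starts with the [CLS] id
```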
#### File: api/sentiment/app.py
```python
from flask import Flask, jsonify, request
import json
import analysis
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///data.sqlite"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
@app.route("/positiveness", methods=["POST"])
def get_positiveness():
req = json.loads(request.data)
comment_body = req["body"]
positiveness = analysis.predict(comment_body)
return jsonify({"positiveness": positiveness})
if __name__ == "__main__":
app.run()
```
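A client POSTs a JSON body with a `body` field and gets the predicted label back. A hedged example against a locally running instance (URL and port are assumptions):

```python
# Sketch: calling the /positiveness endpoint of a locally running instance (assumed URL).
import requests

resp = requests.post(
    "http://127.0.0.1:5000/positiveness",
    json={"body": "このイベントは最高でした"},
)
print(resp.json())  # e.g. {"positiveness": 1}
```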
#### File: C_2118/web/snowflake.py
```python
import time
class Snowflake:
def __init__(self, init_serial_no=0):
self.machine_id = 0
self.epoch = 0
self.serial_no = init_serial_no
def generate(self):
unique_id = (
((int(time.time() * 1000) - self.epoch) & 0x1FFFFFFFFFF) << 22
| (self.machine_id & 0x3FF) << 12
| (self.serial_no & 0xFFF)
)
self.serial_no += 1
return unique_id
``` |
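Assuming the `Snowflake` class above is importable, the 41/10/12-bit layout written by `generate()` can be unpacked again by shifting and masking; a small decode sketch:

```python
# Sketch: decoding the bit layout produced by Snowflake.generate() above.
def decode_snowflake(unique_id: int, epoch: int = 0) -> dict:
    return {
        "timestamp_ms": (unique_id >> 22) + epoch,   # 41-bit millisecond timestamp
        "machine_id": (unique_id >> 12) & 0x3FF,     # 10-bit machine id
        "serial_no": unique_id & 0xFFF,              # 12-bit serial number
    }

sf = Snowflake(init_serial_no=7)
print(decode_snowflake(sf.generate()))
```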
{
"source": "jphacks/C_2122",
"score": 3
} |
#### File: C_2122/pyapp/app.py
```python
import datetime
import sqlite3
import flask
import flask_login
SECRET_KEY = "secret_key"
app = flask.Flask(
__name__,
template_folder="static")
app.config["SECRET_KEY"] = SECRET_KEY
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
user_id = 0
# ログイン用のクラス
class User(flask_login.UserMixin):
def __init__(self, user_id):
self.id = user_id
def get_id(self):
return self.id
@login_manager.user_loader
def load_user(user_id):
return User(user_id)
def get_abs(path):
# 共通部分の読み込み
f = open("static/"+path, 'r')
read = f.read()
f.close()
return read
@app.route('/', methods=["POST", "GET"])
def index():
# スタートページを表示
# return flask.render_template("index.html", navbar=NAVBAR, headend=HEADEND)
return flask.render_template("index.html", abs_path=get_abs)
@app.route('/css/style.css', methods=["POST", "GET"])
def css():
return flask.render_template("css/style.css", abs_path=get_abs)
@app.route("/login.html", methods=["POST", "GET"])
def login():
# loginページの処理
if(flask.request.method == "POST"):
# ユーザーチェック
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
        try:
            c.execute(
                "select * from user where username = ? and password = ?",
                (flask.request.form["name"], flask.request.form["password"]))
        except:
            return flask.abort(401)
user = c.fetchall()
if user != []:
conn.close()
user_id = int(user[0][0])
flask_login.login_user(User(int(user[0][0])))
print("login success")
return flask.redirect("room.html")
else:
print("login fail : Name or password does not match")
print(user)
flask.flash("ユーザー名またはパスワードが間違っています", "sign in fail")
return flask.redirect("login.html")
return flask.render_template("login.html", abs_path=get_abs)
@app.route("/signup.html", methods=["POST", "GET"])
def sign_up():
# loginページの処理
if(flask.request.method == "POST"):
# ユーザーチェック
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
if len(flask.request.form["name"]) < 2:
flask.flash("ユーザー名が短すぎます", "name is too short.")
return flask.redirect("/signup.html")
if len(flask.request.form["password"]) < 2:
flask.flash("パスワードが短すぎます", "password is too short.")
return flask.redirect("/signup.html")
        # Check for an existing user, then insert the new account, using parameterized
        # queries (the original raised a string, which is invalid in Python 3 and only
        # worked because the resulting TypeError was swallowed by a bare except).
        c.execute(
            "select * from user where username = ?",
            (flask.request.form["name"],))
        user = c.fetchall()
        if user != []:
            flask.flash("その名前はすでに使用されています", "sign up fail")
            return flask.redirect("/signup.html")
        c.execute(
            "insert into user(username, password) values(?, ?)",
            (flask.request.form["name"], flask.request.form["password"]))
        print("Sign up success")
        c.execute(
            "select * from user where username = ?",
            (flask.request.form["name"],))
        user = c.fetchall()
        print(user)
        conn.commit()
        conn.close()
        flask_login.login_user(User(int(user[0][0])))
        return flask.redirect("room.html")
return flask.render_template("signup.html", abs_path=get_abs)
@app.route("/logout", methods=["GET"])
@flask_login.login_required
def logout():
# logoutの処理
flask_login.logout_user()
return flask.redirect("/")
@app.route("/dashboard", methods=["POST", "GET"])
def show_dashboard():
# dashboardの表示
pass
#チャット処理
@app.route("/room.html")
def room():
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
user_id = flask_login.current_user.get_id() # ログインしているユーザのidを取得
c.execute(
"select id, date, user_id, content, reserved_id from purpose where (reserved_id is null) and (purpose.user_id = {})".format(user_id)
)
purpose_list = c.fetchall()
c.execute(
"select purpose_id1 from reserve"
)
reserve_list = c.fetchall()
my_list = []
wait_list = []
for p in purpose_list:
b = False
for r in reserve_list:
if p[0] == r[0]:
b = True
break
if b:
wait_list.append(p)
else:
my_list.append(p)
c.execute(
"select purpose.date, purpose.content, reserve.id from purpose inner join reserve on purpose.reserved_id = reserve.id where purpose.user_id = {}".format(user_id)
)
open_list = c.fetchall()
conn.close()
return flask.render_template("room.html", my_list=my_list, wait_list=wait_list, open_list=open_list, abs_path=get_abs)
@app.route("/chat.html/<int:reserveid>")
def chat(reserveid):
user_id = flask_login.current_user.get_id()
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
c.execute(
"select reserve_id, date, content, user_id_sender from chat where reserve_id = {}".format(reserveid))
chat_fetch = c.fetchall()
chat_list = []
for chat in chat_fetch:
chat_list.append(
{"reserve_id": chat[0], "date": chat[1],
"content": chat[2], "user_id": chat[3]}
)
conn.close()
return flask.render_template("chat.html", chat_list=chat_list, reserve_id=reserveid, user_id=user_id, abs_path=get_abs)
@app.route("/chat.html/<int:reserveid>", methods=["POST"])
def chat_post(reserveid):
user_id = flask_login.current_user.get_id()
chat_message = flask.request.form.get("input_message")
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
c.execute("insert into chat values(?,?,?,?)", (reserveid,
datetime.datetime.now(), chat_message, user_id))
conn.commit()
conn.close()
return flask.redirect("/chat.html/{}".format(reserveid))
#予約処理
@app.route("/reservation.html", methods=["POST", "GET"])
@flask_login.login_required
def reserve():
"""
conn = sqlite3.connect('reserve_test.db')
c = conn.cursor()
c.execute('CREATE TABLE reserve (id integer, date text, time text, purpose text)')
conn.commit()
conn.close()
return flask.render_template('<reserve {}>', abs_path=get_abs)"""
if(flask.request.method == "POST"):
# ユーザーチェック
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
user_id = flask_login.current_user.get_id()
print(flask.request.form)
t = flask.request.form["time"].replace(":", "-")
c.execute("insert into purpose(date, user_id, content) values('{}', '{}', '{}')".format(
str(flask.request.form["date"]) + "-" + t, user_id, flask.request.form["purpose"]))
conn.commit()
c.execute(
"select reserve.id, purpose.content from reserve inner join purpose on (reserve.purpose_id1 = purpose.id) or (reserve.purpose_id2 = purpose.id) where purpose.user_id = {}".format(user_id))
room_list = c.fetchall()
print(room_list)
conn.close()
return flask.render_template("/reservation.html", abs_path=get_abs, messages=room_list)
return flask.render_template("/reservation.html", abs_path=get_abs)
#マッチング処理
@app.route("/match.html")
def match():
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
user_id = flask_login.current_user.get_id() # ログインしているユーザのidを取得
c.execute(
"select id, date, purpose_id1, purpose_id2 from reserve where (purpose_id1 is not null) and (purpose_id2 is not null)"
)
room_list = c.fetchall()
c.execute(
"select purpose.id, purpose.date, purpose.user_id, purpose.content, purpose.reserved_id, user.username from purpose inner join user on purpose.user_id = user.id where reserved_id is null"
)
purpose_list = c.fetchall()
to_me = []
for r in room_list:
for p in purpose_list:
if p[2] == user_id and r[3] == p[0]:
r = list(r)
to_me.append(r)
for i in range(len(to_me)):
for p in purpose_list:
if to_me[i][2] == p[0]:
to_me[i].append(p[5])
break
c.execute(
"select id, date, purpose_id1, purpose_id2 from reserve"
)
room_list = c.fetchall()
c.execute(
"select purpose.id, purpose.date, purpose.user_id, purpose.content, purpose.reserved_id, user.username from purpose inner join user on purpose.user_id = user.id where (reserved_id is null) and (user_id = {})".format(user_id)
)
my_list = c.fetchall()
c.execute(
"select purpose.id, purpose.date, purpose.user_id, purpose.content, purpose.reserved_id, user.username from purpose inner join user on purpose.user_id = user.id where (reserved_id is null) and (user_id != {})".format(user_id)
)
your_list = c.fetchall()
candidate = []
for my in my_list:
for your in your_list:
if my[1] == your[1]:
b = True
for r in room_list:
if r[2] == my[0] or r[3] == my[0] or r[2] == your[0] or r[3] == your[0]:
b = False
if b:
your = list(your)
your.append(my[0])
candidate.append(your)
conn.close()
return flask.render_template("match.html", abs_path=get_abs, to_me=to_me, candidate=candidate)
@app.route("/match.html", methods=["POST"])
def match_post():
user_id = flask_login.current_user.get_id() # ログインしているユーザのidを取得
conn = sqlite3.connect('chat_test.db')
c = conn.cursor()
print(flask.request.form)
if "reserve" in flask.request.form:
c.execute("update purpose set reserved_id='{}' where id={}".format(flask.request.form["reserve"], flask.request.form["my_purpose"]))
conn.commit()
c.execute("update purpose set reserved_id='{}' where id={}".format(flask.request.form["reserve"], flask.request.form["your_purpose"]))
conn.commit()
else:
c.execute("insert into reserve(date, purpose_id1, purpose_id2) values('{}','{}','{}')".format(flask.request.form["date"], flask.request.form["my_purpose"], flask.request.form["your_purpose"]))
conn.commit()
conn.close()
return flask.redirect("/match.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8008, debug=True)
``` |
{
"source": "jphacks/C_2126",
"score": 2
} |
#### File: C_2126/api/models.py
```python
from django.db import models
from django.utils import timezone
FOOD_TYPE_CHOICES = (
(0, '水'),
(1, '食料品'),
(2, 'おかず'),
(3, 'その他')
)
FOOD_GENRE_CHOICES = (
(0, '水'),
(1, '麺'),
(2, 'おかゆ'),
(3, 'その他')
)
class FoodModel(models.Model):
title = models.CharField(max_length=50)
type = models.IntegerField(
choices=FOOD_TYPE_CHOICES,
help_text='0: 水, 1: 食料品, 2: おかず, 3: その他'
)
genre = models.IntegerField(
choices=FOOD_GENRE_CHOICES,
help_text='0: 水, 1: 麺, 2: おかゆ, 3: その他'
)
count = models.IntegerField(default=0)
expiration = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
class UserModel(models.Model):
line_id = models.CharField(max_length=50)
token = models.CharField(max_length=50)
def __str__(self):
return self.line_id
class GroupModel(models.Model):
line_group_id = models.CharField(max_length=50, blank=False)
title = models.CharField(max_length=50, blank=False)
count_users = models.IntegerField()
stock_days = models.IntegerField()
foods = models.ManyToManyField(
FoodModel,
related_name='foods'
)
users = models.ManyToManyField(
UserModel,
related_name='users'
)
def __str__(self):
return self.title
``` |
{
"source": "jphacks/D_2002",
"score": 2
} |
#### File: src/main/models.py
```python
from django.db import models
from stdimage.models import StdImageField
from stdimage.validators import MinSizeValidator, MaxSizeValidator
# Create your models here.
class Product(models.Model):
name = models.CharField(
verbose_name='name',
max_length=100,
blank=False,
null=False)
amount = models.IntegerField(
verbose_name='amount',
blank=True,
null=True)
price = models.IntegerField(
verbose_name='price',
blank=False,
null=False)
intro = models.CharField(
verbose_name='intro',
max_length=200,
blank=True,
null=True
)
image = StdImageField(
verbose_name='image',
upload_to="product",
validators=[MinSizeValidator(300, 300), MaxSizeValidator(5760, 3840)],
blank=True,
variations={
'medium': (500, 500, True),
'small': (300, 300, True),
},
)
tx_hash = models.CharField(
        verbose_name='tx_hash',
max_length=2000,
blank=False,
null=False)
def __str__(self):
return self.name
``` |
{
"source": "jphacks/D_2006",
"score": 3
} |
#### File: D_2006/server/app.py
```python
from flask import Flask,render_template,abort
import sys,os
from flask import request,Response
from flask_cors import CORS
from flask.helpers import make_response
app=Flask(__name__,static_url_path="/")
CORS(app)
@app.route("/")
def root():
return render_template("index.html")
import sys
sys.path.append("../")
from algorithm.いじったらだめ import Matching
# import algorithm.いじったらだめ.Matching
'''
input
{
analyze_text:str
}
return
{
difficult_words:{
{words},
{words},...
}
analyzed_text:str{
{sentence},...
}
}
'''
import json
@app.route("/analyze",methods=["POST"])
def anal():
message = request.get_json()
text=message["analyze_text"]
print("show-->",text)
print("show end")
## Matching()
#先に難しい単語を抽出した文を返す(string型)
#二個目に要点をまとめた文を返す([str])
words,text=Matching.virtual_server(text)
if len(words)==0:
words.append(["ありませんでした"])
if len(text)==0:
text.append("解析できませんでした")
test={"analyzed_text":text,"difficult_words":words}
return json.dumps(test),200
if __name__ == "__main__":
print(app.url_map)
app.run()
``` |
{
"source": "jphacks/D_2010",
"score": 2
} |
#### File: super_live/lives/models.py
```python
import datetime
from django.db import models
from django.utils import timezone
#liveのURLを管理
class Live(models.Model):
liveName = models.CharField(verbose_name='liveName', max_length=100)
liveUser = models.CharField(verbose_name='liveUser', max_length=100)
liveId = models.CharField(verbose_name='liveId', max_length=100)
pub_date = models.DateTimeField()
def __str__(self):
return self.liveId
#リアクションの管理
class Reactions(models.Model):
live = models.ForeignKey(Live, on_delete=models.CASCADE)
reaction = models.CharField(max_length=100)
reactionCount = models.IntegerField(default=0)
def __str__(self):
return self.reaction
``` |
{
"source": "jphacks/D_2014",
"score": 2
} |
#### File: esuits/escreate/forms.py
```python
from django import forms
from ..models import ESGroupModel, PostModel, TagModel
class CreateESForm(forms.ModelForm):
'''ES作成のためのフォーム'''
class Meta:
model = ESGroupModel
fields = (
'company',
'event_type',
'company_url',
'is_editing',
'deadline_date',
# 'author',
)
class CreatePostForm(forms.ModelForm):
'''ポスト作成のためのフォーム'''
class Meta:
model = PostModel
fields = (
'question',
'answer',
'tags',
'char_num',
'es_group_id',
)
def __init__(self, *args, user, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
available_tags = TagModel.objects.filter(author=user)
self.fields['tags'].queryset = available_tags
```
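Because `CreatePostForm.__init__` takes a required `user` keyword to restrict the tag queryset, the view has to pass it explicitly on both the GET and POST paths. A hedged sketch of such a view (view, URL and template names are placeholders, assuming the form above is importable):

```python
# Sketch: instantiating CreatePostForm with the extra `user` kwarg from a view (placeholder names).
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render

@login_required
def create_post(request):
    if request.method == "POST":
        form = CreatePostForm(request.POST, user=request.user)
        if form.is_valid():
            form.save()
            return redirect("esuits:index")          # placeholder URL name
    else:
        form = CreatePostForm(user=request.user)
    return render(request, "esuits/post_form.html", {"form": form})
```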
#### File: D_2014/esuits/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in
# Create your models here.
class CustomUserModel(AbstractUser):
'''カスタムユーザークラス'''
class Meta(object):
db_table = 'custom_user'
def __str__(self):
return self.username
class TagModel(models.Model):
'''タグモデル'''
class Meta(object):
db_table = 'tag_table'
tag_name = models.CharField(verbose_name='タグ名', max_length=255)
author = models.ForeignKey(CustomUserModel, on_delete=models.CASCADE)
def __str__(self):
return self.tag_name
class WordCloudModel(models.Model):
'''ワードクラウドを保存するデータベース'''
class Meta(object):
db_table = 'word_cloud'
company_url = models.URLField(verbose_name='企業ホームページ', max_length=200)
word_cloud_image_url = models.URLField(verbose_name='ワードクラウドURL', max_length=200)
class ESGroupModel(models.Model):
'''ESモデル'''
class Meta(object):
db_table = 'esgroup_table'
company = models.CharField(verbose_name='会社名', max_length=100)
event_type = models.CharField(verbose_name='イベントタイプ', max_length=50, blank=True, null=True)
company_url = models.URLField(verbose_name='企業ホームページ', max_length=200)
author = models.ForeignKey(CustomUserModel, verbose_name='作成者',
on_delete=models.CASCADE, blank=True)
is_editing = models.BooleanField(verbose_name='作成中', default=True)
created_date = models.DateTimeField(verbose_name='作成日時', default=timezone.now, blank=True)
deadline_date = models.DateTimeField(
verbose_name='提出期限', default=timezone.now, blank=True, null=True)
word_cloud = models.ForeignKey(WordCloudModel, verbose_name='ワードクラウド',
on_delete=models.SET_NULL, null=True)
def __str__(self):
return str(self.pk) + '_' + self.company + '_' + self.event_type
class PostModel(models.Model):
'''ポストモデル'''
class Meta(object):
db_table = 'post_table'
question = models.TextField(verbose_name='質問')
answer = models.TextField(verbose_name='', blank=True, null=True)
# create_date = models.DateTimeField(verbose_name='作成日時', default=timezone.now)
tags = models.ManyToManyField(TagModel, verbose_name='タグ名', blank=True)
# OPEN_INFO_CHOICES = [
# ('public', '公開'),
# ('private', '非公開')
# ]
# open_info = models.CharField(verbose_name='公開', max_length=20,
# choices=OPEN_INFO_CHOICES, default='private')
# company = models.CharField(verbose_name='会社名', max_length=50, blank=True, null=True)
# state = models.CharField(verbose_name='状況', max_length=50, blank=True, null=True)
# author = models.ForeignKey(CustomUserModel, verbose_name='ユーザ名', on_delete=models.CASCADE)
es_group_id = models.ForeignKey(ESGroupModel, verbose_name='ES名',
on_delete=models.CASCADE, blank=True, null=True)
char_num = models.IntegerField(default=0, blank=True)
def __str__(self):
return self.question
``` |
{
"source": "jphacks/D_2015",
"score": 2
} |
#### File: D_2015/DaiCon-raspi/irmcli.py
```python
import sys
import serial
import time
import json
import argparse
import os
here = os.path.abspath(os.path.dirname(__file__))
ir_serial = serial.Serial("/dev/ttyACM0", 9600, timeout = 1)
#ir_serial = serial.Serial("/dev/tty.usbmodem01231", 9600, timeout = 1)
def captureIR(path):
print("Capturing IR...".encode())
ir_serial.write("c\r\n".encode())
time.sleep(3.0)
msg = ir_serial.readline()
print(msg)
if path and not 'Time Out' in msg.decode():
return saveIR(path)
def playIR(data):
#if path and os.path.isfile(path):
if len(data) != 0:
#print("Playing IR with %s ..." % path)
#f = open(path)
#data = json.load(f)
#f.close()
#recNumber = len(data['data'])
recNumber = len(data)
#rawX = data['data']
rawX = data
ir_serial.write("n,%d\r\n".encode() % recNumber)
ir_serial.readline()
#postScale = data['postscale']
#ir_serial.write("k,%d\r\n".encode() % postScale)
#time.sleep(1.0)
msg = ir_serial.readline()
#print msg
for n in range(recNumber):
            bank = n // 64  # integer bank index (bytes %d formatting requires an int)
pos = n % 64
if (pos == 0):
ir_serial.write("b,%d\r\n".encode() % bank)
ir_serial.write("w,%d,%d\n\r".encode() % (pos, rawX[n]))
ir_serial.write("p\r\n".encode())
msg = ir_serial.readline()
print(msg)
return "Successed"
#ir_serial.close()
else:
print("Playing IR...")
ir_serial.write("p\r\n".encode())
time.sleep(1.0)
msg = ir_serial.readline()
print(msg)
return "Failed"
def saveIR(path):
print("Saving IR data to %s ..." % path)
rawX = []
ir_serial.write("I,1\r\n".encode())
time.sleep(1.0)
recNumberStr = ir_serial.readline()
recNumber = int(recNumberStr, 16)
ir_serial.write("I,6\r\n".encode())
time.sleep(1.0)
postScaleStr = ir_serial.readline()
postScale = int(postScaleStr, 10)
#for n in range(640):
for n in range(recNumber):
        bank = n // 64  # integer bank index (bytes %d formatting requires an int)
pos = n % 64
if (pos == 0):
ir_serial.write("b,%d\r\n".encode() % bank)
ir_serial.write("d,%d\n\r".encode() % pos)
xStr = ir_serial.read(3)
xData = int(xStr, 16)
rawX.append(xData)
data = {'format':'raw', 'freq':38, 'data':rawX, 'postscale':postScale}
f = open(path, 'w')
json.dump(data, f)
f.close()
print("Done !")
print(rawX)
return rawX
def measureTemperature():
#print "Sending T command..."
    ir_serial.write("T\r\n".encode())
#print "Reading raw temperature..."
raw = ir_serial.readline()
#print "Reading command status..."
status = ir_serial.readline().rstrip()
celsiusTemp = None
try:
celsiusTemp = ((5.0 / 1024.0 * float(raw)) - 0.4) / 0.01953
print("Temperature: %s" % "{:4.1f}".format(celsiusTemp))
except (ValueError, TypeError):
print("TemperatureExcetion: raw => %s, status => %s" % (raw, status))
def printFirmwareVer():
    ir_serial.write("V\r\n".encode())
    print(ir_serial.readline().decode().rstrip())
ir_serial.readline()
if __name__ == "__main__":
# parse options
parser = argparse.ArgumentParser(description='irMagician CLI utility.')
parser.add_argument('-c', '--capture', action="store_true", dest="cap", help="capture IR data", default=False)
parser.add_argument('-p', '--play', action="store_true", dest="play", help="play IR data", default=False)
parser.add_argument('-s', '--save', action="store_true", dest="save", help="save IR data", default=False)
parser.add_argument('-f', '--file', action="store", dest="file", help="IR data file (json)", default=False)
parser.add_argument('-t', '--temperature', action="store_true", dest="temperature", help="measure ambient irMagicianT temperature in degrees Celsius", default=False)
parser.add_argument('-v', '--version', action="store_true", dest="version", help="show firmware version", default=False)
args = parser.parse_args()
if args.version:
printFirmwareVer()
if args.temperature:
measureTemperature()
    if args.play:
        # playIR() now expects the raw sample list, so load it from the JSON file first.
        if args.file and os.path.isfile(args.file):
            with open(args.file) as f:
                playIR(json.load(f)['data'])
        else:
            playIR([])
if args.save and args.file:
saveIR(args.file)
if args.cap:
captureIR(args.file)
# release resources
ir_serial.close()
``` |
{
"source": "jphacks/D_2105",
"score": 3
} |
#### File: opt/composer/get_tempo.py
```python
def get_bpm(related_value_list, positive_param):
"""
パラメータごとのテンポを返す
Parameter
---------
related_value_list : [str]
言語分析の結果を格納したリスト
positive_param : float
Tweetから算出されたポジティブ度
Return
------
bpm
そのパラメータに対応したbpm
"""
bpm = 100
NO_ITEM = 0
ONE_ITEMS = 1
TWO_ITEMS = 2
prime_value = 'none'
secondary_value = 'none'
third_value = 'none'
if len(related_value_list) == NO_ITEM:
prime_value = 'none'
secondary_value = 'none'
third_value = 'none'
elif len(related_value_list) == ONE_ITEMS:
prime_value = related_value_list[0]
elif len(related_value_list) == TWO_ITEMS:
prime_value = related_value_list[0]
secondary_value = related_value_list[1]
else:
prime_value = related_value_list[0]
secondary_value = related_value_list[1]
third_value = related_value_list[2]
    # Tempo lookup keyed by the primary related value. Every theme that is not
    # listed here falls back to the default of 100 bpm; in the original branching
    # the positive_param sub-branches (for 'sea' and 'cherry') also all resolved
    # to 100, so the behaviour is unchanged.
    bpm_table = {
        'train': 134,
        'gourmet': 120,
        'tabletennis': 120,
        'drama': 120,
    }
    bpm = bpm_table.get(prime_value, 100)
    return bpm
```
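Usage is just a lookup keyed by the first related value; a short hedged example with made-up analysis output, assuming `get_bpm` above is in scope:

```python
# Sketch: picking a tempo from (made-up) language-analysis output.
print(get_bpm(["train", "camera", "cat"], positive_param=0.8))  # 134
print(get_bpm([], positive_param=0.2))                          # falls back to 100
```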
#### File: opt/movie_create/movie_create.py
```python
import moviepy.editor as mpy
import matplotlib.pyplot as plt
import numpy as np
import cv2, wave
import settings
# デバッグ用
import shutil, os
def get_music(id):
"""
音楽の長さが何秒か(浮動小数)と
フレーム数を返す関数
Parameter
---------
id : str
個人識別用uuid
Returns
-------
1.0 * music_frames / SAMPLING_RATE : float
音楽の長さ(秒)
"""
    MOVIE_PATH = './movie/' + id + '/'
WAVE_PATH = MOVIE_PATH + settings.WAV_FILE_NAME
SAMPLING_RATE = 44100
with wave.open(WAVE_PATH, 'r') as music:
music_frames = music.getnframes()
return 1.0 * music_frames / SAMPLING_RATE
def create_clip(path, id, bpm=0, is_icon=False, is_related=False):
"""
画像から音楽と同じ長さの無音動画を作る関数
Parameters
----------
path : str
動画化したい画像のパス
id : str
個人識別用uuid
bpm : int
作成した曲のbpm
is_icon : bool
Twitterアイコンであるかどうか
is_related : bool
その人に関係ある画像であるかどうか
Return
------
concat_clip :
画像から生成した音楽と同じ長さの無音動画
"""
MOVIE_PATH = f'./movie/{id}/'
FPS = 30
SECONDS_PER_FRAME = 1/30
# 音楽の長さ,フレーム数を取得
music_length = get_music(id)
# 画像を格納する処理
clips = []
if is_icon: # Twitterアイコンのとき
img_list = clip_circle(path, id, bpm, music_length)
for i in img_list:
clip = mpy.ImageClip(i).set_duration(SECONDS_PER_FRAME)
clips.append(clip)
elif is_related: # 関係ある画像のとき
img_list = clip_related(path, id, bpm, music_length)
for i in img_list:
clip = mpy.ImageClip(i).set_duration(SECONDS_PER_FRAME)
clips.append(clip)
else: # 背景のとき
# 画像を取得
img = cv2.imread(path, -1)
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
clip = mpy.ImageClip(img).set_duration(music_length)
clips.append(clip)
# 動画を作成する処理
concat_clip = mpy.concatenate_videoclips(clips, method='compose')
clip.close()
return concat_clip
def clip_circle(path, id, bpm, music_length):
"""
正方形のTwitterアイコンを円形に切り出し,
スライドショー用の配列に格納する関数
Parameters
----------
path : str
正方形のTwitterアイコンのパス
id : str
個人識別用uuid
bpm : int
作成した曲のbpm
music_length : float
音楽の長さ(秒)
Return
------
img_list : ndarray
円形に切り出したTwitterアイコンの配列
"""
MOVIE_PATH = './movie/' + id + '/'
FPS = 30
# 画像の読み込み
img_origin = cv2.imread(path, -1)
img_origin = cv2.cvtColor(img_origin, cv2.COLOR_BGRA2RGBA)
img_list = []
movie_frames = int(music_length * FPS)
for i in range(movie_frames):
'''
bpmに合わせて拡大縮小を行う.
bpm
60(s)でpbm(拍) = 60/bpm(s)で1(拍)
fps
1(s)で30(枚) = 60/bpm(s)で1800/bpm(枚)
'''
SECONDS_PER_MINUTE = 60
FPS = 30
FRAMES_PER_BEAT = SECONDS_PER_MINUTE * FPS // bpm
# 深いコピー
img = img_origin.copy()
# 画像の拡大縮小
if i % FRAMES_PER_BEAT < FRAMES_PER_BEAT // 2:
new_size = 200 - 50 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
else:
new_size = 150 + 50 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
# マスク作成 (黒く塗りつぶす画素の値は0)
mask = np.zeros((new_size, new_size), dtype=np.uint8)
# 円を描画する関数circle()を利用してマスクの残したい部分を 255 にしている。
cv2.circle(mask, center=(new_size//2, new_size//2), radius=new_size//2, color=255, thickness=-1)
# 画像の拡縮
img = cv2.resize(img, dsize=(new_size, new_size))
# maskの値が0の画素は透過する
img[mask==0] = [0, 0, 0, 0]
img_list.append(img)
return img_list
def clip_related(path, id, bpm, music_length):
"""
その人に関係ある画像を,
スライドショー用の配列に格納する関数
Parameters
----------
path : str
その人に関係ある画像のパス
id : str
個人識別用uuid
bpm : int
曲の速さ(♩/秒)
music_length : float
音楽の長さ(秒)
Return
------
img_list : ndarray
その人に関係ある画像の配列
"""
MOVIE_PATH = './movie/' + id + '/'
FPS = 30
# 画像の読み込み
img_origin = cv2.imread(path, -1)
img_origin = cv2.cvtColor(img_origin, cv2.COLOR_BGRA2RGBA)
height = img_origin.shape[0]
width = img_origin.shape[1]
img_list = []
movie_frames = int(music_length * FPS)
for i in range(movie_frames):
'''
bpmに合わせてスイングを行う.
bpm
60(s)でpbm(拍) = 60/bpm(s)で1(拍)
fps
1(s)で30(枚) = 60/bpm(s)で1800/bpm(枚)
'''
SECONDS_PER_MINUTE = 60
FPS = 30
FRAMES_PER_BEAT = SECONDS_PER_MINUTE * FPS // bpm
# 深いコピー
img = img_origin.copy()
# 画像を回転する角度を決定
if i % FRAMES_PER_BEAT < FRAMES_PER_BEAT // 2:
angle = 15 - 30 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
else:
angle = -15 + 30 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
rad_angle = np.radians(angle)
width_rot = int(np.round(width*abs(np.cos(rad_angle)) + height*abs(np.sin(rad_angle))))
height_rot = int(np.round(width*abs(np.sin(rad_angle)) + height*abs(np.cos(rad_angle))))
# 回転行列を生成
mat = cv2.getRotationMatrix2D((width//2, height), angle, 1)
mat[0][2] += -width/2 + width_rot/2
mat[1][2] += -height/2 + height_rot/2
# アフィン変換
affine_img = cv2.warpAffine(img, mat, (width_rot, height_rot))
img_list.append(affine_img)
return img_list
def movie_create(id, bpm, related_list):
"""
Parameters
----------
id : str
個人識別用uuid
bpm : int
作成した曲のbpm
related_list : array
関連するキーワードのリスト
"""
MOVIE_PATH = './movie/' + id + '/'
WAVE_PATH = MOVIE_PATH + settings.WAV_FILE_NAME
BASE_IMG_PATH = './movie_create/common_images/cake_background.PNG'
ICON_IMG_PATH = MOVIE_PATH + '/icon.png'
IMGAGES_PATH = './movie_create/images/'
BASE_HEIGHT = 720
BASE_WIDTH = 720
FPS = 30
# クリップを作成
base_clip = create_clip(BASE_IMG_PATH, id)
icon_clip = create_clip(ICON_IMG_PATH, id, bpm, is_icon=True)
#related_clip_0 = create_clip(IMGAGES_PATH + related_list[0] + '/01.PNG', id, bpm, is_related=True)
#related_clip_1 = create_clip(IMGAGES_PATH + related_list[1] + '/01.PNG', id, bpm, is_related=True)
#related_clip_2 = create_clip(IMGAGES_PATH + related_list[2] + '/01.PNG', id, bpm, is_related=True)
# クリップの合成
final_clip = mpy.CompositeVideoClip([base_clip, icon_clip.set_position((BASE_WIDTH * 0.38, BASE_HEIGHT * 0.2))])#, \
#related_clip_0.set_position((0, BASE_HEIGHT * 0.55)), related_clip_1.set_position((BASE_WIDTH * 0.37, BASE_HEIGHT * 0.65)), \
#related_clip_2.set_position((BASE_WIDTH * 0.7, BASE_HEIGHT * 0.55))])
# 音と動画を合成
final_clip = final_clip.set_audio(mpy.AudioFileClip(WAVE_PATH))
final_clip.write_videofile(filename = MOVIE_PATH + 'happy_birthday.mp4', codec='libx264', audio_codec='aac', fps=FPS)
final_clip.close()
#related_clip_2.close()
#related_clip_1.close()
#related_clip_0.close()
icon_clip.close()
base_clip.close()
```
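The icon "pulse" in `clip_circle()` is driven by FRAMES_PER_BEAT = 60 * FPS // bpm: the size ramps down over the first half of each beat and back up over the second half. A hedged standalone sketch of just that sizing curve (same constants, isolated from the video code):

```python
# Sketch: the beat-synchronised size curve used in clip_circle() (same constants, isolated).
def icon_size_for_frame(i: int, bpm: int, fps: int = 30) -> int:
    frames_per_beat = 60 * fps // bpm
    half = frames_per_beat // 2
    if i % frames_per_beat < half:
        return 200 - 50 * (i % half) // half      # shrink from 200 towards 150
    return 150 + 50 * (i % half) // half          # grow back towards 200

sizes = [icon_size_for_frame(i, bpm=120) for i in range(15)]
print(sizes)  # one full beat at 120 bpm and 30 fps spans 15 frames
```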
#### File: opt/nlp/check.py
```python
import tweepy
import os
def check_id(account):
"""ツイッターアカウントのIDチェック
Parameters
----------
account : str
アカウントID
Returns
-------
str
エラーメッセージ
"""
if account == "":
return "アカウント名が入力されていません"
if account == "@":
return "アカウントが存在しません"
if account[0] == "@":
account = account[1:]
auth = tweepy.OAuthHandler(os.environ["T_key"], os.environ["T_keys"])
auth.set_access_token(os.environ["T_token"], os.environ["T_tokens"])
api = tweepy.API(auth)
try:
user = api.get_user(id=account)
except:
return "アカウントが存在しません"
if user.protected == True:
return "相手のアカウントが鍵アカになっています"
else:
return ""
if __name__ == "__main__":
error = check_id("yuukamiya68")
print(error)
```
#### File: opt/nlp/name_check.py
```python
from goolabs import GoolabsAPI
import time
import joblib
def name_check(api_id, text_list):
"""固有表現抽出器で名前と判断された語のリストを取得
Parameters
----------
api_id : str
text_list : list[str]
ツイッターの文章のリスト
Returns
-------
name_list : list[str]
名前と判断された語(重複あり)
"""
n_list = ["鬼太郎", "ぬらりひょん", "コナン", "金田一", "モリアーティー", "ホームズ",
"水谷隼", "張本智和", "石川佳純", "丹羽孝希", "陳夢", "馬龍", "伊藤美誠", "宇田幸矢", "許キン",
"ロナウド", "リオネルメッシ", "リオネル・メッシ", "本田圭佑", "香川真司", "内田篤人", "三浦知良", "長友佑都",
"イチロー", "王貞治", "大谷翔平", "星野仙一",
]
name_list = []
api = GoolabsAPI(api_id)
    # Process the tweets in chunks of 100 (the original loop re-sent the previous
    # chunk when len(text_list) was an exact multiple of 100, and failed on an
    # empty list).
    for i in range(0, len(text_list), 100):
        text = "".join(text_list[i:i + 100])
        ne_list = api.entity(sentence=text, class_filter="PSN")["ne_list"]
for name in ne_list:
if name[0] not in n_list:
name_list.append(name[0])
time.sleep(1)
return name_list
def main():
tweet_list = joblib.load("twitter_result2")
name_check("", tweet_list)
return
if __name__ == "__main__":
main()
``` |
{
"source": "jphacks/D_2110",
"score": 2
} |
#### File: symmetryshift/test/test_create_biological_structure_unit.py
```python
import filecmp
import os
import shutil
import sys
import tempfile
import unittest
from unittest.mock import patch
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from Bio.PDB import PDBParser, parse_pdb_header
from symmetryshift.cli import cli
from symmetryshift.create_biological_structure_unit import operator
class TestCore(unittest.TestCase):
def test_operator(self):
pdb_code = "1KZU"
parser = PDBParser(QUIET=True)
reference_file = "{}/assets/{}.pdb_rotated".format(
os.path.dirname(os.path.abspath(__file__)), pdb_code
)
reference_structure = parser.get_structure(pdb_code, reference_file)
original_file = "{}/assets/{}.pdb_original".format(
os.path.dirname(os.path.abspath(__file__)), pdb_code
)
original_structure = parser.get_structure(pdb_code, original_file)
original_header = parse_pdb_header(original_file)
created_structure = operator(
structure=original_structure, header=original_header, name=pdb_code
)
self.assertEqual(reference_structure, created_structure)
class TestAnotherPDB(unittest.TestCase):
def test_operator(self):
pdb_code = "3C70"
parser = PDBParser(QUIET=True)
reference_file = "{}/assets/{}.pdb_rotated".format(
os.path.dirname(os.path.abspath(__file__)), pdb_code
)
reference_structure = parser.get_structure(pdb_code, reference_file)
original_file = "{}/assets/{}.pdb_original".format(
os.path.dirname(os.path.abspath(__file__)), pdb_code
)
original_structure = parser.get_structure(pdb_code, original_file)
original_header = parse_pdb_header(original_file)
created_structure = operator(
structure=original_structure, header=original_header, name=pdb_code
)
self.assertEqual(reference_structure, created_structure)
class TestCli(unittest.TestCase):
def setUp(self) -> None:
self.tmp_dir = tempfile.mkdtemp()
self.output_filename = os.path.join(self.tmp_dir, "tmp.pdb")
self.pdb_code = "1KZU"
self.rotated_filename = "{}/assets/{}.pdb_rotated".format(
os.path.dirname(os.path.abspath(__file__)), self.pdb_code
)
self.original_filename = "{}/assets/{}.pdb_original".format(
os.path.dirname(os.path.abspath(__file__)), self.pdb_code
)
return super().setUp()
def tearDown(self) -> None:
shutil.rmtree(self.tmp_dir)
return super().tearDown()
@patch("builtins.input", return_value="y")
def test_pdb_code_and_outputfile(self, mock_input):
cli([self.pdb_code, "--output", self.output_filename])
self.assertTrue(filecmp.cmp(self.rotated_filename, self.output_filename))
def test_from_file(self):
cli(["--from-file", self.original_filename, "--output", self.output_filename])
self.assertTrue(filecmp.cmp(self.rotated_filename, self.output_filename))
```
#### File: web/functions/main.py
```python
from flask import jsonify
import symmetryshift.create_biological_structure_unit as assembler
import tempfile
import os
from google.cloud import storage as gcs
import logging
def create_assembly(pdb_code, work_dir):
# input_filename = os.path.join(work_dir, "tmp_in.pdb")
output_filename = os.path.join(work_dir, f"{pdb_code}_assembled.pdb")
structure = assembler.get_biological_assembly_from_pdb_code(pdb_code)
assembler.save_structure(structure, output=output_filename)
return output_filename
def upload_assembly(filename, pdb_code):
project_id = os.environ["PROJECT_ID"]
bucket_id = os.environ["ROTATED_PDB_FILE_BUCKECT_ID"]
client = gcs.Client(project_id)
bucket = client.get_bucket(bucket_id)
file_id = f"{pdb_code}_assembled.pdb"
blob_gcs = bucket.blob(file_id)
blob_gcs.upload_from_filename(filename, content_type="text/plain")
download_url = f"https://storage.googleapis.com/{bucket_id}/{file_id}"
return download_url
def upload_original(filename, pdb_code):
project_id = os.environ["PROJECT_ID"]
bucket_id = os.environ["ORIGINAL_PDB_FILE_BUCKECT_ID"]
client = gcs.Client(project_id)
bucket = client.get_bucket(bucket_id)
file_id = f"{pdb_code}.pdb"
blob_gcs = bucket.blob(file_id)
blob_gcs.upload_from_filename(filename, content_type="text/plain")
download_url = f"https://storage.googleapis.com/{bucket_id}/{file_id}"
return download_url
def fetch_biological_assembly(request):
# CORS enable
# fmt: off
headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "POST",
"Access-Control-Allow-Headers": "Origin, X-Requested-With, Content-Type, Accept"
}
# fmt: on
if request.method == "OPTIONS":
return ("", 204, headers)
data = request.get_json()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
try:
pdb_code = data["pdb_code"]
if type(pdb_code) != str or len(pdb_code) != 4:
raise ValueError(f"pdb code must be 4 char. Input is {pdb_code}")
assembly_filename = create_assembly(pdb_code, tmp_dir)
assembly_url = upload_assembly(assembly_filename, pdb_code)
original_filename = os.path.join(tmp_dir, pdb_code, f"pdb{pdb_code}.ent")
original_url = upload_original(original_filename, pdb_code)
data = {
"message": "ok",
"assembly_url": assembly_url,
"original_url": original_url,
}
return jsonify(data), 200, headers
except FileNotFoundError as err:
logging.error(err)
        return (
            jsonify(
                {
                    "message": "Requested PDB code does not exist "
                    "or was not submitted as a PDB file format."
                }
            ),
            410,
            headers,
        )
    except OSError as err:
        logging.error(err)
        return (
            jsonify(
                {
                    "message": "Requested PDB code does not exist "
                    "or was not submitted as a PDB file format."
                }
            ),
            410,
            headers,
        )
except ValueError as err:
logging.error(err)
return jsonify({"message": str(err)}), 500, headers
except Exception as err:
logging.error(err)
return jsonify({"message": "Unexpected error occur"}), 500, headers
```
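The function expects a JSON body with a 4-character `pdb_code` and answers with URLs for the assembled and original structures. A hedged client sketch (the endpoint URL is a placeholder for the deployed Cloud Function):

```python
# Sketch: calling the deployed fetch_biological_assembly function (placeholder URL).
import requests

ENDPOINT = "https://REGION-PROJECT.cloudfunctions.net/fetch_biological_assembly"  # placeholder

resp = requests.post(ENDPOINT, json={"pdb_code": "1KZU"})
resp.raise_for_status()
body = resp.json()
print(body["assembly_url"])
print(body["original_url"])
```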
#### File: functions/test/test_fetch_biological_assembly.py
```python
import main as server
import os
import unittest
from unittest.mock import Mock
class TestFetchBiologicalAssembly(unittest.TestCase):
def setUp(self) -> None:
return super().setUp()
    def tearDown(self) -> None:
        # setUp() does not open any file handle, so there is nothing to close here.
        return super().tearDown()
def test_post_pdb_code(self):
pdb_code = "5V8K"
data = {"pdb_code": pdb_code}
request = Mock(get_json=Mock(return_value=data))
``` |
{
"source": "jphacks/E_2002",
"score": 2
} |
#### File: E_2002/AI_camera/jphack_demo.py
```python
from ctypes import *
import os
import cv2
import time
import darknet_jphack
import argparse
import math
import numpy as np
import datetime
import json
import subprocess
import glob
from natsort import natsorted
#学習されたクラス名を取得
def classname_management(altNames,data_path):
if altNames is None:
try:
with open(data_path) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents, re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
return altNames
#クラスごとの枠を色を決める
def color_management(num_classes,class_colors,altNames):
class_colors_own = []
for i in range(0, num_classes):
hue = 255*i/num_classes
col = np.zeros((1,1,3)).astype("uint8")
col[0][0][0] = hue
col[0][0][1] = 128
col[0][0][2] = 255
cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))
class_colors_own.append(col)
class_colors[altNames[i]] = class_colors_own[i]
#撮影で得られた画像をから動画を製作する
def video_creater(main_process,img_dir,original_video_path):
#フォルダの画像を取得
frame_img_folder = img_dir + '*.jpg'
frame_images = natsorted(glob.glob(frame_img_folder))
full_frame = len(frame_images)
#動画のfpsを決定
fps = math.ceil(full_frame/main_process)
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
sample_img = cv2.imread(frame_images[0],cv2.IMREAD_COLOR)
#動画の解像度を決定
height, width, channels = sample_img.shape[:3]
#動画を設定
recording = cv2.VideoWriter(original_video_path, fourcc, fps, (width, height))
#フレーム画像から動画を生成
for i in range(full_frame):
img = cv2.imread(frame_images[i],cv2.IMREAD_COLOR)
recording.write(img)
recording.release()
#YOLOによる物体検出
def image_detection(frame, network, class_names, class_colors, thresh,save_result_path,width,height,first_flag):
#物体検出のための画像の前処理
darknet_image = darknet_jphack.make_image(width, height, 3)
image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image_resized = cv2.resize(image_rgb, (width, height),
interpolation=cv2.INTER_LINEAR)
darknet_jphack.copy_image_from_bytes(darknet_image, image_resized.tobytes())
#YOLOによる物体検出
detections = darknet_jphack.detect_image(network, class_names, darknet_image, thresh=thresh)
#検出結果枠作成
image = darknet_jphack.draw_boxes(detections, image_resized, class_colors)
result_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#検出結果画像保存
pred_image = cv2.imwrite(save_result_path,result_image)
darknet_jphack.print_detections(detections)
return result_image, detections
#カメラからの映像を取得して物体検出した結果をリアルタイムで表示
def start_process(cap,save_img_dir,save_result_dir,width,height,network,class_names,class_colors,threshold,set_time,original_video_path,result_video_path):
first_flag = False
start = time.time()
while True:
#カメラからの映像取得
ret, frame = cap.read()
#画像のパス生成
dt_now = datetime.datetime.now()
date = dt_now.strftime('%Y%m%d_%H%M%S%f')
save_result_path = str(save_result_dir + date + ".jpg")
save_img_path = str(save_img_dir + date + ".jpg")
#カメラからの映像を画像として保存
cv2.imwrite(save_img_path,frame)
#YOLOによる物体検出
image, detections = image_detection(frame, network, class_names, class_colors, threshold,save_result_path,width,height,first_flag)
#画面に検出結果表示
cv2.imshow("YOLO",image)
cv2.moveWindow("YOLO", 1000, 200)
#何も検出しなかったとき
if len(detections) == 0:
monitor = str(0)
#何か検出したとき
else:
row = np.shape(detections)[0]
for r in range(row):
detection_result = detections[r][0]
if detection_result == "person":
monitor = str(1)
else:
monitor = str(0)
jphack_dict = {"person": monitor}
#ユーザーがベッドにいるとき
if monitor == "1":
result_message = "person in bed"
print(result_message)
#ユーザーがベッドにいないとき
else:
result_message = "person out bed"
print(result_message)
first_flag = True
elapsed = time.time() - start
#指定した時間が経過したとき
if elapsed > set_time:
print(" ---------- stop AI camera ----------")
cap.release()
video_creater(elapsed,save_img_dir,original_video_path)
video_creater(elapsed,save_result_dir,result_video_path)
break
def main():
#カメラ起動
cap = cv2.VideoCapture(0)
fps = cap.get(cv2.CAP_PROP_FPS)
#撮影時間 [seconds]
set_time = 15
#YOLOの物体検出に必要なファイルのパス
weights_path = "yolov4-tiny.weights"
cfg_path = "cfg/yolov4-tiny.cfg"
data_path = "cfg/coco.data"
threshold = 0.25
#CNNやデータのロード
network, class_names, class_colors = darknet_jphack.load_network(
cfg_path,
data_path,
weights_path,
batch_size=1
)
#検出結果の画像サイズ
width = darknet_jphack.network_width(network)
height = darknet_jphack.network_height(network)
#データセットの情報取得
global metaMain, netMain, altNames
altNames = None
altNames = classname_management(altNames,data_path)
num_classes = len(altNames)
#クラスごとの枠の色決定
color_management(num_classes,class_colors,altNames)
#画像を保存するディレクトリ
jphack_dir = "jphack"
ori_img_dir = "jphack/video"
result_img_dir = "jphack/result"
#ディレクトリがない場合にディレクトリ作成
    if not os.path.isdir(jphack_dir):
        os.makedirs(jphack_dir)
if not os.path.isdir(ori_img_dir):
os.makedirs(ori_img_dir)
if not os.path.isdir(result_img_dir):
os.makedirs(result_img_dir)
#撮影日時のフォルダ作成
dt_now = datetime.datetime.now()
date = dt_now.strftime('%Y%m%d_%H%M%S')
save_img_dir = ori_img_dir + "/" + date
save_result_dir = result_img_dir + "/" + date
os.makedirs(save_img_dir, exist_ok=True)
os.makedirs(save_result_dir, exist_ok=True)
#画像のディレクトリ・動画のパスを作成
save_img_dir = save_img_dir + "/"
save_result_dir = save_result_dir + "/"
original_video_path = save_img_dir + date + ".mp4"
result_video_path = save_result_dir + date + "_result.mp4"
print("--------------------------------------------------------------------------------")
#カメラからの映像に対してリアルタイムで検出結果表示
    start_process(cap,save_img_dir,save_result_dir,width,height,network,class_names,class_colors,threshold,set_time,original_video_path,result_video_path)
if __name__ == '__main__':
main()
```
#### File: E_2002/AI_camera/logicamera.py
```python
import cv2
from datetime import datetime
import os
#カメラ動作確認
def main():
#カメラ起動
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
#カメラのプロパティの表示
print(width,height,fps)
#画像を保存するディレクトリ
jphack_dir = "jphack"
camera_dir = "jphack/camera"
#ディレクトリがない場合にディレクトリ作成
if not os.path.isdir(jphack_dir):
os.makedirs(jphack_dir)
if not os.path.isdir(camera_dir):
os.makedirs(camera_dir)
#カメラからの映像を取得
while True:
#カメラからの映像を取得
ret, frame = cap.read()
#カメラからの映像を画面に表示
cv2.imshow("camera", frame)
k = cv2.waitKey(1)&0xff
# 「s」キーで画像を保存
if k == ord('s'):
#画像のパス生成
date = datetime.now().strftime("%Y%m%d_%H%M%S")
            path = camera_dir + "/" + date + ".jpg"
# 画像保存
cv2.imwrite(path, frame)
# 画像を表示
cv2.imshow(path, frame)
# 「q」キーが押されたら終了する
elif k == ord('q'):
break
#カメラ停止
cap.release()
#表示していた画面を終了
cv2.destroyAllWindows()
if __name__ == '__main__':
#カメラ動作確認
main()
```
#### File: E_2002/PC/connect_arduino.py
```python
import serial
import time
import requests
# ポートの指定
def initialize(port, bps):
port = serial.Serial(port, bps)
#print('圧力センサとの接続を開始しました')
time.sleep(1)
return port
# arduinoへのデータ送信
def send_command(port, byte):
write = port.write(byte)
data = wait_response(port)
#pass
return data
# arduinoから返信されるシリアル内容の表示
def wait_response(port):
while 1:
if port.in_waiting > 0:
time.sleep(0.01)
data = port.read_all().decode('utf-8')
#print(data)
break
return data
# 圧力センサの初期値をリセット
def pressure_reset(port):
pressure = {'reset':b"g", 'init': b"s", 'get':b"w"}
init_num = send_command(port, pressure['reset'])
#print('初期値をリセットしました')
# 圧力センサの初期値を更新
def pressure_init(port):
pressure = {'reset':b"g", 'init': b"s", 'get':b"w"}
init_num = send_command(port, pressure['init'])
#print('初期値を更新しました:', init_num)
# 圧力センサからベットにいるかの判定を取得
def pressure_get(port):
pressure = {'reset':b"g", 'init': b"s", 'get':b"w"}
press_data = send_command(port, pressure['get'])
#print('圧力センサの値を取得します')
return press_data
# 圧力センサとの接続を終了
def close_port(port):
#print('圧力センサとの接続を終了しました')
port.close()
pass
# ESP32と通信し,部屋の照明を点灯
def light_switch():
requests.post('http://192.168.11.37/ledon')
#requests.post('http://192.168.11.37/ledoff')
print(' 照明:点灯')
# ESP32と通信し,テレビの電源を入れる
def tv_switch_on(opt):
requests.post('http://192.168.11.37/tvon?broad='+opt[0]+'&ch='+opt[1])
print(' TV:起動')
# ESP32と通信し,テレビの電源を切る
def tv_switch_off():
requests.post('http://192.168.11.37/tvoff')
print(' TV:停止')
# ESP32と通信し,エアコンのスイッチを入れる
def air_switch():
requests.post('http://192.168.11.37/air')
print(' エアコン:起動')
# ESP32と通信し,サーボの動作させる
def Servomotor():
requests.post('http://192.168.11.37/servo')
print(' サーボモータ:スイッチ切り替え')
```
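
The functions above speak a one-byte command protocol over serial (`b"g"` reset, `b"s"` re-baseline, `b"w"` read) and trigger ESP32 endpoints at a fixed LAN address. Below is a minimal usage sketch of the pressure-sensor side; the serial port name and baud rate are assumptions, not values taken from this repository.

```python
# Usage sketch for connect_arduino.py; '/dev/ttyACM0' and 9600 baud are placeholders.
from connect_arduino import initialize, pressure_reset, pressure_init, pressure_get, close_port

port = initialize('/dev/ttyACM0', 9600)  # open the serial link to the Arduino
pressure_reset(port)                     # b"g": reset the stored baseline
pressure_init(port)                      # b"s": record a new baseline value
print(pressure_get(port))                # b"w": read the in-bed / out-of-bed judgement
close_port(port)
```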
#### File: E_2002/PC/connect_jetson.py
```python
import requests
import time
import subprocess
# Jetson Nanoと通信し,Yoloを起動
def ready_yolo():
start_yolo = subprocess.Popen(['python', './subprocess_file/start_yolo.py'],shell=False)
print('Yoloを起動します')
time.sleep(1)
start_yolo.kill()
# Jetson Nanoと通信し,ベットに人がいるかの判定を取得
def predict_yolo():
pred_result = requests.get('http://192.168.11.38:5000/api/return_result').json()
#print('Yoloの結果を表示します')
#pred_result = 1
return pred_result
# Jetson Nanoと通信し,Yoloを停止,Jetson Nanoの再起動
def stop_yolo():
response = requests.post('http://192.168.11.38:5000/api/overwrite_state')
#print(' Yoloを停止しました')
#pass
```
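
connect_jetson.py expects a small HTTP API to be running on the Jetson Nano at 192.168.11.38:5000 with the routes /api/start_yolo, /api/return_result and /api/overwrite_state. The Jetson-side implementation is not included here, so the following Flask sketch is purely an assumption about its shape, inferred from the calls above.

```python
# Hypothetical Jetson-side API; route names come from connect_jetson.py,
# everything else (state handling, payload format) is an assumption.
from flask import Flask, jsonify

app = Flask(__name__)
state = {"person": 0}  # 1 if a person is detected in bed, else 0

@app.route('/api/start_yolo')
def start_yolo():
    # In the real system this would launch the YOLO detection loop.
    return jsonify(success=True)

@app.route('/api/return_result')
def return_result():
    return jsonify(state["person"])

@app.route('/api/overwrite_state', methods=['POST'])
def overwrite_state():
    # Stop flag; according to the comments the real device also reboots here.
    return jsonify(success=True)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```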
#### File: PC/subprocess_file/start_yolo.py
```python
import requests
def main():
response = requests.get('http://192.168.11.38:5000/api/start_yolo')
if __name__ == "__main__":
main()
``` |
{
"source": "jphacks/E_2005",
"score": 3
} |
#### File: E_2005/raspi/record.py
```python
import pyaudio
import wave
def record():
chunk = 8194
FORMAT = pyaudio.paInt16
CHANNELS = 1
#サンプリングレート
RATE = 44100
p = pyaudio.PyAudio()
stream = p.open(
format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = chunk
)
print("=====RECORD START=====")
all = []
while True:
try:
data = stream.read(chunk)
all.append(data)
except KeyboardInterrupt:
break
print("=====RECORD END=====")
stream.close()
p.terminate()
data = b''.join(all)
#保存するファイル名、wは書き込みモード
out = wave.open('proken.wav','w')
out.setnchannels(1)
out.setsampwidth(2)
out.setframerate(RATE)
out.writeframes(data)
out.close()
def transcribe_file():
from google.cloud import speech
import io
client = speech.SpeechClient()
with io.open('proken.wav', 'rb') as audio_file:
content = audio_file.read()
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=44100,
language_code='ja-JP',
#audio_channel_count=2,
enable_separate_recognition_per_channel=True
)
    operation = client.long_running_recognize(
        request={"config": config, "audio": audio}
    )
response = operation.result(timeout=90)
with io.open("proken.txt", "w", encoding="utf-8") as f:
for result in response.results:
f.write(u'{}'.format(result.alternatives[0].transcript))
def send_push_message():
import requests
import json
import io
with io.open('proken.txt', 'r') as f:
content = f.read()
f.close()
url = 'https://fraud-checker-test.herokuapp.com/raspi'
headers = {'Content-Type': 'application/json'}
payload = {'raspi_id': 'jphacks-e2005-kokokatu', 'content': content}
res = requests.post(url, data=json.dumps(payload), headers=headers)
print(res)
if __name__ == '__main__':
record()
transcribe_file()
send_push_message()
``` |
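
transcribe_file() uses Google Cloud Speech-to-Text, which picks up credentials from the GOOGLE_APPLICATION_CREDENTIALS environment variable; that setup is not shown in the repository. A minimal sketch of running the pipeline, where the key-file path is a placeholder:

```python
# Sketch only: the service-account key path is a placeholder and must point at a real key file.
import os
os.environ.setdefault("GOOGLE_APPLICATION_CREDENTIALS", "/path/to/service-account.json")

from record import record, transcribe_file, send_push_message  # assumes record.py is importable

record()             # press Ctrl+C to stop recording and write proken.wav
transcribe_file()    # writes the recognized text to proken.txt
send_push_message()  # posts the transcript to the Heroku endpoint
```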
{
"source": "jphacks/E_2007",
"score": 3
} |
#### File: recently-reports/src/collect_reports.py
```python
def collect_recently_reports(tweets: list, num: int) -> dict:
neg_cnt = pos_cnt = 0
loop_num = min(num, len(tweets))
for i in range(loop_num):
if tweets[i]["p_or_n"] == "positive":
pos_cnt += 1
else:
neg_cnt += 1
result = {
"positives": pos_cnt,
"negatives": neg_cnt,
"tweets": tweets[:loop_num]
}
return result
```
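
collect_recently_reports counts positive/negative labels over the newest num tweets and bundles them with the tweets themselves. A small usage sketch with made-up data (the import path assumes the package layout shown in the file header):

```python
from src.collect_reports import collect_recently_reports  # import path is an assumption

tweets = [
    {"text": "good news", "p_or_n": "positive"},
    {"text": "bad news", "p_or_n": "negative"},
    {"text": "fine", "p_or_n": "positive"},
]
summary = collect_recently_reports(tweets, num=2)
print(summary)  # {'positives': 1, 'negatives': 1, 'tweets': [first two tweets]}
```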
#### File: backend/sentiment-analysis/analysis.py
```python
import os
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdapterType
def get_analyzer():
adapter_path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "sst-2")
model = AutoModelForSequenceClassification.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking")
tokenizer = AutoTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking")
model.load_adapter(adapter_path)
    def analyzer(sentence: str) -> str:  # returns 'positive' or 'negative'
token_ids = tokenizer.convert_tokens_to_ids(
tokenizer.tokenize(sentence))
input_tensor = torch.tensor([token_ids])
outputs = model(input_tensor, adapter_names=["sst-2"])
result = torch.argmax(outputs[0]).item()
return 'positive' if result == 1 else 'negative'
return analyzer
```
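
get_analyzer() returns a closure that labels a Japanese sentence as 'positive' or 'negative' with an SST-2 adapter on top of Tohoku BERT; it needs the adapter-transformers variant of the transformers library and the sst-2 adapter directory next to the file. A usage sketch under those assumptions:

```python
from analysis import get_analyzer  # assumes analysis.py is on the import path

analyzer = get_analyzer()                # loads the model, tokenizer and sst-2 adapter once
print(analyzer("今日はとても楽しかった"))  # -> 'positive' or 'negative'
```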
#### File: tweets-analysis/src/analyze_tweets.py
```python
from .sentiment_analysis.analyzer import SA
def analyze_tweets(tweets: list) -> list:
"""ツイートのポジティブ・ネガティブを判定する
Args:
tweets (list): ツイート一覧
Returns:
list: 判定結果が入ったリスト
"""
sa = SA()
result = []
for tweet_info in tweets:
p_or_n = sa.detect_p_or_n(tweet_info["text"])
result.append({
**tweet_info,
"p_or_n": p_or_n
})
return result
```
#### File: tweets-analysis/src/collect_tweets.py
```python
import datetime
import tweepy
from .config import CONSUMER_API_KEY, CONSUMER_SECRET_KEY, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET, DATE_FORMAT
auth = tweepy.OAuthHandler(CONSUMER_API_KEY, CONSUMER_SECRET_KEY)
auth.set_access_token(ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def get_all_tweets(user_id: str, from_dt: datetime.datetime = None) -> list:
"""あるユーザーの、指定した日以降のツイート情報を取得する
Args:
user_id (str): ツイッターid
        from_dt (datetime.datetime, optional): 指定日. Defaults to None.
Returns:
list: ツイートのリスト
"""
result = []
    # 大きすぎると大変なので、最大で50ツイートにする。
for tweet in tweepy.Cursor(api.user_timeline, id=user_id).items(50):
tweeted_at = tweet.created_at + datetime.timedelta(hours=9)
# 更新日より前のツイートを持ってきた段階で切る。
if from_dt is not None and tweeted_at <= from_dt:
break
tweet_info = {
"text": tweet.text,
"created_at": tweeted_at.strftime(DATE_FORMAT)
}
result.append(tweet_info)
return result
def filter_tweets(tweets: list) -> list:
return tweets
def get_user(user_id: str) -> tweepy.User:
user = api.get_user(user_id)
return user
```
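
get_all_tweets walks the timeline newest-first and stops at the first tweet older than from_dt, so passing the previous update time yields only new tweets. A usage sketch; the screen name and cut-off date are placeholders:

```python
import datetime
from src.collect_tweets import get_all_tweets, get_user  # import path is an assumption

user = get_user("jack")  # placeholder screen name
since = datetime.datetime(2020, 11, 1)
for t in get_all_tweets("jack", from_dt=since):
    print(t["created_at"], t["text"][:40])
```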
#### File: backend/twitter-timeline/__init__.py
```python
import datetime
import logging
import json
import os
import azure.functions as func
import tweepy
consumer_key = os.environ["CONSUMER_API_KEY"]
consumer_secret = os.environ["CONSUMER_SECRET_KEY"]
access_token = os.environ["ACCESS_TOKEN_KEY"]
access_token_secret = os.environ["ACCESS_TOKEN_SECRET"]
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
user_id = req.params.get('user_id')
if not user_id:
try:
req_body = req.get_json()
except ValueError:
pass
else:
user_id = req_body.get('user_id')
if not user_id:
return func.HttpResponse(
"This HTTP triggered function executed successfully. Pass a user_id in the query string or in the request body for a personalized response.",
status_code=200
)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
result = []
for tweet in tweepy.Cursor(api.user_timeline, id=user_id).items(10):
print("="*8)
print(tweet.user.profile_image_url)
print(tweet.user.name)
print(tweet.user.screen_name)
print(tweet.text)
print(tweet.created_at)
tweet_info = {
"user_id": tweet.user.screen_name,
"text": tweet.text,
"created_at": tweet.created_at.strftime("%Y/%m/%d %H:%M:%S")
}
result.append(tweet_info)
return func.HttpResponse(json.dumps({"summary": result}), headers={"Content-Type": "application/json"})
``` |
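
The function reads user_id either from the query string or from a JSON body and returns the ten latest tweets as {"summary": [...]}. A sketch of calling it on the local Azure Functions host; the route is assumed to match the function folder name twitter-timeline:

```python
# Hypothetical local call: http://localhost:7071 is the default local Functions host,
# and the /api/twitter-timeline route is an assumption based on the folder name.
import requests

resp = requests.get("http://localhost:7071/api/twitter-timeline", params={"user_id": "jack"})
print(resp.json()["summary"][:2])  # first two of the ten returned tweets
```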
{
"source": "jphacks/E_2010",
"score": 2
} |
#### File: E_2010/api/serializers.py
```python
from django.contrib.auth.hashers import make_password
from rest_framework import serializers, validators
from .models import User, Invitation, Application
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'email', 'name', 'university', 'research', 'gender', 'age', 'position', 'self_introduction', 'birthday')
extra_kwargs = {
'password': {'write_only': True},
}
# GET 時は User の中身を見たい、かつ POST 時は id だけ指定したい
# https://sakataharumi.hatenablog.jp/entry/2018/10/20/010806
class InvitationSerializer(serializers.ModelSerializer):
author = UserSerializer(read_only=True)
user_id = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), write_only=True)
class Meta:
model = Invitation
fields = ('id', 'author', 'user_id', 'title', 'content', 'date',
'place', 'created_at', 'tags', 'status',)
read_only_fields = ('author', 'created_at', 'status',)
def create(self, validated_data):
validated_data['author'] = validated_data.get('user_id', None)
if validated_data['author'] is None:
raise serializers.ValidationError("author not found")
del validated_data['user_id']
validated_data['status'] = 'seeking'
return Invitation.objects.create(**validated_data)
class ApplicationSerializer(serializers.ModelSerializer):
invitation = InvitationSerializer(read_only=True)
inv_id = serializers.PrimaryKeyRelatedField(queryset=Invitation.objects.all(), write_only=True)
class Meta:
model = Application
fields = ('id', 'invitation', 'inv_id', 'applicant', 'status',)
def create(self, validated_data):
validated_data['invitation'] = validated_data.get('inv_id', None)
if validated_data['invitation'] is None:
raise serializers.ValidationError("invitation not found")
del validated_data['inv_id']
# validated_data['status'] = 'seeking'
return Application.objects.create(**validated_data)
``` |
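
As the comment above InvitationSerializer notes, reads expose the nested author object while writes accept only the write-only user_id. A minimal sketch of both directions; the field values are illustrative and assume Django is already configured with a User of pk=1:

```python
# Sketch only: field values are made up and assume the Invitation model accepts them as-is.
from api.serializers import InvitationSerializer

serializer = InvitationSerializer(data={
    "user_id": 1,                       # write-only: resolved to the author on create()
    "title": "Lunch seminar",
    "content": "Let's talk about research",
    "date": "2020-11-07",
    "place": "Online",
})
if serializer.is_valid():
    invitation = serializer.save()      # create() sets author and status='seeking'
    print(InvitationSerializer(invitation).data["author"])  # nested UserSerializer output
```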
{
"source": "jphacks/E_2105",
"score": 3
} |
#### File: jphacks/E_2105/fetch_arxiv.py
```python
import arxiv
import pandas as pd
import datetime
def fetch_search_result(search_query, within_5years=False):
five_years_ago = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=5*365)
max_results = 50
search = arxiv.Search(
query=search_query,
max_results=max_results * (3 if within_5years else 1),
)
titles = []
absts = []
urls = []
years = []
for result in search.results():
if within_5years and result.published < five_years_ago:
continue
titles.append(result.title)
absts.append(result.summary.replace('\n', ' '))
urls.append(result.entry_id)
years.append(result.published.year)
num_results = len(titles)
keywords = [search_query] * num_results
rankings = list(range(1, num_results + 1))
df = pd.DataFrame(data=dict(
keyword=keywords[:max_results],
site_name=titles[:max_results],
URL=urls[:max_results],
snippet=absts[:max_results],
ranking=rankings[:max_results],
year=years[:max_results],
))
return df
if __name__ == '__main__':
import time
search_str = input("> ")
start = time.time()
df = fetch_search_result(search_str, True)
duration = time.time() - start
print(f"duration: {duration}s")
df.to_csv(search_str + ".csv")
```
#### File: E_2105/webapp/figure_maker.py
```python
import pathlib
import pandas as pd
from fetch_arxiv import fetch_search_result
from preprocessing_of_words import make_bow
import numpy as np
import pickle
import plotly.graph_objects as go
from jax_tsom import ManifoldModeling as MM
from sklearn.decomposition import NMF
from scipy.spatial import distance as dist
from Grad_norm import Grad_Norm
from webapp import logger
resolution = 10
u_resolution = 10
word_num = 200
CCP_VIEWER = 'CCP'
UMATRIX_VIEWER = 'U-matrix'
TOPIC_VIEWER = 'topic'
ycolors = [[255, 255, 230], [255, 255, 180], [255, 253, 140], [255, 250, 115], [255, 235, 80], [231, 223, 37], [210, 200, 5], [155, 148, 15]]
gcolors = [[255, 255, 230], [255, 255, 180], [230, 247, 155], [211, 242, 132], [180, 220, 110], [144, 208, 80], [120, 180, 75]]
PAPER_COLORS = list(map(lambda i: 'rgb({},{},{})'.format(*i), gcolors))
WORD_COLORS = list(map(lambda i: 'rgb({},{},{})'.format(*i), ycolors))
PAPER_COLOR = PAPER_COLORS[3]
WORD_COLOR = WORD_COLORS[3]
def prepare_umatrix(keyword, X, Z1, Z2, sigma, labels, u_resolution, within_5years):
within_5years_sign = '_within5y' if within_5years else ''
umatrix_save_path = 'data/tmp/'+ keyword + within_5years_sign + '_umatrix_history.pickle'
if pathlib.Path(umatrix_save_path).exists():
logger.debug("U-matix already calculated")
with open(umatrix_save_path, 'rb') as f:
umatrix_history = pickle.load(f)
else:
logger.debug("Umatrix calculating")
umatrix = Grad_Norm(
X=X,
Z=Z1,
sigma=sigma,
labels=labels,
resolution=u_resolution,
title_text="dammy"
)
U_matrix1, _, _ = umatrix.calc_umatrix()
umatrix2 = Grad_Norm(
X=X.T,
Z=Z2,
sigma=sigma,
labels=labels,
resolution=u_resolution,
title_text="dammy"
)
U_matrix2, _, _ = umatrix2.calc_umatrix()
umatrix_history = dict(
umatrix1=U_matrix1.reshape(u_resolution, u_resolution),
umatrix2=U_matrix2.reshape(u_resolution, u_resolution),
zeta=np.linspace(-1, 1, u_resolution),
)
logger.debug("Calculating finished.")
with open(umatrix_save_path, 'wb') as f:
pickle.dump(umatrix_history, f)
return umatrix_history
def prepare_materials(keyword, model_name, within_5years):
logger.info(f"Preparing {keyword} map with {model_name}")
base_filename = f"{keyword}{'_within5y' if within_5years else ''}"
# Learn model
nb_epoch = 50
sigma_max = 2.2
sigma_min = 0.2
tau = 50
latent_dim = 2
seed = 1
# Load data
if pathlib.Path(f"{base_filename}.csv").exists():
logger.debug("Data exists")
csv_df = pd.read_csv(f"{base_filename}.csv")
paper_labels = csv_df['site_name']
rank = csv_df['ranking']
X = np.load(f"data/tmp/{base_filename}.npy")
word_labels = np.load(f"data/tmp/{base_filename}_label.npy")
else:
logger.debug("Fetch data to learn")
csv_df = fetch_search_result(keyword, within_5years)
paper_labels = csv_df['site_name']
X , word_labels = make_bow(csv_df)
rank = np.arange(1, X.shape[0]+1) # FIXME
csv_df.to_csv(f"{base_filename}.csv")
feature_file = f'data/tmp/{base_filename}.npy'
word_label_file = f'data/tmp/{base_filename}_label.npy'
np.save(feature_file, X)
np.save(word_label_file, word_labels)
labels = (paper_labels, word_labels)
model_save_path = f'data/tmp/{base_filename}_history.pickle'
if pathlib.Path(model_save_path).exists():
logger.debug("Model already learned")
with open(model_save_path, 'rb') as f:
history = pickle.load(f)
else:
logger.debug("Model learning")
np.random.seed(seed)
mm = MM(
X,
latent_dim=latent_dim,
resolution=resolution,
sigma_max=sigma_max,
sigma_min=sigma_min,
model_name=model_name,
tau=tau,
init='parafac'
)
mm.fit(nb_epoch=nb_epoch)
history = dict(
Z1=mm.history['z1'][-1],
Z2=mm.history['z2'][-1],
Y=mm.history['y'][-1],
sigma=mm.history['sigma'][-1],
Zeta=mm.Zeta1,
resolution=mm.resoluton
)
logger.debug("Learning finished.")
with open(model_save_path, 'wb') as f:
pickle.dump(history, f)
umatrix_history = prepare_umatrix(
keyword,
X,
history['Z1'],
history['Z2'],
history['sigma'],
None,
u_resolution,
within_5years,
)
return csv_df, labels, X, history, rank, umatrix_history
def draw_umatrix(fig, umatrix_history, viewer_id):
if viewer_id == 'viewer_1':
z = umatrix_history['umatrix1']
elif viewer_id == 'viewer_2':
z = umatrix_history['umatrix2']
zeta = umatrix_history['zeta']
fig.add_trace(
go.Contour(
x=zeta,
y=zeta,
z=z,
name='contour',
colorscale="gnbu",
hoverinfo='skip',
showscale=False,
)
)
return fig
def draw_topics(fig, Y, n_components, viewer_id):
# decomposed by Topic
Y = Y.reshape(Y.shape[0], Y.shape[0])
model_t3 = NMF(
n_components=n_components,
init='nndsvd',
random_state=2,
max_iter=300,
solver='cd'
)
W = model_t3.fit_transform(Y)
if viewer_id == 'viewer_2':
W = model_t3.components_.T
# For mask and normalization(min:0, max->1)
mask_std = np.zeros(W.shape)
mask = np.argmax(W, axis=1)
for i, max_k in enumerate(mask):
mask_std[i, max_k] = 1 / np.max(W)
W_mask_std = W * mask_std
DEFAULT_PLOTLY_COLORS = [
'rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)'
]
alpha = 0.1
DPC_with_Alpha = [k[:-1] + ', ' + str(alpha) + k[-1:] for k in DEFAULT_PLOTLY_COLORS]
for i in range(n_components):
fig.add_trace(
go.Contour(
x=np.linspace(-1, 1, resolution),
y=np.linspace(-1, 1, resolution),
z=W_mask_std[:, i].reshape(resolution, resolution),
name='contour',
colorscale=[
[0, "rgba(0, 0, 0,0)"],
[1.0, DPC_with_Alpha[i]]],
hoverinfo='skip',
showscale=False,
)
)
return fig
def draw_ccp(fig, Y, Zeta, resolution, clickedData, viewer_id):
logger.debug('ccp')
if viewer_id == 'viewer_1':
y = Y[:, get_bmu(Zeta, clickedData)].reshape(resolution, resolution)
colors = WORD_COLORS
elif viewer_id == 'viewer_2':
y = Y[get_bmu(Zeta, clickedData), :].reshape(resolution, resolution)
colors = PAPER_COLORS
fig.add_trace(
go.Contour(
x=np.linspace(-1, 1, resolution),
y=np.linspace(-1, 1, resolution),
z=y,
name='contour',
colorscale=colors,
hoverinfo='skip',
showscale=False,
)
)
return fig
def get_bmu(Zeta, clickData):
clicked_point = [[clickData['points'][0]['x'], clickData['points'][0]['y']]] if clickData else [[0, 0]]
clicked_point = np.array(clicked_point)
dists = dist.cdist(Zeta, clicked_point)
unit = np.argmin(dists, axis=0)
return unit[0]
def draw_scatter(fig, Z, labels, rank, viewer_name):
rank = np.linspace(1, len(labels), len(labels))
logger.debug(f"viewer_name: {viewer_name}")
logger.debug(f"Z: {Z.shape}, labels:{len(labels)}, rank:{len(rank)}")
color = PAPER_COLORS[-1]
if viewer_name == 'viewer_2':
Z = Z[:word_num]
labels = labels[:word_num]
rank = rank[:word_num]
color = WORD_COLORS[-1]
fig.add_trace(
go.Scatter(
x=Z[:, 0],
y=Z[:, 1],
mode=f"markers+text",
name="",
marker=dict(
size=(rank[::-1])*(1 if viewer_name == 'viewer_1' else 0.5),
sizemode='area',
sizeref=2. * max(rank) / (40. ** 2),
sizemin=10,
),
text=(labels if viewer_name == 'viewer_2' else rank),
textfont=dict(
family="sans serif",
size=10,
color='black'
),
hovertext=labels,
hoverlabel=dict(
bgcolor="rgba(255, 255, 255, 0.75)",
),
textposition='top center',
hovertemplate="<b>%{hovertext}</b>",
)
)
# fig.add_annotation(
# x=Z[:, 0],
# y=Z[:, 1],
# text=(labels if viewer_name == 'viewer_2' else list(map(lambda i: str(i), rank))),
# showarrow=False,
# yshift=10)
return fig
def make_figure(history, umatrix_hisotry, X, rank, labels, viewer_name='U_matrix', viewer_id=None, clicked_z=None):
logger.debug(viewer_id)
if viewer_id == 'viewer_1':
Z, Y = history['Z1'], history['Y']
labels = labels[0] if isinstance(labels[0], list) else labels[0].tolist()
elif viewer_id == 'viewer_2':
Z, Y = history['Z2'], history['Y']
X = X.T
labels = labels[1] if isinstance(labels[1], list) else labels[1].tolist()
logger.debug(f"LABELS: {labels[:5]}")
else:
logger.debug("Set viewer_id")
# Build figure
x1, x2 = Z[:, 0].min(), Z[:, 0].max()
y1, y2 = Z[:, 1].min(), Z[:, 1].max()
fig = go.Figure(
layout=go.Layout(
xaxis=dict(
range=[Z[:, 0].min() + 0.05, Z[:, 0].max() + 0.05],
visible=False,
autorange=True,
),
yaxis=dict(
range=[Z[:, 1].min() - 0.1, Z[:, 1].max() + 0.2],
visible=False,
scaleanchor='x',
scaleratio=1.0,
),
showlegend=False,
autosize=True,
plot_bgcolor="#FFFFFF",
margin=dict(
b=0,
t=0,
l=0,
r=0,
),
),
)
if viewer_name == "topic":
n_components = 5
fig = draw_topics(fig, Y, n_components, viewer_id)
elif viewer_name == "CCP":
fig = draw_ccp(fig, Y, history['Zeta'], history['resolution'], clicked_z, viewer_id)
else:
fig = draw_umatrix(fig, umatrix_hisotry, viewer_id)
if viewer_id == 'viewer_2':
_, unique_Z_idx = np.unique(Z, axis=0, return_index=True)
logger.debug(unique_Z_idx)
duplicated_Z_idx = np.setdiff1d(np.arange(Z.shape[0]), unique_Z_idx)
labels = np.array(labels)
labels[duplicated_Z_idx] = ''
fig = draw_scatter(fig, Z, labels, rank, viewer_id)
fig.update_coloraxes(
showscale=False
)
fig.update_layout(
plot_bgcolor=(PAPER_COLOR if viewer_id == 'viewer_1' else WORD_COLOR),
)
fig.update(
layout_coloraxis_showscale=False,
layout_showlegend=False,
)
fig.update_yaxes(
fixedrange=True,
)
fig.update_xaxes(
fixedrange=True,
)
return fig
def make_first_figure(viewer_id):
_, labels, X, history, rank, umatrix_hisotry = prepare_materials('Machine Learning', 'TSOM', False)
return make_figure(history, umatrix_hisotry, X, rank, labels, 'U-matrix', viewer_id, None)
def draw_toi(fig, clickData, view_method, viewer_id):
if not clickData:
return fig
color = {
CCP_VIEWER: 'green',
UMATRIX_VIEWER: '#ffd700',
TOPIC_VIEWER: 'yellow',
}[view_method]
color = PAPER_COLORS if viewer_id == 'viewer_1' else WORD_COLORS
x, y = clickData['points'][0]['x'], clickData['points'][0]['y']
radius = 0.15
fig.add_shape(
type='circle',
line=dict(
color=color[0],
width=9.0,
),
x0=(x - radius),
y0=(y - radius),
x1=(x + radius),
y1=(y + radius),
)
fig.add_shape(
type='circle',
line=dict(
color=color[-1],
width=5,
),
x0=(x - radius),
y0=(y - radius),
x1=(x + radius),
y1=(y + radius),
)
return fig
``` |
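
get_bmu above maps a clicked (x, y) point to the index of the nearest node on the latent grid via a pairwise-distance argmin. The standalone snippet below reproduces just that lookup with a 10x10 grid matching the module's resolution, to make the behaviour concrete:

```python
# Standalone illustration of the nearest-unit lookup used by get_bmu().
import numpy as np
from scipy.spatial import distance as dist

grid = np.linspace(-1, 1, 10)
Zeta = np.array([[a, b] for a in grid for b in grid])  # 100 latent-grid nodes
clicked_point = np.array([[0.12, -0.3]])

unit = np.argmin(dist.cdist(Zeta, clicked_point), axis=0)[0]
print(unit, Zeta[unit])  # index and coordinates of the closest grid node
```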
{
"source": "jphacks/F_2004",
"score": 3
} |
#### File: F_2004/src/model.py
```python
from . import app
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.functions import current_timestamp
db = SQLAlchemy(app)
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(100), nullable=False)
group_id = db.Column(db.Integer, nullable=False)
group_name = db.Column(db.String(100), nullable=True)
is_watch = db.Column(db.Boolean, default=False, nullable=False)
created_at = db.Column(
db.TIMESTAMP,
server_default=current_timestamp(),
default=current_timestamp(),
nullable=False
)
def to_dict(self):
return dict(
id=self.id,
name=self.name,
group_id=self.group_id,
group_name=self.group_name,
is_watch=self.is_watch,
created_at=self.created_at
)
def __repr__(self):
return f'<User {self.id}:{self.name},{self.created_at}>'
class ConcentrationValue(db.Model):
__tablename__ = "concentration_values"
user_id = db.Column(
db.Integer,
db.ForeignKey("users.id"),
primary_key=True,
nullable=False)
concentration_value = db.Column(db.SMALLINT, nullable=False)
is_sitting = db.Column(db.Boolean, nullable=False)
created_at = db.Column(
db.TIMESTAMP,
server_default=current_timestamp(),
default=current_timestamp(),
primary_key=True,
nullable=False
)
def to_dict(self):
return dict(
user_id=self.user_id,
concentration_value=self.concentration_value,
is_sitting=self.is_sitting,
created_at=self.created_at
)
def __repr__(self):
return f'<ConcentrationValue {self.user_id}({self.created_at}):{self.concentration_value},{self.is_sitting}>'
``` |
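
Both models expose to_dict() for building JSON responses, and rows go through the shared SQLAlchemy session. A minimal sketch of inserting a user and one concentration sample; it assumes the Flask app and database configured in the package __init__ are available, and the values are illustrative:

```python
# Sketch only: assumes the Flask app / database in src/__init__.py are set up.
from src.model import db, User, ConcentrationValue

user = User(name="Alice", group_id=1, group_name="lab")
db.session.add(user)
db.session.commit()

sample = ConcentrationValue(user_id=user.id, concentration_value=72, is_sitting=True)
db.session.add(sample)
db.session.commit()

print(user.to_dict())
print(sample.to_dict())
```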
{
"source": "jphacks/F_2006",
"score": 2
} |
#### File: jphacks/F_2006/main.py
```python
from flask import Flask, request, render_template, jsonify, redirect, url_for
from flask_cors import CORS
import os
import json
from datetime import datetime
import uuid
import morph
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_marshmallow.fields import fields
from flask_login import LoginManager, login_user, logout_user, login_required, UserMixin, current_user
import urllib.parse
from Cryptodome.Cipher import AES
app = Flask(__name__)
CORS(app)
db_uri = os.environ.get('DATABASE_URL') or "postgresql://admin:admin@localhost:5433/flash-reading-db"
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
db = SQLAlchemy(app)
class tDocuments(db.Model):
__tablename__ = 't_documents'
uuid = db.Column(db.String(36), primary_key=True)
user_uuid = db.Column(db.String(36), primary_key=False)
name = db.Column(db.Text, primary_key=False)
content = db.Column(db.Text, primary_key=False)
current_pos = db.Column(db.Integer, primary_key=False)
created_at = db.Column(db.DateTime(), primary_key=True)
updated_at = db.Column(db.DateTime(), primary_key=True)
def __init__(self, user_uuid, uuid, name, content, current_pos):
self.uuid = uuid
self.user_uuid = user_uuid
self.name = name
self.content = content
now = datetime.now()
self.created_at = now
self.updated_at = now
self.current_pos = current_pos
ma = Marshmallow(app)
class tDocumentsSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = tDocuments
load_instance = True
created_at = fields.DateTime('%Y-%m-%dT%H:%M:%S+09:00')
updated_at = fields.DateTime('%Y-%m-%dT%H:%M:%S+09:00')
class tSplitUnits(db.Model):
__tablename__ = 't_split_units'
uuid = db.Column(db.String(36), primary_key=True)
doc_uuid = db.Column(db.String(36), primary_key=False)
index = db.Column(db.Integer, primary_key=False)
content = db.Column(db.Text, primary_key=False)
class tSplitUnitsSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = tSplitUnits
load_instance = True
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
app.config['SECRET_KEY'] = "secret"
class tUsers(UserMixin, db.Model):
__tablename__ = 't_users'
id = db.Column(db.String(36), primary_key=True)
user_name = db.Column(db.String(36), primary_key=False)
password = db.Column(db.String(36), primary_key=False)
# キー設定関数
def create_key(KeyWord):
key_size = 32
KeySizeFill = KeyWord.zfill(key_size)
Key = KeySizeFill[:key_size].encode('utf-8')
return Key
# パスワードの暗号化関数
def encryptPassword(PassWord, KeyWord):
iv = b"1234567890123456"
Key = create_key(KeyWord)
obj = AES.new(Key, AES.MODE_CFB, iv)
ret_bytes = obj.encrypt(PassWord.encode(encoding='utf-8'))
return ret_bytes
# パスワードの複合化関数
def decodePassword(Password, KeyWord):
iv = b"1234567890123456" # 初期化ベクトル設定
key = create_key(KeyWord) # キー設定
obj = AES.new(key, AES.MODE_CFB, iv)
    OPassword = obj.decrypt(Password.encode(encoding='utf-8')).decode('utf-8') #パスワードの複合化
return OPassword
@login_manager.user_loader
def load_user(uuid):
userDoc = db.session.\
query(tUsers).\
filter(tUsers.id==uuid).\
first()
return userDoc
@app.route('/')
def home():
return render_template('home.html', baseUrl=request.base_url, docObj="{}")
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
user_name = request.form['user-name']
plain_password = request.form['password']
password = str(encryptPassword(plain_password, user_name))
userDoc = db.session.\
query(tUsers).\
filter(tUsers.user_name==user_name).\
filter(tUsers.password==password).\
first()
print(userDoc)
if userDoc:
login_user(userDoc)
return redirect(url_for('home'))
else:
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="ログインエラーが発生しました")
else:
return render_template('login.html', buttonName="Login", formName='ログイン', action="login", docObj="{}")
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
user_name = request.form['user-name']
password = request.form['password']
if len(password) > 32:
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="パスワードが長すぎます (32 文字以内にしてください)")
password = str(encryptPassword(password, user_name))
userDoc = db.session.\
query(tUsers).\
filter(tUsers.user_name==user_name).\
first()
if userDoc:
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="すでにユーザーが存在します")
userDoc = tUsers(id=uuid.uuid4(), user_name=user_name, password=password)
db.session.add(userDoc)
db.session.commit()
return redirect(url_for('home'))
else:
return render_template('login.html', buttonName="Register", formName='ユーザー登録', action="register", docObj="{}")
@app.route('/list')
@login_required
def list():
docs = db.session.\
query(tDocuments).\
filter(tDocuments.user_uuid == current_user.id).\
all()
print(docs)
docsJson = tDocumentsSchema(many=True).dump(docs)
docList = docs
docsObj = {}
docsObj['docs'] = docsJson
return render_template('list.html', baseUrl=request.base_url, docList=docList, docObj="{}", docsObj=docsObj)
# uuid 既存の文書のみ
@app.route('/doc/<string:uuid>')
@login_required
def doc(uuid):
doc = db.session.\
query(tDocuments).\
filter(tDocuments.uuid==uuid).\
first()
print(doc)
docJson = tDocumentsSchema().dump(doc)
print(docJson)
docObj = {}
docObj['doc'] = docJson
units = db.session.\
query(tSplitUnits).\
filter(tSplitUnits.doc_uuid == doc.uuid).\
order_by(tSplitUnits.index).\
all()
unitsJson = tSplitUnitsSchema(many=True).dump(units)
print(unitsJson)
docObj['units'] = unitsJson
return render_template('index.html', baseUrl=request.base_url, canInsert=False, docObj=docObj, sentence="", clipboard=False)
@app.route('/read')
def read():
sentence = request.args.get('q')
clipboard = request.args.get('clipboard')
if not sentence:
sentence = ""
if clipboard == 'true':
clipboard = True
return render_template('index.html', baseUrl=request.base_url, canInsert=True, docObj="{}", sentence=sentence, clipboard=clipboard)
@app.route('/result', methods=["POST"])
def result():
if request.headers['Content-Type'] != 'application/json':
print(request.headers['Content-Type'])
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="JSON を POST してください")
data = request.json
print(data)
data = data['text']
# ここで処理
texts = morph.morph(data)
data = {
'text': texts
}
return jsonify(data)
# required param: content, name, current_pos, split_units, user_uuid
@app.route('/insert', methods=["POST"])
@login_required
def insert():
if request.headers['Content-Type'] != 'application/json':
print(request.headers['Content-Type'])
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="JSON を POST してください")
data = request.json
print(data)
docUuid = uuid.uuid4()
doc = tDocuments(uuid=docUuid, user_uuid=current_user.id, content=data['content'], name=data['name'], current_pos=data['current_pos'])
db.session.add(doc)
idx = 0
for unit in data['split_units']:
unitUuid = uuid.uuid4()
unitDoc = tSplitUnits(uuid=unitUuid, doc_uuid=docUuid, index=idx, content=unit)
db.session.add(unitDoc)
idx += 1
db.session.commit()
return jsonify(success=True)
# required param: uuid, current_pos
@app.route('/update', methods=["POST"])
@login_required
def update():
if request.headers['Content-Type'] != 'application/json':
print(request.headers['Content-Type'])
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="JSON を POST してください")
data = request.json
print(data)
doc = db.session.\
query(tDocuments).\
filter(tDocuments.uuid==data['uuid']).\
first()
doc.current_pos = data['current_pos']
doc.updated_at = datetime.now()
db.session.commit()
return jsonify(success=True)
# required param: uuid のみ
@app.route('/delete', methods=["POST"])
@login_required
def delete():
if request.headers['Content-Type'] != 'application/json':
print(request.headers['Content-Type'])
return render_template('error.html', baseUrl=request.base_url, canInsert=False, docObj="{}", sentence="JSON を POST してください")
data = request.json
print(data)
doc = db.session.\
query(tDocuments).\
filter(tDocuments.uuid==data['uuid']).\
first()
db.session.delete(doc)
units = db.session.\
query(tSplitUnits).\
filter(tSplitUnits.doc_uuid==data['uuid']).\
all()
for unit in units:
db.session.delete(unit)
db.session.commit()
return jsonify(success=True)
#おまじない
if __name__ == '__main__':
app.run(debug=True)
``` |
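
The /insert endpoint expects a JSON body with name, content, current_pos and split_units and requires a logged-in session, as do /update and /delete. A hedged sketch of driving it with requests; the base URL is the Flask default and the account credentials are placeholders:

```python
# Sketch only: http://localhost:5000 is the Flask default, and the demo account is a placeholder.
import requests

base = "http://localhost:5000"
s = requests.Session()
s.post(f"{base}/login", data={"user-name": "demo", "password": "demo"})

doc = {
    "name": "sample",
    "content": "これはテストです",
    "current_pos": 0,
    "split_units": ["これは", "テスト", "です"],
}
print(s.post(f"{base}/insert", json=doc).json())  # {"success": true}
```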
{
"source": "jphacks/F_2009",
"score": 3
} |
#### File: flask/modules/history_module.py
```python
import datetime
from .bathroom_monitor import BathroomMonitor
def get_range(time_list,range):
if (len(time_list['grandma_list']) >= 7):
if (range == 'week'):
week_list = time_list['grandma_list'][0:7]
return week_list
if (range == 'all'):
all_list = time_list['grandma_list']
return all_list
else:
# データが一週間分なければすべて返す
if (range == 'all'):
all_list = time_list['grandma_list']
return all_list
def datetime_to_date(datetime_str):
# datetime型に変換
tdatetime = datetime.datetime.strptime(datetime_str, '%Y%m%d%H%M%S')
# 任意の文字列に変換
tstr = tdatetime.strftime('%Y-%m-%d')
return tstr
def calc_mean(time_list):
# 平均値取得
if len(time_list) == 0:
return 0
else:
sum = 0
for item in time_list:
sum += int(item['bath_time'])
mean = int(sum / len(time_list))
return mean
def reverse_list(time_list):
# 配列を逆順にする
time_list.reverse()
return time_list
def check_history(user_id, window):
"""
:param user_id:
:param window: 範囲、all, week ...
:return:
"""
bm = BathroomMonitor(user_id)
# デバイスから入浴履歴取得
grandparents_time_dict = bm.fast_list()
if grandparents_time_dict["status"] == 400:
return {"result":"error","message":"データの取得に失敗しました"}
# window分の期間を抽出
week_list = get_range(grandparents_time_dict,window)
# リストを逆順に並べ替え
week_list_reverse = reverse_list(week_list)
result_dict = {"result": "success", "history": [], "mean": None, "message": "データの取得に成功しました"}
for index, item in enumerate(week_list_reverse):
# 日付抽出
date = datetime_to_date(item["checkin_time"])
result_dict["history"].append({"date": date, "minute": int(item["bath_time"])})
# 平均計算
result_dict["mean"] = calc_mean(week_list)
return result_dict
``` |
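
check_history builds its history from the reversed window and reports the mean bath time over it. The helpers can be exercised directly with made-up data, as in the sketch below; the import path is an assumption:

```python
from modules.history_module import get_range, reverse_list, calc_mean, datetime_to_date  # path assumed

data = {"grandma_list": [
    {"checkin_time": "20201107193000", "bath_time": "25"},
    {"checkin_time": "20201106194500", "bath_time": "30"},
]}
window = get_range(data, "all")
print(calc_mean(window))                   # 27  (integer mean of 25 and 30)
print(datetime_to_date("20201107193000"))  # 2020-11-07
print(reverse_list(window)[0]["bath_time"])  # last entry of the original window
```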
{
"source": "jphacks/F_2110",
"score": 3
} |
#### File: demo/demo/save_epoch_1000_to_DB.py
```python
import glob
import base64
import os
import psycopg2
import base64
import yaml
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import settings
#DB接続用ステータス設定
path = "localhost"
port = "5432"
dbname = settings.DBNAME
user = settings.USER
password = settings.PW
# PATH読み込み
top = settings.top
imgsplit = settings.imgsplit
EPOCH_1000_IMG = '20211028_200545epoch_1000.png_split.png'
def main():
# 拡張子なしのファイル名を取得
filename = os.path.splitext(os.path.basename(EPOCH_1000_IMG))[0]
print(filename)
# ファイルにbase64の内容を書き込む
file_data = open(os.path.join(top, imgsplit,'') + EPOCH_1000_IMG, "rb").read()
b64_data = base64.b64encode(file_data).decode('utf-8')
#接続部分
conText = "host={} port={} dbname={} user={} password={}"
conText = conText.format(path,port,dbname,user,password)
connection = psycopg2.connect(conText)
cur = connection.cursor()
#DBにデータを保存
sql = "insert into tbl_create_image(user_id,image) values('masaru',%s) on conflict (user_id) do update set image=%s;"
cur.execute(sql,(b64_data,b64_data,))
connection.commit()
print("--- end ---")
if __name__ == '__main__':
main()
```
#### File: make_2dimage/postgres/take_img_from_db.py
```python
import base64
import os
import psycopg2
import base64
from io import BytesIO
from PIL import Image
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import settings
# PATH読み込み
top = settings.top
script = settings.script
image = settings.image
#DB接続用ステータス設定
path = "localhost"
port = "5432"
dbname = settings.DBNAME
user = settings.USER
password = settings.PW
def main():
print('------------------ START OF THIS SERIES IF PROCESSING ------------------')
print('--------- THIS FILE IS take_img_from_db.py ---------')
#DBへの接続部分
conText = "host={} port={} dbname={} user={} password={}"
conText = conText.format(path,port,dbname,user,password)
connection = psycopg2.connect(conText)
cur = connection.cursor()
#DBにデータを保存
sql = "select id, image from tbl_save_image where user_id='masaru';"
cur.execute(sql)
result = cur.fetchall()
for row in result:
# 左側についている'data:image/png;base64,'を除去
img_base64 = row[1].rsplit('data:image/png;base64,')[-1]
# base64をPNGにデコードして、保存
im = Image.open(BytesIO(base64.b64decode(img_base64)))
im.save(os.path.join(top, image, '') + str(row[0]) +'_image.png', 'PNG')
connection.close()
print('--------- EOF ---------')
if __name__ == '__main__':
main()
# dcgan_model2.pyを走らせる。
exec(open(os.path.join(top,script,"dcgan_model2.py")).read())
```
#### File: make_2dimage/script/pngtobase64.py
```python
import glob
import base64
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import settings
# PATH読み込み
top = settings.top
imgsplit = settings.imgsplit
# メイン関数
def main():
print('--------- THIS FILE IS pngtobase64.py ---------')
# フォルダ内にあるデータから、一番最新のものをとってくる。
# imgsplitフォルダ配下からのファイルを全て取得してリストにする。
last_img = os.listdir(os.path.join(top, imgsplit,''))
# 昇順になるように並び替える
last_img.sort()
# 一番最新のものを持ってくる。(昇順に並べられてるので、最新のものは最後尾。)
last_img_name = last_img[-1]
# 'img_b64.js'ファイルに書き込む。
f = open(os.path.join(top, 'img_b64.js'), 'w')
# 拡張子なしのファイル名を取得
filename = os.path.splitext(os.path.basename(last_img_name))[0]
# ファイルにbase64にしたものを書き込む
f.write("const " + filename + "_B64 = \"data:image/png;base64,")
file_data = open(os.path.join(top, imgsplit,'') + last_img_name, "rb").read()
b64_data = base64.b64encode(file_data).decode('utf-8')
f.write(b64_data)
f.write("\";\n")
f.close()
print('--------- EOF ---------')
if __name__ == '__main__':
main()
# フォルダの中身を全てbase64にする
'''
# ソートしたファイルリストを取得
images = sorted(glob.glob(IMG_FOLDER_PATH))
f = open(SAVE_FILE_PATH, 'w')
for fpath in images:
# 拡張子なしのファイル名を取得
filename = os.path.splitext(os.path.basename(fpath))[0]
print(filename)
# ファイルにbase64の内容を書き込む
f.write("const " + filename + "_B64 = \"data:image/png;base64,")
file_data = open(fpath, "rb").read()
b64_data = base64.b64encode(file_data).decode('utf-8')
f.write(b64_data)
f.write("\";\n")
f.close()
print("--- end ---")
'''
``` |
{
"source": "jphacks/FK_1707",
"score": 3
} |
#### File: linebot/bot/touroku.py
```python
import requests
import json
import re
import psycopg2.extensions
import bot.secret as secret
def reply_text(cur, reply_token, REPLY_ENDPOINT, HEADER, text, userid):
reply = ''
"""
url= secret.WCDAPI
response = requests.get(url)
tenki = json.loads(response.text)
"""
if re.match('登録 ', text):
memo = text[3:]
cur.execute("INSERT INTO touroku(userid, data) VALUES(%s, %s);", [
userid, memo])
reply += "「" + memo + '」を登録しました。'
elif re.match('削除 ', text):
memo = text[3:]
if memo == '全部' or memo == 'ぜんぶ' or memo == 'すべて' or memo == '全て':
cur.execute("DELETE FROM touroku WHERE userid=%s", [userid])
reply += "すべてのメモを削除しました。"
elif memo == '最後' or memo == 'さいご':
cur.execute("SELECT * FROM touroku WHERE userid=%s", [userid])
sakujo_taplelist = cur.fetchall()
last_memo = len(sakujo_taplelist) - 1
idz = sakujo_taplelist[last_memo][0]
reply += "「" + sakujo_taplelist[last_memo][2] + "」を削除しました。"
cur.execute("DELETE FROM touroku WHERE id=%s", [idz])
else:
memo = int(memo) - 1
cur.execute("SELECT * FROM touroku WHERE userid=%s", [userid])
sakujo_taplelist = cur.fetchall()
idz = sakujo_taplelist[memo][0]
reply += "「" + sakujo_taplelist[memo][2] + "」を削除しました。"
cur.execute("DELETE FROM touroku WHERE id=%s", [idz])
elif text == '一覧':
cur.execute("SELECT * FROM touroku WHERE userid = %s", [userid])
itiran_taplelist = cur.fetchall()
        if len(itiran_taplelist) != 0:
print(itiran_taplelist)
for i, j in enumerate(itiran_taplelist):
reply += str(i+1) + " " + j[2] + '\n'
reply = reply[:-1]
else:
reply += "何も登録されていません!"
elif re.match('おうむがえし ', text):
reply += text[7:]
elif re.match('userid', text):
reply += userid
payload = {
"replyToken": reply_token,
"messages": [
{
"type": "text",
"text": reply
}
]
}
requests.post(REPLY_ENDPOINT, headers=HEADER,
data=json.dumps(payload)) # LINEにデータを送信
return reply
``` |
{
"source": "jphacks/FK_1809",
"score": 3
} |
#### File: FK_1809/recommend/extract_color.py
```python
from PIL import Image
import cv2
import sklearn
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def extract_main_color(img_path, k_num):
cv2_img = cv2.imread(img_path)
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
cv2_img = cv2_img.reshape((cv2_img.shape[0] * cv2_img.shape[1], 3))
cluster = KMeans(n_clusters=k_num)
cluster.fit(X=cv2_img)
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
n_clusters=5, n_init=10, n_jobs=1, precompute_distances='auto',
random_state=None, tol=0.0001, verbose=0)
hist = [0] * k_num
result = cluster.fit_predict(cv2_img)
for item in result:
hist[item] += 1
sorted_index = np.argsort(hist)
print(sorted_index)
hist = np.array(hist)[sorted_index[:-1]]
hist = hist / hist.sum()
color_list = cluster.cluster_centers_[sorted_index[:-1]]
plt.bar(np.arange(1,k_num), hist, color=cluster.cluster_centers_[sorted_index[:-1]]/256)
plt.show()
res = [{"color": c, "percent": h} for c, h in zip(color_list, hist)]
return res
hoge = extract_main_color('/home/sotaro/develop/hachathon/FK_1809/wearlog/app/assets/images/wear_images/20171201112823521_320.jpg', 4)
``` |
{
"source": "jphacks/FK_1906",
"score": 2
} |
#### File: jphacks/FK_1906/server.py
```python
import sys
import time
import math
import cv2
import json
import base64
import requests
import numpy
import threading
import queue
import numpy as np
import os
import io
# request フォームから送信した情報を扱うためのモジュール
# redirect ページの移動
# url_for アドレス遷移
from flask import Flask, request, redirect, url_for, render_template, jsonify
# ファイル名をチェックする関数
from werkzeug.utils import secure_filename
# 画像のダウンロード
from flask import send_from_directory
from api import videoReader
from sound import analyze_sound
import matplotlib.pyplot as plt
import moviepy.editor as mp
from models.models import Progress
from models.database import db_session
app = Flask(__name__)
# 学習済みモデルのロード
import pickle
models = {}
for filename in os.listdir('data'):
label = filename.split('.')[0]
models[label] = pickle.load(open(os.path.join('data', filename), 'rb'))
# 画像のアップロード先のディレクトリ
UPLOAD_FOLDER = './uploads'
# アップロードされる拡張子の制限
ALLOWED_EXTENSIONS = set(['mp4'])
def digitize_score(target, begin, end, digits=5):
return np.digitize(target, bins=np.linspace(begin, end, digits+1)[1:-1])
def allwed_file(filename):
# .があるかどうかのチェックと、拡張子の確認
# OKなら1、だめなら0
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def set_progress_data(frames, progress):
progress_data = Progress.query.first()
progress_data.movie_frames = frames
progress_data.movie_progress = progress
db_session.add(progress_data)
db_session.commit()
def user_pich_image(pich_mean_score):
image_path = "/static/images/"
if(pich_mean_score > 20):
image_path += "top.png"
elif(pich_mean_score > 10):
image_path += "middle.png"
elif(pich_mean_score >= 0):
image_path += "buttom.png"
return image_path
def user_yaw_image(yaw_var_score, yaw_mean):
image_path = "/static/images/"
if(yaw_var_score >= 10):
image_path += "center_five.png"
else:
if(-10 <= yaw_mean and yaw_mean <= 10):
image_path += "center_one.png"
elif(yaw_mean > 10):
image_path += "left_two.png"
elif(yaw_mean < -10):
image_path += "right_two.png"
return image_path
# ファイルを受け取る方法の指定
@app.route('/', methods=['GET', 'POST'])
def uploads_file():
now_loading = True
if request.method == "POST":
if 'file' not in request.files:
print("ファイルがありません")
else:
img = request.files["file"]
filename = secure_filename(img.filename)
root, ext = os.path.splitext(filename)
ext = ext.lower()
gazouketori = set([".mp4"])
if ext not in gazouketori:
return render_template('index.html',massege = "対応してない拡張子です",color = "red")
print("success")
try:
### Main ######################################################################
file = request.files['file']
videoSource = os.path.join(app.config['UPLOAD_FOLDER'], img.filename)
file.save(videoSource)
print("videonSouse", videoSource)
print("app",app.config['UPLOAD_FOLDER'])
sound_analize_result = analyze_sound(videoSource)
# Extract audio from input video.
clip_input = mp.VideoFileClip(videoSource).subclip()
clip_input.audio.write_audiofile('audio.mp3')
gaze_list = videoReader(videoSource)
set_progress_data(-1, -1) #progress go to write video
editedVideoSource = os.path.join(app.config['UPLOAD_FOLDER'], "edited.avi")
# Add audio to output video.
clip_output = mp.VideoFileClip(editedVideoSource).subclip()
clip_output.write_videofile(editedVideoSource.replace('.avi', '.mp4'), audio='audio.mp3')
set_progress_data(0, 0) #progress go to finish and database reset
yaw_list, pich_list = zip(*gaze_list)
yaw_list, pich_list = np.array(yaw_list), np.array(pich_list)
yaw_mean, yaw_var = np.mean(yaw_list), np.var(yaw_list)
pich_mean, pich_var = np.mean(pich_list), np.var(pich_list)
print("[yaw] mean: {}, var: {}".format(yaw_mean, yaw_var))
print("[pich] mean: {}, var: {}".format(pich_mean, pich_var))
center_range = np.array([-10, 10])
LEFT = 0
CENTER = 1
RIGHT = 2
yaw_distribution = {LEFT: 0, CENTER: 0, RIGHT: 0}
for yaw in yaw_list:
pos = np.digitize(yaw, bins=center_range)
yaw_distribution[pos] += 1
num_total = float(len(yaw_list))
left_rate = yaw_distribution[LEFT] / num_total
center_rate = yaw_distribution[CENTER] / num_total
right_rate = yaw_distribution[RIGHT] / num_total
print("left: {}, center: {}, right: {}".format(left_rate, center_rate, right_rate))
img = io.BytesIO()
plt.hist(yaw_list, bins=50)
plt.savefig(img, format='png')
# img.seek(0)
plot_b64str = base64.b64encode(img.getvalue()).decode("utf-8")
plot_b64data = "data:image/png;base64,{}".format(plot_b64str)
plt.clf()
amp_mean = sound_analize_result["volume_mean"]
amp_var = sound_analize_result["volume_var"]
fle_var = sound_analize_result["tone_var"]
# スコアの計算
# ヒューリスティック ver
#yaw_mean_score = digitize_score(yaw_mean, 0.3, 0.8)
yaw_var_score = digitize_score(yaw_var, 30, 10)
pich_mean_score = digitize_score(pich_mean, 20, 10)
amp_var_score = digitize_score(amp_var, 5, 10)
fle_var_score = digitize_score(fle_var, 10, 20)
#gaze_score = sum((yaw_mean_score, yaw_var_score, pich_mean_score)) * 5
intonation_score = sum((amp_var_score, fle_var_score) * 5)
# 機械学習 ver
yaw_var = yaw_var.reshape(-1, 1)
pich_mean = pich_mean.reshape(-1, 1)
volume_mean = amp_mean.reshape(-1, 1) # Renaming
tone_var = fle_var.reshape(-1, 1) # Renaming
yaw_var_score = int(models['yaw_var_score'].predict(yaw_var)*0.2)
pich_mean_score = int(models['pich_mean_score'].predict(pich_mean)*0.3)
volume_mean_score = int(models['volume_mean_score'].predict(volume_mean)*0.3)
tone_var_score = int(models['tone_var_score'].predict(tone_var)*0.2)
total_score = yaw_var_score + pich_mean_score + volume_mean_score + tone_var_score
print("yaw_var_score: ", yaw_var_score)
print("pich_mean_score: ", pich_mean_score)
print("volume_mean_score: ", volume_mean_score)
print("tone_var_score: ", tone_var_score)
print("[total_score]: ", total_score)
print("yaw_mean:", yaw_mean)
#Image Path の指定
pich_image_path = user_pich_image(pich_mean_score)
yaw_image_path = user_yaw_image(yaw_var_score, yaw_mean)
kwargs = {
"predicted" : True,
"yaw_mean" : yaw_mean,
"yaw_var" : yaw_var,
"pich_mean" : pich_mean,
"pich_var" : pich_var,
"left_rate" : left_rate,
"center_rate": center_rate,
"right_rate" : right_rate,
"amp_mean" : amp_mean,
"amp_var" : amp_var,
"fle_var" : fle_var,
"yaw_var_score": yaw_var_score,
"pich_mean_score": pich_mean_score,
"amp_var_score": amp_var_score,
"fle_var_score": fle_var_score,
"intonation_score": intonation_score,
"plot_url" : plot_b64data,
"total_score": total_score,
"volume_mean_score": volume_mean_score,
"tone_var_score": tone_var_score,
"pich_image_path": pich_image_path,
"yaw_image_path": yaw_image_path
}
params_for_train = {
"yaw_var" : yaw_var, # 目線の左右の分散
"pich_mean" : pich_mean, # 目線の高さの平均
"volume_mean": amp_mean, # 声の大小の平均
"tone_var" : fle_var # 声のトーンの分散
}
now_loading = False
write_analysis_result(filename, params_for_train)
return render_template("index.html", now_loading=now_loading, **kwargs)
except Exception as e:
print(e)
return render_template('index.html',massege = "解析出来ませんでした",color = "red")
else:
print("get request")
return render_template('index.html', now_loading=now_loading)
def write_analysis_result(filepath, results):
filename = os.path.basename(filepath)
with open(os.path.join("./results", filename+".txt"), mode='w') as f:
for key, value in results.items():
result_str = "{}:{}\n".format(key, value)
f.write(result_str)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.route("/progress",methods=["post"])
def progress():
progress = Progress.query.first()
text = ""
if progress is None:
text = "処理中"
elif progress.movie_frames <= 0 and progress.movie_progress <= 0:
status = progress.movie_frames
if status == 0:
text = ""
elif status == -1:
text = "動画保存中"
else :
text = str(progress.movie_progress) + "/" + str(progress.movie_frames)
return jsonify({'text' : text})
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
import subprocess
def analyze_localy(dirname):
video_files = [os.path.join(dirname, video_filename) for video_filename in os.listdir(dirname)]
for filename in video_files:
sound_analize_result = analyze_sound(filename)
gaze_list = videoReader(filename)
try:
yaw_list, pich_list = zip(*gaze_list)
except:
print("##############################")
print("gaze_list: ", gaze_list)
print("##############################")
continue
yaw_list, pich_list = np.array(yaw_list), np.array(pich_list)
yaw_mean, yaw_var = np.mean(yaw_list), np.var(yaw_list)
pich_mean, pich_var = np.mean(pich_list), np.var(pich_list)
amp_mean = sound_analize_result["volume_mean"]
fle_var = sound_analize_result["tone_var"]
params_for_train = {
"yaw_var" : yaw_var, # 目線の左右の分散
"pich_mean" : pich_mean, # 目線の高さの平均
"volume_mean": amp_mean, # 声の大小の平均
"tone_var" : fle_var # 声のトーンの分散
}
write_analysis_result(filename, params_for_train)
basename = os.path.basename(filename)
subprocess.run(['mv', 'uploads/edited.avi', 'uploads/edited_'+basename+'.avi'])
if __name__ == '__main__':
app.run(host='0.0.0.0')
```
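
digitize_score bins a raw measurement against digits-1 evenly spaced thresholds between begin and end, giving a 0..digits-1 score; passing begin > end (as in digitize_score(yaw_var, 30, 10)) reverses the scale so that smaller values score higher. A short worked example (the function is re-defined standalone here for clarity):

```python
# Worked example of the scoring helper from server.py, re-defined standalone.
import numpy as np

def digitize_score(target, begin, end, digits=5):
    return np.digitize(target, bins=np.linspace(begin, end, digits + 1)[1:-1])

print(digitize_score(18, 10, 20))  # thresholds [12, 14, 16, 18] -> 4 (top of the 0..4 scale)
print(digitize_score(25, 30, 10))  # reversed thresholds [26, 22, 18, 14] -> 1 (large value scores low)
```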
#### File: jphacks/FK_1906/sound.py
```python
import numpy as np
import audiosegment
from matplotlib import pyplot as plt
import time
from pydub import AudioSegment
plt.ion()
def analyze_sound(video_source):
volume_mean, volume_var = analyze_volume(video_source)
tone_var = analyze_tone(video_source, display=False)
return {'volume_mean': volume_mean, 'volume_var': volume_var, 'tone_var': tone_var}
def analyze_volume(video_source):
sound = AudioSegment.from_file(video_source, format="mp4")
sound = np.array(sound.get_array_of_samples())
sound = np.abs(sound)
mean = sound.mean()
var = sound.var()
return mean, var
def analyze_tone(video_source, display=False):
sound = audiosegment.from_file(video_source).resample(sample_rate_Hz=24000, sample_width=2, channels=1)
hist_bins, hist_vals = sound.fft()
hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)
mean = np.mean(hist_vals_real_normed)
max_hz, min_hz = 1200, 400
num_samples = len(sound)
offset = 1000
total_data = np.zeros(max_hz-min_hz)
for i in range(0, num_samples-offset, offset):
hist_bins, hist_vals = sound[i:i+offset].fft()
hist_bins, hist_vals = hist_bins[min_hz:max_hz], hist_vals[min_hz:max_hz]
hist_vals = np.abs(hist_vals) / len(hist_vals)
hist_vals = np.where(hist_vals >= 500, hist_vals / mean, 0)
total_data += hist_vals
if display:
plt.plot(hist_bins, hist_vals)
plt.xlabel("Hz")
plt.ylabel("dB")
plt.draw()
plt.pause(1)
mean = np.mean(total_data)
# total_data /= mean
distribution = np.array([], dtype="int32")
for i, num_samples in enumerate(total_data):
hz = i + min_hz
hz_array = np.full(int(num_samples), hz)
distribution = np.append(distribution, hz_array)
return np.var(distribution)
if __name__ == '__main__':
import sys
video_source = sys.argv[1]
print("tone var: ", analyze_tone(video_source, True))
volume_result = analyze_volume(video_source)
print("volume:", volume_result)
time.sleep(10)
``` |
{
"source": "jphacks/FK_1908",
"score": 3
} |
#### File: server/recognition/api.py
```python
import flask
import os
import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, request
from keras.models import load_model
classes = ['no_breads', 'breads']
from config import *

# Load the trained Keras model once at import time and keep the TF graph for inference.
global model, graph
model = load_model(os.path.dirname(os.path.abspath(__file__)) + '/' + conf.model_name)
graph = tf.compat.v1.get_default_graph()
app = flask.Flask(__name__)
@app.route('/', methods = ['POST'])
def handle_request():
stream = request.files['image'].stream
img_array = np.asarray(bytearray(stream.read()), dtype=np.uint8)
img = cv2.imdecode(img_array, 1)
img = np.asarray(img)
img = img.reshape((1, conf.image_size, conf.image_size, 3))
with graph.as_default():
result = model.predict(img)
print(parse2label(result)[0])
return parse2label(result)[0]
@app.route('/hello', methods = ['GET'])
def handle_hello():
return 'hello'
def parse2label(predicted_arr):
return [conf.classes[np.argmax(result)] for result in predicted_arr]
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
``` |
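
The endpoint takes a multipart upload under the field name image and answers with the predicted label as plain text. A client sketch; the image path is a placeholder, and note that the handler reshapes rather than resizes, so the uploaded image is assumed to already be conf.image_size x conf.image_size pixels:

```python
# Sketch only: 'bread.jpg' is a placeholder and must already match the model's input size.
import requests

with open("bread.jpg", "rb") as f:
    resp = requests.post("http://localhost:5000/", files={"image": f})
print(resp.text)  # e.g. "breads" or "no_breads"
```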
{
"source": "jphacks/KB_02",
"score": 3
} |
#### File: KB_02/source/face_recognizer.py
```python
import matplotlib
matplotlib.use('TkAgg')
import cv2
import math
import numpy as np
import os
from PIL import Image, ImageDraw, ImageFont
from speech_recognizer import SpeechRecognizer
from Face import GeoInfo,Face
from graph_drawer import GraphDrawer,Graph
from word_analyze import WordAnalyze
from omoroi_data import OmoroiData
from face_history import FaceHistories
from fig2img import fig2data,fig2img
import matplotlib.pyplot as plt
import numpy
import time
face_feature_path = "../dataset/haarcascade_frontalface_alt.xml"
smile_feature_path = "../dataset/smiled_04.xml"
def _rect_parallel_translation(lrect,translation):
lrect[0:2] = [lrect[0]+translation[0],lrect[1]+translation[1]]
class FaceRecognizer(object):
def __init__(self,capture):
self.faces = []
self.smile_matrix = [[]] * 50
# カメラからキャプチャー
self.cap = capture
# 顔データの履歴
self.histories = FaceHistories()
def get_features(self, image, feature_path,min_neighbors=1,min_size=(200, 200)):
"""
与えた特徴量, 教師によって学習したcascade adaboostで領域分割.
input
image: cv2.imreadで読み取った変数
feature_path: trainingデータ
min_size: 顔判定する最小サイズ指定
output
faces: 顔の座標情報
"""
image = np.array(image)
cascade = cv2.CascadeClassifier(feature_path)
#グレースケール
frame_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#顔判定
"""
minSizeで顔判定する際の最小の四角の大きさを指定できる.
(小さい値を指定し過ぎると顔っぽい小さなシミも顔判定されるので)
"""
faces = cascade.detectMultiScale(frame_gray, scaleFactor=1.1, minNeighbors=min_neighbors, minSize=min_size)
return faces
def update(self, speech, recogflg, min_size=(200, 200)):
"""
顔を四角で囲うメソッド.
input
image: cv2.imreadで読み取った変数
speech: 音声認識によって推定されたstringのテキスト
min_size: 顔判定する最小サイズ指定
output:
enclosed_facs: 囲まれた顔画像
"""
ret, image = self.cap.read()
# 出力結果を格納する変数
enclosed_faces = image
# 顔認識
face_rects = self.get_features(image, face_feature_path, min_neighbors=1, min_size=min_size)
# 顔認識の枠の色
color_face = (255, 0, 0)
# 笑顔認識の枠の色
color_smile = (0, 0, 255)
# 新しい顔
new_faces = []
for face_rect in face_rects:
new_faces.append(Face(geoinfo=GeoInfo(face_rect)))
image_ = Image.fromarray(np.uint8(image))
# 現在トラッキングしている顔を更新
self.update_faces(self.faces, new_faces, image_)
# 音声信号のない場合は、発話者の判定処理をスキップする
if True:
speaker_index = -1
value = 0
for i, face in enumerate(self.faces):
tmp = face.mouth_images.compute_variability()
if tmp > value:
speaker_index = i
value = tmp
else:
speaker_index = -1
for i, face in enumerate(self.faces):
# 笑顔認識 顔の下半分だけ笑顔(笑顔唇)判定
x1, y1 = face.geoinfo.coordinates[0][0], face.geoinfo.coordinates[0][1]+face.geoinfo.length[1]/2
x2, y2 = face.geoinfo.coordinates[1]
face_image = image_.crop((x1, y1, x2, y2))
smile_rects = self.get_features(face_image, smile_feature_path, min_neighbors=1,
min_size=(int(face.geoinfo.length[0]*0.25), int(face.geoinfo.length[1]*0.25)))
#[For debug]認識している笑顔の唇の枠表示
#for smile_rect in smile_rects:
# _rect_parallel_translation(smile_rect,face.geoinfo.coordinates[0])
# _rect_parallel_translation(smile_rect,(0,face.geoinfo.length[1]/2))
# smile_geoinfo = GeoInfo(smile_rect)
# cv2.rectangle(enclosed_faces,
# smile_geoinfo.coordinates[0],
# smile_geoinfo.coordinates[1],
# (0,0,255), thickness=3)
#ひとつでも笑顔唇を認識している場合「笑っている」と判定
if len(smile_rects) > 0:
face.is_smiling = True
frame_color = color_smile
else:
face.is_smiling = False
frame_color = color_face
if i == speaker_index:
# 顔の下半分の領域から口を含む矩形を決め打ちで表示
w = x2 - x1
h = y2 - y1
if(recogflg):
cv2.rectangle(enclosed_faces, (x1 + int(w * 0.25), y1 + int(h * 0.3)),
(x2 - int(w * 0.25), y2 - int(h * 0.1)), (255, 0, 255), thickness=3)
cv2.rectangle(enclosed_faces,
face.geoinfo.coordinates[0],
face.geoinfo.coordinates[1],
frame_color, thickness=3)
# enclosed_faces = self.write_speech(enclosed_faces,
# face.geoinfo.coordinates[0],
# face.geoinfo.coordinates[1],
# speech, str(i))
face.update()
return enclosed_faces
def update_faces(self, faces, new_faces, image):
"""
顔を更新
input
faces:現在tracking中の顔リスト
new_faces:新たにdetectした顔リスト
"""
#今現在トラッキングしている顔座標と新たに取得した顔座標同士の距離を計算
distances_matrix = []
for face in faces:
distances = []
for new_face in new_faces:
euc_distance = (face.geoinfo.center[0] - new_face.geoinfo.center[0])**2 \
+ (face.geoinfo.center[1] - new_face.geoinfo.center[1])**2
distances.append(euc_distance)
distances_matrix.append(distances)
face_indexes = [ i for i in xrange(len(faces))]
new_face_indexes = [ i for i in xrange(len(new_faces))]
        # O((number of faces)^3) complexity; could be reduced to O((number of faces)^2 log(number of faces)).
while(len(face_indexes)>0):
if (len(new_face_indexes) == 0):
face_indexes.reverse()
                # Faces that were tracked but have disappeared; save them to the history before deleting them
                for i in face_indexes:
                    print 'A face disappeared'
                    if faces[i].face_images.is_enough_images():
                        # Save to the history only when enough images have been collected
self.histories.set_history(faces[i].face_images.images, faces[i])
del faces[i]
break
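            # Greedy matching: repeatedly pair the tracked face and the new face whose
            # centers are closest (squared Euclidean distance), then remove both from the pools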
min_distance = np.inf
for i in xrange(len(face_indexes)):
for j in xrange(len(new_face_indexes)):
if ( distances_matrix[face_indexes[i]][new_face_indexes[j]] < min_distance):
min_distance = distances_matrix[face_indexes[i]][new_face_indexes[j]]
min_i = i
min_j = j
faces[face_indexes[min_i]].geoinfo = new_faces[new_face_indexes[min_j]].geoinfo
            # Get the image region corresponding to geoinfo and store it in the face
            geoinfo = new_faces[new_face_indexes[min_j]].geoinfo
            # Processing of the face image
x1, y1 = geoinfo.coordinates[0]
x2, y2 = geoinfo.coordinates[1]
face_image = np.asarray(image.crop((x1, y1, x2, y2)))
faces[face_indexes[min_i]].face_images.add_face_image(face_image)
            # Processing of the mouth image; the region size is hard-coded
w = x2 - x1
h = (y2 - y1) / 2
y3 = y1 + h
mouth_image = np.asarray(image.crop((x1 + int(w * 0.25), y3 + int(h * 0.3),
x2 - int(w * 0.25), y2 - int(h * 0.1))))
faces[face_indexes[min_i]].mouth_images.add_mouth_image(mouth_image)
del face_indexes[min_i]
del new_face_indexes[min_j]
        # New faces were found; check whether they appear in the past history
        for j in new_face_indexes:
            print 'A new face appeared'
            new_face = new_faces[j]
            # Get the face image
face_image = image.crop((new_face.geoinfo.coordinates[0][0],
new_face.geoinfo.coordinates[0][1],
new_face.geoinfo.coordinates[1][0],
new_face.geoinfo.coordinates[1][1],))
face_image_ = np.asarray(face_image)
            # Add the face after matching it against the history
            faces.append(self.histories.get_history(face_image_, new_face))
            # faces.append(new_faces[j])
def write_speech(self, image, coordinates, length, speech, label):
"""
顔枠の下に文字を書き込むメソッド.
input
image: 元画像(フレーム)
coordinates: 顔枠の左上の座標
length: 縦横の長さ
speech: 発話内容
label: 人物分類の結果
output:
image: 顔フレームの下に発話を書き込んだもの
"""
        # # Convert the image into a format that Pillow can handle
        # img_edit = Image.fromarray(image)
        # font = cv2.FONT_HERSHEY_PLAIN
        # font_size = 3.5
        # text = "wei"
        # # Write the text
        # cv2.putText(image, text, (coordinates[0], length[1] + 40), font, font_size,(255,255,0))
img_edit = Image.fromarray(image)
font = ImageFont.truetype('../fonts/ヒラギノ角ゴシック W0.ttc',
40, encoding='unic')
        # Positive/negative sentiment judgement (todo)
        #words = word_analyze.morphological_analysis(speech)
draw = ImageDraw.Draw(img_edit)
draw.text((coordinates[0], length[1]), label, font = font, fill='#FFFFFF')
draw.text((coordinates[0], length[1]), speech, font = font, fill='#FFFFFF')
image = np.asarray(img_edit)
return image
def get_mean_of_smiles(self):
ret = 0
for face in self.faces:
ret += int(face.is_smiling)
return ret/(len(self.faces)+1e-6)
if __name__ == '__main__':
word_analyze = WordAnalyze()
capture = cv2.VideoCapture(0)
face_recognizer = FaceRecognizer(capture=capture)
speech_recognizer = SpeechRecognizer()
speech_recognizer.start()
w=int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH ))
h=int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT ))
fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
graph_drawer = GraphDrawer()
graph_drawer.start()
all_omorosa = OmoroiData()
all_graph = Graph(color=(1.0,0.0,1.0),ylim=[all_omorosa.omoroi_min-1.0,all_omorosa.omoroi_max+1.0],ylabel="Omorosa")
#if os.path.exists('movie.avi'):
# os.remove('movie.avi')
#out = cv2.VideoWriter('movie.avi',fourcc,7.5,(w,h))
count = 0
while(True):
        # Get a frame from the video stream
speech = speech_recognizer.get_speech()
recogflg = speech_recognizer.recogflg
print "uwaaaaaaaaa----- " + str(recogflg)
        # Receive the cropped image for this frame
frame_face = face_recognizer.update(speech,recogflg)
all_omorosa.update_omoroi_sequence(face_recognizer.get_mean_of_smiles())
        # Get the recent subsequence of the excitement level
length = 20
all_omoroi_subsequence = all_omorosa.get_subsequence(all_omorosa.omoroi_sequence,length)
all_graph.set_graph_data(x = numpy.arange(len(all_omoroi_subsequence)),
y = all_omoroi_subsequence,
pos = (w-300,h-300))
        # Update the graphs held by graph_drawer
graphs =[]
for face in face_recognizer.faces:
graphs.append(face.graph)
graphs.append(all_graph)
graph_drawer.reprace_graphs(graphs)
frame_face = graph_drawer.draw_graphs(frame_face)
#out.write(np.asarray(frame_face,np.uint8))
        # Display; skip the first few frames (they are unstable right after startup)
        if(count > 10):
cv2.imshow('FACE', frame_face)
else:
count += 1
#if omorosa.omoroi_sequence[-1] > omorosa.omoroi_max*0.9:
# _,image = face_recognizer.cap.read()
# cv2.imwrite("image.png",image )
# break
#if omorosa.omoroi_sequence[-1] > omorosa.omoroi_max*0.9:
# _,image = face_recognizer.cap.read()
# cv2.imwrite("image.png",image )
# break
        # Exit when q is pressed
k = cv2.waitKey(1)
if k == ord('q'):
break
capture.release()
#out.release()
cv2.destroyAllWindows()
graph_drawer.stop()
speech_recognizer.stop()
```
#### File: KB_02/source/preprocessing.py
```python
import numpy
import re
import os
import errno
import sys
import MeCab
import cPickle
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def divide_sentence_and_label():
load_dir_path = "../dataset/sentiment/"
save_sentences_dir_path = "../dataset/sentences/"
save_trdata_dir_path = "../dataset/training_dataset/"
make_sure_path_exists(save_sentences_dir_path)
make_sure_path_exists(save_trdata_dir_path)
labeled_text_file = open(load_dir_path + "labeled_text.txt", "r")
    sentences = open(save_sentences_dir_path + "labeled_sentences.txt", "w")
labels=[]
inline = labeled_text_file.readline()
while inline:
label,sentence = inline.split(":")
labels.append(label)
sentences.writelines(sentence)
inline = labeled_text_file.readline()
cPickle.dump(labels,open(save_trdata_dir_path + "labels.pkl","wb"))
def rawtext2sentences():
load_dir_path = "../dataset/nuc/"
save_dir_path = "../dataset/sentences/"
make_sure_path_exists(save_dir_path)
    senteneces = open(save_dir_path + "unlabeled_sentences.txt", "w")
for doc_index in xrange(1, 130):
text_file = open(load_dir_path + "data%03d.txt" % doc_index, "r")
inline = text_file.readline()
while inline:
if not re.search(r"@", inline):
                # Extract the conversational sentence
sentence = inline.split(":")[-1]
senteneces.writelines(sentence)
inline = text_file.readline()
senteneces.close()
def remove_specific_symbols(sentence_divided):
i = 0
while(i < len(sentence_divided)):
w = sentence_divided[i]
if w == "<" or w == "(":
print w
j = i
while(j < len(sentence_divided)):
w = sentence_divided[j]
if w == ">" or w == ")" or j == len(sentence_divided)-1:
del sentence_divided[i:j+1]
print "deleted"
break
else:
j += 1
else:
i += 1
def sentences2divsentences(filename):
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
load_dir_path = "../dataset/sentences/"
save_dir_path = "../dataset/sentences_divided/"
make_sure_path_exists(save_dir_path)
t = MeCab.Tagger("-Owakati")
sentences = open(load_dir_path + filename + '.txt', "r")
line = sentences.readline()
sentences_divided = []
while line:
print line
sentence_divided = t.parse(line).split(" ")
        # Remove the trailing three tokens [。, \r, \n]
sentence_divided = sentence_divided[:-3]
remove_specific_symbols(sentence_divided)
sentences_divided.append(sentence_divided)
line = sentences.readline()
cPickle.dump(sentences_divided, open(save_dir_path + filename + '.pkl', "wb"))
sentences.close()
def remove_short_sentences(sentences,len_threshold=10):
print "len(sentences) before removing short sentences" + str(len(sentences))
index = 0
while index < len(sentences):
sentence = sentences[index]
if(len(sentence)<len_threshold):
del sentences[index]
else:
index += 1
print "len(sentences) after removing short sentences" + str(len(sentences) )
```
#### File: KB_02/train_nn/train_sup.py
```python
from docopt import docopt
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import cPickle
from vat.source import optimizers
from vat.source import costs
import os
import errno
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
from vat.models.fnn import FNN
import vat.source.layers as L
class FNN_sentiment(FNN):
def __init__(self,layer_sizes):
self.linear_layers = []
self.bn_layers = []
self.act_layers = []
self.params = []
for m,n in zip(layer_sizes[:-1],layer_sizes[1:]):
l = L.Linear(size=(m,n))
bn = L.BatchNormalization(size=(n))
self.linear_layers.append(l)
self.bn_layers.append(bn)
self.params += l.params + bn.params
for i in xrange(len(self.linear_layers)-1):
self.act_layers.append(L.relu)
self.act_layers.append(L.softmax)
def forward_for_finetuning_batch_stat(self,input):
return self.forward(input,finetune=True)
def forward_no_update_batch_stat(self,input,train=True):
return self.forward(input,train,False)
def forward(self,input,train=True,update_batch_stat=True,finetune=False):
h = input
for l,bn,act in zip(self.linear_layers,self.bn_layers,self.act_layers):
h = l(h)
h = bn(h,train=train,update_batch_stat=update_batch_stat,finetune=finetune)
h = act(h)
return h
def train(args,x_train,t_train,x_test,t_test,ul_x_train=None):
print args
numpy.random.seed(int(args['--seed']))
layer_sizes = [int(layer_size) for layer_size in args['--layer_sizes'].split('-')]
model = FNN_sentiment(layer_sizes=layer_sizes)
x = T.matrix()
ul_x = T.matrix()
t = T.ivector()
if(args['--cost_type']=='MLE'):
cost = costs.cross_entropy_loss(x=x,t=t,forward_func=model.forward_train)
elif(args['--cost_type']=='L2'):
cost = costs.cross_entropy_loss(x=x,t=t,forward_func=model.forward_train) \
+ costs.weight_decay(params=model.params,coeff=float(args['--lamb']))
elif(args['--cost_type']=='AT'):
cost = costs.adversarial_training(x,t,model.forward_train,
'CE',
epsilon=float(args['--epsilon']),
lamb=float(args['--lamb']),
norm_constraint = args['--norm_constraint'],
forward_func_for_generating_adversarial_examples=model.forward_no_update_batch_stat)
elif(args['--cost_type']=='VAT'):
cost = costs.virtual_adversarial_training(x,t,model.forward_train,
'CE',
epsilon=float(args['--epsilon']),
lamb=float(args['--lamb']),
norm_constraint = args['--norm_constraint'],
num_power_iter = int(args['--num_power_iter']),
x_for_generating_adversarial_examples=ul_x,
forward_func_for_generating_adversarial_examples=model.forward_no_update_batch_stat)
elif(args['--cost_type']=='VAT_finite_diff'):
cost = costs.virtual_adversarial_training_finite_diff(x,t,model.forward_train,
'CE',
epsilon=float(args['--epsilon']),
lamb=float(args['--lamb']),
norm_constraint = args['--norm_constraint'],
num_power_iter = int(args['--num_power_iter']),
x_for_generating_adversarial_examples=ul_x,
unchain_y = False,
forward_func_for_generating_adversarial_examples=model.forward_no_update_batch_stat)
nll = costs.cross_entropy_loss(x=x,t=t,forward_func=model.forward_test)
error = costs.error(x=x,t=t,forward_func=model.forward_test)
optimizer = optimizers.ADAM(cost=cost,params=model.params,alpha=float(args['--initial_learning_rate']))
f_train = theano.function(inputs=[], outputs=cost, updates=optimizer.updates,
givens={
x:x_train,
t:t_train,
ul_x:ul_x_train},on_unused_input='warn')
f_nll_train = theano.function(inputs=[], outputs=nll,
givens={
x:x_train,
t:t_train})
f_nll_test = theano.function(inputs=[], outputs=nll,
givens={
x:x_test,
t:t_test})
f_error_train = theano.function(inputs=[], outputs=error,
givens={
x:x_train,
t:t_train})
f_error_test = theano.function(inputs=[], outputs=error,
givens={
x:x_test,
t:t_test})
f_lr_decay = theano.function(inputs=[],outputs=optimizer.alpha,
updates={optimizer.alpha:theano.shared(numpy.array(args['--learning_rate_decay']).astype(theano.config.floatX))*optimizer.alpha})
statuses = {}
statuses['nll_train'] = []
statuses['error_train'] = []
statuses['nll_test'] = []
statuses['error_test'] = []
n_train = numpy.asarray(x_train.get_value().shape[0],theano.config.floatX)
n_test = numpy.asarray(x_test.get_value().shape[0],theano.config.floatX)
statuses['nll_train'].append(f_nll_train())
statuses['error_train'].append(f_error_train()/n_train)
statuses['nll_test'].append(f_nll_test())
statuses['error_test'].append(f_error_test()/n_test)
print "[Epoch]",str(-1)
print "nll_train : " , statuses['nll_train'][-1], "error_train : ", statuses['error_train'][-1], \
"nll_test : " , statuses['nll_test'][-1], "error_test : ", statuses['error_test'][-1]
print "training..."
make_sure_path_exists("./trained_model")
for epoch in xrange(int(args['--num_epochs'])):
cPickle.dump((statuses,args),open('./trained_model/'+'tmp-' + args['--save_filename'],'wb'),cPickle.HIGHEST_PROTOCOL)
### update parameters ###
f_train()
#########################
statuses['nll_train'].append(f_nll_train())
statuses['error_train'].append(f_error_train()/n_train)
statuses['nll_test'].append(f_nll_test())
statuses['error_test'].append(f_error_test()/n_test)
print "[Epoch]",str(epoch)
print "nll_train : " , statuses['nll_train'][-1], "error_train : ", statuses['error_train'][-1], \
"nll_test : " , statuses['nll_test'][-1], "error_test : ", statuses['error_test'][-1]
f_lr_decay()
return f_error_train()/n_train, f_error_test()/n_test
#make_sure_path_exists("./trained_model")
#cPickle.dump((model,statuses,args),open('./trained_model/'+args['--save_filename'],'wb'),cPickle.HIGHEST_PROTOCOL)
import numpy
from scipy import linalg
from sklearn.base import TransformerMixin, BaseEstimator
"""
class ZCA(BaseEstimator, TransformerMixin):
def __init__(self, regularization=10**-5, copy=False):
self.regularization = regularization
def fit(self, _X, y=None):
X = _X.copy()
self.mean_ = numpy.mean(X, axis=0)
X -= self.mean_
sigma = numpy.dot(X.T,X) / X.shape[0] + self.regularization*numpy.eye(X.shape[1])
U, S, V = linalg.svd(sigma)
tmp = numpy.dot(U, numpy.diag(1/numpy.sqrt(S)))
self.components_ = numpy.dot(tmp, U.T)
return self
def transform(self, X):
X_transformed = X - self.mean_
X_transformed = numpy.dot(X_transformed, self.components_.T)
return X_transformed
"""
if __name__=='__main__':
args = docopt(__doc__)
x,y = cPickle.load(open("../dataset/training_dataset/dataset.pkl"))
l_x = x[numpy.where(y != 0.5)[0]].astype(theano.config.floatX)
l_y = y[numpy.where(y != 0.5)[0]].astype("int32")
ul_x = x[numpy.where(y == 0.5)[0]].astype(theano.config.floatX)
num_samples = l_x.shape[0]
num_groups = 10
print "chance level :" + str(1-numpy.mean(l_y))
accs = numpy.zeros(num_groups)
for i in xrange(num_groups):
valid_index = numpy.arange(int(i*num_samples/num_groups),int((i+1)*num_samples/num_groups))
train_index =numpy.delete(numpy.arange(num_samples), valid_index)
x_train = theano.shared(l_x[train_index])
t_train = theano.shared(l_y[train_index])
x_valid = theano.shared(l_x[valid_index])
t_valid = theano.shared(l_y[valid_index])
ul_x_train = theano.shared(numpy.concatenate((l_x[train_index],ul_x),axis=0))
error_train, error_valid = train(args,x_train,t_train,x_valid,t_valid,x_train)
accs[i] = 1-error_valid
print "valid error : " + str(accs.mean()) + "%"
cPickle.dump(accs.mean(),open("Valid_accuracy_" + args["--cost_type"] + "_epsilon" + args["--epsilon"] + ".pkl","wb"))
``` |
{
"source": "jphacks/KB_1712",
"score": 3
} |
#### File: KB_1712/model/analysis_receipt.py
```python
from sklearn.externals import joblib
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import numpy as np
import unicodedata
import json
import os
def merge_block(head_block, block_list):
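    # The head block's "description" contains the whole OCR text split into lines; word-level
    # blocks are accumulated until their concatenated text matches the current guide line,
    # and the accumulated blocks are then merged into a single bounding box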
guide_text = head_block["description"].split("\n")
guide_text_index = 0
merged_block_list = []
temp_block_list = []
check_text = ""
for block in block_list:
check_text += block["description"]
temp_block_list.append(block)
if check_text == guide_text[guide_text_index].replace(' ',''):
merged_block_list.append({'boundingPoly': {'vertices': [temp_block_list[0]["boundingPoly"]["vertices"][0],\
temp_block_list[-1]["boundingPoly"]["vertices"][1],\
temp_block_list[-1]["boundingPoly"]["vertices"][2],\
temp_block_list[0]["boundingPoly"]["vertices"][3]]},\
'description': check_text})
check_text = ""
temp_block_list = []
guide_text_index += 1
return merged_block_list
def translate_to_plot_box(block_list):
return [{"x" : block["boundingPoly"]["vertices"][0]["x"], \
"y" : block["boundingPoly"]["vertices"][0]["y"], \
"w" : block["boundingPoly"]["vertices"][2]["x"] - block["boundingPoly"]["vertices"][0]["x"], \
"h" : block["boundingPoly"]["vertices"][2]["y"] - block["boundingPoly"]["vertices"][0]["y"], \
} for block in block_list]
def plot_reciept(image_path, box_list):
im = np.array(Image.open(image_path), dtype=np.uint8)
# Create figure and axes
fig,ax = plt.subplots(1)
fig.set_size_inches(20, 10)
# Display the image
ax.imshow(im)
# Create a Rectangle patch
for box in box_list:
rect = patches.Rectangle((box["x"],box["y"]),box["w"],box["h"],linewidth=1,edgecolor='r',facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.show()
def get_un_count(sentence,target_type):
return [unicodedata.name(ch).split(" ")[0] for ch in sentence].count(target_type)
def translate_train_data(head_block, block_list):
un_ja_types = ["CJK", "HIRAGANA", "KATAKANA"]
un_digit_types = ["DIGIT"]
return [{"x": (block["boundingPoly"]["vertices"][0]["x"] - head_block["boundingPoly"]["vertices"][0]["x"]) / head_block["boundingPoly"]["vertices"][1]["x"],\
"y": (block["boundingPoly"]["vertices"][0]["y"] - head_block["boundingPoly"]["vertices"][0]["y"]) / head_block["boundingPoly"]["vertices"][2]["y"],\
"num_ja": sum([get_un_count(block["description"], types) for types in un_ja_types]) / len(block["description"]),\
"num_digit": sum([get_un_count(block["description"], types) for types in un_digit_types]) / len(block["description"]),\
"description": block["description"]\
} for block in block_list]
def get_train_data(json_file):
head_block, content_block_list = json_file[0], json_file[1:]
merged_block_list = merge_block(head_block, content_block_list)
return translate_train_data(head_block, merged_block_list)
def convert_price(price):
return int("".join([c for c in price if unicodedata.name(c).split(' ')[0] == "DIGIT"]))
def parse_info(result):
date = result[result["label"] == "date"]["description"].tolist()[0]
product = result[result["label"] == "product"]
price = result[result["label"] == "price"]
cleared_price = [ convert_price(price[ abs(price["y"] - product_y) < 0.005]["description"].tolist()[0]) for product_y in product["y"] ]
return [{"date" : date, "product" : pro, "price" : pri} for pro, pri in zip(product["description"], cleared_price)]
def predict_receipt(responses_json_path, model_path):
responses_json = json.load(open(responses_json_path, 'r'))
responses_df = pd.DataFrame([data for data in get_train_data(responses_json["responses"][0]["textAnnotations"])])
features_test = np.array([responses_df['x'].tolist(),responses_df['y'].tolist(),\
responses_df['num_digit'].tolist(),responses_df['num_ja'].tolist()], np.float64)
features_test = features_test.T
clf = joblib.load(model_path)
predict_result = clf.predict(features_test)
merged_result = pd.DataFrame([responses_df["description"],responses_df["x"],responses_df["y"] ,predict_result]).T
merged_result.columns = ['description', 'x', 'y', 'label']
return parse_info(merged_result)
``` |
{
"source": "jphacks/KB_1804",
"score": 3
} |
#### File: KB_1804/jp_clova/check.py
```python
import requests
import time
class check_clova:
def __init__(self,):
self.num = 0
self.url = "https://4b9a3cba.ngrok.io/rpi0"
self.pre = None
def check(self,):
# print(url)
r = requests.get(self.url)
# print('pre',pre)
# print('num',num)
self.pre = self.num
# print('pre',pre)
# print('num',num)
if r.text.find('<!DOCTYPE html>') != -1:
print('text')
#None
else:
# print('aaaaa')
received_text = r.text.split('_')[0]
self.num = int(r.text.split('_')[-1])
print('received text:', received_text)
# print(num)
self.url = "https://4b9a3cba.ngrok.io/rpi" + str(self.num)
return received_text
"""
num = 0
url = "https://4b9a3cba.ngrok.io/rpi0"
pre = None
while True:
print(url)
r = requests.get(url)
print('pre',pre)
print('num',num)
pre = num
print('pre',pre)
print('num',num)
if r.text.find('<!DOCTYPE html>') != -1:
print('text')
#None
else:
# print('aaaaa')
# print(r.text)
num = int(r.text.split('_')[-1])
print(num)
# any process.
url = "https://4b9a3cba.ngrok.io/rpi" + str(num)
time.sleep(5)
"""
```
#### File: KB_1804/venv_YOLO/Human_Reco.py
```python
import cv2
def main():
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
ret, frame = cap.read()
        # Load an input image
        # img = cv2.imread("input.jpg")
        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Build a person detector from HoG features + a linear SVM
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}
        # Detect people with the detector built above
human, r = hog.detectMultiScale(gray, **hogParams)
        # Enclose each detected person in a red rectangle
for (x, y, w, h) in human:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 200), 3)
        # Show the result
        cv2.imshow("Frame", frame)
        # cv2.imwrite("result.jpg", frame)
if __name__ == '__main__':
main()
``` |
{
"source": "jphacks/KB_1805",
"score": 3
} |
#### File: KB_1805/tool/img2md.py
```python
import pyocr
import pyocr.builders
import cv2
from PIL import Image
import numpy as np
import sys
import os
import codecs
def img2wordbox(img):
tools = pyocr.get_available_tools()
if len(tools) == 0:
print("No OCR tool found")
exit()
tool = tools[0]
# Word Recognition
word_box = tool.image_to_string(
img,
lang="eng",
builder=pyocr.builders.LineBoxBuilder()
)
return word_box
def delwords(img, word_box):
for box in word_box:
hight = box.position[1][0] - box.position[0][0]
width = box.position[1][1] - box.position[0][1]
white_img = Image.new("L", (hight, width), 255)
img.paste(white_img, (box.position[0][0], box.position[0][1]))
return img
def img2figure(img, texts, output_path):
if(os.path.exists(output_path + "/fig") == False):
os.mkdir(output_path + "/fig")
img_word_deleted = delwords(img, texts)
edge = np.asarray(img_word_deleted)
edge = cv2.cvtColor(edge, cv2.COLOR_BGR2GRAY)
ret, edge = cv2.threshold(edge, np.average(edge), 255, 0)
edge = cv2.bitwise_not(edge)
edge, contours, hierarchy = cv2.findContours(edge, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
fig_pos = []
figure = np.asarray(img)
idx = 0
MINIMUM_FIGURE_SIZE = 100
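    # Figures are located by erasing all recognized words, thresholding and inverting the
    # result, and taking the outer contours; every contour whose bounding box is larger than
    # MINIMUM_FIGURE_SIZE pixels is cropped from the original image and saved as fig/<index>.png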
for (i, cnt) in enumerate(contours):
if(hierarchy[0][i][3] != -1): continue
x,y,w,h = cv2.boundingRect(cnt)
if w * h < MINIMUM_FIGURE_SIZE: continue
fig_pos.append([x,y,w,h])
cv2.imwrite((output_path + "/fig/%d.png" %(idx)), figure[y:y+h, x:x+w])
idx += 1
return fig_pos
def img2md(input_path, output_path):
if(os.path.exists(output_path) == False):
os.mkdir(output_path)
# Load Image
img = cv2.imread(input_path, cv2.IMREAD_GRAYSCALE)
if(img is None):
sys.stderr.write("Cannot open file!!")
exit(1)
thresh, img = cv2.threshold(img, np.average(img), 255, cv2.THRESH_BINARY)
img = Image.fromarray(np.uint8(img))
word_box = img2wordbox(img)
fig_box = img2figure(Image.open(input_path), word_box, output_path)
words = []
for box in word_box:
words.append(box.content)
f = codecs.open((output_path + "/output.md"), "w", "utf-8")
word_idx = 0
figure_idx = 0
while(word_idx < len(word_box) or figure_idx < len(fig_box)):
if(word_idx >= len(word_box)):
f.writelines(" \n" %(figure_idx, figure_idx))
figure_idx += 1
elif(figure_idx >= len(fig_box)):
f.writelines("%s \n" %(word_box[word_idx].content))
word_idx += 1
else:
if(word_box[word_idx].position[0][1] <= fig_box[figure_idx][0]):
f.writelines("%s \n" %(word_box[word_idx].content))
word_idx += 1
else:
f.writelines(" \n" %(figure_idx, figure_idx))
figure_idx += 1
f.close()
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_path', type=str)
parser.add_argument('output_path', type=str)
return parser.parse_args()
def main():
args = get_args()
img2md(args.input_path, args.output_path)
if __name__ == "__main__":
main()
``` |
{
"source": "jphacks/KB_1810",
"score": 3
} |
#### File: KB_1810/learn/main.py
```python
from darkflow.net.build import TFNet
import cv2
import os
import json
import numpy as np
from PIL import Image
import random
import csv
import sys
import math
from scipy import genfromtxt
from sklearn.cluster import KMeans
import pandas as pd
from sklearn.externals import joblib
from sklearn import svm
import matplotlib
save_path = 'sample_img/output/'
open_path = 'sample_img/'
# Detection function
def inputdata(image_name):
os.chdir('/var/www/KB_1810/learn/')
options = {"model": "/var/www/KB_1810/learn/cfg/yolov2-voc.cfg", "load": "/var/www/KB_1810/learn/bin/yolo_learn.weights", "threshold": 0.4, "gpu": 0.3}
tfnet = TFNet(options)
input_image = image_name
image_folder = "sample_img"
current_path = os.getcwd()
output_file = "out"
current_path = os.path.join(current_path,image_folder)
output_path = os.path.join(current_path,output_file)
if not os.path.exists(output_path):
print('Creating output path {}'.format(output_path))
os.mkdir(output_path)
src = cv2.imread(os.path.join(current_path,input_image))
dst = src
#cv2.imshow("img", src)
result, result1 = tfnet.return_predict(src,dst)
print(result)
#cv2.imshow("img_out", dst)
cv2.waitKey()
cv2.imwrite(output_path + '\\' + input_image, dst)
cv2.imwrite("result1.png",dst)
return result1
# Turn each detected region into its own image
def image_split(img_name):
global save_path
global open_path
#save_path1 = 'sample_img'
img = img_name
#detectionする boxdataにはobjectの座標が入る
boxdata = inputdata(img)
subregion = list()
pic = Image.open(open_path + img)
    # Split the image into the detected regions
for boxs in boxdata:
box = (int(boxs[0]), int(boxs[2]), int(boxs[1]), int(boxs[3]))
#print(box)
subregion.append(pic.crop(box))
for num in range(len(boxdata)):
subregion[num].save(save_path +str(num) + 'bus.jpg',"JPEG")
return boxdata
# Extract the point in the point set ps that is closest to point p0
def serch_neighbourhood(p0, ps):
L = np.array([])
for i in range(ps.shape[0]):
L = np.append(L,np.linalg.norm(ps[i]-p0))
return ps[np.argmin(L)]
# Color extraction from the detected region images
def color_detection(image_name):
box = image_split(image_name)
print(box)
color = np.empty((0,3), int)
count1 = 0
while(count1 <= len(box) -1):
#global save_path
#global open_path
z=str(count1)
gazou = save_path + z + 'bus.jpg'
imag = Image.open(gazou)
a = imag.size[0]
r = imag.size[1]
#img = np.array(Image.open(gazosu))
img = cv2.imread(gazou)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#print(img)
img_base = img.reshape(-1,3)
img_flat=img_base
color = np.append(color, img_flat,axis = 0)
count1 = count1 + 1
'''
#クラスタリング
kmeans_modeluv = KMeans(n_clusters=5).fit(color)
labelsuv = kmeans_modeluv.labels_
#print 'center' + str(kmeans_modeluv.cluster_centers_)
#代表色
center = kmeans_modeluv.cluster_centers_
print(center)
#離散化
ps = np.array([[255,255,255],[0,0,0],[159,160,160],[255,0,0],[0,0,255],[0,105,62],[255,241,0],[235,97,0],[241,158,194],[146,7,131],[149,86,41],[128,128,0],[0,0,128]])
for i, repcolor in enumerate(center):
center[i] = serch_neighbourhood(repcolor ,ps)
print(center)
dis_center = np.array([])
for i in range(len(center)):
for j in range(len(ps)):
if all(center[i] == ps[j]):
dis_center = np.append(dis_center, [j])
print(dis_center)
#降順
labels = kmeans_modeluv.labels_
print(np.sum(labels == 0), np.sum(labels == 1), np.sum(labels == 2), np.sum(labels == 3), np.sum(labels == 4))
print(len(labels))
'''
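    # Cluster all pixels of the detected regions into 5 representative colors with k-means;
    # each representative color is paired with its share of pixels, sorted by that share,
    # and finally snapped to the nearest color of the fixed 13-color palette ps below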
kmeans_modeluv = KMeans(n_clusters=5).fit(color)
labelsuv = kmeans_modeluv.labels_
#print 'center' + str(kmeans_modeluv.cluster_centers_)
    # Representative colors (cluster centers)
center = kmeans_modeluv.cluster_centers_
#print(center)
img_compuv=kmeans_modeluv.cluster_centers_[kmeans_modeluv.labels_]
uvlabel2 = np.c_[img_compuv,labelsuv]
rows, cols = np.where(uvlabel2 != 0)
np.delete(uvlabel2,rows[np.where(cols==3)],0)
uv20 = np.delete(uvlabel2,np.where(uvlabel2 != 0)[0][np.where(np.where(uvlabel2 != 0)[1] == 3)],0)
uv20 = uv20[0]
rows, cols = np.where(uvlabel2 != 1)
np.delete(uvlabel2,rows[np.where(cols==3)],0)
uv21 = np.delete(uvlabel2,np.where(uvlabel2 != 1)[0][np.where(np.where(uvlabel2 != 1)[1] == 3)],0)
uv21 = uv21[0]
rows, cols = np.where(uvlabel2 != 2)
np.delete(uvlabel2,rows[np.where(cols==3)],0)
uv22 = np.delete(uvlabel2,np.where(uvlabel2 != 2)[0][np.where(np.where(uvlabel2 != 2)[1] == 3)],0)
uv22 = uv22[0]
rows, cols = np.where(uvlabel2 != 3)
np.delete(uvlabel2,rows[np.where(cols==3)],0)
uv23 = np.delete(uvlabel2,np.where(uvlabel2 != 3)[0][np.where(np.where(uvlabel2 != 3)[1] == 3)],0)
uv23 = uv23[0]
rows, cols = np.where(uvlabel2 != 4)
np.delete(uvlabel2,rows[np.where(cols==3)],0)
uv24 = np.delete(uvlabel2,np.where(uvlabel2 != 4)[0][np.where(np.where(uvlabel2 != 4)[1] == 3)],0)
uv24 = uv24[0]
uv20 = np.delete(uv20,3)
uv21 = np.delete(uv21,3)
uv22 = np.delete(uv22,3)
uv23 = np.delete(uv23,3)
uv24 = np.delete(uv24,3)
#uv24 = np.delete(uv24, 2)
labels = kmeans_modeluv.labels_
uv20 = np.append(uv20, np.sum(labels == 0)/len(labels))
uv21 = np.append(uv21, np.sum(labels == 1)/len(labels))
uv22 = np.append(uv22, np.sum(labels == 2)/len(labels))
uv23 = np.append(uv23, np.sum(labels == 3)/len(labels))
uv24 = np.append(uv24, np.sum(labels == 4)/len(labels))
#uv20 = np.array([uv20])
arr = np.empty((0,4), int)
arr = np.append(arr, np.array([uv20]), axis=0)
arr = np.append(arr, np.array([uv21]), axis=0)
arr = np.append(arr, np.array([uv22]), axis=0)
arr = np.append(arr, np.array([uv23]), axis=0)
arr = np.append(arr, np.array([uv24]), axis=0)
#print(arr)
daihyousp = arr[arr[:,3].argsort()[::-1],:]
#print(daihyousp)
daihyouspi = np.delete(daihyousp, 3, 1)
print(daihyousp)
per_list = daihyousp[:,[3]]
#print(daihyousp)
    # Discretization: snap each representative color to the nearest color of the fixed palette ps
ps = np.array([[255,255,255],[0,0,0],[159,160,160],[255,0,0],[0,0,255],[0,105,62],[255,241,0],[235,97,0],[241,158,194],[146,7,131],[149,86,41],[128,128,0],[0,0,128]])
for i, repcolor in enumerate(daihyouspi):
daihyouspi[i] = serch_neighbourhood(repcolor ,ps)
dis_center = np.array([])
for i in range(len(daihyouspi)):
for j in range(len(ps)):
if all(daihyouspi[i] == ps[j]):
dis_center = np.append(dis_center, [j])
return dis_center,per_list
def color_point(img_name):
center, per_list = color_detection(img_name)
target = np.array([center])
clf = joblib.load('clf.pkl')
point = clf.predict(target)
return center, per_list, point
#center, per_list, point = color_point('kondate.jpg')
#print(center, per_list, point)
``` |
{
"source": "jphacks/KB_1915",
"score": 3
} |
#### File: flask_app/api/mojioko.py
```python
from flask_app import config
import requests
import base64
import json
def mojioko_main(img_url):
img_base64 = img_to_base64("flask_app/static/img/kb-1915.png")
# img_base64 = img_url_to_base64(img_url)
result = request_cloud_vison_api(img_base64)
text_r = result["responses"][0]["fullTextAnnotation"]["text"]
return text_r
# Call the API and return the recognition result as JSON
def request_cloud_vison_api(image_base64):
api_url = config.MURL + config.MKEY
req_body = json.dumps({
'requests': [{
'image': {
'content': image_base64.decode('utf-8') # jsonに変換するためにstring型に変換する
},
'features': [{
'type': 'TEXT_DETECTION',
'maxResults': 10,
}]
}]
})
res = requests.post(api_url, data=req_body)
return res.json()
# Load the image file
def img_to_base64(filepath):
with open(filepath, 'rb') as img:
img_byte = img.read()
return base64.b64encode(img_byte)
def img_url_to_base64(img_url):
img_con = requests.get(img_url).content
return base64.b64encode(img_con)
``` |
{
"source": "jphacks/NG_1703",
"score": 2
} |
#### File: NG_1703/webui/app.py
```python
import komagen_sd_client
from flask import Flask, render_template, abort
from flaski.models import WikiContent
import komates
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route("/")
def index():
contents = WikiContent.query.all()
return render_template("index.html", contents = contents)
@app.route("/", methods=["POST"])
def callkomagen():
koma = komates.komagen()
return None
if __name__ == "__main__":
app.run()
```
#### File: webui/flaski/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import os
databese_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'wiki.db')
engine = create_engine('sqlite:///' + databese_file, convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import flaski.models
Base.metadata.create_all(bind=engine)
``` |
{
"source": "jphacks/NG_1707",
"score": 3
} |
#### File: src/scripts/drunkjudge.py
```python
import numpy as np
import cv2
import sys
def drunkjudge(file_name):
# 画像の読み込み
img_color = cv2.imread(file_name)
# RGB値をそれぞれ抽出
imgR = img_color[:,:,2]
imgG = img_color[:,:,1]
imgB = img_color[:,:,0]
average_color_per_row = np.average(img_color,axis=0)
average_color = np.average(average_color_per_row, axis=0)
average_color = np.uint8(average_color)
average_sum = int(average_color[1])+int(average_color[0])+int(average_color[2])
average_judge = (int(average_color[1])+int(average_color[0])) / float(average_color[2])
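    # Heuristic: average_judge is the ratio (G + B) / R of the mean image color (OpenCV stores
    # BGR, so index 2 is red); a redder face lowers this ratio, and the thresholds below map it
    # to the returned level (-1, 0 or 1)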
#average_color_img = np.array([[average_color]*500]*500, np.uint8)
#cv2.imshow('average',average_color_img)
    # Close the window when any key is pressed
#if cv2.waitKey(0):
# cv2.destroyAllWindows()
if average_judge <= 1.2 :
return -1
elif average_judge <= 1.25 :
return 0
else :
return 1
```
#### File: src/scripts/trimming.py
```python
import cv2
# @brief Crop an image to a square
#
# @param[in] image numpy.ndarray holding the image data
# @param[in] top y coordinate of the top-left corner of the rectangle to crop
# @param[in] left x coordinate of the top-left corner of the rectangle to crop
# @param[in] width width of the rectangle to crop
# @param[in] height height of the rectangle to crop
# @param[in] file_name name of the image to crop
# @param[in] serial_number serial number of the image to crop
#
# @return image_name file name of the cropped image
def trimming(image, top, left, width, height, file_name, serial_number):
image_name = file_name+str(serial_number)+".png"
    # Convert the rectangle to a centered square
    if(height > width):
        top += (height - width) // 2
        height = width
    else:
        left += (width - height) // 2
        width = height
    # Crop the image and resize it to 256*256
    trim = image[top:top+height, left:left+width]
    size = (256, 256)
    trim = cv2.resize(trim, size)
cv2.imwrite(image_name,trim)
return image_name
``` |
{
"source": "jphacks/NG_1807",
"score": 3
} |
#### File: jphacks/NG_1807/record.py
```python
import pyaudio
import wave
# import numpy as np
def record_start():
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 4
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = CHUNK)
print("recording")
frames = []
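    # RATE / CHUNK buffers are read per second, so the loop below collects
    # RECORD_SECONDS seconds of audio in CHUNK-sized frames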
for i in range(0,int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
# buf = np.frombuffer(data,dtype="int16")
# frames.append(b''.join(buf[::3]))
print("done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME,'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
if __name__ == "__main__":
record_start()
``` |
{
"source": "jphacks/NG_1901",
"score": 3
} |
#### File: NG_1901/noti_purchase/list_other_picture.py
```python
import requests
import re
from bs4 import BeautifulSoup
# Passing the name of a consumable item to serch_picture prints the URL of one image showing that item
def serch_picture(serch_word):
url = "https://search.nifty.com/imagesearch/search?select=1&q=%s&ss=up"
keyword = serch_word
r = requests.get(url%(keyword))
soup = BeautifulSoup(r.text,'lxml')
imgs = soup.find_all('img',src=re.compile('^https://msp.c.yimg.jp/yjimage'))
    # The search conditions narrow things down to roughly 20 images close to the search word
    # count is used to forcibly output only the first element
    count = 0
for img in imgs:
if count == 1:
break
print(img['src'])
count = count+1
```
#### File: NG_1901/noti_purchase/test.py
```python
from bs4 import BeautifulSoup
import requests
# Function that takes a word to search for on Amazon and a number of pages as arguments;
# it collects the product names, prices and URLs of the search results into a CSV
def serch_amazon(serch_word, get_pages):
    # Replace any spaces in the argument with "+"
    words = serch_word.split(" ")
    serch_words = words[0]
    for i in range(1, len(words)):
        serch_words = serch_words + "+" + words[i]
    # Build the URL of the site to scrape
    url = "https://www.amazon.co.jp/s/ref=nb_sb_noss_2?__mk_ja_JP=カタカナ&url=search-alias%3Daps&field-keywords=" + serch_words + "&rh=i%3Aaps%2Ck%3A" + serch_words
    # Column names for the collected data
    columns = ["Name", "Price", "Url"]
    # This base URL is needed when moving to the next page
    amazon = "https://www.amazon.co.jp"
    # Page number
    page = 1
    # When there are not enough pages this is not an error; it just reports that there are no more pages
try:
        # Repeat for the requested number of pages (get_pages)
        while page < get_pages + 1:
            # Show which page is currently being fetched
            print(page, "Fetching page.....")
            # Fetch the HTML from the URL built above
            response = requests.get(url).text
            # Initialize BeautifulSoup
            soup = BeautifulSoup(response, 'html.parser')
            # Select which part of the page to extract
            items = soup.find_all('li', {"class":"s-result-item"})
            # Several products are fetched at once, so get the name, price and URL of each
            for item in items:
                # HTML fragment around the product name
                name = item.h2
                # HTML fragment around the product price
                price = item.find("span", {"class":"a-size-base"})
                # Some fragments contain neither a name nor a price and caused errors, so skip the ones that are None
                if name != None and price != None:
                    # Product name
                    nameTitle = name.string
                    # Product price
                    priceText = price.string
                    # Product URL
                    item_url = item.a.get("href")
            # Get the URL of the "next page" link at the bottom of the page
            NextUrl = soup.find('span', {"class":"pagnRA"})
            #Url = NextUrl.a.get("href")
            # The raw link cannot be followed directly, so it would need to be turned into a full URL
            #url = amazon + Url
            # Move on to the next page, so increase page by 1
            page += 1
    except:
        # If the loop ends before reaching the requested number of pages, report that there were no more pages
        print("There were no pages from page " + str(page + 1) + " onward")
    finally:
        # Report that the run finished
print(url)
print(item_url)
print("finish")
serch_amazon("ティッシュ", 1)
``` |
{
"source": "jphacks/NG_1902",
"score": 3
} |
#### File: NG_1902/rasp/getschedule.py
```python
import urllib.request #do with python3
import json
def getsched(token):
url= 'https://us-central1-come-in-pillow.cloudfunctions.net/getSchedule?token={}'.format(token)
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as res:
body=res.read()
return(body.decode())
if __name__ == '__main__':
f = open("reference.json",'r')
ref = json.load(f)
f.close()
print(getsched(ref["token"]))
``` |
{
"source": "jphacks/NG_1903",
"score": 2
} |
#### File: jphacks/NG_1903/app.py
```python
from flask import Flask, jsonify, request, redirect
import json
import time
import datetime, pytz
import os
from google.auth.transport import requests
import hashlib
from google.oauth2 import id_token
from flask_cors import CORS
import firebase_admin
from firebase_admin import credentials, db
from flask_login import (
LoginManager,
current_user,
login_required,
login_user,
logout_user,
)
from oauthlib.oauth2 import WebApplicationClient
import uuid
import urllib
from batch import update_rate, update_rank, update_team
cred = credentials.Certificate("tapidora-63973-firebase-adminsdk-lzcfe-fb0238ec1f.json")
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://tapidora-63973.firebaseio.com/'
})
app = Flask(__name__)
CORS(app)
# Token lifetime (seconds)
TOKEN_VALID_TIME = 1.0 * 60.0 * 60.0 * 24 * 7 * 100
# GOOGLE_CLIENT_ID
GOOGLE_CLIENT_ID = '142703424738-kkqmrm6eejec9hnkdglr7npotj1ijqr4.apps.googleusercontent.com'
def token_verified(token, userid):
ref = db.reference('/Users/' + userid)
user_info = ref.get()
db_token = user_info['apiToken']
if token[0:7] == "Bearer ":
token = token[7:]
if db_token != token:
return False
valid_time = user_info['valid_time']
if valid_time < time.time():
return False
return True
@app.route('/login', methods=["POST"])
def verify_token():
if request.method == "POST":
user_token = json.loads(request.get_data())['token']
try:
# Specify the CLIENT_ID of the app that accesses the backend:
idinfo = id_token.verify_oauth2_token(user_token, requests.Request(), GOOGLE_CLIENT_ID)
# Or, if multiple clients access the backend server:
# idinfo = id_token.verify_oauth2_token(token, requests.Request())
# if idinfo['aud'] not in [CLIENT_ID_1, CLIENT_ID_2, CLIENT_ID_3]:
# raise ValueError('Could not verify audience.')
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise ValueError('Wrong issuer.')
# If auth request is from a G Suite domain:
# if idinfo['hd'] != GSUITE_DOMAIN_NAME:
# raise ValueError('Wrong hosted domain.')
# ID token is valid. Get the user's Google Account ID from the decoded token.
# sub is user primary id
googleId = idinfo['sub']
email_verified = idinfo['email_verified']
            # Check that the email address has been verified
            if not email_verified:
                raise ValueError("email_verified false")
            # Verify that the token was issued for this client ID
if idinfo['aud'] != GOOGLE_CLIENT_ID:
raise ValueError("Bad")
email = idinfo['email']
userName = idinfo['name']
now_time = time.time()
global TOKEN_VALID_TIME
valid_time = now_time + TOKEN_VALID_TIME
token_org_str = userName + googleId + str(valid_time)
return_token = hashlib.sha256(token_org_str.encode()).hexdigest()
ref = db.reference('/Users')
userId = ""
snapshot = ref.order_by_child("googleId").equal_to(googleId).get()
if not snapshot:
user_ref = ref.push()
userId = user_ref.key
else:
if len(snapshot) == 1:
                    # googleId should never be duplicated under normal use
for key in snapshot:
userId = key
user_ref = ref.child(userId)
batch_ref = db.reference('/batch')
batch_data = batch_ref.get()
weekId = batch_data['current_week_id']
weekId = str(weekId)
user_ref.update({
'apiToken': return_token,
'valid_time': valid_time,
'userName': userName,
'email': email,
'googleToken' : user_token,
"googleId" : googleId
})
if 'totalDistance' not in user_ref.get():
user_ref.update({
'totalDistance' : 0
})
if 'before_pull_time' not in user_ref.get():
default_push_time = nowtime_to_rcf3339()
user_ref.update({
'before_pull_time': default_push_time
})
if 'weeklyDistance' not in user_ref.get():
weeklyDistance_ref = user_ref.child('weeklyDistance')
weeklyDistance_ref.update({
weekId: 0
})
else:
weeklyDistance_ref = user_ref.child('weeklyDistance')
if weekId not in weeklyDistance_ref.get():
weeklyDistance_ref.update({
weekId : 0
})
now_week_tames = db.reference('/Teams/' + weekId)
new_Team_ref = now_week_tames.push()
new_TeamId = new_Team_ref.key
            # Create a one-person team
new_Team_ref.update({
"teamGoal" : 10,
"users": {
"1" : {
"userId" : userId
}
}
})
user_ref.update({
'teamId' : {
weekId : new_TeamId
},
'rate' : {
weekId : 0
}
})
ret_data = {
'userId' : userId,
'apiToken' : return_token,
'teamID' : new_TeamId,
'userName' : userName,
'verified' : True
}
print(ret_data)
return jsonify(ret_data)
except Exception as e:
print(e)
# Invalid token
ret_data = {
'userId' : "Notset",
'apiToken' : 'Not<PASSWORD>',
"teamID": "Notset",
"userName": "Notset",
'verified' : False
}
return ret_data
@app.route('/user',methods=['OPTION'])
def preflight():
return "OK"
@app.route('/user/<userId>', methods=['GET'])
def get_user_info(userId):
ret_data = {
'rate': -99,
'weeklyDistance': -99,
'totalDistance': -99,
'token_verified_flag': False
}
if request.method == 'GET':
# posted_json = json.loads(request.get_data())
# token = posted_json['apiToken']
# userId = posted_json['userId']
token = request.headers.get("Authorization")
if not token_verified(token=token, userid=userId):
return ret_data
user_ref = db.reference('/Users/' + userId)
user_info = user_ref.get()
batch_ref = db.reference('/batch')
batch_data = batch_ref.get()
totalDistance = user_info['totalDistance']
rate_ref =user_ref.child('rate')
weekId = batch_data['current_week_id']
weekId = str(weekId)
rate_data = rate_ref.get()
rate = rate_data[weekId]
weeklyDistance = user_info['weeklyDistance'][weekId]
ret_data = {
'rate' : rate,
'weeklyDistance': weeklyDistance,
'totalDistance': totalDistance,
'token_verified_flag': True
}
print(ret_data)
return jsonify(ret_data)
@app.route('/team',methods=['OPTION'])
def team_preflight():
return "OK"
@app.route('/team/<teamId>', methods=["GET"])
def get_team_info(teamId):
ret_data = {
'teamGoal': -99,
'teamMember' : [
{
'userName': 'dummy',
'userData':{
'rate': -50,
'weeklyDistance': -50,
'totalDistance': -50
}
}
],
'token_verified_flag': False
}
if request.method == "GET":
# posted_json = json.loads(request.get_data())
# token = posted_json['apiToken']
# userid = posted_json['userId']
token = request.headers.get("Authorization")
userId = request.headers.get("UserID")
if not token_verified(token=token, userid=userId):
return ret_data
user_ref = db.reference('/Users/' + userId)
user_info = user_ref.get()
batch_ref = db.reference('/batch')
batch_data = batch_ref.get()
user_teamId_ref = user_ref.child('teamId')
weekId = batch_data['current_week_id']
weekId = str(weekId)
teamId = user_teamId_ref.get()[weekId]
teams_teamId_ref = db.reference('Teams/' + weekId + '/' + teamId)
team_data = teams_teamId_ref.get()
teamGoal = team_data['teamGoal']
users = team_data['users']
team_menber = []
for index in users:
if index == None:
continue
loop_user_id = index["userId"]
loop_user_ref = db.reference('Users/' + loop_user_id)
loop_user_data = loop_user_ref.get()
loop_user_rate = loop_user_data['rate'][weekId]
loop_user_weeklyDistance = loop_user_data['weeklyDistance'][weekId]
loop_user_totalDistance = loop_user_data['totalDistance']
loop_userName = loop_user_data['userName']
team_menber_add_data = {
"userName": loop_userName,
"userData":{
"rate": loop_user_rate,
"weeklyDistance": loop_user_weeklyDistance,
"totalDistance": loop_user_totalDistance
}
}
team_menber.append(team_menber_add_data)
ret_data = {
"teamGoal": teamGoal,
"teamMember" : team_menber
}
return jsonify(ret_data)
# Function that should be called whenever data is updated from Google Fit or similar
# It updates the time at which data was last sent
def push_data_time_update(userId):
user_ref = db.reference('/Users/' + userId)
user_ref.update({
"before_pull_time" : nowtime_to_rcf3339()
})
# For now a single fixed timezone is used everywhere
def nowtime_to_rcf3339():
dt = datetime.datetime.now(tz=pytz.timezone('US/Pacific'))
ret_str = dt.strftime('%Y/%m/%dT%H:%M:%S%z')
return ret_str
# For debugging
@app.route("/")
def dummy_data_create():
ref = db.reference()
names = ["Tom", "Ant", "Ken", "Bob", "Rinrin", "Sayo", "Rute", "Rob"]
import random
for index, userName in enumerate(names):
apiToken = "apiToken"
googleToken = "googleToken"
totalDistance = 0
        batch_ref = db.reference('/batch')
        batch_data = batch_ref.get()
        weekId = batch_data['current_week_id']
        weekId = str(weekId)
rate = {
weekId : random.randint(0, 500)
}
weeklyDistance = {
weekId : random.randint(0, 20)
}
user_teamId = "402"
if index >= len(names)/2:
user_teamId = "5000"
teamId = {
weekId : user_teamId
}
userId = hashlib.sha256(userName.encode()).hexdigest()
push_data = {
"userName": userName,
"googleToken": googleToken,
"apiToken" : apiToken,
"totalDistance" : totalDistance,
"googleId" : "googleId" + str(index),
# "rate" : rate,
# "weeklyDistance" : weeklyDistance,
"teamId" : teamId,
"before_pull_time" : nowtime_to_rcf3339()
}
user_ref = db.reference("/Users/" + userId)
user_ref.update(push_data)
team_ref = db.reference("/Teams/" + weekId + '/' + user_teamId + "/users/")
# team_ref.update({
# "teamGoal": random.randint(100, 200)
# })
team_ref = team_ref.child(str((index)%4 + 1 ))
team_ref.update({
"teamGoal": 100,
"userId" : userId
})
return "OK"
@app.route("/test")
def test():
ref = db.reference('/Users')
googleId = "45"
snapshot = ref.order_by_child("googleId").equal_to(googleId).get()
if not snapshot:
user_ref = ref.push()
userId = user_ref.key
else:
if len(snapshot) == 1:
# 普通にやっていればgoogleIdは重複しない
for key in snapshot:
userId = key
user_ref = ref.child(userId)
return snapshot
@app.route('/pull/google-fit')
def pull_google_fit_data():
import requests
    # todo: handle expired access tokens
    # Token verification
# token = request.headers.get("Authorization")
user_id = request.headers.get("UserID")
# if not token_verified(token=token, userid=user_id):
# return "ng"
# todo: remove
# user_id = 'b3cd90b170e623ff13c227330f5a3e0de0d3148801119ebcc4472b9e099dca36'
# google_access_token = ''
# user_ref = db.reference('/Users/'+user_id)
# user_info = user_ref.get()
user_ref = db.reference('/Users/' + user_id)
user_info = user_ref.get()
google_access_token = user_info['googleToken']
if 'before_pull_time' in user_info:
before_pull_time = user_info['before_pull_time']
# before_pull_time = '2018-12-31T23:59:59-08:00'
else:
before_pull_time = '2018-12-31T23:59:59-08:00'
latest_pull_time = nowtime_to_rcf3339()
push_data_time_update(user_id)
res = requests.get(
'https://www.googleapis.com/fitness/v1/users/me/sessions',
params={
'startTime': before_pull_time,
},
headers={
'Authorization': 'Bearer ' + google_access_token
}
)
all_sessions = json.loads(res.text)['session']
running_sessions = []
for sessions in all_sessions:
if sessions['activityType'] == 8 or sessions['activityType'] == 57 or sessions['activityType'] == 58:
running_sessions.append(sessions)
pulled_running_session = running_sessions[0] # todo
session_start_time_millis = pulled_running_session['startTimeMillis']
session_end_time_millis = pulled_running_session['endTimeMillis']
url = 'https://www.googleapis.com/fitness/v1/users/me/dataset:aggregate'
headers = {
'Authorization': 'Bearer ' + google_access_token
}
post_body = {
"aggregateBy": [{
"dataTypeName": "com.google.distance.delta",
"dataSourceId": "derived:com.google.distance.delta:com.google.android.gms:merge_distance_delta"
}],
"startTimeMillis": session_start_time_millis,
"endTimeMillis": session_end_time_millis
}
session_dataset = json.loads(requests.post(url, headers=headers, json=post_body).text)['bucket'][0]['dataset'][0]
ran_distance_km = float(session_dataset['point'][0]['value'][0]['fpVal'])/1000
batch_ref = db.reference('/batch')
batch = batch_ref.get()
current_week_id = batch['current_week_id']
current_user_weekly_distance = float(0)
if 'weeklyDistance' in user_info:
if current_week_id in user_info['weeklyDistance']:
current_user_weekly_distance = float(user_info['weeklyDistance'][current_week_id])
user_weekly_distance = current_user_weekly_distance + ran_distance_km
ref = user_ref.child('weeklyDistance')
ref.update({
current_week_id: user_weekly_distance
})
current_user_total_distance = 0
if 'totalDistance' in user_info:
current_user_total_distance = float(user_info['totalDistance'])
user_total_distance = current_user_total_distance + ran_distance_km
user_ref.update({
'totalDistance': user_total_distance
})
return {"status": "ok"}
@app.route("/cron")
def cron_run():
batch_ref = db.reference('/batch')
batch = batch_ref.get()
    # Acquire the lock
    updates = {}
    updates['/is_running'] = True
    batch_ref.update(updates)
    current_week_id = batch['current_week_id']
    next_week_id = str(uuid.uuid4())
    # Update rates
    update_rate.update_rate(current_week_id=current_week_id, next_week_id=next_week_id)
    # Generate the ranking
    update_rank.update_rank(current_week_id=current_week_id, next_week_id=next_week_id)
    # Team matching
    update_team.update_team(current_week_id=current_week_id, next_week_id=next_week_id)
    # Update the week id
    updates = {}
    updates['/current_week_id'] = next_week_id
    batch_ref.update(updates)
    # Release the lock
updates = {}
updates['/is_running'] = False
batch_ref.update(updates)
return jsonify({'result': 'ok'})
if __name__ == '__main__':
app.run()
```
#### File: NG_1903/batch/update_rank.py
```python
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
def update_rank(current_week_id, next_week_id):
ranks_ref = db.reference('/ranks')
ranking = {}
users_ref = db.reference('/Users')
users = users_ref.get()
for users_key in users:
user = users[users_key]
ranking[users_key] = {
"rate": user['rate'][next_week_id],
"name": user['userName']
}
ranking = sorted(ranking.items(), key=lambda x: x[1]['rate'], reverse=True)
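    # Sort users by their updated rate in descending order; sorted() turns the dict into a
    # list of (user_id, {"rate", "name"}) tuples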
print(ranking)
rank_num = 1
rank = {}
for rank_data in ranking:
rank[str(rank_num)] = {
"user_id": rank_data[0],
"rate": rank_data[1]['rate'],
"name": rank_data[1]["name"]
}
rank_num += 1
ranks_ref.child(next_week_id).set(
rank
)
```
#### File: NG_1903/batch/update_team.py
```python
import random
import uuid
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
NUM_TEAM_MEMBER = 3
BLOCK_SIZE = NUM_TEAM_MEMBER * 2
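# Matching scheme: users are taken in rating order in blocks of BLOCK_SIZE, and each block is
# randomly split into teams of NUM_TEAM_MEMBER, so teammates end up with similar ratings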
def create_team(new_team_id, current_week_id, next_week_id, arr_members_id):
users_ref = db.reference('/Users')
users = users_ref.get()
teams_ref = db.reference('/Teams')
team_goal = 0
members = {}
member_index = 1
for member_id in arr_members_id:
team_goal += users[str(member_id)]['weeklyDistance'][str(current_week_id)]
members[member_id] = {
"userId": member_id
}
member_index += 1
teams_ref.child(next_week_id).child(new_team_id).set({
"teamGoal": team_goal,
"users": members
})
def update_team(current_week_id, next_week_id):
ranks_ref = db.reference('/ranks')
ranks = ranks_ref.get()
ranks = ranks[next_week_id]
    ranks = [x for x in ranks if x is not None] # work around mysterious None entries
ind = 0
_ranks = {}
for rank in ranks:
_ranks[ind] = ranks[ind]
ind += 1
num_full_member_block = len(_ranks) / BLOCK_SIZE
for i in range(int(num_full_member_block)):
tmp = {}
        # Take out one block's worth of users
for j in range(BLOCK_SIZE):
index = i * int(num_full_member_block) + j
tmp[index] = _ranks[index]
        # Form teams from this block
for j in range(int(BLOCK_SIZE / NUM_TEAM_MEMBER)):
team_members_keys = random.sample(list(tmp.keys()), NUM_TEAM_MEMBER)
team_members_ids = []
for key in team_members_keys:
team_members_ids.append(_ranks[key]['user_id'])
tmpi = sorted(team_members_keys, reverse=True)
for key in tmpi:
del tmp[key]
del _ranks[key]
create_team(new_team_id=str(uuid.uuid4()), current_week_id=current_week_id, next_week_id=next_week_id, arr_members_id=team_members_ids)
    # When len(ranks) <= NUM_TEAM_MEMBER, form a team regardless of blocks
    if len(ranks) <= NUM_TEAM_MEMBER:
        team_members_keys = random.sample(list(_ranks.keys()), NUM_TEAM_MEMBER)
team_members_ids = []
for key in team_members_keys:
team_members_ids.append(_ranks[key]['user_id'])
tmpi = sorted(team_members_keys, reverse=True)
for key in tmpi:
del ranks[key]
create_team(new_team_id=str(uuid.uuid4()), current_week_id=current_week_id, next_week_id=next_week_id, arr_members_id=team_members_ids)
    # The remaining users form one last team
team_members_keys = list(_ranks.keys())
team_members_ids = []
tmpi = sorted(team_members_keys, reverse=True)
for key in tmpi:
team_members_ids.append(_ranks[key]['user_id'])
del _ranks[key]
create_team(new_team_id=str(uuid.uuid4()), current_week_id=current_week_id, next_week_id=next_week_id, arr_members_id=team_members_ids)
``` |
{
"source": "jphacks/NG_1907",
"score": 2
} |
#### File: NG_1907/client/client_test.py
```python
import requests
import json
URL = ''
def send_bpm(userID, musicID, BPM):
    # The POST parameters are given as a dict (JSON-encoded) in the second argument
response = requests.post(
URL,
json.dumps({'userID': userID, 'musicID': musicID, 'BPM': BPM}),
headers={'Content-Type': 'application/json'})
if __name__=='__main__':
send_bpm('5555', '144', '20')
```
#### File: jphacks/NG_1907/MusicSelect.py
```python
import random
import math
# For testing
# Current time (elapsed time since the start time)
now = 54
from datetime import datetime
# For testing
textTime = datetime.now()
textStartTime = 1571516344.799226
# Convert a timestamp to unix time
def timeChanger(textTime):
return textTime.timestamp()
# Compute the elapsed time
def ElapsedTime(now):
return math.floor(timeChanger(textTime) - textStartTime)
# Computation when registering the excitement level for each time step (to be revised)
# Excitement level = the average BPM at the current time
def GetExcitementForTime(now): # argument is the current time (elapsed time since the start)
    bpmSum = 0
    # Build an array of [bpm, bpm, bpm, ....] per userID (to be fixed)
    listDataOfMusicData = []
    # Average BPM at this time step
for data in listDataOfMusicData:
bpmSum += data[1]
bpmAverage = bpmSum / len(listDataOfMusicData)
return [now, bpmAverage]
# Covariance
def GetCovariance(listData):
averageX = 0
averageY = 0
for data in listData:
averageX += data[0]
averageY += data[1]
averageX = averageX / len(listData)
averageY = averageY / len(listData)
covariance = 0
for data in listData:
covariance += (data[0] - averageX) * (data[1] - averageY)
return covariance / len(listData)
# Variance
def GetDispersion(listData):
averageX = 0
for data in listData:
averageX += data[0]
averageX = averageX / len(listData)
dispersion = 0
for data in listData:
dispersion += (data[0] - averageX) * (data[0] - averageX)
return dispersion / len(listData)
# Compute the slope of the least-squares regression line (to be revised)
def LeastSquareMethod(num):
    # Excitement data fetched from the excitement database (newest first)
excitementData = [[5, 100], [6, 80], [7, 90]]
return GetCovariance(excitementData[:num]) / GetDispersion(excitementData[:num])
# print(LeastSquareMethod(5))
# Random number
def RandomNumber(num):
print(num)
return random.randrange(num)
# Extract songs of the same type (logical AND: BPM type and genre both match)
def musicSiftAnd(data, bpm, genre):
musicData = data
resultData = []
for d in musicData:
if d[1] == bpm and d[2] == genre:
resultData.append(d)
return resultData
# Extract songs of the same type (logical OR: BPM type or genre matches)
def musicSiftOr(data, bpm, genre):
musicData = data
resultData = []
for d in musicData:
if d[1] == bpm or d[2] == genre:
resultData.append(d)
return resultData
# For testing
nowMusicID = 100
# Song-selection function (to be revised)
def SelectMusic(nowMusicID): # argument is the ID of the currently playing song
    # Get the BPM type and genre of the current song from nowMusicID
musicData = [[222, 1, "free"], [100, 1, "free"], [333, 2, "hhh"], [4444, 3, "ggg"], [555, -1, "free"], [999, 3, "free"], [888, 2, ""]]
nowMusicBpmType = 0
nowMusicGenre = ""
count = 0
for data in musicData:
if data[0] == nowMusicID:
nowMusicBpmType = data[1]
nowMusicGenre = data[2]
musicData.pop(count)
count += 1
    # Compute the trend of the 5 most recent data points
inclination = LeastSquareMethod(5)
    # Decide the bpm of the next song
if nowMusicBpmType == 3:
if inclination < 0:
nowMusicBpmType -= 2
elif nowMusicBpmType == -1:
if inclination > 0:
nowMusicBpmType += 1
else:
if inclination > 0.5:
nowMusicBpmType += 1
elif inclination < 0.5:
nowMusicBpmType -= 1
    # List of musicData entries from the music database whose musicBpmType and genre match
musicIdListOfMusicBpmType = musicSiftAnd(musicData ,nowMusicBpmType, nowMusicGenre)
if len(musicIdListOfMusicBpmType) == 0:
musicIdListOfMusicBpmType = musicSiftOr(musicData ,nowMusicBpmType, nowMusicGenre)
return musicIdListOfMusicBpmType[RandomNumber(len(musicIdListOfMusicBpmType))][0]
print(SelectMusic(nowMusicID))
# -------------------------------------------------------------------------------
# The functions below are no longer needed
# # Extract the reference value (upper)
# def GetStandardMax(listData, i):
# listSort = sorted(listData, reverse = True)
# maxList = listSort[:i]
# return sum(maxList) / len(maxList)
# # Output the average value
# def GetAverage(listData):
# return sum(listData) / len(listData)
# # Compute an individual's excitement level within a song
# def GetExcitementPoint(listData):
# i = 0
# standard = GetStandardMin(listData, 5)
# excitementPointList = []
# while i < len(listData):
# if listData[i] - standard > 10:
# excitementPointList.append(i)
# i += 1
# return excitementPointList
# # Compute an individual's excitement level per song
# def GetExcitementFull(listData):
# standardMin = GetStandardMin(listData, 10)
# standardMax = GetStandardMax(listData, 10)
# average = GetAverage(listData)
# return standardMax - standardMin + average
# # Compute the overall excitement level per song
# def GetExcitementFullAll(musicID):
# excitementSum = 0
# for data in listDataOfMusicData:
# excitementSum += GetExcitementFull(data[1])
# return [musicID, excitementSum / len(listDataOfMusicData)]
# # Song ranking
# musicRanking = []
# # Function to rank songs
# def MusicRanking(musicID):
# musicData = GetExcitementFullAll(musicID)
# i = 0
# while i < len(musicRanking):
# if musicRanking[i][1] < musicRanking[1]:
# musicRanking.insert(i, musicData)
# break
# i += 1
# if i == len(musicRanking): musicRanking.append(musicData)
# if len(musicRanking) > 10: musicRanking.pop(10)
# Function to add to the matching list
# def AscendingQuickSort(matchingList):
# if len(matchingList) < 2:
# return matchingList
# head = matchingList[0][1]
# left = []
# middle = []
# right = []
# for data in matchingList:
# if data[1] < head: left.append(data)
# elif data[1] == head: middle.append(data)
# else: right.append(data)
# return AscendingQuickSort(left) + middle + AscendingQuickSort(right)
# Displacement extraction function
# def DisplacementChanger(listData):
# i = 0
# displacementList = []
# while i < len(listData) - 2:
# displacementList.append(listData[i + 1] - listData[i])
# i += 1
# return displacementList
# displacementList1 = DisplacementChanger(test1)
# displacementList2 = DisplacementChanger(test2)
# print(displacementList1)
# print("\n")
# print(displacementList2)
# print("\n")
# def SelectMusic2(nowMusicID):
# music = []
# return music[RandomNumber(len(music))]
```
#### File: NG_1907/RPi/emove_json.py
```python
import json
import requests
JSON_FILE_PATH = '/home/pi/EMOVE/test_bpm.json'
SEND_URL = 'https://jphacks_noblesseoblige07.serveo.net/bpm'
def send_bpm(userID, twitterID, BPM, name, age, sex):
    # POST parameters are passed as a dict in the second argument
    payload = json.dumps({
        'userID': userID,
        'twitterID': twitterID,
        'BPM': BPM,
        'name': name,
        'age': age,
        'sex': sex})
    print(payload)
    response = requests.post(
        SEND_URL,
        payload,
        headers={'Content-Type': 'application/json'})
def save_music_list_json():
dic = {}
musicID = '4567'
name = 'no1'
genre = 'Pop'
part = {
"Name": name,
"Genre": genre
}
dic[musicID] = part
    with open(JSON_FILE_PATH, 'w') as f:
        json.dump(dic, f)
def send_json(path, url):
file = {'upload_file': open(path, 'rb')}
headers = {'content-type': 'application/json'}
res = requests.post(url, files=file, headers=headers)
def main():
userID = '3125'
BPM = 70
twitterID = '@shikishijun'
name = 'shikishima'
age = 22
sex = 'male'
send_bpm(userID, twitterID, BPM, name, age, sex)
if __name__ == "__main__":
main()
``` |
{
"source": "jphacks/NG_1911",
"score": 3
} |
#### File: NG_1911/raspberrypi/client.py
```python
import urllib.request
import threading
import json
import wiringpi as pi , time
BASE_URL = "http://ec2-13-114-247-66.ap-northeast-1.compute.amazonaws.com"
pi.wiringPiSetupGpio()
class Motor:
def __init__(self, OUTPUT_PINS, TIME_SLEEP):
self.OUTPUT_PINS = OUTPUT_PINS
self.TIME_SLEEP = TIME_SLEEP
self.is_open = False
        # Set the GPIO pins to output mode
for OUTPUT_PIN in self.OUTPUT_PINS:
pi.pinMode(OUTPUT_PIN, pi.OUTPUT)
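    # Rotate the stepper motor forward by energizing the four coils in sequence (open)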
def open(self):
if self.is_open == True:
return
for k in range(128):
for p in range (4):
for i in range (4):
if p == i:
pi.digitalWrite (self.OUTPUT_PINS [i], pi.HIGH)
else :
pi.digitalWrite (self.OUTPUT_PINS [i], pi.LOW)
time.sleep (self.TIME_SLEEP)
self.is_open = True
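    # Rotate the stepper motor with the coil order reversed (close)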
def close(self):
if self.is_open == False:
return
for k in range(128):
for p in range (4):
for i in range (4):
if 3 - p == i:
pi.digitalWrite (self.OUTPUT_PINS [i], pi.HIGH)
else :
pi.digitalWrite (self.OUTPUT_PINS [i], pi.LOW)
time.sleep (self.TIME_SLEEP)
self.is_open = False
class Buzzer:
def __init__(self, OUTPUT_PIN):
self.OUTPUT_PIN = OUTPUT_PIN
pi.pinMode(OUTPUT_PIN, pi.OUTPUT)
self.is_buzzing = False
self.thread = threading.Thread(target=self.buzz)
self.thread.start()
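    # Background loop: while is_buzzing is set, pulse the buzzer on and off at one-second intervals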
def buzz(self):
while True:
if self.is_buzzing == True:
pi.digitalWrite (self.OUTPUT_PIN, pi.HIGH)
time.sleep(1)
pi.digitalWrite (self.OUTPUT_PIN, pi.LOW)
time.sleep(1)
def start(self):
if self.is_buzzing == True:
return
self.is_buzzing = True
def stop(self):
if self.is_buzzing == False:
return
pi.digitalWrite (self.OUTPUT_PIN, pi.LOW)
self.is_buzzing = False
motor = Motor(OUTPUT_PINS=[6 , 13 , 19 , 26], TIME_SLEEP=0.002)
buzzer = Buzzer(OUTPUT_PIN=4)
while True:
req = urllib.request.Request(BASE_URL+"/api/status")
try:
with urllib.request.urlopen(req) as res:
data = json.loads(res.read().decode('utf-8'))
print(data)
            # Open or close the lock
if data["status"] == 0:
motor.close()
elif data["status"] == 1:
motor.open()
            # Sound the buzzer
if data["alert"] == 0:
buzzer.stop()
elif data["alert"] == 1:
buzzer.start()
except urllib.error.HTTPError as err:
print(err.code)
except urllib.error.URLError as err:
print(err.reason)
time.sleep(1)
``` |
{
"source": "jphacks/NG_1912",
"score": 3
} |
#### File: app/foods/models.py
```python
from django.db.models import (
PositiveIntegerField,
CharField,
Model
)
class Food(Model):
name = CharField(max_length=100, unique=True)
price = PositiveIntegerField(blank=True, default=0)
def __str__(self):
return self.name
``` |
{
"source": "jphacks/OK_1905",
"score": 2
} |
#### File: OK_1905/flask/image_process.py
```python
import cv2
def canny(image):
return cv2.Canny(image, 100, 200)
``` |
{
"source": "jphacks/ON_1801",
"score": 2
} |
#### File: ON_1801/src_flask/application.py
```python
import sqlite3
from flask import Flask, request, jsonify, g
import logging
import cek
import os
import peewee as pe
import zaim
from datetime import datetime, date,timedelta
db = pe.SqliteDatabase('my_database.db')
class BaseModel(pe.Model):
class Meta:
database = db
# データテーブルのモデル
class User(BaseModel):
id = pe.IntegerField()
class ZaimAccesstoken(BaseModel):
user = pe.ForeignKeyField(User, related_name='zaimaccesstokens')
access = pe.CharField()
class Zaim(BaseModel):
id = pe.IntegerField()
user = pe.ForeignKeyField(User, related_name='zaims')
money = pe.IntegerField()
application = Flask(__name__)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
clova = cek.Clova(
application_id=os.environ['CLOVA_ID'],
default_language="ja",
debug_mode=True)
@application.route('/', methods=['GET', 'POST'])
def lambda_handler(event=None, context=None):
logger.info('Lambda function invoked index()')
return 'hello from Flask!'
# Set up the endpoint that accepts POST requests to /clova
@application.route('/clova', methods=['POST'])
def my_service():
body_dict = clova.route(body=request.data, header=request.headers)
response = jsonify(body_dict)
    response.headers['Content-Type'] = 'application/json;charset=UTF-8'
return response
# Handler for launch requests
@clova.handle.launch
def launch_request_handler(clova_request):
welcome_japanese = cek.Message(message="金なら残っていないぞ", language="ja")
response = clova.response([welcome_japanese])
return response
# Where WifeStatusIntent fires
@clova.handle.intent("StatusIntent")
def wife_status_handler(clova_request):
VALUE = (yesterday_sum() - today_sum())
print(VALUE)
money_msg = clova_request.slot_value('money_chan')
response = clova.response("もう一回言ってね")
print(money_msg)
if money_msg is not None:
if money_msg == "差額":
response = clova.response("先月との差額は"+str(VALUE)+"円だよ")
            if VALUE < 0:
                response = clova.response("先月との差額はマイナス"+str(abs(VALUE))+"円だよ!使いすぎです。")
elif money_msg == "残高":
response = clova.response("残っていないよ")
return response
# On session end
@clova.handle.end
def end_handler(clova_request):
# Session ended, this handler can be used to clean up
logger.info("Session ended.")
# When the utterance could not be recognized
@clova.handle.default
def default_handler(request):
return clova.response("Sorry I don't understand! Could you please repeat?")
# Query Zaim
def request_zaim_setup():
zapi = zaim.Api(consumer_key=os.environ['ZAIM_KEY'],
consumer_secret=os.environ['ZAIM_SECRET'],
access_token=os.environ['ACCESS_TOKEN_ZAIM'],
access_token_secret=os.environ['ACCESS_TOKEN_ZAIM_SECRET'])
return zapi
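# Fetch the payment records from Zaim for a single day, calc_days days before today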
def request_zaim_money_day(zapi,calc_days=0):
d_day = datetime.today()
if calc_days !=0 :
if calc_days < 0 :
calc_days *= -1
d_day = d_day - timedelta(days=calc_days)
print(d_day.strftime('%Y-%m-%d'))
day_moneys_json = zapi.money(mapping=1,
start_date=d_day.strftime('%Y-%m-%d'),
mode='payment',
end_date=d_day.strftime('%Y-%m-%d')
)
return day_moneys_json
def today_sum():
return calc_money_sum(request_zaim_money_day(request_zaim_setup()))
def calc_money_sum(moneys):
summoney = 0
for money in moneys['money']:
summoney += money['amount']
return summoney
def yesterday_sum():
return calc_money_sum(request_zaim_money_day(request_zaim_setup(),-1))
if __name__ == '__main__':
application.run()
``` |
{
"source": "jphacks/ON_1802",
"score": 3
} |
#### File: task_api/manage_data/read.py
```python
import pandas as pd
import os
def json_to_data(user_id):
path = './data/'
file_name = str(user_id) + '.json'
if os.path.isfile(path + file_name):
df = pd.read_json(path + file_name)
task_list = []
for _, task in df.iterrows():
user_id = task['user_id']
task_name = task['task_name']
task_info = task['task_info']
time_limit = task['time_limit']
task_dict = {'user_id':user_id, 'task_name':task_name, 'task_info':task_info, 'time_limit':time_limit}
task_list.append(task_dict)
return task_list
else:
return False
# Simple test
if __name__ == '__main__':
user_id = 1
task_data = json_to_data(user_id)
print(task_data)
``` |
{
"source": "jphacks/ON_1804",
"score": 2
} |
#### File: MuscleBattle_django/ranking/views.py
```python
from django.shortcuts import render
# Create your views here.
def ranking(request):
return render(request, 'ranking/index.html')
``` |
{
"source": "jphacks/SD_1702",
"score": 2
} |
#### File: SD_1702/learnSVM/generate.py
```python
import cv2
import sys
import glob
import os
import os.path
import random
import numpy as np
from generate_settings import (
GENERATE_PAR_IMAGE,
GENERATED_IMAGE_WIDTH,
GENERATED_IMAGE_HEIGHT,
RANDOMIZE_RADIUS,
OUTPUT_DIR,
)
dic = {
"1m":0,
"2m":1,
"3m":2,
"4m":3,
"5m":4,
"6m":5,
"7m":6,
"8m":7,
"9m":8,
"1p":9,
"2p":10,
"3p":11,
"4p":12,
"5p":13,
"6p":14,
"7p":15,
"8p":16,
"9p":17,
"1s":18,
"2s":19,
"3s":20,
"4s":21,
"5s":22,
"6s":23,
"7s":24,
"8s":25,
"9s":26,
"1z":27,
"2z":28,
"3z":29,
"4z":30,
"5z":31,
"6z":32,
"7z":33,
}
def main(dirpath):
absdirpath = os.path.abspath(dirpath)
files = os.listdir(absdirpath + '/')
jpgfiles = []
for fp in files:
if fp[-4:] == '.JPG':
jpgfiles.append(fp)
count = len(jpgfiles)
print(' * [{}] .jpg files detected. generate [{}] images.'.format(count, count * 2 * GENERATE_PAR_IMAGE))
outarray = np.empty((0,GENERATED_IMAGE_HEIGHT * GENERATED_IMAGE_WIDTH + 1), int)
# print(outarray.shape)
for jpgfile in jpgfiles:
jpgpath = dirpath + '/' + jpgfile
print(' * {}'.format(jpgpath))
tag = jpgfile[:2]
        if tag not in dic:
            print(' * unknown label: {}'.format(tag))
            continue
        label = dic[tag]
image = cv2.imread(jpgpath)
index = 0
inv = ''
for _ in range(2):
for index in range(GENERATE_PAR_IMAGE):
generated_image = generate(image)
outjpgname = '{}_{}{}.JPG'.format(jpgfile[:-4], index, inv)
outjpgpath = os.path.join(OUTPUT_DIR, outjpgname)
cv2.imwrite(outjpgpath, generated_image)
array = np.append([label], generated_image.flatten().copy() // 255)
# print(array.shape)
outarray = np.append(outarray, [array], axis=0)
# inverse image
image = cv2.flip(image, -1)
index = 0
inv = 'inv'
#print(outarray)
#print(outarray.shape)
#input()
np.save('mahjong_dataset', outarray)
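# Apply a random perspective warp to a tile image, crop it, convert to grayscale, and binarize it with Otsu's threshold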
def generate(image):
size = tuple(np.array([image.shape[1], image.shape[0]]))
width = size[0]
height = size[1]
rands = [random.randint(- RANDOMIZE_RADIUS, RANDOMIZE_RADIUS) for _ in range(8)]
src_prs = np.float32(
[
[0 + rands[0], 0 + rands[1]],
[width + rands[2], 0 + rands[3]],
[width + rands[4], height + rands[5]],
[0 + rands[6], height + rands[7]]
]
)
dst_prs = np.float32(
[
[0, 0],
[GENERATED_IMAGE_WIDTH, 0],
[GENERATED_IMAGE_WIDTH, GENERATED_IMAGE_HEIGHT],
[0, GENERATED_IMAGE_HEIGHT]
]
)
transform = cv2.getPerspectiveTransform(src_prs, dst_prs)
outsize = tuple([GENERATED_IMAGE_WIDTH, GENERATED_IMAGE_HEIGHT])
randomized = cv2.warpPerspective(image, transform, size, borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
randomized = randomized[0:GENERATED_IMAGE_HEIGHT, 0:GENERATED_IMAGE_WIDTH]
randomized = cv2.cvtColor(randomized, cv2.COLOR_RGBA2GRAY)
ret, thres = cv2.threshold(randomized,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
return thres
if __name__ == '__main__':
if len(sys.argv) < 2:
print(' * please specify target image')
else:
dirpath = sys.argv[1]
        if os.path.isfile(dirpath):
            print(' * {} is not a directory'.format(dirpath))
            sys.exit(1)
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
main(dirpath)
``` |
{
"source": "jphacks/SD_1802",
"score": 2
} |
#### File: SD_1802/LINE_Bot/Reply_Message.py
```python
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
# Main logic
app = Flask(__name__)
line_bot_api = LineBotApi('Access Token')
handler = WebhookHandler('Channel Secret')
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="event.message.text"))
if __name__ == "__main__":
app.run()
```
#### File: products/python/compare.py
```python
import time
start = time.time()
import argparse
import cv2
import itertools
import os
import re
import requests
import sqlite3
import glob
import numpy as np
np.set_printoptions(precision=2)
import openface
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true')
def getRep(imgPath, align, net, imgDim, verbose=False):
"""
入力された画像の認証スコアを算出する
imgPath: 画像の path
align: dlib の顔検出器
net: 認証スコアを計算する facenet
imgDim: 画像の次元 default=96 なんでこれに従う
verbose: ログを出力するかどうか
returns: 認証スコア
"""
if verbose:
print("Processing {}.".format(imgPath))
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
start = time.time()
bb = align.getLargestFaceBoundingBox(rgbImg)
if bb is None:
raise Exception("Unable to find a face: {}".format(imgPath))
if verbose:
print(" + Face detection took {} seconds.".format(time.time() - start))
##############################################################
#ul = (bb.left(), bb.top())
#br = (bb.right(), bb.bottom())
#cv2.rectangle(bgrImg, ul, br, (0, 0, 255), 2)
#png_path = os.path.basename(imgPath).replace('.jpg', '.png')
#png_path = os.path.join('/home/ubuntu/', png_path)
#print('save path:', png_path)
#cv2.imwrite(imgPath, bgrImg)
#############################################################
start = time.time()
alignedFace = align.align(imgDim, rgbImg, bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if verbose:
print(" + Face alignment took {} seconds.".format(time.time() - start))
start = time.time()
rep = net.forward(alignedFace)
if verbose:
print(" + OpenFace forward pass took {} seconds.".format(time.time() - start))
#print("Representation:")
#print(rep)
print("-----\n")
return rep
def getDistance(reg_img_path, input_img_path, align, net, imgDim, verbose=False):
"""
登録画像と入力画像との間の顔認証スコアを計算し、距離を出す
reg_img_path: 登録画像の path
input_img_path: 入力画像の path
align: dlib の顔検出器
net: 認証スコアを計算する facenet
verbose: ログを出力するかどうか
returns: 画像間のスコアから計算した距離(L2 norm)
"""
reg_rep = getRep(reg_img_path, align, net, imgDim, verbose=verbose)
input_rep = getRep(input_img_path, align, net, imgDim, verbose=verbose)
dis = np.linalg.norm(reg_rep - input_rep)
return dis
def get_all_images(root_dir):
"""
ディレクトリを再帰的に探って画像ファイルを抽出
"""
for root, dirs, files in os.walk(root_dir):
for f in files:
            if re.match(r'enrolled_image.jpg', f) or re.match(r'enrolled_image.png', f) or re.match(r'enrolled_image.jpeg', f):
yield os.path.join(root, f)
def Line(mes):
TOKEN = ""
URL = "https://notify-api.line.me/api/notify"
message = "\n" + mes
payload = {"message": message}
headers = {"Authorization": "Bearer " + TOKEN}
lineNotify = requests.post(URL, data=payload, headers=headers)
def get_user_info(user_info_id):
"""
rails で登録されている database から user_id に紐づく user_infos
user_info_id: user_infos の外部キーになっている user_id
return: user_info の辞書
"""
dbpath = '/home/ubuntu/Documents/SD_1802/products/db/development.sqlite3'
connection = sqlite3.connect(dbpath)
cursor = connection.cursor()
try:
cursor.execute('SELECT * FROM user_infos WHERE id = {}'.format(user_info_id))
res = cursor.fetchone()
except sqlite3.Error as e:
print('sqlite3.Error occurred:', e.args[0])
print(res)
dic = {'name': res[1], 'memo': res[3]}
return dic
def main():
# general setting
args = parser.parse_args()
dlib_path = '/home/ubuntu/Documents/SD_1802/products/python/shape_predictor_68_face_landmarks.dat'
facenet_model_path = '/home/ubuntu/Documents/SD_1802/products/python/nn4.small2.v1.t7'
imgDim = 96
verbose = args.verbose
start = time.time()
align = openface.AlignDlib(dlib_path)
net = openface.TorchNeuralNet(facenet_model_path, imgDim)
if verbose:
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
# get register images & input image
register_root_dir = '/home/ubuntu/Documents/SD_1802/products/public/uploads/'
input_img_path = '/var/www/html/upload/image.jpg'
mini = np.inf
candidate = ""
while(1):
if os.path.exists(input_img_path):
print('start to calc')
for reg_img_path in get_all_images(register_root_dir):
d = getDistance(reg_img_path, input_img_path, align, net, imgDim, verbose=verbose)
if mini > d:
mini = d
candidate = reg_img_path
user_info_id = int(os.path.dirname(candidate).split('/')[-1])
user_info = get_user_info(user_info_id)
msg = '名前: {}\n情報: {}'.format(user_info['name'].encode('utf-8'), user_info['memo'].encode('utf-8'))
Line(msg)
os.remove(input_img_path)
else:
print('Not found the image.jpg in upload directory')
time.sleep(5)
if __name__ == '__main__':
main()
``` |
{
"source": "jphacks/SD_1806",
"score": 2
} |
#### File: SD_1806/driver/driver.py
```python
import wiringpi as wp
import time
import atexit
import datetime
import subprocess
import requests
import threading
import spidev
import RPi.GPIO as GPIO
import sys
import os
pd_pins = [19, 26, 21, 6, 5]
human_pin = 20
loop_interval = 0.3
notify_interval = 30
api_url = 'https://sugoigomibako.herokuapp.com/'
notify_sound = './gomi_today.wav'
post_amount_interval = 1.1
threshold = 500
smell_val = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(17,GPIO.OUT)
GPIO.setup(22,GPIO.OUT)
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz=1000000
spi.bits_per_word=8
dummy = 0xff
start = 0x47
sgl = 0x20
ch0 = 0x00
msbf = 0x08
class SmellThread(threading.Thread):
def __init__(self):
super(SmellThread, self).__init__()
self.stop_event = threading.Event()
self.setDaemon(True)
def stop(self):
self.stop_event.set()
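    # Read a 10-bit value for the given channel from the ADC over SPI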
def smell_measure(self, ch):
ad = spi.xfer2( [ (start + sgl + ch + msbf), dummy ] )
val = ((ad[0] & 0x03) << 8) + ad[1]
return val
def run(self):
try:
while not self.stop_event.is_set():
time.sleep(0.237)
GPIO.output(22,True)
time.sleep(0.003)
ch0_val = SmellThread.smell_measure(self,ch0)
Val = 1023 - ch0_val
time.sleep(0.002)
GPIO.output(22,False)
GPIO.output(17,True)
time.sleep(0.008)
GPIO.output(17,False)
global smell_val
smell_val = Val
except KeyboardInterrupt:
pass
spi.close()
def cleanup():
print('cleanup completed')
def setup():
wp.wiringPiSetupSys()
for pd_pin in pd_pins:
wp.pinMode(pd_pin, wp.GPIO.INPUT)
atexit.register(cleanup)
print('setup completed')
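# Estimate how full the bin is by counting the sensor pins that currently read LOW (blocked)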
def get_amount():
pd_stats = [wp.digitalRead(pd_pin) for pd_pin in pd_pins]
num_filled = len([0 for pd_stat in pd_stats if not pd_stat])
return num_filled
def is_human_detected():
return wp.digitalRead(human_pin)
def notify():
subprocess.call('aplay {}'.format(notify_sound), shell=True)
def post_amount(amount):
print('\033[32;1m==== posting amount: ' + str(amount) + ' ====\033[0m')
try:
responce = requests.post(api_url + 'amount', {'amount': amount})
if responce.status_code != 200:
print('\033[31;1m==== failed to post amount: ' + responce.status_code + ' ====\033[0m')
except:
pass
def post_smell_val(smell_val):
print('\033[32;1m==== posting smell_val: ' + str(smell_val) + ' ====\033[0m')
try:
response = requests.post(api_url + 'smell', {'smell': smell_val})
if response.status_code != 200:
print('\033[31;1m==== failed to post smell_vall: ' + response.status_code + ' ====\033[0m')
except:
pass
def main():
setup()
    smell_thread = SmellThread()
green = '\033[32;1m'
red = '\033[31;1m'
clear = '\033[0m'
last_notified = time.time() - notify_interval
last_posted_amount = 0
last_amount_changed = time.time() - notify_interval
previous_amount = 0
previous_smell_val = 0
smell_thread.start()
while True:
now = time.time()
# current status
pd_stats = [wp.digitalRead(pd_pin) for pd_pin in pd_pins]
os.system('clear')
print(datetime.datetime.now())
for i, (pd_pin, pd_stat) in enumerate(zip(pd_pins, pd_stats), 1):
print(' [{}] {} {}{}{} (GPIO #{})'.format(
i,
pd_stat,
green if pd_stat else red,
'EMPTY' if pd_stat else 'FILLED',
clear,
pd_pin
))
amount = get_amount()
print(' amount: {}{} ({}%){}'.format(
red if amount > 0.5 else green,
amount,
amount / len(pd_pins) * 100,
clear
))
# notification triggered by human sensor
print(' human sensor: {}'.format(is_human_detected()))
if is_human_detected() and amount >= len(pd_pins) - 1:
if now - last_notified > notify_interval:
print('play notification...')
notify()
last_notified = now
# TODO: consider day of week
# smell sensor
print(' smell sensor: {}'.format(smell_val))
# amount smell control
if previous_amount != amount:
last_amount_changed = time.time()
keep_time = now - last_amount_changed
print('amount keep time: ' + str(keep_time))
if keep_time > post_amount_interval:
if last_posted_amount != amount:
post_amount_thread = threading.Thread(target=post_amount, args=([amount]))
post_amount_thread.start()
last_posted_amount = amount
previous_amount = amount
if abs(smell_val - previous_smell_val) > 100:
post_smell_thread = threading.Thread(target=post_smell_val, args=([smell_val]))
post_smell_thread.start()
previous_smell_val = smell_val
time.sleep(loop_interval)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
```
#### File: SD_1806/server/amount.py
```python
import random
def get_amount(full=False):
return 4 if full else random.randint(0, 4)
``` |